Diffstat
83 files changed, 12368 insertions, 0 deletions
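Every routine added below installs itself at boot by overwriting the first two instructions of the corresponding generic symbol with an unconditional branch and a delay-slot nop; that is what the repeated GEN_DO_PATCH/NG_DO_PATCH macros and the patch entry points (generic_patch_bzero, generic_patch_copyops, m7_patch_copyops, and so on) do. As a reader aid, here is a minimal C model of that displacement computation. The function name patch_branch and its comments are illustrative only; the patch performs the same steps in assembly and finishes with a flush of the rewritten instructions.

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000u	/* "ba,pt %xcc, ." with an empty 19-bit displacement field */
#define NOP		0x01000000u	/* SPARC nop */

/* C model of GEN_DO_PATCH/NG_DO_PATCH: redirect the first two
 * instructions of the old routine to the CPU-specific replacement.
 * The sll 11 / srl 13 pair in the macro is the same truncation as the
 * shift-and-mask below: a word displacement clipped to the branch's
 * 19-bit disp field, so old and new must sit within +/-1 MiB.
 */
static void patch_branch(uint32_t *old, const uint32_t *new_fn)
{
	int64_t byte_disp = (int64_t)((uintptr_t)new_fn - (uintptr_t)old);
	uint32_t disp19 = ((uint32_t)byte_disp >> 2) & 0x7ffff;

	old[0] = BRANCH_ALWAYS | disp19;	/* ba,pt %xcc, new_fn */
	old[1] = NOP;				/* delay slot */
	/* the assembly version then executes "flush" to resync the I-cache */
}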
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S new file mode 100644 index 0000000000..63d618857d --- /dev/null +++ b/arch/sparc/lib/GENbzero.S @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENbzero.S: Generic sparc64 memset/clear_user. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ +#include <asm/asi.h> + +#define EX_ST(x,y) \ +98: x,y; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_o1_asi;\ + .text; \ + .align 4; + + .align 32 + .text + + .globl GENmemset + .type GENmemset, #function +GENmemset: /* %o0=buf, %o1=pat, %o2=len */ + and %o1, 0xff, %o3 + mov %o2, %o1 + sllx %o3, 8, %g1 + or %g1, %o3, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %xcc, 1f + or %g1, %o2, %o2 + + .globl GENbzero + .type GENbzero, #function +GENbzero: + clr %o2 +1: brz,pn %o1, GENbzero_return + mov %o0, %o3 + + /* %o5: saved %asi, restored at GENbzero_done + * %o4: store %asi to use + */ + rd %asi, %o5 + mov ASI_P, %o4 + wr %o4, 0x0, %asi + +GENbzero_from_clear_user: + cmp %o1, 15 + bl,pn %icc, GENbzero_tiny + andcc %o0, 0x7, %g1 + be,pt %xcc, 2f + mov 8, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %o0, 1, %o0 +2: cmp %o1, 128 + bl,pn %icc, GENbzero_medium + andcc %o0, (64 - 1), %g1 + be,pt %xcc, GENbzero_pre_loop + mov 64, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 + +GENbzero_pre_loop: + andn %o1, (64 - 1), %g1 + sub %o1, %g1, %o1 +GENbzero_loop: + EX_ST(stxa %o2, [%o0 + 0x00] %asi) + EX_ST(stxa %o2, [%o0 + 0x08] %asi) + EX_ST(stxa %o2, [%o0 + 0x10] %asi) + EX_ST(stxa %o2, [%o0 + 0x18] %asi) + EX_ST(stxa %o2, [%o0 + 0x20] %asi) + EX_ST(stxa %o2, [%o0 + 0x28] %asi) + EX_ST(stxa %o2, [%o0 + 0x30] %asi) + EX_ST(stxa %o2, [%o0 + 0x38] %asi) + subcc %g1, 64, %g1 + bne,pt %xcc, GENbzero_loop + add %o0, 64, %o0 + + membar #Sync + wr %o4, 0x0, %asi + brz,pn %o1, GENbzero_done +GENbzero_medium: + andncc %o1, 0x7, %g1 + be,pn %xcc, 2f + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 +2: brz,pt %o1, GENbzero_done + nop + +GENbzero_tiny: +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 + + /* fallthrough */ + +GENbzero_done: + wr %o5, 0x0, %asi + +GENbzero_return: + retl + mov %o3, %o0 + .size GENbzero, .-GENbzero + .size GENmemset, .-GENmemset + + .globl GENclear_user + .type GENclear_user, #function +GENclear_user: /* %o0=buf, %o1=len */ + rd %asi, %o5 + brz,pn %o1, GENbzero_done + clr %o3 + cmp %o5, ASI_AIUS + bne,pn %icc, GENbzero + clr %o2 + ba,pt %xcc, GENbzero_from_clear_user + mov ASI_AIUS, %o4 + .size GENclear_user, .-GENclear_user + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define GEN_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl generic_patch_bzero + .type generic_patch_bzero,#function +generic_patch_bzero: + GEN_DO_PATCH(memset, GENmemset) + GEN_DO_PATCH(__bzero, GENbzero) + GEN_DO_PATCH(__clear_user, GENclear_user) + retl + nop + .size 
generic_patch_bzero,.-generic_patch_bzero diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S new file mode 100644 index 0000000000..6891a5678e --- /dev/null +++ b/arch/sparc/lib/GENcopy_from_user.S @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENcopy_from_user.S: Generic sparc64 copy from userspace. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#define EX_LD(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME GENcopy_from_user +#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "GENmemcpy.S" diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S new file mode 100644 index 0000000000..df75b532a9 --- /dev/null +++ b/arch/sparc/lib/GENcopy_to_user.S @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENcopy_to_user.S: Generic sparc64 copy to userspace. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME GENcopy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "GENmemcpy.S" diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S new file mode 100644 index 0000000000..114340a0d3 --- /dev/null +++ b/arch/sparc/lib/GENmemcpy.S @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENmemcpy.S: Generic sparc64 memcpy. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ + +#ifdef __KERNEL__ +#include <linux/linkage.h> +#define GLOBAL_SPARE %g7 +#else +#define GLOBAL_SPARE %g5 +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME GENmemcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + + .text + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(GEN_retl_o4_1) + add %o4, %o2, %o4 + retl + add %o4, 1, %o0 +ENDPROC(GEN_retl_o4_1) +ENTRY(GEN_retl_g1_8) + add %g1, %o2, %g1 + retl + add %g1, 8, %o0 +ENDPROC(GEN_retl_g1_8) +ENTRY(GEN_retl_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(GEN_retl_o2_4) +ENTRY(GEN_retl_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(GEN_retl_o2_1) +#endif + + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %XCC, 5 + PREAMBLE + mov %o0, GLOBAL_SPARE + + cmp %o2, 0 + be,pn %XCC, 85f + or %o0, %o1, %o3 + cmp %o2, 16 + blu,a,pn %XCC, 80f + or %o3, %o2, %o3 + + xor %o0, %o1, %o4 + andcc %o4, 0x7, %g0 + bne,a,pn %XCC, 90f + sub %o0, %o1, %o3 + + and %o0, 0x7, %o4 + sub %o4, 0x8, %o4 + sub %g0, %o4, %o4 + sub %o2, %o4, %o2 +1: subcc %o4, 1, %o4 + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1) + EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1) + add %o1, 1, %o1 + bne,pt %XCC, 1b + add %o0, 1, %o0 + + andn %o2, 0x7, %g1 + sub %o2, %g1, %o2 +1: subcc %g1, 0x8, %g1 + EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8) + EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8) + add %o1, 0x8, %o1 + bne,pt %XCC, 1b + add %o0, 0x8, %o0 + + brz,pt %o2, 85f + sub %o0, %o1, %o3 + ba,a,pt %XCC, 90f + + .align 64 +80: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, %o3 + +1: + subcc %o2, 4, %o2 + EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4) + EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +85: retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1) + EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 + + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/GENpage.S b/arch/sparc/lib/GENpage.S new file mode 100644 index 0000000000..c143c4d1de --- /dev/null +++ b/arch/sparc/lib/GENpage.S @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENpage.S: Generic clear and copy page. 
+ * + * Copyright (C) 2007 (davem@davemloft.net) + */ +#include <asm/page.h> + + .text + .align 32 + +GENcopy_user_page: + set PAGE_SIZE, %g7 +1: ldx [%o1 + 0x00], %o2 + ldx [%o1 + 0x08], %o3 + ldx [%o1 + 0x10], %o4 + ldx [%o1 + 0x18], %o5 + stx %o2, [%o0 + 0x00] + stx %o3, [%o0 + 0x08] + stx %o4, [%o0 + 0x10] + stx %o5, [%o0 + 0x18] + ldx [%o1 + 0x20], %o2 + ldx [%o1 + 0x28], %o3 + ldx [%o1 + 0x30], %o4 + ldx [%o1 + 0x38], %o5 + stx %o2, [%o0 + 0x20] + stx %o3, [%o0 + 0x28] + stx %o4, [%o0 + 0x30] + stx %o5, [%o0 + 0x38] + subcc %g7, 64, %g7 + add %o1, 64, %o1 + bne,pt %xcc, 1b + add %o0, 64, %o0 + retl + nop + +GENclear_page: +GENclear_user_page: + set PAGE_SIZE, %g7 +1: stx %g0, [%o0 + 0x00] + stx %g0, [%o0 + 0x08] + stx %g0, [%o0 + 0x10] + stx %g0, [%o0 + 0x18] + stx %g0, [%o0 + 0x20] + stx %g0, [%o0 + 0x28] + stx %g0, [%o0 + 0x30] + stx %g0, [%o0 + 0x38] + subcc %g7, 64, %g7 + bne,pt %xcc, 1b + add %o0, 64, %o0 + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define GEN_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl generic_patch_pageops + .type generic_patch_pageops,#function +generic_patch_pageops: + GEN_DO_PATCH(copy_user_page, GENcopy_user_page) + GEN_DO_PATCH(_clear_page, GENclear_page) + GEN_DO_PATCH(clear_user_page, GENclear_user_page) + retl + nop + .size generic_patch_pageops,.-generic_patch_pageops diff --git a/arch/sparc/lib/GENpatch.S b/arch/sparc/lib/GENpatch.S new file mode 100644 index 0000000000..1ec1f02c8b --- /dev/null +++ b/arch/sparc/lib/GENpatch.S @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* GENpatch.S: Patch Ultra-I routines with generic variant. + * + * Copyright (C) 2007 David S. Miller <davem@davemloft.net> + */ + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define GEN_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl generic_patch_copyops + .type generic_patch_copyops,#function +generic_patch_copyops: + GEN_DO_PATCH(memcpy, GENmemcpy) + GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user) + GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user) + retl + nop + .size generic_patch_copyops,.-generic_patch_copyops diff --git a/arch/sparc/lib/M7copy_from_user.S b/arch/sparc/lib/M7copy_from_user.S new file mode 100644 index 0000000000..66464b3e36 --- /dev/null +++ b/arch/sparc/lib/M7copy_from_user.S @@ -0,0 +1,40 @@ +/* + * M7copy_from_user.S: SPARC M7 optimized copy from userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ */ + + +#define EX_LD(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME M7copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7copy_to_user.S b/arch/sparc/lib/M7copy_to_user.S new file mode 100644 index 0000000000..a60ac467f8 --- /dev/null +++ b/arch/sparc/lib/M7copy_to_user.S @@ -0,0 +1,51 @@ +/* + * M7copy_to_user.S: SPARC M7 optimized copy to userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + +#define EX_ST(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x, y) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME M7copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S new file mode 100644 index 0000000000..cbd42ea7c3 --- /dev/null +++ b/arch/sparc/lib/M7memcpy.S @@ -0,0 +1,923 @@ +/* + * M7memcpy: Optimized SPARC M7 memcpy + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + .file "M7memcpy.S" + +/* + * memcpy(s1, s2, len) + * + * Copy s2 to s1, always copy n bytes. + * Note: this C code does not work for overlapped copies. + * + * Fast assembler language version of the following C-program for memcpy + * which represents the `standard' for the C-library. 
+ * + * void * + * memcpy(void *s, const void *s0, size_t n) + * { + * if (n != 0) { + * char *s1 = s; + * const char *s2 = s0; + * do { + * *s1++ = *s2++; + * } while (--n != 0); + * } + * return (s); + * } + * + * + * SPARC T7/M7 Flow : + * + * if (count < SMALL_MAX) { + * if count < SHORTCOPY (SHORTCOPY=3) + * copy bytes; exit with dst addr + * if src & dst aligned on word boundary but not long word boundary, + * copy with ldw/stw; branch to finish_up + * if src & dst aligned on long word boundary + * copy with ldx/stx; branch to finish_up + * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) + * copy bytes; exit with dst addr + * move enough bytes to get src to word boundary + * if dst now on word boundary + * move_words: + * copy words; branch to finish_up + * if dst now on half word boundary + * load words, shift half words, store words; branch to finish_up + * if dst on byte 1 + * load words, shift 3 bytes, store words; branch to finish_up + * if dst on byte 3 + * load words, shift 1 byte, store words; branch to finish_up + * finish_up: + * copy bytes; exit with dst addr + * } else { More than SMALL_MAX bytes + * move bytes until dst is on long word boundary + * if( src is on long word boundary ) { + * if (count < MED_MAX) { + * finish_long: src/dst aligned on 8 bytes + * copy with ldx/stx in 8-way unrolled loop; + * copy final 0-63 bytes; exit with dst addr + * } else { src/dst aligned; count > MED_MAX + * align dst on 64 byte boundary; for main data movement: + * prefetch src data to L2 cache; let HW prefetch move data to L1 cache + * Use BIS (block initializing store) to avoid copying store cache + * lines from memory. But pre-store first element of each cache line + * ST_CHUNK lines in advance of the rest of that cache line. That + * gives time for replacement cache lines to be written back without + * excess STQ and Miss Buffer filling. Repeat until near the end, + * then finish up storing before going to finish_long. + * } + * } else { src/dst not aligned on 8 bytes + * if src is word aligned and count < MED_WMAX + * move words in 8-way unrolled loop + * move final 0-31 bytes; exit with dst addr + * if count < MED_UMAX + * use alignaddr/faligndata combined with ldd/std in 8-way + * unrolled loop to move data. + * go to unalign_done + * else + * setup alignaddr for faligndata instructions + * align dst on 64 byte boundary; prefetch src data to L1 cache + * loadx8, falign, block-store, prefetch loop + * (only use block-init-store when src/dst on 8 byte boundaries.) + * unalign_done: + * move remaining bytes for unaligned cases. exit with dst addr. + * } + * + */ + +#include <asm/visasm.h> +#include <asm/asi.h> + +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +/* + * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache + * line as "least recently used" which means if many threads are + * active, it has a high probability of being pushed out of the cache + * between the first initializing store and the final stores. 
+ * Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which + * marks the cache line as "most recently used" for all + * but the last cache line + */ +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_MRU_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P +#else +#define STORE_MRU_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef STORE_INIT_MRU +#define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME M7memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#define BLOCK_SIZE 64 +#define SHORTCOPY 3 +#define SHORTCHECK 14 +#define SHORT_LONG 64 /* max copy for short longword-aligned case */ + /* must be at least 64 */ +#define SMALL_MAX 128 +#define MED_UMAX 1024 /* max copy for medium un-aligned case */ +#define MED_WMAX 1024 /* max copy for medium word-aligned case */ +#define MED_MAX 1024 /* max copy for medium longword-aligned case */ +#define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ +#define ALIGN_PRE 24 /* distance for aligned prefetch loop */ + + .register %g2,#scratch + + .section ".text" + .global FUNC_NAME + .type FUNC_NAME, #function + .align 16 +FUNC_NAME: + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %g1 ! save %o0 + brz,pn %o2, .Lsmallx + cmp %o2, 3 + ble,pn %icc, .Ltiny_cp + cmp %o2, 19 + ble,pn %icc, .Lsmall_cp + or %o0, %o1, %g2 + cmp %o2, SMALL_MAX + bl,pn %icc, .Lmedium_cp + nop + +.Lmedium: + neg %o0, %o5 + andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned + brz,pt %o5, .Ldst_aligned_on_8 + + ! %o5 has the bytes to be written in partial store. + sub %o2, %o5, %o2 + sub %o1, %o0, %o1 ! %o1 gets the difference +7: ! dst aligning loop + add %o1, %o0, %o4 + EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! load one byte + subcc %o5, 1, %o5 + EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) + bgu,pt %xcc, 7b + add %o0, 1, %o0 ! advance dst + add %o1, %o0, %o1 ! restore %o1 +.Ldst_aligned_on_8: + andcc %o1, 7, %o5 + brnz,pt %o5, .Lsrc_dst_unaligned_on_8 + nop + +.Lsrc_dst_aligned_on_8: + ! check if we are copying MED_MAX or more bytes + set MED_MAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bgu,pn %xcc, .Llarge_align8_copy + nop + +/* + * Special case for handling when src and dest are both long word aligned + * and total data to move is less than MED_MAX bytes + */ +.Lmedlong: + subcc %o2, 63, %o2 ! adjust length to allow cc test + ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes + nop +.Lmedl64: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load + subcc %o2, 64, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! a block of 64 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) + EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) + EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store + EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) + EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 + add %o1, 64, %o1 ! 
increase src ptr by 64 + EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) + add %o0, 64, %o0 ! increase dst ptr by 64 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) + bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) +.Lmedl63: + addcc %o2, 32, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load + sub %o2, 32, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 + add %o1, 32, %o1 ! increase src ptr by 32 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedl31: + addcc %o2, 16, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left + nop ! + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) + sub %o2, 16, %o2 ! decrease count by 16 + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) +.Lmedl15: + addcc %o2, 15, %o2 ! restore count + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + add %o0, 8, %o0 ! increase dst ptr by 8 + subcc %o2, 8, %o2 ! decrease count by 8 + bnz,pn %xcc, .Lmedw7 + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 + retl + mov EX_RETVAL(%g1), %o0 ! restore %o0 + + .align 16 +.Lsrc_dst_unaligned_on_8: + ! DST is 8-byte aligned, src is not +2: + andcc %o1, 0x3, %o5 ! test word alignment + bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned + nop + +/* + * Handle all cases where src and dest are aligned on word + * boundaries. Use unrolled loops for better performance. + * This option wins over standard large data move when + * source and destination is in cache for.Lmedium + * to short data moves. + */ + set MED_WMAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop + nop + + subcc %o2, 31, %o2 ! adjust length to allow cc test + ! for end of loop + ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 +.Lmedw32: + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! move a block of 32 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) + subcc %o2, 32, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) + add %o1, 32, %o1 ! 
increase src ptr by 32 + EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) + or %o4, %o5, %o5 + bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) +.Lmedw31: + addcc %o2, 31, %o2 ! restore count + + bz,pt %xcc, .Lsmallx ! exit if finished + nop + cmp %o2, 16 + blt,pt %xcc, .Lmedw15 + nop + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes + sllx %o4, 32, %o5 + subcc %o2, 16, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increase dst ptr by 16 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) +.Lmedw15: + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + subcc %o2, 8, %o2 ! decrease count by 8 + EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes + add %o0, 8, %o0 ! increase dst ptr by 8 + EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + bz,pt %xcc, .Lsmallx ! exit if finished +.Lmedw7: ! count is ge 1, less than 8 + cmp %o2, 4 ! check for 4 bytes left + blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left + nop ! + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes + add %o1, 4, %o1 ! increase src ptr by 4 + add %o0, 4, %o0 ! increase dst ptr by 4 + subcc %o2, 4, %o2 ! decrease count by 4 + bnz .Lsmallleft3 + EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes + retl + mov EX_RETVAL(%g1), %o0 + + .align 16 +.Llarge_align8_copy: ! Src and dst share 8 byte alignment + ! align dst to 64 byte boundary + andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned + brz,pn %o3, .Laligned_to_64 + andcc %o0, 8, %o3 ! odd long words to move? + brz,pt %o3, .Laligned_to_16 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 8, %o2 + add %o1, 8, %o1 ! increment src ptr + add %o0, 8, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_16: + andcc %o0, 16, %o3 ! pair of long words to move? + brz,pt %o3, .Laligned_to_32 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 16, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) + add %o1, 16, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 16, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_32: + andcc %o0, 32, %o3 ! four long words to move? 
+ brz,pt %o3, .Laligned_to_64 + nop + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) + sub %o2, 32, %o2 + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) + add %o1, 32, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) + add %o0, 32, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) +.Laligned_to_64: +! +! Using block init store (BIS) instructions to avoid fetching cache +! lines from memory. Use ST_CHUNK stores to first element of each cache +! line (similar to prefetching) to avoid overfilling STQ or miss buffers. +! Gives existing cache lines time to be moved out of L1/L2/L3 cache. +! Initial stores using MRU version of BIS to keep cache line in +! cache until we are ready to store final element of cache line. +! Then store last element using the LRU version of BIS. +! + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 +! +! We use STORE_MRU_ASI for the first seven stores to each cache line +! followed by STORE_ASI (mark as LRU) for the last store. That +! mixed approach reduces the probability that the cache line is removed +! before we finish setting it, while minimizing the effects on +! other cached values during a large memcpy +! +! ST_CHUNK batches up initial BIS operations for several cache lines +! to allow multiple requests to not be blocked by overflowing the +! the store miss buffer. Then the matching stores for all those +! BIS operations are executed. +! + + sub %o0, 8, %o0 ! adjust %o0 for ASI alignment +.Lalign_loop: + cmp %o5, ST_CHUNK*64 + blu,pt %xcc, .Lalign_loop_fin + mov ST_CHUNK,%o3 +.Lalign_loop_start: + prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + bgu %xcc,.Lalign_loop_start + add %o0, 56, %o0 + + mov ST_CHUNK,%o3 + sllx %o3, 6, %o4 ! ST_CHUNK*64 + sub %o1, %o4, %o1 ! reset %o1 + sub %o0, %o4, %o0 ! reset %o0 + +.Lalign_loop_rest: + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + add %o0, 16, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + add %o0, 8, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) + sub %o5, 64, %o5 + bgu %xcc,.Lalign_loop_rest + ! 
mark cache line as LRU + EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) + + cmp %o5, ST_CHUNK*64 + bgu,pt %xcc, .Lalign_loop_start + mov ST_CHUNK,%o3 + + cmp %o5, 0 + beq .Lalign_done + nop +.Lalign_loop_fin: + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) + add %o1, 64, %o1 + EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) + add %o0, 64, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) + bgu %xcc,.Lalign_loop_fin + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) + +.Lalign_done: + add %o0, 8, %o0 ! restore %o0 from ASI alignment + membar #StoreStore + sub %o2, 63, %o2 ! adjust length to allow cc test + ba .Lmedl63 ! in .Lmedl63 + nop + + .align 16 + ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX +.Lunalignsetup: +.Lunalignrejoin: + mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) +#else + VISEntryHalf +#endif + mov %o3, %g1 ! restore %g1 + + set MED_UMAX, %o3 + cmp %o2, %o3 ! check for.Lmedium unaligned limit + bge,pt %xcc,.Lunalign_large + prefetch [%o1 + (4 * BLOCK_SIZE)], 20 + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + cmp %o2, 8 ! Insure we do not load beyond + bgt .Lunalign_adjust ! end of source buffer + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o2, 64, %o2 ! adjust to leave loop + sub %o5, 64, %o5 ! early if necessary +.Lunalign_adjust: + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! 
advance %o1 to after blocks + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) +.Lunalign_loop: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) + subcc %o5, BLOCK_SIZE, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) + EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) + EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) + EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) + faligndata %f10, %f12, %f26 + EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) + add %o4, BLOCK_SIZE, %o4 + EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) + EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) + add %o0, BLOCK_SIZE, %o0 + bgu,pt %xcc, .Lunalign_loop + prefetch [%o4 + (5 * BLOCK_SIZE)], 20 + ba .Lunalign_done + nop + +.Lunalign_large: + andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? + bz %xcc, .Lunalignsrc + sub %o3, 64, %o3 ! %o3 will be multiple of 8 + neg %o3 ! bytes until dest is 64 byte aligned + sub %o2, %o3, %o2 ! update cnt with bytes to be moved + ! Move bytes according to source alignment + andcc %o1, 0x1, %o5 + bnz %xcc, .Lunalignbyte ! check for byte alignment + nop + andcc %o1, 2, %o5 ! check for half word alignment + bnz %xcc, .Lunalignhalf + nop + ! Src is word aligned +.Lunalignword: + EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 + subcc %o3, 8, %o3 ! decrease count by 8 + EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 + add %o0, 8, %o0 ! increase dst ptr by 8 + bnz %xcc, .Lunalignword + EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) + ba .Lunalignsrc + nop + + ! Src is half-word aligned +.Lunalignhalf: + EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes + sllx %o4, 32, %o5 ! shift left + EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + sllx %o5, 16, %o5 + EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + add %o1, 8, %o1 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignhalf + add %o0, 8, %o0 + ba .Lunalignsrc + nop + + ! Src is Byte aligned +.Lunalignbyte: + sub %o0, %o1, %o0 ! 
share pointer advance +.Lunalignbyte_loop: + EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 56, %o5 + EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 40, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 24, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) + sllx %o4, 8, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) + or %o4, %o5, %o5 + add %o0, %o1, %o0 + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) + sub %o0, %o1, %o0 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignbyte_loop + add %o1, 8, %o1 + add %o0,%o1, %o0 ! restore pointer + + ! Destination is now block (64 byte aligned) +.Lunalignsrc: + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + add %o2, 64, %o2 ! Insure we do not load beyond + sub %o5, 64, %o5 ! end of source buffer + + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! advance %o1 to after blocks + + EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 +.Lunalign_sloop: + EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) + faligndata %f14, %f16, %f0 + EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) + faligndata %f16, %f18, %f2 + EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) + faligndata %f18, %f20, %f4 + EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) + subcc %o5, 64, %o5 + EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) + faligndata %f20, %f22, %f6 + EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) + faligndata %f22, %f24, %f8 + EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f24, %f26, %f10 + EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f26, %f28, %f12 + EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) + add %o4, 64, %o4 + EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) + faligndata %f28, %f30, %f14 + EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) + add %o0, 64, %o0 + EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) + fsrc2 %f30, %f14 + bgu,pt %xcc, .Lunalign_sloop + prefetch [%o4 + (8 * BLOCK_SIZE)], 20 + +.Lunalign_done: + ! Handle trailing bytes, 64 to 127 + ! Dest long word aligned, Src not long word aligned + cmp %o2, 15 + bleu %xcc, .Lunalign_short + + andn %o2, 0x7, %o5 ! %o5 is multiple of 8 + and %o2, 0x7, %o2 ! residue bytes in %o2 + add %o2, 8, %o2 + sub %o5, 8, %o5 ! insure we do not load past end of src + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! 
fetch partialword +.Lunalign_by8: + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) + add %o4, 8, %o4 + faligndata %f0, %f2, %f16 + subcc %o5, 8, %o5 + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) + fsrc2 %f2, %f0 + bgu,pt %xcc, .Lunalign_by8 + add %o0, 8, %o0 + +.Lunalign_short: +#ifdef NON_USER_COPY + VISExitHalfFast +#else + VISExitHalf +#endif + ba .Lsmallrest + nop + +/* + * This is a special case of nested memcpy. This can happen when kernel + * calls unaligned memcpy back to back without saving FP registers. We need + * traps(context switch) to save/restore FP registers. If the kernel calls + * memcpy without this trap sequence we will hit FP corruption. Let's use + * the normal integer load/store method in this case. + */ + +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail_cp: + or %o0, %o1, %g2 +#endif +.Lmedium_cp: + LOAD(prefetch, %o1 + 0x40, #n_reads_strong) + andcc %g2, 0x7, %g0 + bne,pn %xcc, .Lmedium_unaligned_cp + nop + +.Lmedium_noprefetch_cp: + andncc %o2, 0x20 - 1, %o5 + be,pn %xcc, 2f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) + add %o1, 0x20, %o1 + subcc %o5, 0x20, %o5 + EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) + bne,pt %xcc, 1b + add %o0, 0x20, %o0 +2: andcc %o2, 0x18, %o5 + be,pt %xcc, 3f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + add %o0, 0x08, %o0 + subcc %o5, 0x08, %o5 + bne,pt %xcc, 1b + EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) +3: brz,pt %o2, .Lexit_cp + cmp %o2, 0x04 + bl,pn %xcc, .Ltiny_cp + nop + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 0x04, %o1 + add %o0, 0x04, %o0 + subcc %o2, 0x04, %o2 + bne,pn %xcc, .Ltiny_cp + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) + ba,a,pt %xcc, .Lexit_cp + +.Lmedium_unaligned_cp: + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %o3 + and %o3, 0x7, %o3 + brz,pt %o3, 2f + sub %o2, %o3, %o2 + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 1, %o1 + subcc %o3, 1, %o3 + add %o0, 1, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) +2: + and %o1, 0x7, %o3 + brz,pn %o3, .Lmedium_noprefetch_cp + sll %o3, 3, %o3 + mov 64, %g2 + sub %g2, %o3, %g2 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) + sllx %o4, %o3, %o4 + andn %o2, 0x08 - 1, %o5 + sub %o2, %o5, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + subcc %o5, 0x08, %o5 + srlx %g3, %g2, %g7 + or %g7, %o4, %g7 + EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) + add %o0, 0x08, %o0 + bne,pt %xcc, 1b + sllx %g3, %o3, %o4 + srl %o3, 3, %o3 + add %o1, %o3, %o1 + brz,pn %o2, .Lexit_cp + nop + ba,pt %xcc, .Lsmall_unaligned_cp + +.Ltiny_cp: + EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) + ba,pt %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) + +.Lsmall_cp: + andcc %g2, 0x3, %g0 + bne,pn %xcc, .Lsmall_unaligned_cp + andn %o2, 0x4 - 1, %o5 + sub %o2, %o5, %o2 +1: + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + add %o1, 0x04, %o1 + subcc %o5, 0x04, %o5 + add %o0, 0x04, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) + brz,pt %o2, .Lexit_cp + nop + ba,a,pt %xcc, .Ltiny_cp + +.Lsmall_unaligned_cp: +1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) + add %o1, 1, %o1 + add %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) + ba,a,pt %xcc, .Lexit_cp + +.Lsmallrest: + tst %o2 + bz,pt %xcc, .Lsmallx + cmp %o2, 4 + blt,pn %xcc, .Lsmallleft3 + nop + sub %o2, 3, %o2 +.Lsmallnotalign4: + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte + subcc %o2, 4, %o2 ! reduce count by 4 + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 + add %o1, 4, %o1 ! advance SRC by 4 + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) + EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) + add %o0, 4, %o0 ! advance DST by 4 + EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) + EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) + bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain + EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) + addcc %o2, 3, %o2 ! restore count + bz,pt %xcc, .Lsmallx +.Lsmallleft3: ! 1, 2, or 3 bytes remain + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte + subcc %o2, 1, %o2 + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte + EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte + EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! store third byte +.Lsmallx: + retl + mov EX_RETVAL(%g1), %o0 +.Lsmallfin: + tst %o2 + bnz,pn %xcc, .Lsmallleft3 + nop + retl + mov EX_RETVAL(%g1), %o0 ! 
restore %o0 +.Lexit_cp: + retl + mov EX_RETVAL(%g1), %o0 + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/M7memset.S b/arch/sparc/lib/M7memset.S new file mode 100644 index 0000000000..62ea91b3a6 --- /dev/null +++ b/arch/sparc/lib/M7memset.S @@ -0,0 +1,352 @@ +/* + * M7memset.S: SPARC M7 optimized memset. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + +/* + * M7memset.S: M7 optimized memset. + * + * char *memset(sp, c, n) + * + * Set an array of n chars starting at sp to the character c. + * Return sp. + * + * Fast assembler language version of the following C-program for memset + * which represents the `standard' for the C-library. + * + * void * + * memset(void *sp1, int c, size_t n) + * { + * if (n != 0) { + * char *sp = sp1; + * do { + * *sp++ = (char)c; + * } while (--n != 0); + * } + * return (sp1); + * } + * + * The algorithm is as follows : + * + * For small 6 or fewer bytes stores, bytes will be stored. + * + * For less than 32 bytes stores, align the address on 4 byte boundary. + * Then store as many 4-byte chunks, followed by trailing bytes. + * + * For sizes greater than 32 bytes, align the address on 8 byte boundary. + * if (count >= 64) { + * store 8-bytes chunks to align the address on 64 byte boundary + * if (value to be set is zero && count >= MIN_ZERO) { + * Using BIS stores, set the first long word of each + * 64-byte cache line to zero which will also clear the + * other seven long words of the cache line. + * } + * else if (count >= MIN_LOOP) { + * Using BIS stores, set the first long word of each of + * ST_CHUNK cache lines (64 bytes each) before the main + * loop is entered. + * In the main loop, continue pre-setting the first long + * word of each cache line ST_CHUNK lines in advance while + * setting the other seven long words (56 bytes) of each + * cache line until fewer than ST_CHUNK*64 bytes remain. + * Then set the remaining seven long words of each cache + * line that has already had its first long word set. + * } + * store remaining data in 64-byte chunks until less than + * 64 bytes remain. + * } + * Store as many 8-byte chunks, followed by trailing bytes. + * + * BIS = Block Init Store + * Doing the advance store of the first element of the cache line + * initiates the displacement of a cache line while only using a single + * instruction in the pipeline. That avoids various pipeline delays, + * such as filling the miss buffer. The performance effect is + * similar to prefetching for normal stores. + * The special case for zero fills runs faster and uses fewer instruction + * cycles than the normal memset loop. + * + * We only use BIS for memset of greater than MIN_LOOP bytes because a sequence + * BIS stores must be followed by a membar #StoreStore. The benefit of + * the BIS store must be balanced against the cost of the membar operation. + */ + +/* + * ASI_STBI_P marks the cache line as "least recently used" + * which means if many threads are active, it has a high chance + * of being pushed out of the cache between the first initializing + * store and the final stores. + * Thus, we use ASI_STBIMRU_P which marks the cache line as + * "most recently used" for all but the last store to the cache line. 
+ */ + +#include <asm/asi.h> +#include <asm/page.h> + +#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P +#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P + + +#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */ +#define MIN_LOOP 16320 +#define MIN_ZERO 512 + + .section ".text" + .align 32 + +/* + * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE) + * (can create a more optimized version later.) + */ + .globl M7clear_page + .globl M7clear_user_page +M7clear_page: /* clear_page(dest) */ +M7clear_user_page: + set PAGE_SIZE, %o1 + /* fall through into bzero code */ + + .size M7clear_page,.-M7clear_page + .size M7clear_user_page,.-M7clear_user_page + +/* + * Define bzero(dest, n) as memset(dest, 0, n) + * (can create a more optimized version later.) + */ + .globl M7bzero +M7bzero: /* bzero(dest, size) */ + mov %o1, %o2 + mov 0, %o1 + /* fall through into memset code */ + + .size M7bzero,.-M7bzero + + .global M7memset + .type M7memset, #function + .register %g3, #scratch +M7memset: + mov %o0, %o5 ! copy sp1 before using it + cmp %o2, 7 ! if small counts, just write bytes + bleu,pn %xcc, .wrchar + and %o1, 0xff, %o1 ! o1 is (char)c + + sll %o1, 8, %o3 + or %o1, %o3, %o1 ! now o1 has 2 bytes of c + sll %o1, 16, %o3 + cmp %o2, 32 + blu,pn %xcc, .wdalign + or %o1, %o3, %o1 ! now o1 has 4 bytes of c + + sllx %o1, 32, %o3 + or %o1, %o3, %o1 ! now o1 has 8 bytes of c + +.dbalign: + andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound? + bz,pt %xcc, .blkalign ! already long word aligned + sub %o3, 8, %o3 ! -(bytes till long word aligned) + + add %o2, %o3, %o2 ! update o2 with new count + ! Set -(%o3) bytes till sp1 long word aligned +1: stb %o1, [%o5] ! there is at least 1 byte to set + inccc %o3 ! byte clearing loop + bl,pt %xcc, 1b + inc %o5 + + ! Now sp1 is long word aligned (sp1 is found in %o5) +.blkalign: + cmp %o2, 64 ! check if there are 64 bytes to set + blu,pn %xcc, .wrshort + mov %o2, %o3 + + andcc %o5, 63, %o3 ! is sp1 block aligned? + bz,pt %xcc, .blkwr ! now block aligned + sub %o3, 64, %o3 ! o3 is -(bytes till block aligned) + add %o2, %o3, %o2 ! o2 is the remainder + + ! Store -(%o3) bytes till dst is block (64 byte) aligned. + ! Use long word stores. + ! Recall that dst is already long word aligned +1: + addcc %o3, 8, %o3 + stx %o1, [%o5] + bl,pt %xcc, 1b + add %o5, 8, %o5 + + ! Now sp1 is block aligned +.blkwr: + andn %o2, 63, %o4 ! calculate size of blocks in bytes + brz,pn %o1, .wrzero ! special case if c == 0 + and %o2, 63, %o3 ! %o3 = bytes left after blk stores. + + set MIN_LOOP, %g1 + cmp %o4, %g1 ! check there are enough bytes to set + blu,pn %xcc, .short_set ! to justify cost of membar + ! must be > pre-cleared lines + nop + + ! initial cache-clearing stores + ! get store pipeline moving + rd %asi, %g3 ! save %asi to be restored later + wr %g0, ASI_STBIMRU_P, %asi + + ! Primary memset loop for large memsets +.wr_loop: + sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment + mov ST_CHUNK, %g1 +.wr_loop_start: + stxa %o1, [%o5+8]%asi + subcc %g1, 4, %g1 + stxa %o1, [%o5+8+64]%asi + add %o5, 256, %o5 + stxa %o1, [%o5+8-128]%asi + bgu %xcc, .wr_loop_start + stxa %o1, [%o5+8-64]%asi + + sub %o5, ST_CHUNK*64, %o5 ! reset %o5 + mov ST_CHUNK, %g1 + +.wr_loop_rest: + stxa %o1, [%o5+8+8]%asi + sub %o4, 64, %o4 + stxa %o1, [%o5+16+8]%asi + subcc %g1, 1, %g1 + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu %xcc, .wr_loop_rest + stxa %o1, [%o5]ASI_STBI_P + + ! 
If more than ST_CHUNK*64 bytes remain to set, continue + ! setting the first long word of each cache line in advance + ! to keep the store pipeline moving. + + cmp %o4, ST_CHUNK*64 + bge,pt %xcc, .wr_loop_start + mov ST_CHUNK, %g1 + + brz,a,pn %o4, .asi_done + add %o5, 8, %o5 ! restore %o5 offset + +.wr_loop_small: + stxa %o1, [%o5+8]%asi + stxa %o1, [%o5+8+8]%asi + stxa %o1, [%o5+16+8]%asi + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + subcc %o4, 64, %o4 + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu,pt %xcc, .wr_loop_small + stxa %o1, [%o5]ASI_STBI_P + + ba .asi_done + add %o5, 8, %o5 ! restore %o5 offset + + ! Special case loop for zero fill memsets + ! For each 64 byte cache line, single STBI to first element + ! clears line +.wrzero: + cmp %o4, MIN_ZERO ! check if enough bytes to set + ! to pay %asi + membar cost + blu %xcc, .short_set + nop + sub %o4, 256, %o4 + +.wrzero_loop: + mov 64, %g3 + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 256, %o4 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o5, 256, %o5 + sub %g3, 192, %g3 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %g3, 64, %g3 + bge,pt %xcc, .wrzero_loop + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o4, 256, %o4 + + brz,pn %o4, .bsi_done + nop + +.wrzero_small: + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 64, %o4 + bgu,pt %xcc, .wrzero_small + add %o5, 64, %o5 + ba,a .bsi_done + +.asi_done: + wr %g3, 0x0, %asi ! restored saved %asi +.bsi_done: + membar #StoreStore ! required by use of Block Store Init + +.short_set: + cmp %o4, 64 ! check if 64 bytes to set + blu %xcc, 5f + nop +4: ! set final blocks of 64 bytes + stx %o1, [%o5] + stx %o1, [%o5+8] + stx %o1, [%o5+16] + stx %o1, [%o5+24] + subcc %o4, 64, %o4 + stx %o1, [%o5+32] + stx %o1, [%o5+40] + add %o5, 64, %o5 + stx %o1, [%o5-16] + bgu,pt %xcc, 4b + stx %o1, [%o5-8] + +5: + ! Set the remaining long words +.wrshort: + subcc %o3, 8, %o3 ! Can we store any long words? + blu,pn %xcc, .wrchars + and %o2, 7, %o2 ! calc bytes left after long words +6: + subcc %o3, 8, %o3 + stx %o1, [%o5] ! store the long words + bgeu,pt %xcc, 6b + add %o5, 8, %o5 + +.wrchars: ! check for extra chars + brnz %o2, .wrfin + nop + retl + nop + +.wdalign: + andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary + bz,pn %xcc, .wrword + andn %o2, 3, %o3 ! create word sized count in %o3 + + dec %o2 ! decrement count + stb %o1, [%o5] ! clear a byte + b .wdalign + inc %o5 ! next byte + +.wrword: + subcc %o3, 4, %o3 + st %o1, [%o5] ! 4-byte writing loop + bnz,pt %xcc, .wrword + add %o5, 4, %o5 + + and %o2, 3, %o2 ! leftover count, if any + +.wrchar: + ! Set the remaining bytes, if any + brz %o2, .exit + nop +.wrfin: + deccc %o2 + stb %o1, [%o5] + bgu,pt %xcc, .wrfin + inc %o5 +.exit: + retl ! %o0 was preserved + nop + + .size M7memset,.-M7memset diff --git a/arch/sparc/lib/M7patch.S b/arch/sparc/lib/M7patch.S new file mode 100644 index 0000000000..9000b7bc5f --- /dev/null +++ b/arch/sparc/lib/M7patch.S @@ -0,0 +1,51 @@ +/* + * M7patch.S: Patch generic routines with M7 variant. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + +ENTRY(m7_patch_copyops) + NG_DO_PATCH(memcpy, M7memcpy) + NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) + NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) + retl + nop +ENDPROC(m7_patch_copyops) + +ENTRY(m7_patch_bzero) + NG_DO_PATCH(memset, M7memset) + NG_DO_PATCH(__bzero, M7bzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop +ENDPROC(m7_patch_bzero) + +ENTRY(m7_patch_pageops) + NG_DO_PATCH(copy_user_page, NG4copy_user_page) + NG_DO_PATCH(_clear_page, M7clear_page) + NG_DO_PATCH(clear_user_page, M7clear_user_page) + retl + nop +ENDPROC(m7_patch_pageops) diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile new file mode 100644 index 0000000000..063556fe2c --- /dev/null +++ b/arch/sparc/lib/Makefile @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for Sparc library files.. +# + +asflags-y := -ansi -DST_DIV0=0x02 +ccflags-y := -Werror + +lib-$(CONFIG_SPARC32) += ashrdi3.o +lib-$(CONFIG_SPARC32) += memcpy.o memset.o +lib-y += strlen.o +lib-y += checksum_$(BITS).o +lib-$(CONFIG_SPARC32) += blockops.o +lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o +lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o +lib-$(CONFIG_SPARC32) += copy_user.o locks.o +lib-$(CONFIG_SPARC64) += atomic_64.o +lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o +lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o +lib-$(CONFIG_SPARC64) += multi3.o +lib-$(CONFIG_SPARC64) += fls.o +lib-$(CONFIG_SPARC64) += fls64.o +lib-$(CONFIG_SPARC64) += NG4fls.o + +lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o +lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o +lib-$(CONFIG_SPARC64) += VISsave.o +lib-$(CONFIG_SPARC64) += bitops.o + +lib-$(CONFIG_SPARC64) += U1memcpy.o U1copy_from_user.o U1copy_to_user.o + +lib-$(CONFIG_SPARC64) += U3memcpy.o U3copy_from_user.o U3copy_to_user.o +lib-$(CONFIG_SPARC64) += U3patch.o + +lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o +lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o + +lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o +lib-$(CONFIG_SPARC64) += NG2patch.o + +lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o +lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o + +lib-$(CONFIG_SPARC64) += Memcpy_utils.o + +lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o +lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o + +lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o +lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o + +lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o +lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o + +obj-$(CONFIG_SPARC64) += iomap.o +obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o +obj-$(CONFIG_SPARC64) += PeeCeeI.o diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S new file mode 100644 index 0000000000..64fbac28b3 --- /dev/null +++ b/arch/sparc/lib/Memcpy_utils.S @@ -0,0 +1,345 @@ 
+#ifndef __ASM_MEMCPY_UTILS +#define __ASM_MEMCPY_UTILS + +#include <linux/linkage.h> +#include <asm/asi.h> +#include <asm/visasm.h> + +ENTRY(__restore_asi_fp) + VISExitHalf + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi_fp) + +ENTRY(__restore_asi) + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi) + +ENTRY(memcpy_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(memcpy_retl_o2) +ENTRY(memcpy_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(memcpy_retl_o2_plus_1) +ENTRY(memcpy_retl_o2_plus_3) + ba,pt %xcc, __restore_asi + add %o2, 3, %o0 +ENDPROC(memcpy_retl_o2_plus_3) +ENTRY(memcpy_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(memcpy_retl_o2_plus_4) +ENTRY(memcpy_retl_o2_plus_5) + ba,pt %xcc, __restore_asi + add %o2, 5, %o0 +ENDPROC(memcpy_retl_o2_plus_5) +ENTRY(memcpy_retl_o2_plus_6) + ba,pt %xcc, __restore_asi + add %o2, 6, %o0 +ENDPROC(memcpy_retl_o2_plus_6) +ENTRY(memcpy_retl_o2_plus_7) + ba,pt %xcc, __restore_asi + add %o2, 7, %o0 +ENDPROC(memcpy_retl_o2_plus_7) +ENTRY(memcpy_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_8) +ENTRY(memcpy_retl_o2_plus_15) + ba,pt %xcc, __restore_asi + add %o2, 15, %o0 +ENDPROC(memcpy_retl_o2_plus_15) +ENTRY(memcpy_retl_o2_plus_15_8) + add %o2, 15, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_15_8) +ENTRY(memcpy_retl_o2_plus_16) + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_16) +ENTRY(memcpy_retl_o2_plus_24) + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_24) +ENTRY(memcpy_retl_o2_plus_31) + ba,pt %xcc, __restore_asi + add %o2, 31, %o0 +ENDPROC(memcpy_retl_o2_plus_31) +ENTRY(memcpy_retl_o2_plus_32) + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_32) +ENTRY(memcpy_retl_o2_plus_31_32) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_31_32) +ENTRY(memcpy_retl_o2_plus_31_24) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_31_24) +ENTRY(memcpy_retl_o2_plus_31_16) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_31_16) +ENTRY(memcpy_retl_o2_plus_31_8) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_31_8) +ENTRY(memcpy_retl_o2_plus_63) + ba,pt %xcc, __restore_asi + add %o2, 63, %o0 +ENDPROC(memcpy_retl_o2_plus_63) +ENTRY(memcpy_retl_o2_plus_63_64) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 64, %o0 +ENDPROC(memcpy_retl_o2_plus_63_64) +ENTRY(memcpy_retl_o2_plus_63_56) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 56, %o0 +ENDPROC(memcpy_retl_o2_plus_63_56) +ENTRY(memcpy_retl_o2_plus_63_48) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 48, %o0 +ENDPROC(memcpy_retl_o2_plus_63_48) +ENTRY(memcpy_retl_o2_plus_63_40) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 40, %o0 +ENDPROC(memcpy_retl_o2_plus_63_40) +ENTRY(memcpy_retl_o2_plus_63_32) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_63_32) +ENTRY(memcpy_retl_o2_plus_63_24) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_63_24) +ENTRY(memcpy_retl_o2_plus_63_16) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_63_16) +ENTRY(memcpy_retl_o2_plus_63_8) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 
+ENDPROC(memcpy_retl_o2_plus_63_8) +ENTRY(memcpy_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5) +ENTRY(memcpy_retl_o2_plus_o5_plus_1) + add %o5, 1, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_1) +ENTRY(memcpy_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_4) +ENTRY(memcpy_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8) +ENTRY(memcpy_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16) +ENTRY(memcpy_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24) +ENTRY(memcpy_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32) +ENTRY(memcpy_retl_o2_plus_o5_64) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_64) +ENTRY(memcpy_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1) +ENTRY(memcpy_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_1) +ENTRY(memcpy_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(memcpy_retl_o2_plus_g1_plus_8) +ENTRY(memcpy_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4) +ENTRY(memcpy_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8) +ENTRY(memcpy_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16) +ENTRY(memcpy_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24) +ENTRY(memcpy_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32) +ENTRY(memcpy_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40) +ENTRY(memcpy_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48) +ENTRY(memcpy_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56) +ENTRY(memcpy_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64) +ENTRY(memcpy_retl_o2_plus_o5_plus_64) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64) +ENTRY(memcpy_retl_o2_plus_o3_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp) + add %o3, 1, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp) + add %o3, 4, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp) +ENTRY(memcpy_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_fp) 
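The long run of memcpy_retl_* and *_fp stubs in Memcpy_utils.S are the fault-fixup targets that the EX_LD/EX_ST wrappers used by the copy routines record in __ex_table: when a faulting load or store traps, execution resumes at the named stub, which rebuilds the count of bytes not yet copied from %o2 plus whatever the suffix names (a constant, %o4, %o5, %g1, ...), restores the user %asi via __restore_asi (the _fp variants additionally leave the FPU through VISExitHalf), and returns that count in %o0. The following is a hedged C sketch of the convention the names encode; the struct and function names are illustrative only, not kernel APIs.

/*
 * Illustrative only: the value a stub such as memcpy_retl_o2_plus_o4_plus_8
 * hands back after a fault.  %o2 is the tail length the main loop has not
 * claimed yet, %o4 is the amount still owed by the current loop, and the
 * trailing "+8" adds back the chunk whose access just faulted.
 */
#include <stddef.h>

struct copy_fault_state {
        size_t o2;   /* remainder not yet entered into the loop */
        size_t o4;   /* bytes still outstanding in the current loop */
};

static size_t retl_o2_plus_o4_plus_8(const struct copy_fault_state *st)
{
        return st->o2 + st->o4 + 8;  /* bytes NOT copied, as the caller expects */
}

This "bytes not copied" result is what raw_copy_from_user/raw_copy_to_user report to their callers, which is why every stub funnels through __restore_asi (or __restore_asi_fp) before returning it.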
+ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp) + add %o5, 56, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp) + add %o5, 48, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp) + add %o5, 40, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp) + +#endif diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S new file mode 100644 index 0000000000..e57bc514f5 --- /dev/null +++ b/arch/sparc/lib/NG2copy_from_user.S @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG2copy_from_user.S: Niagara-2 optimized copy from userspace. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ + +#define EX_LD(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_AIUS_4V +#define ASI_BLK_AIUS_4V 0x17 +#endif + +#define FUNC_NAME NG2copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS_4V, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NG2memcpy.S" diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S new file mode 100644 index 0000000000..367c0bf015 --- /dev/null +++ b/arch/sparc/lib/NG2copy_to_user.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG2copy_to_user.S: Niagara-2 optimized copy to userspace. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_AIUS_4V +#define ASI_BLK_AIUS_4V 0x17 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME NG2copy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS_4V +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NG2memcpy.S" diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S new file mode 100644 index 0000000000..bcb21b3a82 --- /dev/null +++ b/arch/sparc/lib/NG2memcpy.S @@ -0,0 +1,594 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG2memcpy.S: Niagara-2 optimized memcpy. + * + * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) + */ + +#ifdef __KERNEL__ +#include <linux/linkage.h> +#include <asm/visasm.h> +#include <asm/asi.h> +#define GLOBAL_SPARE %g7 +#else +#define ASI_PNF 0x82 +#define ASI_BLK_P 0xf0 +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 +#define FPRS_FEF 0x04 +#ifdef MEMCPY_DEBUG +#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \ + clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0; +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#else +#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#endif +#define GLOBAL_SPARE %g5 +#endif + +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef LOAD_BLK +#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest +#endif + +#ifndef STORE +#ifndef MEMCPY_DEBUG +#define STORE(type,src,addr) type src, [addr] +#else +#define STORE(type,src,addr) type##a src, [addr] 0x80 +#endif +#endif + +#ifndef STORE_BLK +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME NG2memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + +#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \ + faligndata %x0, %x1, %f0; \ + faligndata %x1, %x2, %f2; \ + faligndata %x2, %x3, %f4; \ + faligndata %x3, %x4, %f6; \ + faligndata %x4, %x5, %f8; \ + faligndata %x5, %x6, %f10; \ + faligndata %x6, %x7, %f12; \ + faligndata %x7, %x8, %f14; + +#define FREG_MOVE_1(x0) \ + fsrc2 %x0, %f0; +#define FREG_MOVE_2(x0, x1) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; +#define FREG_MOVE_3(x0, x1, x2) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; +#define FREG_MOVE_4(x0, x1, x2, x3) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; \ + fsrc2 %x3, %f6; +#define FREG_MOVE_5(x0, x1, x2, x3, x4) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; \ + fsrc2 %x3, %f6; \ + fsrc2 %x4, %f8; +#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; \ + fsrc2 %x3, %f6; \ + fsrc2 %x4, %f8; \ + fsrc2 %x5, %f10; +#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; \ + fsrc2 %x3, %f6; \ + fsrc2 %x4, %f8; \ + fsrc2 %x5, %f10; \ + fsrc2 %x6, %f12; +#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \ + fsrc2 %x0, %f0; \ + fsrc2 %x1, %f2; \ + fsrc2 %x2, %f4; \ + fsrc2 %x3, %f6; \ + fsrc2 %x4, %f8; \ + fsrc2 %x5, %f10; \ + fsrc2 %x6, %f12; \ + fsrc2 %x7, %f14; +#define FREG_LOAD_1(base, x0) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1) +#define FREG_LOAD_2(base, x0, x1) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); +#define FREG_LOAD_3(base, x0, x1, x2) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); +#define FREG_LOAD_4(base, x0, x1, x2, x3) \ + EX_LD_FP(LOAD(ldd, base + 
0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); +#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); +#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); +#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1); + + .register %g2,#scratch + .register %g3,#scratch + + .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf +__restore_asi: + retl + wr %g0, ASI_AIUS, %asi +ENTRY(NG2_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG2_retl_o2) +ENTRY(NG2_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG2_retl_o2_plus_1) +ENTRY(NG2_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG2_retl_o2_plus_4) +ENTRY(NG2_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(NG2_retl_o2_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_1) + add %o4, 1, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_1) +ENTRY(NG2_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_16) +ENTRY(NG2_retl_o2_plus_g1_fp) + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_64_fp) + add %g1, 64, %g1 + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_1) +ENTRY(NG2_retl_o2_and_7_plus_o4) + and %o2, 7, %o2 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4) +ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8) + and %o2, 7, %o2 + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8) +#endif + + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %o3 + cmp %o2, 0 + be,pn %XCC, 85f + or %o0, %o1, GLOBAL_SPARE + cmp %o2, 16 + blu,a,pn %XCC, 80f + or GLOBAL_SPARE, %o2, GLOBAL_SPARE + + /* 2 blocks 
(128 bytes) is the minimum we can do the block + * copy with. We need to ensure that we'll iterate at least + * once in the block copy loop. At worst we'll need to align + * the destination to a 64-byte boundary which can chew up + * to (64 - 1) bytes from the length before we perform the + * block copy loop. + * + * However, the cut-off point, performance wise, is around + * 4 64-byte blocks. + */ + cmp %o2, (4 * 64) + blu,pt %XCC, 75f + andcc GLOBAL_SPARE, 0x7, %g0 + + /* %o0: dst + * %o1: src + * %o2: len (known to be >= 128) + * + * The block copy loops can use %o4, %g2, %g3 as + * temporaries while copying the data. %o5 must + * be preserved between VISEntryHalf and VISExitHalf + */ + + LOAD(prefetch, %o1 + 0x000, #one_read) + LOAD(prefetch, %o1 + 0x040, #one_read) + LOAD(prefetch, %o1 + 0x080, #one_read) + + /* Align destination on 64-byte boundary. */ + andcc %o0, (64 - 1), %o4 + be,pt %XCC, 2f + sub %o4, 64, %o4 + sub %g0, %o4, %o4 ! bytes to align dst + sub %o2, %o4, %o2 +1: subcc %o4, 1, %o4 + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1) + add %o1, 1, %o1 + bne,pt %XCC, 1b + add %o0, 1, %o0 + +2: + /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve + * o5 from here until we hit VISExitHalf. + */ + VISEntryHalf + + membar #Sync + alignaddr %o1, %g0, %g0 + + add %o1, (64 - 1), %o4 + andn %o4, (64 - 1), %o4 + andn %o2, (64 - 1), %g1 + sub %o2, %g1, %o2 + + and %o1, (64 - 1), %g2 + add %o1, %g1, %o1 + sub %o0, %o4, %g3 + brz,pt %g2, 190f + cmp %g2, 32 + blu,a 5f + cmp %g2, 16 + cmp %g2, 48 + blu,a 4f + cmp %g2, 40 + cmp %g2, 56 + blu 170f + nop + ba,a,pt %xcc, 180f + nop + +4: /* 32 <= low bits < 48 */ + blu 150f + nop + ba,a,pt %xcc, 160f + nop +5: /* 0 < low bits < 32 */ + blu,a 6f + cmp %g2, 8 + cmp %g2, 24 + blu 130f + nop + ba,a,pt %xcc, 140f + nop +6: /* 0 < low bits < 16 */ + bgeu 120f + nop + /* fall through for 0 < low bits < 8 */ +110: sub %o4, 64, %g2 + EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +120: sub %o4, 56, %g2 + FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +130: sub %o4, 48, %g2 + FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_6(f20, f22, f24, f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +140: sub %o4, 40, %g2 + FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, 
f4, f6, f8, f16, f18, f20, f22) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_5(f22, f24, f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +150: sub %o4, 32, %g2 + FREG_LOAD_4(%g2, f0, f2, f4, f6) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_4(f24, f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +160: sub %o4, 24, %g2 + FREG_LOAD_3(%g2, f0, f2, f4) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_3(f26, f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +170: sub %o4, 16, %g2 + FREG_LOAD_2(%g2, f0, f2) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_2(f28, f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +180: sub %o4, 8, %g2 + FREG_LOAD_1(%g2, f0) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) + FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) + FREG_MOVE_1(f30) + subcc %g1, 64, %g1 + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + ba,pt %xcc, 195f + nop + +190: +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + subcc %g1, 64, %g1 + EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64) + add %o4, 64, %o4 + bne,pt %xcc, 1b + LOAD(prefetch, %o4 + 64, #one_read) + +195: + add %o4, %g3, %o0 + membar #Sync + + VISExitHalf + + /* %o2 contains any final bytes still needed to be copied + * over. If anything is left, we copy it one byte at a time. 
+ */ + brz,pt %o2, 85f + sub %o0, %o1, GLOBAL_SPARE + ba,a,pt %XCC, 90f + nop + + .align 64 +75: /* 16 < len <= 64 */ + bne,pn %XCC, 75f + sub %o0, %o1, GLOBAL_SPARE + +72: + andn %o2, 0xf, %o4 + and %o2, 0xf, %o2 +1: subcc %o4, 0x10, %o4 + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16) + add %o1, 0x08, %o1 + EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16) + sub %o1, 0x08, %o1 + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16) + add %o1, 0x8, %o1 + EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8) + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x8, %o2 + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, 85f + nop + ba,pt %xcc, 90f + nop + +75: + andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %o2, %g1, %o2 + +1: subcc %g1, 1, %g1 + EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1) + bgu,pt %icc, 1b + add %o1, 1, %o1 + +2: add %o1, GLOBAL_SPARE, %o0 + andcc %o1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %o2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %xcc, 73b + +8: mov 64, GLOBAL_SPARE + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2) + sub GLOBAL_SPARE, %g1, GLOBAL_SPARE + andn %o2, 0x7, %o4 + sllx %g2, %g1, %g2 +1: add %o1, 0x8, %o1 + EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4) + subcc %o4, 0x8, %o4 + srlx %g3, GLOBAL_SPARE, %o5 + or %o5, %g2, %o5 + EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %o2, 0x7, %o2 + be,pn %icc, 85f + add %o1, %g1, %o1 + ba,pt %xcc, 90f + sub %o0, %o1, GLOBAL_SPARE + + .align 64 +80: /* 0 < len <= 16 */ + andcc GLOBAL_SPARE, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, GLOBAL_SPARE + +1: + subcc %o2, 4, %o2 + EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +85: retl + mov EX_RETVAL(%o3), %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov EX_RETVAL(%o3), %o0 + + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NG2patch.S b/arch/sparc/lib/NG2patch.S new file mode 100644 index 0000000000..72431b2449 --- /dev/null +++ b/arch/sparc/lib/NG2patch.S @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG2patch.S: Patch Ultra-I routines with Niagara-2 variant. + * + * Copyright (C) 2007 David S. 
Miller <davem@davemloft.net> + */ + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara2_patch_copyops + .type niagara2_patch_copyops,#function +niagara2_patch_copyops: + NG_DO_PATCH(memcpy, NG2memcpy) + NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user) + NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user) + retl + nop + .size niagara2_patch_copyops,.-niagara2_patch_copyops diff --git a/arch/sparc/lib/NG4clear_page.S b/arch/sparc/lib/NG4clear_page.S new file mode 100644 index 0000000000..d91d6b5f24 --- /dev/null +++ b/arch/sparc/lib/NG4clear_page.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4copy_page.S: Niagara-4 optimized clear page. + * + * Copyright (C) 2012 (davem@davemloft.net) + */ + +#include <asm/asi.h> +#include <asm/page.h> + + .text + + .register %g3, #scratch + + .align 32 + .globl NG4clear_page + .globl NG4clear_user_page +NG4clear_page: /* %o0=dest */ +NG4clear_user_page: /* %o0=dest, %o1=vaddr */ + set PAGE_SIZE, %g7 + mov 0x20, %g3 +1: stxa %g0, [%o0 + %g0] ASI_ST_BLKINIT_MRU_P + subcc %g7, 0x40, %g7 + stxa %g0, [%o0 + %g3] ASI_ST_BLKINIT_MRU_P + bne,pt %xcc, 1b + add %o0, 0x40, %o0 + membar #StoreLoad|#StoreStore + retl + nop + .size NG4clear_page,.-NG4clear_page + .size NG4clear_user_page,.-NG4clear_user_page diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S new file mode 100644 index 0000000000..0cac15a6db --- /dev/null +++ b/arch/sparc/lib/NG4copy_from_user.S @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4copy_from_user.S: Niagara-4 optimized copy from userspace. + * + * Copyright (C) 2012 David S. Miller (davem@davemloft.net) + */ + +#define EX_LD(x, y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME NG4copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NG4memcpy.S" diff --git a/arch/sparc/lib/NG4copy_page.S b/arch/sparc/lib/NG4copy_page.S new file mode 100644 index 0000000000..581062f8ba --- /dev/null +++ b/arch/sparc/lib/NG4copy_page.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4copy_page.S: Niagara-4 optimized copy page. 
+ * + * Copyright (C) 2012 (davem@davemloft.net) + */ + +#include <asm/asi.h> +#include <asm/page.h> + + .text + .align 32 + + .register %g2, #scratch + .register %g3, #scratch + + .globl NG4copy_user_page +NG4copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ + prefetch [%o1 + 0x000], #n_reads_strong + prefetch [%o1 + 0x040], #n_reads_strong + prefetch [%o1 + 0x080], #n_reads_strong + prefetch [%o1 + 0x0c0], #n_reads_strong + set PAGE_SIZE, %g7 + prefetch [%o1 + 0x100], #n_reads_strong + prefetch [%o1 + 0x140], #n_reads_strong + prefetch [%o1 + 0x180], #n_reads_strong + prefetch [%o1 + 0x1c0], #n_reads_strong +1: + ldx [%o1 + 0x00], %o2 + subcc %g7, 0x40, %g7 + ldx [%o1 + 0x08], %o3 + ldx [%o1 + 0x10], %o4 + ldx [%o1 + 0x18], %o5 + ldx [%o1 + 0x20], %g1 + stxa %o2, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + ldx [%o1 + 0x28], %g2 + stxa %o3, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + ldx [%o1 + 0x30], %g3 + stxa %o4, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + ldx [%o1 + 0x38], %o2 + add %o1, 0x40, %o1 + stxa %o5, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + stxa %g1, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + stxa %g2, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + stxa %g3, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + stxa %o2, [%o0] ASI_ST_BLKINIT_MRU_P + add %o0, 0x08, %o0 + bne,pt %icc, 1b + prefetch [%o1 + 0x200], #n_reads_strong + retl + membar #StoreLoad | #StoreStore + .size NG4copy_user_page,.-NG4copy_user_page diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S new file mode 100644 index 0000000000..c5c9abb3cb --- /dev/null +++ b/arch/sparc/lib/NG4copy_to_user.S @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4copy_to_user.S: Niagara-4 optimized copy to userspace. + * + * Copyright (C) 2012 David S. Miller (davem@davemloft.net) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME NG4copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NG4memcpy.S" diff --git a/arch/sparc/lib/NG4fls.S b/arch/sparc/lib/NG4fls.S new file mode 100644 index 0000000000..2d0991e5b0 --- /dev/null +++ b/arch/sparc/lib/NG4fls.S @@ -0,0 +1,30 @@ +/* NG4fls.S: SPARC optimized fls and __fls for T4 and above. + * + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#include <linux/linkage.h> + +#define LZCNT_O0_G2 \ + .word 0x85b002e8 + + .text + .register %g2, #scratch + .register %g3, #scratch + +ENTRY(NG4fls) + LZCNT_O0_G2 !lzcnt %o0, %g2 + mov 64, %g3 + retl + sub %g3, %g2, %o0 +ENDPROC(NG4fls) + +ENTRY(__NG4fls) + brz,pn %o0, 1f + LZCNT_O0_G2 !lzcnt %o0, %g2 + mov 63, %g3 + sub %g3, %g2, %o0 +1: + retl + nop +ENDPROC(__NG4fls) diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S new file mode 100644 index 0000000000..7ad58ebe0d --- /dev/null +++ b/arch/sparc/lib/NG4memcpy.S @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4memcpy.S: Niagara-4 optimized memcpy. + * + * Copyright (C) 2012 David S. Miller (davem@davemloft.net) + */ + +#ifdef __KERNEL__ +#include <linux/linkage.h> +#include <asm/visasm.h> +#include <asm/asi.h> +#define GLOBAL_SPARE %g7 +#else +#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 +#define FPRS_FEF 0x04 + +/* On T4 it is very expensive to access ASRs like %fprs and + * %asi, avoiding a read or a write can save ~50 cycles. + */ +#define FPU_ENTER \ + rd %fprs, %o5; \ + andcc %o5, FPRS_FEF, %g0; \ + be,a,pn %icc, 999f; \ + wr %g0, FPRS_FEF, %fprs; \ + 999: + +#ifdef MEMCPY_DEBUG +#define VISEntryHalf FPU_ENTER; \ + clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0; +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#else +#define VISEntryHalf FPU_ENTER +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#endif + +#define GLOBAL_SPARE %g5 +#endif + +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#ifndef MEMCPY_DEBUG +#define STORE(type,src,addr) type src, [addr] +#else +#define STORE(type,src,addr) type##a src, [addr] %asi +#endif +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME NG4memcpy +#endif +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + + .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ +#ifdef MEMCPY_DEBUG + wr %g0, 0x80, %asi +#endif + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %XCC, 5 + PREAMBLE + mov %o0, %o3 + brz,pn %o2, .Lexit + cmp %o2, 3 + ble,pn %icc, .Ltiny + cmp %o2, 19 + ble,pn %icc, .Lsmall + or %o0, %o1, %g2 + cmp %o2, 128 + bl,pn %icc, .Lmedium + nop + +.Llarge:/* len >= 0x80 */ + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %g1 + and %g1, 0x7, %g1 + brz,pt %g1, 51f + sub %o2, %g1, %o2 + + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 1, %o1 + subcc %g1, 1, %g1 + add %o0, 1, %o0 + bne,pt %icc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) + +51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) + LOAD(prefetch, %o1 + 0x080, #n_reads_strong) + LOAD(prefetch, %o1 + 0x0c0, #n_reads_strong) + LOAD(prefetch, %o1 + 0x100, #n_reads_strong) + LOAD(prefetch, %o1 + 0x140, #n_reads_strong) + LOAD(prefetch, %o1 + 0x180, #n_reads_strong) + LOAD(prefetch, %o1 + 0x1c0, #n_reads_strong) + LOAD(prefetch, %o1 + 0x200, #n_reads_strong) + + /* Check if we can use the straight fully aligned + * loop, or we require the alignaddr/faligndata variant. + */ + andcc %o1, 0x7, %o5 + bne,pn %icc, .Llarge_src_unaligned + sub %g0, %o0, %g1 + + /* Legitimize the use of initializing stores by getting dest + * to be 64-byte aligned. + */ + and %g1, 0x3f, %g1 + brz,pt %g1, .Llarge_aligned + sub %o2, %g1, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 8, %o1 + subcc %g1, 8, %g1 + add %o0, 8, %o0 + bne,pt %icc, 1b + EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8) + +.Llarge_aligned: + /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ + andn %o2, 0x3f, %o4 + sub %o2, %o4, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4) + add %o1, 0x40, %o1 + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4) + subcc %o4, 0x40, %o4 + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x20, %o5), memcpy_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64) + add %o0, 0x08, %o0 + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56) + add %o0, 0x08, %o0 + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48) + add %o0, 0x08, %o0 + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40) + add %o0, 0x08, %o0 + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32) + add %o0, 0x08, %o0 + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24) + add %o0, 0x08, %o0 + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16) + add %o0, 0x08, %o0 + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8) + add %o0, 0x08, %o0 + bne,pt %icc, 1b + LOAD(prefetch, %o1 + 0x200, #n_reads_strong) + + membar #StoreLoad | #StoreStore + + brz,pn %o2, .Lexit + cmp %o2, 19 + ble,pn %icc, .Lsmall_unaligned + nop + ba,a,pt %icc, .Lmedium_noprefetch + +.Lexit: retl + mov EX_RETVAL(%o3), %o0 + +.Llarge_src_unaligned: +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail) +#else + VISEntryHalf +#endif + andn %o2, 0x3f, %o4 + sub %o2, %o4, %o2 + alignaddr %o1, %g0, %g1 + add %o1, %o4, %o1 + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4) + subcc %o4, 0x40, %o4 + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64) + 
EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64) + faligndata %f2, %f4, %f18 + add %g1, 0x40, %g1 + faligndata %f4, %f6, %f20 + faligndata %f6, %f8, %f22 + faligndata %f8, %f10, %f24 + faligndata %f10, %f12, %f26 + faligndata %f12, %f14, %f28 + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8) + add %o0, 0x40, %o0 + bne,pt %icc, 1b + LOAD(prefetch, %g1 + 0x200, #n_reads_strong) +#ifdef NON_USER_COPY + VISExitHalfFast +#else + VISExitHalf +#endif + brz,pn %o2, .Lexit + cmp %o2, 19 + ble,pn %icc, .Lsmall_unaligned + nop + ba,a,pt %icc, .Lmedium_unaligned + +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail: + or %o0, %o1, %g2 +#endif +.Lmedium: + LOAD(prefetch, %o1 + 0x40, #n_reads_strong) + andcc %g2, 0x7, %g0 + bne,pn %icc, .Lmedium_unaligned + nop +.Lmedium_noprefetch: + andncc %o2, 0x20 - 1, %o5 + be,pn %icc, 2f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) + add %o1, 0x20, %o1 + subcc %o5, 0x20, %o5 + EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) + bne,pt %icc, 1b + add %o0, 0x20, %o0 +2: andcc %o2, 0x18, %o5 + be,pt %icc, 3f + sub %o2, %o5, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + add %o0, 0x08, %o0 + subcc %o5, 0x08, %o5 + bne,pt %icc, 1b + EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) +3: brz,pt %o2, .Lexit + cmp %o2, 0x04 + bl,pn %icc, .Ltiny + nop + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2) + add %o1, 0x04, %o1 + add %o0, 0x04, %o0 + subcc %o2, 0x04, %o2 + bne,pn %icc, .Ltiny + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4) + ba,a,pt %icc, .Lexit +.Lmedium_unaligned: + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %g1 + and %g1, 0x7, %g1 + brz,pt %g1, 2f + sub %o2, %g1, %o2 + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) + add %o1, 1, %o1 + subcc %g1, 1, %g1 + add %o0, 1, %o0 + bne,pt %icc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) +2: + and %o1, 0x7, %g1 + brz,pn %g1, .Lmedium_noprefetch + sll %g1, 3, %g1 + mov 64, %g2 + sub %g2, %g1, %g2 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) + sllx %o4, %g1, %o4 + andn %o2, 0x08 - 1, %o5 + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) + add %o1, 0x08, %o1 + subcc %o5, 0x08, %o5 + srlx %g3, %g2, GLOBAL_SPARE + or GLOBAL_SPARE, %o4, GLOBAL_SPARE + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) + add %o0, 0x08, %o0 + bne,pt %icc, 1b + sllx %g3, %g1, %o4 + srl %g1, 3, %g1 + add %o1, %g1, %o1 + brz,pn %o2, .Lexit + nop + ba,pt %icc, .Lsmall_unaligned + +.Ltiny: + EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %icc, .Lexit + EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2) + subcc %o2, 1, %o2 + be,pn %icc, .Lexit + EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2) + ba,pt %icc, .Lexit + EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2) + +.Lsmall: + andcc %g2, 0x3, %g0 + bne,pn %icc, .Lsmall_unaligned + andn %o2, 0x4 - 1, %o5 + sub %o2, %o5, %o2 +1: + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + add %o1, 0x04, %o1 + subcc %o5, 0x04, %o5 + add %o0, 0x04, %o0 + bne,pt %icc, 1b + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) + brz,pt %o2, .Lexit + nop + ba,a,pt %icc, .Ltiny + +.Lsmall_unaligned: +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) + add %o1, 1, %o1 + add %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %icc, 1b + EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1) + ba,a,pt %icc, .Lexit + nop + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S new file mode 100644 index 0000000000..f81ee5419e --- /dev/null +++ b/arch/sparc/lib/NG4memset.S @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4memset.S: Niagara-4 optimized memset/bzero. + * + * Copyright (C) 2012 David S. 
Miller (davem@davemloft.net) + */ + +#include <asm/asi.h> + + .register %g2, #scratch + .register %g3, #scratch + + .text + .align 32 + .globl NG4memset +NG4memset: + andcc %o1, 0xff, %o4 + be,pt %icc, 1f + mov %o2, %o1 + sllx %o4, 8, %g1 + or %g1, %o4, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %icc, 1f + or %g1, %o2, %o4 + .size NG4memset,.-NG4memset + + .align 32 + .globl NG4bzero +NG4bzero: + clr %o4 +1: cmp %o1, 16 + ble %icc, .Ltiny + mov %o0, %o3 + sub %g0, %o0, %g1 + and %g1, 0x7, %g1 + brz,pt %g1, .Laligned8 + sub %o1, %g1, %o1 +1: stb %o4, [%o0 + 0x00] + subcc %g1, 1, %g1 + bne,pt %icc, 1b + add %o0, 1, %o0 +.Laligned8: + cmp %o1, 64 + (64 - 8) + ble .Lmedium + sub %g0, %o0, %g1 + andcc %g1, (64 - 1), %g1 + brz,pn %g1, .Laligned64 + sub %o1, %g1, %o1 +1: stx %o4, [%o0 + 0x00] + subcc %g1, 8, %g1 + bne,pt %icc, 1b + add %o0, 0x8, %o0 +.Laligned64: + andn %o1, 64 - 1, %g1 + sub %o1, %g1, %o1 + brnz,pn %o4, .Lnon_bzero_loop + mov 0x20, %g2 +1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + subcc %g1, 0x40, %g1 + stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + bne,pt %icc, 1b + add %o0, 0x40, %o0 +.Lpostloop: + cmp %o1, 8 + bl,pn %icc, .Ltiny + membar #StoreStore|#StoreLoad +.Lmedium: + andn %o1, 0x7, %g1 + sub %o1, %g1, %o1 +1: stx %o4, [%o0 + 0x00] + subcc %g1, 0x8, %g1 + bne,pt %icc, 1b + add %o0, 0x08, %o0 + andcc %o1, 0x4, %g1 + be,pt %icc, .Ltiny + sub %o1, %g1, %o1 + stw %o4, [%o0 + 0x00] + add %o0, 0x4, %o0 +.Ltiny: + cmp %o1, 0 + be,pn %icc, .Lexit +1: subcc %o1, 1, %o1 + stb %o4, [%o0 + 0x00] + bne,pt %icc, 1b + add %o0, 1, %o0 +.Lexit: + retl + mov %o3, %o0 +.Lnon_bzero_loop: + mov 0x08, %g3 + mov 0x28, %o5 +1: stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + subcc %g1, 0x40, %g1 + stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P + add %o0, 0x10, %o0 + stxa %o4, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %o5] ASI_BLK_INIT_QUAD_LDD_P + bne,pt %icc, 1b + add %o0, 0x30, %o0 + ba,a,pt %icc, .Lpostloop + nop + .size NG4bzero,.-NG4bzero diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S new file mode 100644 index 0000000000..37866175c9 --- /dev/null +++ b/arch/sparc/lib/NG4patch.S @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NG4patch.S: Patch Ultra-I routines with Niagara-4 variant. + * + * Copyright (C) 2012 David S. 
Miller <davem@davemloft.net> + */ + +#include <linux/linkage.h> + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara4_patch_copyops + .type niagara4_patch_copyops,#function +niagara4_patch_copyops: + NG_DO_PATCH(memcpy, NG4memcpy) + NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user) + NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user) + retl + nop + .size niagara4_patch_copyops,.-niagara4_patch_copyops + + .globl niagara4_patch_bzero + .type niagara4_patch_bzero,#function +niagara4_patch_bzero: + NG_DO_PATCH(memset, NG4memset) + NG_DO_PATCH(__bzero, NG4bzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop + .size niagara4_patch_bzero,.-niagara4_patch_bzero + + .globl niagara4_patch_pageops + .type niagara4_patch_pageops,#function +niagara4_patch_pageops: + NG_DO_PATCH(copy_user_page, NG4copy_user_page) + NG_DO_PATCH(_clear_page, NG4clear_page) + NG_DO_PATCH(clear_user_page, NG4clear_user_page) + retl + nop + .size niagara4_patch_pageops,.-niagara4_patch_pageops + +ENTRY(niagara4_patch_fls) + NG_DO_PATCH(fls, NG4fls) + NG_DO_PATCH(__fls, __NG4fls) + retl + nop +ENDPROC(niagara4_patch_fls) diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S new file mode 100644 index 0000000000..19327614d5 --- /dev/null +++ b/arch/sparc/lib/NGbzero.S @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGbzero.S: Niagara optimized memset/clear_user. + * + * Copyright (C) 2006 David S. 
Miller (davem@davemloft.net) + */ +#include <asm/asi.h> + +#define EX_ST(x,y) \ +98: x,y; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_o1_asi;\ + .text; \ + .align 4; + + .text + + .globl NGmemset + .type NGmemset, #function +NGmemset: /* %o0=buf, %o1=pat, %o2=len */ + and %o1, 0xff, %o3 + mov %o2, %o1 + sllx %o3, 8, %g1 + or %g1, %o3, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %xcc, 1f + or %g1, %o2, %o2 + + .globl NGbzero + .type NGbzero, #function +NGbzero: + clr %o2 +1: brz,pn %o1, NGbzero_return + mov %o0, %o3 + + /* %o5: saved %asi, restored at NGbzero_done + * %g7: store-init %asi to use + * %o4: non-store-init %asi to use + */ + rd %asi, %o5 + mov ASI_BLK_INIT_QUAD_LDD_P, %g7 + mov ASI_P, %o4 + wr %o4, 0x0, %asi + +NGbzero_from_clear_user: + cmp %o1, 15 + bl,pn %icc, NGbzero_tiny + andcc %o0, 0x7, %g1 + be,pt %xcc, 2f + mov 8, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %o0, 1, %o0 +2: cmp %o1, 128 + bl,pn %icc, NGbzero_medium + andcc %o0, (64 - 1), %g1 + be,pt %xcc, NGbzero_pre_loop + mov 64, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 + +NGbzero_pre_loop: + wr %g7, 0x0, %asi + andn %o1, (64 - 1), %g1 + sub %o1, %g1, %o1 +NGbzero_loop: + EX_ST(stxa %o2, [%o0 + 0x00] %asi) + EX_ST(stxa %o2, [%o0 + 0x08] %asi) + EX_ST(stxa %o2, [%o0 + 0x10] %asi) + EX_ST(stxa %o2, [%o0 + 0x18] %asi) + EX_ST(stxa %o2, [%o0 + 0x20] %asi) + EX_ST(stxa %o2, [%o0 + 0x28] %asi) + EX_ST(stxa %o2, [%o0 + 0x30] %asi) + EX_ST(stxa %o2, [%o0 + 0x38] %asi) + subcc %g1, 64, %g1 + bne,pt %xcc, NGbzero_loop + add %o0, 64, %o0 + + membar #Sync + wr %o4, 0x0, %asi + brz,pn %o1, NGbzero_done +NGbzero_medium: + andncc %o1, 0x7, %g1 + be,pn %xcc, 2f + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 +2: brz,pt %o1, NGbzero_done + nop + +NGbzero_tiny: +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 + + /* fallthrough */ + +NGbzero_done: + wr %o5, 0x0, %asi + +NGbzero_return: + retl + mov %o3, %o0 + .size NGbzero, .-NGbzero + .size NGmemset, .-NGmemset + + .globl NGclear_user + .type NGclear_user, #function +NGclear_user: /* %o0=buf, %o1=len */ + rd %asi, %o5 + brz,pn %o1, NGbzero_done + clr %o3 + cmp %o5, ASI_AIUS + bne,pn %icc, NGbzero + clr %o2 + mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7 + ba,pt %xcc, NGbzero_from_clear_user + mov ASI_AIUS, %o4 + .size NGclear_user, .-NGclear_user + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_bzero + .type niagara_patch_bzero,#function +niagara_patch_bzero: + NG_DO_PATCH(memset, NGmemset) + NG_DO_PATCH(__bzero, NGbzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop + .size niagara_patch_bzero,.-niagara_patch_bzero diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S new file mode 100644 index 0000000000..9abc49fcdb --- /dev/null +++ 
b/arch/sparc/lib/NGcopy_from_user.S @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGcopy_from_user.S: Niagara optimized copy from userspace. + * + * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) + */ + +#define EX_LD(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME NGcopy_from_user +#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest +#define LOAD_TWIN(addr_reg,dest0,dest1) \ + ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 +#define EX_RETVAL(x) %g0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NGmemcpy.S" diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S new file mode 100644 index 0000000000..9cbe2f18e5 --- /dev/null +++ b/arch/sparc/lib/NGcopy_to_user.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGcopy_to_user.S: Niagara optimized copy to userspace. + * + * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME NGcopy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define EX_RETVAL(x) %g0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "NGmemcpy.S" diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S new file mode 100644 index 0000000000..ee51c12306 --- /dev/null +++ b/arch/sparc/lib/NGmemcpy.S @@ -0,0 +1,509 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGmemcpy.S: Niagara optimized memcpy. + * + * Copyright (C) 2006, 2007 David S. 
Miller (davem@davemloft.net) + */ + +#ifdef __KERNEL__ +#include <linux/linkage.h> +#include <asm/asi.h> +#include <asm/thread_info.h> +#define GLOBAL_SPARE %g7 +#define RESTORE_ASI(TMP) \ + wr %g0, ASI_AIUS, %asi +#else +#define GLOBAL_SPARE %g5 +#define RESTORE_ASI(TMP) \ + wr %g0, ASI_PNF, %asi +#endif + +#ifdef __sparc_v9__ +#define SAVE_AMOUNT 128 +#else +#define SAVE_AMOUNT 64 +#endif + +#ifndef STORE_ASI +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif + +#ifndef LOAD +#ifndef MEMCPY_DEBUG +#define LOAD(type,addr,dest) type [addr], dest +#else +#define LOAD(type,addr,dest) type##a [addr] 0x80, dest +#endif +#endif + +#ifndef LOAD_TWIN +#define LOAD_TWIN(addr_reg,dest0,dest1) \ + ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0 +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef STORE_INIT +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_INIT(src,addr) stxa src, [addr] %asi +#else +#define STORE_INIT(src,addr) stx src, [addr + 0x00] +#endif +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME NGmemcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + + .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_asi: + ret + wr %g0, ASI_AIUS, %asi + restore +ENTRY(NG_ret_i2_plus_i4_plus_1) + ba,pt %xcc, __restore_asi + add %i2, %i5, %i0 +ENDPROC(NG_ret_i2_plus_i4_plus_1) +ENTRY(NG_ret_i2_plus_g1) + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1) +ENTRY(NG_ret_i2_plus_g1_minus_8) + sub %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_8) +ENTRY(NG_ret_i2_plus_g1_minus_16) + sub %g1, 16, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_16) +ENTRY(NG_ret_i2_plus_g1_minus_24) + sub %g1, 24, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_24) +ENTRY(NG_ret_i2_plus_g1_minus_32) + sub %g1, 32, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_32) +ENTRY(NG_ret_i2_plus_g1_minus_40) + sub %g1, 40, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_40) +ENTRY(NG_ret_i2_plus_g1_minus_48) + sub %g1, 48, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_48) +ENTRY(NG_ret_i2_plus_g1_minus_56) + sub %g1, 56, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_56) +ENTRY(NG_ret_i2_plus_i4) + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4) +ENTRY(NG_ret_i2_plus_i4_minus_8) + sub %i4, 8, %i4 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4_minus_8) +ENTRY(NG_ret_i2_plus_8) + ba,pt %xcc, __restore_asi + add %i2, 8, %i0 +ENDPROC(NG_ret_i2_plus_8) +ENTRY(NG_ret_i2_plus_4) + ba,pt %xcc, __restore_asi + add %i2, 4, %i0 +ENDPROC(NG_ret_i2_plus_4) +ENTRY(NG_ret_i2_plus_1) + ba,pt %xcc, __restore_asi + add %i2, 1, %i0 +ENDPROC(NG_ret_i2_plus_1) +ENTRY(NG_ret_i2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_plus_1) +ENTRY(NG_ret_i2) + ba,pt %xcc, __restore_asi + mov %i2, %i0 +ENDPROC(NG_ret_i2) +ENTRY(NG_ret_i2_and_7_plus_i4) + and %i2, 7, %i2 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_and_7_plus_i4) +#endif + + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function 
+FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ + PREAMBLE + save %sp, -SAVE_AMOUNT, %sp + srlx %i2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + mov %i0, %o0 + cmp %i2, 0 + be,pn %XCC, 85f + or %o0, %i1, %i3 + cmp %i2, 16 + blu,a,pn %XCC, 80f + or %i3, %i2, %i3 + + /* 2 blocks (128 bytes) is the minimum we can do the block + * copy with. We need to ensure that we'll iterate at least + * once in the block copy loop. At worst we'll need to align + * the destination to a 64-byte boundary which can chew up + * to (64 - 1) bytes from the length before we perform the + * block copy loop. + */ + cmp %i2, (2 * 64) + blu,pt %XCC, 70f + andcc %i3, 0x7, %g0 + + /* %o0: dst + * %i1: src + * %i2: len (known to be >= 128) + * + * The block copy loops will use %i4/%i5,%g2/%g3 as + * temporaries while copying the data. + */ + + LOAD(prefetch, %i1, #one_read) + wr %g0, STORE_ASI, %asi + + /* Align destination on 64-byte boundary. */ + andcc %o0, (64 - 1), %i4 + be,pt %XCC, 2f + sub %i4, 64, %i4 + sub %g0, %i4, %i4 ! bytes to align dst + sub %i2, %i4, %i2 +1: subcc %i4, 1, %i4 + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1) + add %i1, 1, %i1 + bne,pt %XCC, 1b + add %o0, 1, %o0 + + /* If the source is on a 16-byte boundary we can do + * the direct block copy loop. If it is 8-byte aligned + * we can do the 16-byte loads offset by -8 bytes and the + * init stores offset by one register. + * + * If the source is not even 8-byte aligned, we need to do + * shifting and masking (basically integer faligndata). + * + * The careful bit with init stores is that if we store + * to any part of the cache line we have to store the whole + * cacheline else we can end up with corrupt L2 cache line + * contents. Since the loop works on 64-bytes of 64-byte + * aligned store data at a time, this is easy to ensure. + */ +2: + andcc %i1, (16 - 1), %i4 + andn %i2, (64 - 1), %g1 ! block copy loop iterator + be,pt %XCC, 50f + sub %i2, %g1, %i2 ! final sub-block copy bytes + + cmp %i4, 8 + be,pt %XCC, 10f + sub %i1, %i4, %i1 + + /* Neither 8-byte nor 16-byte aligned, shift and mask. 
*/ + and %i4, 0x7, GLOBAL_SPARE + sll GLOBAL_SPARE, 3, GLOBAL_SPARE + mov 64, %i5 + EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1) + sub %i5, GLOBAL_SPARE, %i5 + mov 16, %o4 + mov 32, %o5 + mov 48, %o7 + mov 64, %i3 + + bg,pn %XCC, 9f + nop + +#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \ + sllx WORD1, POST_SHIFT, WORD1; \ + srlx WORD2, PRE_SHIFT, TMP; \ + sllx WORD2, POST_SHIFT, WORD2; \ + or WORD1, TMP, WORD1; \ + srlx WORD3, PRE_SHIFT, TMP; \ + or WORD2, TMP, WORD2; + +8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) + MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) + LOAD(prefetch, %i1 + %i3, #one_read) + + EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) + MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) + add %i1, 64, %i1 + MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) + + subcc %g1, 64, %g1 + bne,pt %XCC, 8b + add %o0, 64, %o0 + + ba,pt %XCC, 60f + add %i1, %i4, %i1 + +9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) + MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) + LOAD(prefetch, %i1 + %i3, #one_read) + + EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) + MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) + add %i1, 64, %i1 + MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) + + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) + + subcc %g1, 64, %g1 + bne,pt %XCC, 9b + add %o0, 64, %o0 + + ba,pt %XCC, 60f + add %i1, %i4, %i1 + +10: /* Destination is 64-byte aligned, source was only 8-byte + * aligned but it has been subtracted by 8 and we perform + * one twin load ahead, then add 8 back into source when + * we finish the loop. + */ + EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1) + mov 16, %o7 + mov 32, %g2 + mov 48, %g3 + mov 64, %o1 +1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) + LOAD(prefetch, %i1 + %o1, #one_read) + EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! 
initializes cache line + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48) + add %i1, 64, %i1 + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + + ba,pt %XCC, 60f + add %i1, 0x8, %i1 + +50: /* Destination is 64-byte aligned, and source is 16-byte + * aligned. + */ + mov 16, %o7 + mov 32, %g2 + mov 48, %g3 + mov 64, %o1 +1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) + LOAD(prefetch, %i1 + %o1, #one_read) + EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line + EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + add %i1, 64, %i1 + EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + /* fall through */ + +60: + membar #Sync + + /* %i2 contains any final bytes still needed to be copied + * over. If anything is left, we copy it one byte at a time. 
+ */ + RESTORE_ASI(%i3) + brz,pt %i2, 85f + sub %o0, %i1, %i3 + ba,a,pt %XCC, 90f + nop + + .align 64 +70: /* 16 < len <= 64 */ + bne,pn %XCC, 75f + sub %o0, %i1, %i3 + +72: + andn %i2, 0xf, %i4 + and %i2, 0xf, %i2 +1: subcc %i4, 0x10, %i4 + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4) + add %i1, 0x08, %i1 + EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4) + sub %i1, 0x08, %i1 + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4) + add %i1, 0x8, %i1 + EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8) + bgu,pt %XCC, 1b + add %i1, 0x8, %i1 +73: andcc %i2, 0x8, %g0 + be,pt %XCC, 1f + nop + sub %i2, 0x8, %i2 + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8) + add %i1, 0x8, %i1 +1: andcc %i2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %i2, 0x4, %i2 + EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4) + add %i1, 0x4, %i1 +1: cmp %i2, 0 + be,pt %XCC, 85f + nop + ba,pt %xcc, 90f + nop + +75: + andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %i2, %g1, %i2 + +1: subcc %g1, 1, %g1 + EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1) + EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1) + bgu,pt %icc, 1b + add %i1, 1, %i1 + +2: add %i1, %i3, %o0 + andcc %i1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %i2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %xcc, 73b + +8: mov 64, %i3 + andn %i1, 0x7, %i1 + EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2) + sub %i3, %g1, %i3 + andn %i2, 0x7, %i4 + sllx %g2, %g1, %g2 +1: add %i1, 0x8, %i1 + EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4) + subcc %i4, 0x8, %i4 + srlx %g3, %i3, %i5 + or %i5, %g2, %i5 + EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %i2, 0x7, %i2 + be,pn %icc, 85f + add %i1, %g1, %i1 + ba,pt %xcc, 90f + sub %o0, %i1, %i3 + + .align 64 +80: /* 0 < len <= 16 */ + andcc %i3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %i1, %i3 + +1: + subcc %i2, 4, %i2 + EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4) + bgu,pt %XCC, 1b + add %i1, 4, %i1 + +85: ret + restore EX_RETVAL(%i0), %g0, %o0 + + .align 32 +90: + subcc %i2, 1, %i2 + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1) + EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1) + bgu,pt %XCC, 90b + add %i1, 1, %i1 + ret + restore EX_RETVAL(%i0), %g0, %o0 + + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NGpage.S b/arch/sparc/lib/NGpage.S new file mode 100644 index 0000000000..88fec78180 --- /dev/null +++ b/arch/sparc/lib/NGpage.S @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGpage.S: Niagara optimize clear and copy page. + * + * Copyright (C) 2006 (davem@davemloft.net) + */ + +#include <asm/asi.h> +#include <asm/page.h> + + .text + .align 32 + + /* This is heavily simplified from the sun4u variants + * because Niagara does not have any D-cache aliasing issues + * and also we don't need to use the FPU in order to implement + * an optimal page copy/clear. 
+ */ + +NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ + save %sp, -192, %sp + rd %asi, %g3 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + set PAGE_SIZE, %g7 + prefetch [%i1 + 0x00], #one_read + prefetch [%i1 + 0x40], #one_read + +1: prefetch [%i1 + 0x80], #one_read + prefetch [%i1 + 0xc0], #one_read + ldda [%i1 + 0x00] %asi, %o2 + ldda [%i1 + 0x10] %asi, %o4 + ldda [%i1 + 0x20] %asi, %l2 + ldda [%i1 + 0x30] %asi, %l4 + stxa %o2, [%i0 + 0x00] %asi + stxa %o3, [%i0 + 0x08] %asi + stxa %o4, [%i0 + 0x10] %asi + stxa %o5, [%i0 + 0x18] %asi + stxa %l2, [%i0 + 0x20] %asi + stxa %l3, [%i0 + 0x28] %asi + stxa %l4, [%i0 + 0x30] %asi + stxa %l5, [%i0 + 0x38] %asi + ldda [%i1 + 0x40] %asi, %o2 + ldda [%i1 + 0x50] %asi, %o4 + ldda [%i1 + 0x60] %asi, %l2 + ldda [%i1 + 0x70] %asi, %l4 + stxa %o2, [%i0 + 0x40] %asi + stxa %o3, [%i0 + 0x48] %asi + stxa %o4, [%i0 + 0x50] %asi + stxa %o5, [%i0 + 0x58] %asi + stxa %l2, [%i0 + 0x60] %asi + stxa %l3, [%i0 + 0x68] %asi + stxa %l4, [%i0 + 0x70] %asi + stxa %l5, [%i0 + 0x78] %asi + add %i1, 128, %i1 + subcc %g7, 128, %g7 + bne,pt %xcc, 1b + add %i0, 128, %i0 + wr %g3, 0x0, %asi + membar #Sync + ret + restore + + .align 32 + .globl NGclear_page + .globl NGclear_user_page +NGclear_page: /* %o0=dest */ +NGclear_user_page: /* %o0=dest, %o1=vaddr */ + rd %asi, %g3 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + set PAGE_SIZE, %g7 + +1: stxa %g0, [%o0 + 0x00] %asi + stxa %g0, [%o0 + 0x08] %asi + stxa %g0, [%o0 + 0x10] %asi + stxa %g0, [%o0 + 0x18] %asi + stxa %g0, [%o0 + 0x20] %asi + stxa %g0, [%o0 + 0x28] %asi + stxa %g0, [%o0 + 0x30] %asi + stxa %g0, [%o0 + 0x38] %asi + stxa %g0, [%o0 + 0x40] %asi + stxa %g0, [%o0 + 0x48] %asi + stxa %g0, [%o0 + 0x50] %asi + stxa %g0, [%o0 + 0x58] %asi + stxa %g0, [%o0 + 0x60] %asi + stxa %g0, [%o0 + 0x68] %asi + stxa %g0, [%o0 + 0x70] %asi + stxa %g0, [%o0 + 0x78] %asi + stxa %g0, [%o0 + 0x80] %asi + stxa %g0, [%o0 + 0x88] %asi + stxa %g0, [%o0 + 0x90] %asi + stxa %g0, [%o0 + 0x98] %asi + stxa %g0, [%o0 + 0xa0] %asi + stxa %g0, [%o0 + 0xa8] %asi + stxa %g0, [%o0 + 0xb0] %asi + stxa %g0, [%o0 + 0xb8] %asi + stxa %g0, [%o0 + 0xc0] %asi + stxa %g0, [%o0 + 0xc8] %asi + stxa %g0, [%o0 + 0xd0] %asi + stxa %g0, [%o0 + 0xd8] %asi + stxa %g0, [%o0 + 0xe0] %asi + stxa %g0, [%o0 + 0xe8] %asi + stxa %g0, [%o0 + 0xf0] %asi + stxa %g0, [%o0 + 0xf8] %asi + subcc %g7, 256, %g7 + bne,pt %xcc, 1b + add %o0, 256, %o0 + wr %g3, 0x0, %asi + membar #Sync + retl + nop + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_pageops + .type niagara_patch_pageops,#function +niagara_patch_pageops: + NG_DO_PATCH(copy_user_page, NGcopy_user_page) + NG_DO_PATCH(_clear_page, NGclear_page) + NG_DO_PATCH(clear_user_page, NGclear_user_page) + retl + nop + .size niagara_patch_pageops,.-niagara_patch_pageops diff --git a/arch/sparc/lib/NGpatch.S b/arch/sparc/lib/NGpatch.S new file mode 100644 index 0000000000..e9f843f106 --- /dev/null +++ b/arch/sparc/lib/NGpatch.S @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* NGpatch.S: Patch Ultra-I routines with Niagara variant. + * + * Copyright (C) 2006 David S. 
Miller <davem@davemloft.net> + */ + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_copyops + .type niagara_patch_copyops,#function +niagara_patch_copyops: + NG_DO_PATCH(memcpy, NGmemcpy) + NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user) + NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user) + retl + nop + .size niagara_patch_copyops,.-niagara_patch_copyops diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c new file mode 100644 index 0000000000..cde4c9a51b --- /dev/null +++ b/arch/sparc/lib/PeeCeeI.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PeeCeeI.c: The emerging standard... + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + */ + +#include <linux/module.h> + +#include <asm/io.h> +#include <asm/byteorder.h> + +void outsb(unsigned long __addr, const void *src, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + const u8 *p = src; + + while (count--) + __raw_writeb(*p++, addr); +} +EXPORT_SYMBOL(outsb); + +void outsw(unsigned long __addr, const void *src, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + + while (count--) { + __raw_writew(*(u16 *)src, addr); + src += sizeof(u16); + } +} +EXPORT_SYMBOL(outsw); + +void outsl(unsigned long __addr, const void *src, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + u32 l, l2; + + if (!count) + return; + + switch (((unsigned long)src) & 0x3) { + case 0x0: + /* src is naturally aligned */ + while (count--) { + __raw_writel(*(u32 *)src, addr); + src += sizeof(u32); + } + break; + case 0x2: + /* 2-byte alignment */ + while (count--) { + l = (*(u16 *)src) << 16; + l |= *(u16 *)(src + sizeof(u16)); + __raw_writel(l, addr); + src += sizeof(u32); + } + break; + case 0x1: + /* Hold three bytes in l each time, grab a byte from l2 */ + l = (*(u8 *)src) << 24; + l |= (*(u16 *)(src + sizeof(u8))) << 8; + src += sizeof(u8) + sizeof(u16); + while (count--) { + l2 = *(u32 *)src; + l |= (l2 >> 24); + __raw_writel(l, addr); + l = l2 << 8; + src += sizeof(u32); + } + break; + case 0x3: + /* Hold a byte in l each time, grab 3 bytes from l2 */ + l = (*(u8 *)src) << 24; + src += sizeof(u8); + while (count--) { + l2 = *(u32 *)src; + l |= (l2 >> 8); + __raw_writel(l, addr); + l = l2 << 24; + src += sizeof(u32); + } + break; + } +} +EXPORT_SYMBOL(outsl); + +void insb(unsigned long __addr, void *dst, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + + if (count) { + u32 *pi; + u8 *pb = dst; + + while ((((unsigned long)pb) & 0x3) && count--) + *pb++ = __raw_readb(addr); + pi = (u32 *)pb; + while (count >= 4) { + u32 w; + + w = (__raw_readb(addr) << 24); + w |= (__raw_readb(addr) << 16); + w |= (__raw_readb(addr) << 8); + w |= (__raw_readb(addr) << 0); + *pi++ = w; + count -= 4; + } + pb = (u8 *)pi; + while (count--) + *pb++ = __raw_readb(addr); + } +} +EXPORT_SYMBOL(insb); + +void insw(unsigned long __addr, void *dst, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + + if (count) { + u16 *ps = dst; + u32 *pi; + + if (((unsigned long)ps) & 0x2) { + *ps++ = 
__raw_readw(addr); + count--; + } + pi = (u32 *)ps; + while (count >= 2) { + u32 w; + + w = __raw_readw(addr) << 16; + w |= __raw_readw(addr) << 0; + *pi++ = w; + count -= 2; + } + ps = (u16 *)pi; + if (count) + *ps = __raw_readw(addr); + } +} +EXPORT_SYMBOL(insw); + +void insl(unsigned long __addr, void *dst, unsigned long count) +{ + void __iomem *addr = (void __iomem *) __addr; + + if (count) { + if ((((unsigned long)dst) & 0x3) == 0) { + u32 *pi = dst; + while (count--) + *pi++ = __raw_readl(addr); + } else { + u32 l = 0, l2, *pi; + u16 *ps; + u8 *pb; + + switch (((unsigned long)dst) & 3) { + case 0x2: + ps = dst; + count -= 1; + l = __raw_readl(addr); + *ps++ = l; + pi = (u32 *)ps; + while (count--) { + l2 = __raw_readl(addr); + *pi++ = (l << 16) | (l2 >> 16); + l = l2; + } + ps = (u16 *)pi; + *ps = l; + break; + + case 0x1: + pb = dst; + count -= 1; + l = __raw_readl(addr); + *pb++ = l >> 24; + ps = (u16 *)pb; + *ps++ = ((l >> 8) & 0xffff); + pi = (u32 *)ps; + while (count--) { + l2 = __raw_readl(addr); + *pi++ = (l << 24) | (l2 >> 8); + l = l2; + } + pb = (u8 *)pi; + *pb = l; + break; + + case 0x3: + pb = (u8 *)dst; + count -= 1; + l = __raw_readl(addr); + *pb++ = l >> 24; + pi = (u32 *)pb; + while (count--) { + l2 = __raw_readl(addr); + *pi++ = (l << 8) | (l2 >> 24); + l = l2; + } + ps = (u16 *)pi; + *ps++ = ((l >> 8) & 0xffff); + pb = (u8 *)ps; + *pb = l; + break; + } + } + } +} +EXPORT_SYMBOL(insl); + diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S new file mode 100644 index 0000000000..bf08d1c788 --- /dev/null +++ b/arch/sparc/lib/U1copy_from_user.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#define EX_LD(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define FUNC_NAME raw_copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest +#define EX_RETVAL(x) 0 + + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop; \ + +#include "U1memcpy.S" diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S new file mode 100644 index 0000000000..15169851e7 --- /dev/null +++ b/arch/sparc/lib/U1copy_to_user.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define FUNC_NAME raw_copy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS +#define EX_RETVAL(x) 0 + + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. 
+ */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop; \ + +#include "U1memcpy.S" diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S new file mode 100644 index 0000000000..635398ec75 --- /dev/null +++ b/arch/sparc/lib/U1memcpy.S @@ -0,0 +1,685 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy. + * + * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#ifdef __KERNEL__ +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/visasm.h> +#include <asm/asi.h> +#define GLOBAL_SPARE g7 +#else +#define GLOBAL_SPARE g5 +#define ASI_BLK_P 0xf0 +#define FPRS_FEF 0x04 +#ifdef MEMCPY_DEBUG +#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \ + clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0; +#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#else +#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs +#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#endif +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef LOAD_BLK +#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef STORE_BLK +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + +#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + faligndata %f1, %f2, %f48; \ + faligndata %f2, %f3, %f50; \ + faligndata %f3, %f4, %f52; \ + faligndata %f4, %f5, %f54; \ + faligndata %f5, %f6, %f56; \ + faligndata %f6, %f7, %f58; \ + faligndata %f7, %f8, %f60; \ + faligndata %f8, %f9, %f62; + +#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \ + EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ + add %src, 0x40, %src; \ + subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \ + be,pn %xcc, jmptgt; \ + add %dest, 0x40, %dest; \ + +#define LOOP_CHUNK1(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest) +#define LOOP_CHUNK2(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest) +#define LOOP_CHUNK3(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest) + +#define DO_SYNC membar #Sync; +#define STORE_SYNC(dest, fsrc) \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ + add %dest, 0x40, %dest; \ + DO_SYNC + +#define STORE_JUMP(dest, fsrc, target) \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \ + add %dest, 0x40, %dest; \ + ba,pt %xcc, target; \ + nop; + +#define FINISH_VISCHUNK(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn %xcc, 95f; \ + faligndata %f0, %f1, %f48; \ + EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \ + add %dest, 8, %dest; + +#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn %xcc, 95f; \ + fsrc2 %f0, %f1; + +#define UNEVEN_VISCHUNK(dest, f0, f1) \ + UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ + ba,a,pt %xcc, 93f; + + .register %g2,#scratch + .register %g3,#scratch + + .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(U1_g1_1_fp) + VISExitHalf + add %g1, 1, %g1 + add %g1, %g2, %g1 
+ retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1_fp) +ENTRY(U1_g2_0_fp) + VISExitHalf + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_0_fp) +ENTRY(U1_g2_8_fp) + VISExitHalf + add %g2, 8, %g2 + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_8_fp) +ENTRY(U1_gs_0_fp) + VISExitHalf + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_0_fp) +ENTRY(U1_gs_80_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_80_fp) +ENTRY(U1_gs_40_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_40_fp) +ENTRY(U1_g3_0_fp) + VISExitHalf + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_0_fp) +ENTRY(U1_g3_8_fp) + VISExitHalf + add %g3, 8, %g3 + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_8_fp) +ENTRY(U1_o2_0_fp) + VISExitHalf + retl + mov %o2, %o0 +ENDPROC(U1_o2_0_fp) +ENTRY(U1_o2_1_fp) + VISExitHalf + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1_fp) +ENTRY(U1_gs_0) + VISExitHalf + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_0) +ENTRY(U1_gs_8) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x8, %o0 +ENDPROC(U1_gs_8) +ENTRY(U1_gs_10) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x10, %o0 +ENDPROC(U1_gs_10) +ENTRY(U1_o2_0) + retl + mov %o2, %o0 +ENDPROC(U1_o2_0) +ENTRY(U1_o2_8) + retl + add %o2, 8, %o0 +ENDPROC(U1_o2_8) +ENTRY(U1_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(U1_o2_4) +ENTRY(U1_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1) +ENTRY(U1_g1_0) + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_0) +ENTRY(U1_g1_1) + add %g1, 1, %g1 + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1) +ENTRY(U1_gs_0_o2_adj) + and %o2, 7, %o2 + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_0_o2_adj) +ENTRY(U1_gs_8_o2_adj) + and %o2, 7, %o2 + add %GLOBAL_SPARE, 8, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_8_o2_adj) +#endif + + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %o4 + cmp %o2, 0 + be,pn %XCC, 85f + or %o0, %o1, %o3 + cmp %o2, 16 + blu,a,pn %XCC, 80f + or %o3, %o2, %o3 + + cmp %o2, (5 * 64) + blu,pt %XCC, 70f + andcc %o3, 0x7, %g0 + + /* Clobbers o5/g1/g2/g3/g7/icc/xcc. */ + VISEntry + + /* Is 'dst' already aligned on an 64-byte boundary? */ + andcc %o0, 0x3f, %g2 + be,pt %XCC, 2f + + /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number + * of bytes to copy to make 'dst' 64-byte aligned. We pre- + * subtract this from 'len'. + */ + sub %o0, %o1, %GLOBAL_SPARE + sub %g2, 0x40, %g2 + sub %g0, %g2, %g2 + sub %o2, %g2, %o2 + andcc %g2, 0x7, %g1 + be,pt %icc, 2f + and %g2, 0x38, %g2 + +1: subcc %g1, 0x1, %g1 + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp) + EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp) + bgu,pt %XCC, 1b + add %o1, 0x1, %o1 + + add %o1, %GLOBAL_SPARE, %o0 + +2: cmp %g2, 0x0 + and %o1, 0x7, %g1 + be,pt %icc, 3f + alignaddr %o1, %g0, %o1 + + EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f4, %f6, %f0 + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) + be,pn %icc, 3f + add %o0, 0x8, %o0 + + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f6, %f4, %f0 + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) + bne,pt %icc, 1b + add %o0, 0x8, %o0 + + /* Destination is 64-byte aligned. 
*/ +3: + membar #LoadStore | #StoreStore | #StoreLoad + + subcc %o2, 0x40, %GLOBAL_SPARE + add %o1, %g1, %g1 + andncc %GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE + srl %g1, 3, %g2 + sub %o2, %GLOBAL_SPARE, %g3 + andn %o1, (0x40 - 1), %o1 + and %g2, 7, %g2 + andncc %g3, 0x7, %g3 + fsrc2 %f0, %f2 + sub %g3, 0x8, %g3 + sub %o2, %GLOBAL_SPARE, %o2 + + add %g1, %GLOBAL_SPARE, %g1 + subcc %o2, %g3, %o2 + + EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp) + add %o1, 0x40, %o1 + add %g1, %g3, %g1 + EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp) + add %o1, 0x40, %o1 + sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE + EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp) + add %o1, 0x40, %o1 + + /* There are 8 instances of the unrolled loop, + * one for each possible alignment of the + * source buffer. Each loop instance is 452 + * bytes. + */ + sll %g2, 3, %o3 + sub %o3, %g2, %o3 + sllx %o3, 4, %o3 + add %o3, %g2, %o3 + sllx %o3, 2, %g2 +1: rd %pc, %o3 + add %o3, %lo(1f - 1b), %o3 + jmpl %o3 + %g2, %g0 + nop + + .align 64 +1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f0, %f2, %f48 +1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) + STORE_SYNC(o0, f48) + FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) + STORE_JUMP(o0, f48, 40f) +2: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) + STORE_SYNC(o0, f48) + FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) + STORE_JUMP(o0, f48, 48f) +3: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) + STORE_SYNC(o0, f48) + FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) + STORE_JUMP(o0, f48, 56f) + +1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f2, %f4, %f48 +1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) + STORE_SYNC(o0, f48) + FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) + STORE_JUMP(o0, f48, 41f) +2: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) + STORE_SYNC(o0, f48) + FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) + STORE_JUMP(o0, f48, 49f) +3: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) + STORE_SYNC(o0, f48) + FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) + STORE_JUMP(o0, f48, 57f) + +1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f4, %f6, %f48 +1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) + STORE_SYNC(o0, f48) + FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) + STORE_JUMP(o0, f48, 42f) +2: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) + STORE_SYNC(o0, f48) + FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) + STORE_JUMP(o0, f48, 50f) +3: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) + STORE_SYNC(o0, f48) + FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) + STORE_JUMP(o0, f48, 58f) + +1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f6, %f8, %f48 +1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) + STORE_SYNC(o0, f48) + FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) + 
STORE_JUMP(o0, f48, 43f) +2: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) + STORE_SYNC(o0, f48) + FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) + STORE_JUMP(o0, f48, 51f) +3: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) + STORE_SYNC(o0, f48) + FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) + STORE_JUMP(o0, f48, 59f) + +1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f8, %f10, %f48 +1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) + STORE_SYNC(o0, f48) + FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) + STORE_JUMP(o0, f48, 44f) +2: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) + STORE_SYNC(o0, f48) + FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) + STORE_JUMP(o0, f48, 52f) +3: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) + STORE_SYNC(o0, f48) + FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) + STORE_JUMP(o0, f48, 60f) + +1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f10, %f12, %f48 +1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) + STORE_SYNC(o0, f48) + FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) + STORE_JUMP(o0, f48, 45f) +2: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) + STORE_SYNC(o0, f48) + FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) + STORE_JUMP(o0, f48, 53f) +3: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) + STORE_SYNC(o0, f48) + FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) + STORE_JUMP(o0, f48, 61f) + +1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f12, %f14, %f48 +1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) + STORE_SYNC(o0, f48) + FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) + STORE_JUMP(o0, f48, 46f) +2: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) + STORE_SYNC(o0, f48) + FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) + STORE_JUMP(o0, f48, 54f) +3: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) + STORE_SYNC(o0, f48) + FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) + STORE_JUMP(o0, f48, 62f) + +1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) + LOOP_CHUNK1(o1, o0, 1f) + FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) + LOOP_CHUNK2(o1, o0, 2f) + FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) + LOOP_CHUNK3(o1, o0, 3f) + ba,pt %xcc, 1b+4 + faligndata %f14, %f16, %f48 +1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) + STORE_SYNC(o0, f48) + FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) + STORE_JUMP(o0, f48, 47f) +2: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) + STORE_SYNC(o0, f48) + FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) + STORE_JUMP(o0, f48, 55f) +3: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) + STORE_SYNC(o0, f48) + FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) + STORE_JUMP(o0, f48, 63f) + +40: FINISH_VISCHUNK(o0, f0, f2) +41: FINISH_VISCHUNK(o0, f2, f4) +42: FINISH_VISCHUNK(o0, f4, f6) +43: FINISH_VISCHUNK(o0, f6, f8) +44: FINISH_VISCHUNK(o0, f8, f10) +45: FINISH_VISCHUNK(o0, f10, f12) +46: FINISH_VISCHUNK(o0, f12, f14) +47: UNEVEN_VISCHUNK(o0, f14, f0) +48: FINISH_VISCHUNK(o0, f16, f18) +49: FINISH_VISCHUNK(o0, f18, 
f20) +50: FINISH_VISCHUNK(o0, f20, f22) +51: FINISH_VISCHUNK(o0, f22, f24) +52: FINISH_VISCHUNK(o0, f24, f26) +53: FINISH_VISCHUNK(o0, f26, f28) +54: FINISH_VISCHUNK(o0, f28, f30) +55: UNEVEN_VISCHUNK(o0, f30, f0) +56: FINISH_VISCHUNK(o0, f32, f34) +57: FINISH_VISCHUNK(o0, f34, f36) +58: FINISH_VISCHUNK(o0, f36, f38) +59: FINISH_VISCHUNK(o0, f38, f40) +60: FINISH_VISCHUNK(o0, f40, f42) +61: FINISH_VISCHUNK(o0, f42, f44) +62: FINISH_VISCHUNK(o0, f44, f46) +63: UNEVEN_VISCHUNK_LAST(o0, f46, f0) + +93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp) + add %o1, 8, %o1 + subcc %g3, 8, %g3 + faligndata %f0, %f2, %f8 + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) + bl,pn %xcc, 95f + add %o0, 8, %o0 + EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp) + add %o1, 8, %o1 + subcc %g3, 8, %g3 + faligndata %f2, %f0, %f8 + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) + bge,pt %xcc, 93b + add %o0, 8, %o0 + +95: brz,pt %o2, 2f + mov %g1, %o1 + +1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp) + add %o1, 1, %o1 + subcc %o2, 1, %o2 + EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp) + bne,pt %xcc, 1b + add %o0, 1, %o0 + +2: membar #StoreLoad | #StoreStore + VISExit + retl + mov EX_RETVAL(%o4), %o0 + + .align 64 +70: /* 16 < len <= (5 * 64) */ + bne,pn %XCC, 75f + sub %o0, %o1, %o3 + +72: andn %o2, 0xf, %GLOBAL_SPARE + and %o2, 0xf, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0) + subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10) + add %o1, 0x8, %o1 + EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8) + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 + be,pt %XCC, 1f + nop + EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0) + sub %o2, 0x8, %o2 + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0) + sub %o2, 0x4, %o2 + EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4) + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, 85f + nop + ba,pt %xcc, 90f + nop + +75: andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %o2, %g1, %o2 + +1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0) + subcc %g1, 1, %g1 + EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1) + bgu,pt %icc, 1b + add %o1, 1, %o1 + +2: add %o1, %o3, %o0 + andcc %o1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %o2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %xcc, 73b + +8: mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0) + sub %o3, %g1, %o3 + andn %o2, 0x7, %GLOBAL_SPARE + sllx %g2, %g1, %g2 +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj) + subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE + add %o1, 0x8, %o1 + srlx %g3, %o3, %o5 + or %o5, %g2, %o5 + EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %o2, 0x7, %o2 + be,pn %icc, 85f + add %o1, %g1, %o1 + ba,pt %xcc, 90f + sub %o0, %o1, %o3 + + .align 64 +80: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, %o3 + +1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0) + subcc %o2, 4, %o2 + EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +85: retl + mov EX_RETVAL(%o4), %o0 + + .align 32 +90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0) + subcc %o2, 1, %o2 + EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov EX_RETVAL(%o4), %o0 + + .size FUNC_NAME, .-FUNC_NAME +EXPORT_SYMBOL(FUNC_NAME) diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S new file mode 100644 
index 0000000000..9c891e9edc --- /dev/null +++ b/arch/sparc/lib/U3copy_from_user.S @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#define EX_LD(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_LD_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#define FUNC_NAME U3copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#include "U3memcpy.S" diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S new file mode 100644 index 0000000000..da42460827 --- /dev/null +++ b/arch/sparc/lib/U3copy_to_user.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U3copy_to_user.S: UltraSparc-III optimized copy to userspace. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#define EX_ST(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y; \ + .text; \ + .align 4; + +#define EX_ST_FP(x,y) \ +98: x; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, y##_fp; \ + .text; \ + .align 4; + +#define FUNC_NAME U3copy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS +#define EX_RETVAL(x) 0 + + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop; \ + +#include "U3memcpy.S" diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S new file mode 100644 index 0000000000..9248d59c73 --- /dev/null +++ b/arch/sparc/lib/U3memcpy.S @@ -0,0 +1,520 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* U3memcpy.S: UltraSparc-III optimized memcpy. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#ifdef __KERNEL__ +#include <linux/linkage.h> +#include <asm/visasm.h> +#include <asm/asi.h> +#define GLOBAL_SPARE %g7 +#else +#define ASI_BLK_P 0xf0 +#define FPRS_FEF 0x04 +#ifdef MEMCPY_DEBUG +#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \ + clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0; +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#else +#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs +#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs +#endif +#define GLOBAL_SPARE %g5 +#endif + +#ifndef EX_LD +#define EX_LD(x,y) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x,y) x +#endif + +#ifndef EX_ST +#define EX_ST(x,y) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x,y) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef STORE_BLK +#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME U3memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + + /* Special/non-trivial issues of this code: + * + * 1) %o5 is preserved from VISEntryHalf to VISExitHalf + * 2) Only low 32 FPU registers are used so that only the + * lower half of the FPU register set is dirtied by this + * code. This is especially important in the kernel. 
+ * 3) This code never prefetches cachelines past the end + * of the source buffer. + */ + + .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf + retl + nop +ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) + add %g1, 1, %g1 + add %g2, %g1, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) +ENTRY(U3_retl_o2_plus_g2_fp) + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_fp) +ENTRY(U3_retl_o2_plus_g2_plus_8_fp) + add %g2, 8, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_8_fp) +ENTRY(U3_retl_o2) + retl + mov %o2, %o0 +ENDPROC(U3_retl_o2) +ENTRY(U3_retl_o2_plus_1) + retl + add %o2, 1, %o0 +ENDPROC(U3_retl_o2_plus_1) +ENTRY(U3_retl_o2_plus_4) + retl + add %o2, 4, %o0 +ENDPROC(U3_retl_o2_plus_4) +ENTRY(U3_retl_o2_plus_8) + retl + add %o2, 8, %o0 +ENDPROC(U3_retl_o2_plus_8) +ENTRY(U3_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + retl + add %o2, %g1, %o0 +ENDPROC(U3_retl_o2_plus_g1_plus_1) +ENTRY(U3_retl_o2_fp) + ba,pt %xcc, __restore_fp + mov %o2, %o0 +ENDPROC(U3_retl_o2_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) + sll %o3, 6, %o3 + add %o3, 0x80, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) + sll %o3, 6, %o3 + add %o3, 0x40, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) +ENTRY(U3_retl_o2_plus_GS_plus_0x10) + add GLOBAL_SPARE, 0x10, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x10) +ENTRY(U3_retl_o2_plus_GS_plus_0x08) + add GLOBAL_SPARE, 0x08, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x08) +ENTRY(U3_retl_o2_and_7_plus_GS) + and %o2, 7, %o2 + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_and_7_plus_GS) +ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) + add GLOBAL_SPARE, 8, GLOBAL_SPARE + and %o2, 7, %o2 + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) +#endif + + .align 64 + + /* The cheetah's flexible spine, oversized liver, enlarged heart, + * slender muscular body, and claws make it the swiftest hunter + * in Africa and the fastest animal on land. Can reach speeds + * of up to 2.4GB per second. + */ + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 + cmp %g2, 0 + + /* software trap 5 "Range Check" if dst >= 0x80000000 */ + tne %xcc, 5 + PREAMBLE + mov %o0, %o4 + + /* if len == 0 */ + cmp %o2, 0 + be,pn %XCC, end_return + or %o0, %o1, %o3 + + /* if len < 16 */ + cmp %o2, 16 + blu,a,pn %XCC, less_than_16 + or %o3, %o2, %o3 + + /* if len < 192 */ + cmp %o2, (3 * 64) + blu,pt %XCC, less_than_192 + andcc %o3, 0x7, %g0 + + /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve + * o5 from here until we hit VISExitHalf. + */ + VISEntryHalf + + /* Is 'dst' already aligned on an 64-byte boundary? */ + andcc %o0, 0x3f, %g2 + be,pt %XCC, 2f + + /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number + * of bytes to copy to make 'dst' 64-byte aligned. We pre- + * subtract this from 'len'. 
+ */ + sub %o0, %o1, GLOBAL_SPARE + sub %g2, 0x40, %g2 + sub %g0, %g2, %g2 + sub %o2, %g2, %o2 + andcc %g2, 0x7, %g1 + be,pt %icc, 2f + and %g2, 0x38, %g2 + +1: subcc %g1, 0x1, %g1 + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1) + EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1) + bgu,pt %XCC, 1b + add %o1, 0x1, %o1 + + add %o1, GLOBAL_SPARE, %o0 + +2: cmp %g2, 0x0 + and %o1, 0x7, %g1 + be,pt %icc, 3f + alignaddr %o1, %g0, %o1 + + EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f4, %f6, %f0 + EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8) + be,pn %icc, 3f + add %o0, 0x8, %o0 + + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f6, %f4, %f2 + EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8) + bne,pt %icc, 1b + add %o0, 0x8, %o0 + +3: LOAD(prefetch, %o1 + 0x000, #one_read) + LOAD(prefetch, %o1 + 0x040, #one_read) + andn %o2, (0x40 - 1), GLOBAL_SPARE + LOAD(prefetch, %o1 + 0x080, #one_read) + LOAD(prefetch, %o1 + 0x0c0, #one_read) + LOAD(prefetch, %o1 + 0x100, #one_read) + EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2) + LOAD(prefetch, %o1 + 0x140, #one_read) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2) + LOAD(prefetch, %o1 + 0x180, #one_read) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2) + LOAD(prefetch, %o1 + 0x1c0, #one_read) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2) + faligndata %f6, %f8, %f22 + + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2) + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2) + faligndata %f10, %f12, %f26 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) + + subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE + add %o1, 0x40, %o1 + bgu,pt %XCC, 1f + srl GLOBAL_SPARE, 6, %o3 + ba,pt %xcc, 2f + nop + + .align 64 +1: + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f0, %f2, %f16 + add %o0, 0x40, %o0 + + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) + subcc %o3, 0x01, %o3 + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80) + + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + LOAD(prefetch, %o1 + 0x1c0, #one_read) + faligndata %f10, %f12, %f26 + bg,pt %XCC, 1b + add %o1, 0x40, %o1 + + /* Finally we copy the last full 64-byte block. 
*/ +2: + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40) + faligndata %f8, %f10, %f24 + cmp %g1, 0 + be,pt %XCC, 1f + add %o0, 0x40, %o0 + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40) +1: faligndata %f10, %f12, %f26 + faligndata %f12, %f14, %f28 + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40) + add %o0, 0x40, %o0 + add %o1, 0x40, %o1 + membar #Sync + + /* Now we copy the (len modulo 64) bytes at the end. + * Note how we borrow the %f0 loaded above. + * + * Also notice how this code is careful not to perform a + * load past the end of the src buffer. + */ + and %o2, 0x3f, %o2 + andcc %o2, 0x38, %g2 + be,pn %XCC, 2f + subcc %g2, 0x8, %g2 + be,pn %XCC, 2f + cmp %g1, 0 + + sub %o2, %g2, %o2 + be,a,pt %XCC, 1f + EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2) + +1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f0, %f2, %f8 + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) + be,pn %XCC, 2f + add %o0, 0x8, %o0 + EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2) + add %o1, 0x8, %o1 + subcc %g2, 0x8, %g2 + faligndata %f2, %f0, %f8 + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) + bne,pn %XCC, 1b + add %o0, 0x8, %o0 + + /* If anything is left, we copy it one byte at a time. + * Note that %g1 is (src & 0x3) saved above before the + * alignaddr was performed. 
+ */ +2: + cmp %o2, 0 + add %o1, %g1, %o1 + VISExitHalf + be,pn %XCC, end_return + sub %o0, %o1, %o3 + + andcc %g1, 0x7, %g0 + bne,pn %icc, 90f + andcc %o2, 0x8, %g0 + be,pt %icc, 1f + nop + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2) + add %o1, 0x8, %o1 + sub %o2, 8, %o2 + +1: andcc %o2, 0x4, %g0 + be,pt %icc, 1f + nop + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2) + add %o1, 0x4, %o1 + sub %o2, 4, %o2 + +1: andcc %o2, 0x2, %g0 + be,pt %icc, 1f + nop + EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2) + EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2) + add %o1, 0x2, %o1 + sub %o2, 2, %o2 + +1: andcc %o2, 0x1, %g0 + be,pt %icc, end_return + nop + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) + ba,pt %xcc, end_return + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) + + .align 64 + /* 16 <= len < 192 */ +less_than_192: + bne,pn %XCC, 75f + sub %o0, %o1, %o3 + +72: + andn %o2, 0xf, GLOBAL_SPARE + and %o2, 0xf, %o2 +1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE + EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10) + add %o1, 0x8, %o1 + EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08) + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x8, %o2 + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, end_return + nop + ba,pt %xcc, 90f + nop + +75: + andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %o2, %g1, %o2 + +1: subcc %g1, 1, %g1 + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1) + bgu,pt %icc, 1b + add %o1, 1, %o1 + +2: add %o1, %o3, %o0 + andcc %o1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %o2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %xcc, 73b + +8: mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2) + sub %o3, %g1, %o3 + andn %o2, 0x7, GLOBAL_SPARE + sllx %g2, %g1, %g2 +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS) + subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE + add %o1, 0x8, %o1 + srlx %g3, %o3, %o5 + or %o5, %g2, %o5 + EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %o2, 0x7, %o2 + be,pn %icc, end_return + add %o1, %g1, %o1 + ba,pt %xcc, 90f + sub %o0, %o1, %o3 + + .align 64 + /* 0 < len < 16 */ +less_than_16: + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, %o3 + +1: + subcc %o2, 4, %o2 + EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +end_return: + retl + mov EX_RETVAL(%o4), %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov EX_RETVAL(%o4), %o0 + + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/U3patch.S b/arch/sparc/lib/U3patch.S new file mode 100644 index 0000000000..9a888088f3 --- /dev/null +++ b/arch/sparc/lib/U3patch.S @@ -0,0 +1,34 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* U3patch.S: Patch Ultra-I routines with Ultra-III variant. + * + * Copyright (C) 2004 David S. Miller <davem@redhat.com> + */ + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define ULTRA3_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl cheetah_patch_copyops + .type cheetah_patch_copyops,#function +cheetah_patch_copyops: + ULTRA3_DO_PATCH(memcpy, U3memcpy) + ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user) + ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user) + retl + nop + .size cheetah_patch_copyops,.-cheetah_patch_copyops diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S new file mode 100644 index 0000000000..31a0c336c1 --- /dev/null +++ b/arch/sparc/lib/VISsave.S @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * VISsave.S: Code for saving FPU register state for + * VIS routines. One should not call this directly, + * but use macros provided in <asm/visasm.h>. + * + * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include <linux/export.h> +#include <linux/linkage.h> + +#include <asm/asi.h> +#include <asm/page.h> +#include <asm/ptrace.h> +#include <asm/visasm.h> +#include <asm/thread_info.h> + + /* On entry: %o5=current FPRS value, %g7 is callers address */ + /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ + + /* Nothing special need be done here to handle pre-emption, this + * FPU save/restore mechanism is already preemption safe. + */ + .text + .align 32 +ENTRY(VISenter) + ldub [%g6 + TI_FPDEPTH], %g1 + brnz,a,pn %g1, 1f + cmp %g1, 1 + stb %g0, [%g6 + TI_FPSAVED] + stx %fsr, [%g6 + TI_XFSR] +9: jmpl %g7 + %g0, %g0 + nop +1: bne,pn %icc, 2f + + srl %g1, 1, %g1 +vis1: ldub [%g6 + TI_FPSAVED], %g3 + stx %fsr, [%g6 + TI_XFSR] + or %g3, %o5, %g3 + stb %g3, [%g6 + TI_FPSAVED] + rd %gsr, %g3 + clr %g1 + ba,pt %xcc, 3f + + stx %g3, [%g6 + TI_GSR] +2: add %g6, %g1, %g3 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 + sll %g1, 3, %g1 + stb %o5, [%g3 + TI_FPSAVED] + rd %gsr, %g2 + add %g6, %g1, %g3 + stx %g2, [%g3 + TI_GSR] + + add %g6, %g1, %g2 + stx %fsr, [%g2 + TI_XFSR] + sll %g1, 5, %g1 +3: andcc %o5, FPRS_DL|FPRS_DU, %g0 + be,pn %icc, 9b + add %g6, TI_FPREGS, %g2 + andcc %o5, FPRS_DL, %g0 + + be,pn %icc, 4f + add %g6, TI_FPREGS+0x40, %g3 + membar #Sync + stda %f0, [%g2 + %g1] ASI_BLK_P + stda %f16, [%g3 + %g1] ASI_BLK_P + membar #Sync + andcc %o5, FPRS_DU, %g0 + be,pn %icc, 5f +4: add %g1, 128, %g1 + membar #Sync + stda %f32, [%g2 + %g1] ASI_BLK_P + + stda %f48, [%g3 + %g1] ASI_BLK_P +5: membar #Sync + ba,pt %xcc, 80f + nop + + .align 32 +80: jmpl %g7 + %g0, %g0 + nop +ENDPROC(VISenter) +EXPORT_SYMBOL(VISenter) diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S new file mode 100644 index 0000000000..2a9e7c4fb2 --- /dev/null +++ b/arch/sparc/lib/ashldi3.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ashldi3.S: GCC emits these for certain drivers playing + * with long longs. + * + * Copyright (C) 1999 David S. 
Miller (davem@redhat.com) + */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text +ENTRY(__ashldi3) + cmp %o2, 0 + be 9f + mov 0x20, %g2 + + sub %g2, %o2, %g2 + cmp %g2, 0 + bg 7f + sll %o0, %o2, %g3 + + neg %g2 + clr %o5 + b 8f + sll %o1, %g2, %o4 +7: + srl %o1, %g2, %g2 + sll %o1, %o2, %o5 + or %g3, %g2, %o4 +8: + mov %o4, %o0 + mov %o5, %o1 +9: + retl + nop +ENDPROC(__ashldi3) +EXPORT_SYMBOL(__ashldi3) diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S new file mode 100644 index 0000000000..8fd0b31172 --- /dev/null +++ b/arch/sparc/lib/ashrdi3.S @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ashrdi3.S: The filesystem code creates all kinds of references to + * this little routine on the sparc with gcc. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text +ENTRY(__ashrdi3) + tst %o2 + be 3f + or %g0, 32, %g2 + + sub %g2, %o2, %g2 + + tst %g2 + bg 1f + sra %o0, %o2, %o4 + + sra %o0, 31, %o4 + sub %g0, %g2, %g2 + ba 2f + sra %o0, %g2, %o5 + +1: + sll %o0, %g2, %g3 + srl %o1, %o2, %g2 + or %g2, %g3, %o5 +2: + or %g0, %o4, %o0 + or %g0, %o5, %o1 +3: + jmpl %o7 + 8, %g0 + nop +ENDPROC(__ashrdi3) +EXPORT_SYMBOL(__ashrdi3) diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c new file mode 100644 index 0000000000..cf80d1ae35 --- /dev/null +++ b/arch/sparc/lib/atomic32.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * atomic32.c: 32-bit atomic_t implementation + * + * Copyright (C) 2004 Keith M Wesolowski + * Copyright (C) 2007 Kyle McMartin + * + * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf + */ + +#include <linux/atomic.h> +#include <linux/spinlock.h> +#include <linux/module.h> + +#ifdef CONFIG_SMP +#define ATOMIC_HASH_SIZE 4 +#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)]) + +spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = { + [0 ... 
(ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash) +}; + +#else /* SMP */ + +static DEFINE_SPINLOCK(dummy); +#define ATOMIC_HASH_SIZE 1 +#define ATOMIC_HASH(a) (&dummy) + +#endif /* SMP */ + +#define ATOMIC_FETCH_OP(op, c_op) \ +int arch_atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int ret; \ + unsigned long flags; \ + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ + \ + ret = v->counter; \ + v->counter c_op i; \ + \ + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ + return ret; \ +} \ +EXPORT_SYMBOL(arch_atomic_fetch_##op); + +#define ATOMIC_OP_RETURN(op, c_op) \ +int arch_atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int ret; \ + unsigned long flags; \ + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ + \ + ret = (v->counter c_op i); \ + \ + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ + return ret; \ +} \ +EXPORT_SYMBOL(arch_atomic_##op##_return); + +ATOMIC_OP_RETURN(add, +=) + +ATOMIC_FETCH_OP(add, +=) +ATOMIC_FETCH_OP(and, &=) +ATOMIC_FETCH_OP(or, |=) +ATOMIC_FETCH_OP(xor, ^=) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN + +int arch_atomic_xchg(atomic_t *v, int new) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + v->counter = new; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(arch_atomic_xchg); + +int arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(arch_atomic_cmpxchg); + +int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + if (ret != u) + v->counter += a; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(arch_atomic_fetch_add_unless); + +/* Atomic operations are already serializing */ +void arch_atomic_set(atomic_t *v, int i) +{ + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + v->counter = i; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); +} +EXPORT_SYMBOL(arch_atomic_set); + +unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask) +{ + unsigned long old, flags; + + spin_lock_irqsave(ATOMIC_HASH(addr), flags); + old = *addr; + *addr = old | mask; + spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + + return old & mask; +} +EXPORT_SYMBOL(sp32___set_bit); + +unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask) +{ + unsigned long old, flags; + + spin_lock_irqsave(ATOMIC_HASH(addr), flags); + old = *addr; + *addr = old & ~mask; + spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + + return old & mask; +} +EXPORT_SYMBOL(sp32___clear_bit); + +unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask) +{ + unsigned long old, flags; + + spin_lock_irqsave(ATOMIC_HASH(addr), flags); + old = *addr; + *addr = old ^ mask; + spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); + + return old & mask; +} +EXPORT_SYMBOL(sp32___change_bit); + +unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) +{ + unsigned long flags; + u32 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + if ((prev = *ptr) == old) + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return (unsigned long)prev; +} +EXPORT_SYMBOL(__cmpxchg_u32); + +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new) +{ + unsigned long flags; + u64 prev; + + 
spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + if ((prev = *ptr) == old) + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return prev; +} +EXPORT_SYMBOL(__cmpxchg_u64); + +unsigned long __xchg_u32(volatile u32 *ptr, u32 new) +{ + unsigned long flags; + u32 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + prev = *ptr; + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return (unsigned long)prev; +} +EXPORT_SYMBOL(__xchg_u32); diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S new file mode 100644 index 0000000000..4f8cab2fb9 --- /dev/null +++ b/arch/sparc/lib/atomic_64.S @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* atomic.S: These things are too big to do inline. + * + * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asi.h> +#include <asm/backoff.h> + + .text + + /* Three versions of the atomic routines, one that + * does not return a value and does not perform + * memory barriers, and a two which return + * a value, the new and old value resp. and does the + * barriers. + */ + +#define ATOMIC_OP(op) \ +ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ + BACKOFF_SETUP(%o2); \ +1: lduw [%o1], %g1; \ + op %g1, %o0, %g7; \ + cas [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ + nop; \ + retl; \ + nop; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic_##op); \ +EXPORT_SYMBOL(arch_atomic_##op); + +#define ATOMIC_OP_RETURN(op) \ +ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\ + BACKOFF_SETUP(%o2); \ +1: lduw [%o1], %g1; \ + op %g1, %o0, %g7; \ + cas [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ + op %g1, %o0, %g1; \ + retl; \ + sra %g1, 0, %o0; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic_##op##_return); \ +EXPORT_SYMBOL(arch_atomic_##op##_return); + +#define ATOMIC_FETCH_OP(op) \ +ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ + BACKOFF_SETUP(%o2); \ +1: lduw [%o1], %g1; \ + op %g1, %o0, %g7; \ + cas [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ + nop; \ + retl; \ + sra %g1, 0, %o0; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic_fetch_##op); \ +EXPORT_SYMBOL(arch_atomic_fetch_##op); + +ATOMIC_OP(add) +ATOMIC_OP_RETURN(add) +ATOMIC_FETCH_OP(add) + +ATOMIC_OP(sub) +ATOMIC_OP_RETURN(sub) +ATOMIC_FETCH_OP(sub) + +ATOMIC_OP(and) +ATOMIC_FETCH_OP(and) + +ATOMIC_OP(or) +ATOMIC_FETCH_OP(or) + +ATOMIC_OP(xor) +ATOMIC_FETCH_OP(xor) + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define ATOMIC64_OP(op) \ +ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ + BACKOFF_SETUP(%o2); \ +1: ldx [%o1], %g1; \ + op %g1, %o0, %g7; \ + casx [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ + nop; \ + retl; \ + nop; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic64_##op); \ +EXPORT_SYMBOL(arch_atomic64_##op); + +#define ATOMIC64_OP_RETURN(op) \ +ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ + BACKOFF_SETUP(%o2); \ +1: ldx [%o1], %g1; \ + op %g1, %o0, %g7; \ + casx [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ + nop; \ + retl; \ + op %g1, %o0, %o0; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic64_##op##_return); \ +EXPORT_SYMBOL(arch_atomic64_##op##_return); + +#define ATOMIC64_FETCH_OP(op) \ 
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ + BACKOFF_SETUP(%o2); \ +1: ldx [%o1], %g1; \ + op %g1, %o0, %g7; \ + casx [%o1], %g1, %g7; \ + cmp %g1, %g7; \ + bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ + nop; \ + retl; \ + mov %g1, %o0; \ +2: BACKOFF_SPIN(%o2, %o3, 1b); \ +ENDPROC(arch_atomic64_fetch_##op); \ +EXPORT_SYMBOL(arch_atomic64_fetch_##op); + +ATOMIC64_OP(add) +ATOMIC64_OP_RETURN(add) +ATOMIC64_FETCH_OP(add) + +ATOMIC64_OP(sub) +ATOMIC64_OP_RETURN(sub) +ATOMIC64_FETCH_OP(sub) + +ATOMIC64_OP(and) +ATOMIC64_FETCH_OP(and) + +ATOMIC64_OP(or) +ATOMIC64_FETCH_OP(or) + +ATOMIC64_OP(xor) +ATOMIC64_FETCH_OP(xor) + +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o0], %g1 + brlez,pn %g1, 3f + sub %g1, 1, %g7 + casx [%o0], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + nop +3: retl + sub %g1, 1, %o0 +2: BACKOFF_SPIN(%o2, %o3, 1b) +ENDPROC(arch_atomic64_dec_if_positive) +EXPORT_SYMBOL(arch_atomic64_dec_if_positive) diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c new file mode 100644 index 0000000000..32a5c1d945 --- /dev/null +++ b/arch/sparc/lib/bitext.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * bitext.c: kernel little helper (of bit shuffling variety). + * + * Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com> + * + * The algorithm to search a zero bit string is geared towards its application. + * We expect a couple of fixed sizes of requests, so a rotating counter, reset + * by align size, should provide fast enough search while maintaining low + * fragmentation. + */ + +#include <linux/string.h> +#include <linux/bitmap.h> + +#include <asm/bitext.h> + +/** + * bit_map_string_get - find and set a bit string in bit map. + * @t: the bit map. + * @len: requested string length + * @align: requested alignment + * + * Returns offset in the map or -1 if out of space. + * + * Not safe to call from an interrupt (uses spin_lock). 
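bit_map_string_get() below implements the rotating, alignment-aware first-fit search this comment describes. A stand-alone user-space sketch of just the core search (no lock, no colour handling, no rotating start offset; the names find_zero_string and MAP_BITS are made up for the example):

#include <stdio.h>

/* Sketch of the search bit_map_string_get() performs: find `len`
 * consecutive zero bits at an `align`-aligned offset in a fixed map,
 * claim them, and return the offset, or -1 if out of space.
 */
#define MAP_BITS 128
static unsigned char map[MAP_BITS];      /* one byte per bit, for clarity */

static int find_zero_string(int len, int align)
{
    for (int off = 0; off + len <= MAP_BITS; off += align) {
        int i;

        for (i = 0; i < len && !map[off + i]; i++)
            ;
        if (i == len) {                  /* found a free, aligned run */
            for (i = 0; i < len; i++)
                map[off + i] = 1;        /* claim it */
            return off;
        }
    }
    return -1;                           /* out of space */
}

int main(void)
{
    printf("%d\n", find_zero_string(8, 4));   /* 0  */
    printf("%d\n", find_zero_string(4, 4));   /* 8  */
    printf("%d\n", find_zero_string(16, 8));  /* 16 */
    return 0;
}

The kernel version additionally remembers first_free, last_off and last_size so that repeated allocations of the same size do not rescan the map from the start.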
+ */ +int bit_map_string_get(struct bit_map *t, int len, int align) +{ + int offset, count; /* siamese twins */ + int off_new; + int align1; + int i, color; + + if (t->num_colors) { + /* align is overloaded to be the page color */ + color = align; + align = t->num_colors; + } else { + color = 0; + if (align == 0) + align = 1; + } + align1 = align - 1; + if ((align & align1) != 0) + BUG(); + if (align < 0 || align >= t->size) + BUG(); + if (len <= 0 || len > t->size) + BUG(); + color &= align1; + + spin_lock(&t->lock); + if (len < t->last_size) + offset = t->first_free; + else + offset = t->last_off & ~align1; + count = 0; + for (;;) { + off_new = find_next_zero_bit(t->map, t->size, offset); + off_new = ((off_new + align1) & ~align1) + color; + count += off_new - offset; + offset = off_new; + if (offset >= t->size) + offset = 0; + if (count + len > t->size) { + spin_unlock(&t->lock); +/* P3 */ printk(KERN_ERR + "bitmap out: size %d used %d off %d len %d align %d count %d\n", + t->size, t->used, offset, len, align, count); + return -1; + } + + if (offset + len > t->size) { + count += t->size - offset; + offset = 0; + continue; + } + + i = 0; + while (test_bit(offset + i, t->map) == 0) { + i++; + if (i == len) { + bitmap_set(t->map, offset, len); + if (offset == t->first_free) + t->first_free = find_next_zero_bit + (t->map, t->size, + t->first_free + len); + if ((t->last_off = offset + len) >= t->size) + t->last_off = 0; + t->used += len; + t->last_size = len; + spin_unlock(&t->lock); + return offset; + } + } + count += i + 1; + if ((offset += i + 1) >= t->size) + offset = 0; + } +} + +void bit_map_clear(struct bit_map *t, int offset, int len) +{ + int i; + + if (t->used < len) + BUG(); /* Much too late to do any good, but alas... */ + spin_lock(&t->lock); + for (i = 0; i < len; i++) { + if (test_bit(offset + i, t->map) == 0) + BUG(); + __clear_bit(offset + i, t->map); + } + if (offset < t->first_free) + t->first_free = offset; + t->used -= len; + spin_unlock(&t->lock); +} + +void bit_map_init(struct bit_map *t, unsigned long *map, int size) +{ + bitmap_zero(map, size); + memset(t, 0, sizeof *t); + spin_lock_init(&t->lock); + t->map = map; + t->size = size; +} diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S new file mode 100644 index 0000000000..9c91cbb310 --- /dev/null +++ b/arch/sparc/lib/bitops.S @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* bitops.S: Sparc64 atomic bit operations. + * + * Copyright (C) 2000, 2007 David S. 
Miller (davem@davemloft.net) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asi.h> +#include <asm/backoff.h> + + .text + +ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + or %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + and %g7, %o2, %g2 + clr %o0 + movrne %g2, 1, %o0 + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(test_and_set_bit) +EXPORT_SYMBOL(test_and_set_bit) + +ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + andn %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + and %g7, %o2, %g2 + clr %o0 + movrne %g2, 1, %o0 + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(test_and_clear_bit) +EXPORT_SYMBOL(test_and_clear_bit) + +ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + xor %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + and %g7, %o2, %g2 + clr %o0 + movrne %g2, 1, %o0 + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(test_and_change_bit) +EXPORT_SYMBOL(test_and_change_bit) + +ENTRY(set_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + or %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + nop + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(set_bit) +EXPORT_SYMBOL(set_bit) + +ENTRY(clear_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + andn %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + nop + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(clear_bit) +EXPORT_SYMBOL(clear_bit) + +ENTRY(change_bit) /* %o0=nr, %o1=addr */ + BACKOFF_SETUP(%o3) + srlx %o0, 6, %g1 + mov 1, %o2 + sllx %g1, 3, %g3 + and %o0, 63, %g2 + sllx %o2, %g2, %o2 + add %o1, %g3, %o1 +1: ldx [%o1], %g7 + xor %g7, %o2, %g1 + casx [%o1], %g7, %g1 + cmp %g7, %g1 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + nop + retl + nop +2: BACKOFF_SPIN(%o3, %o4, 1b) +ENDPROC(change_bit) +EXPORT_SYMBOL(change_bit) diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S new file mode 100644 index 0000000000..5b92959a4d --- /dev/null +++ b/arch/sparc/lib/blockops.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * blockops.S: Common block zero optimized routines. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/page.h> + + /* Zero out 64 bytes of memory at (buf + offset). + * Assumes %g1 contains zero. 
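The bitops.S routines above (test_and_set_bit and friends) all follow one pattern: derive a 64-bit word index (nr >> 6) and mask (1 << (nr & 63)) from the bit number, then retry casx until the compare-and-swap succeeds, spinning with exponential backoff on contention. A user-space sketch of the same pattern, with GCC __atomic builtins standing in for casx and the backoff omitted (the function name is made up):

#include <stdio.h>
#include <stdbool.h>

/* Sketch of the test_and_set_bit pattern: word index = nr >> 6,
 * mask = 1 << (nr & 63), then a compare-and-swap retry loop.
 */
static int test_and_set_bit_sketch(unsigned long nr, unsigned long *addr)
{
    unsigned long *word = addr + (nr >> 6);          /* 64-bit words */
    unsigned long mask = 1UL << (nr & 63);
    unsigned long old;

    do {
        old = __atomic_load_n(word, __ATOMIC_RELAXED);
    } while (!__atomic_compare_exchange_n(word, &old, old | mask, false,
                                          __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
    return (old & mask) != 0;                        /* previous bit value */
}

int main(void)
{
    unsigned long bitmap[2] = { 0, 0 };

    printf("%d\n", test_and_set_bit_sketch(70, bitmap));  /* 0: was clear */
    printf("%d\n", test_and_set_bit_sketch(70, bitmap));  /* 1: now set  */
    return 0;
}

The return value comes from the pre-swap word, which is why the assembly tests %g7 (the value it loaded) rather than the updated memory word.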
+ */ +#define BLAST_BLOCK(buf, offset) \ + std %g0, [buf + offset + 0x38]; \ + std %g0, [buf + offset + 0x30]; \ + std %g0, [buf + offset + 0x28]; \ + std %g0, [buf + offset + 0x20]; \ + std %g0, [buf + offset + 0x18]; \ + std %g0, [buf + offset + 0x10]; \ + std %g0, [buf + offset + 0x08]; \ + std %g0, [buf + offset + 0x00]; + + /* Copy 32 bytes of memory at (src + offset) to + * (dst + offset). + */ +#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ + ldd [src + offset + 0x18], t0; \ + ldd [src + offset + 0x10], t2; \ + ldd [src + offset + 0x08], t4; \ + ldd [src + offset + 0x00], t6; \ + std t0, [dst + offset + 0x18]; \ + std t2, [dst + offset + 0x10]; \ + std t4, [dst + offset + 0x08]; \ + std t6, [dst + offset + 0x00]; + + /* Profiling evidence indicates that memset() is + * commonly called for blocks of size PAGE_SIZE, + * and (2 * PAGE_SIZE) (for kernel stacks) + * and with a second arg of zero. We assume in + * all of these cases that the buffer is aligned + * on at least an 8 byte boundary. + * + * Therefore we special case them to make them + * as fast as possible. + */ + + .text +ENTRY(bzero_1page) +/* NOTE: If you change the number of insns of this routine, please check + * arch/sparc/mm/hypersparc.S */ + /* %o0 = buf */ + or %g0, %g0, %g1 + or %o0, %g0, %o1 + or %g0, (PAGE_SIZE >> 8), %g2 +1: + BLAST_BLOCK(%o0, 0x00) + BLAST_BLOCK(%o0, 0x40) + BLAST_BLOCK(%o0, 0x80) + BLAST_BLOCK(%o0, 0xc0) + subcc %g2, 1, %g2 + bne 1b + add %o0, 0x100, %o0 + + retl + nop +ENDPROC(bzero_1page) +EXPORT_SYMBOL(bzero_1page) + +ENTRY(__copy_1page) +/* NOTE: If you change the number of insns of this routine, please check + * arch/sparc/mm/hypersparc.S */ + /* %o0 = dst, %o1 = src */ + or %g0, (PAGE_SIZE >> 8), %g1 +1: + MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5) + subcc %g1, 1, %g1 + add %o0, 0x100, %o0 + bne 1b + add %o1, 0x100, %o1 + + retl + nop +ENDPROC(__copy_1page) +EXPORT_SYMBOL(__copy_1page) diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S new file mode 100644 index 0000000000..2bfa44a6b2 --- /dev/null +++ b/arch/sparc/lib/bzero.S @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* bzero.S: Simple prefetching memset, bzero, and clear_user + * implementations. + * + * Copyright (C) 2005 David S. 
Miller <davem@davemloft.net> + */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text + +ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */ + and %o1, 0xff, %o3 + mov %o2, %o1 + sllx %o3, 8, %g1 + or %g1, %o3, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %xcc, 1f + or %g1, %o2, %o2 + +ENTRY(__bzero) /* %o0=buf, %o1=len */ + clr %o2 +1: mov %o0, %o3 + brz,pn %o1, __bzero_done + cmp %o1, 16 + bl,pn %icc, __bzero_tiny + prefetch [%o0 + 0x000], #n_writes + andcc %o0, 0x3, %g0 + be,pt %icc, 2f +1: stb %o2, [%o0 + 0x00] + add %o0, 1, %o0 + andcc %o0, 0x3, %g0 + bne,pn %icc, 1b + sub %o1, 1, %o1 +2: andcc %o0, 0x7, %g0 + be,pt %icc, 3f + stw %o2, [%o0 + 0x00] + sub %o1, 4, %o1 + add %o0, 4, %o0 +3: and %o1, 0x38, %g1 + cmp %o1, 0x40 + andn %o1, 0x3f, %o4 + bl,pn %icc, 5f + and %o1, 0x7, %o1 + prefetch [%o0 + 0x040], #n_writes + prefetch [%o0 + 0x080], #n_writes + prefetch [%o0 + 0x0c0], #n_writes + prefetch [%o0 + 0x100], #n_writes + prefetch [%o0 + 0x140], #n_writes +4: prefetch [%o0 + 0x180], #n_writes + stx %o2, [%o0 + 0x00] + stx %o2, [%o0 + 0x08] + stx %o2, [%o0 + 0x10] + stx %o2, [%o0 + 0x18] + stx %o2, [%o0 + 0x20] + stx %o2, [%o0 + 0x28] + stx %o2, [%o0 + 0x30] + stx %o2, [%o0 + 0x38] + subcc %o4, 0x40, %o4 + bne,pt %icc, 4b + add %o0, 0x40, %o0 + brz,pn %g1, 6f + nop +5: stx %o2, [%o0 + 0x00] + subcc %g1, 8, %g1 + bne,pt %icc, 5b + add %o0, 0x8, %o0 +6: brz,pt %o1, __bzero_done + nop +__bzero_tiny: +1: stb %o2, [%o0 + 0x00] + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 +__bzero_done: + retl + mov %o3, %o0 +ENDPROC(__bzero) +ENDPROC(memset) +EXPORT_SYMBOL(__bzero) +EXPORT_SYMBOL(memset) + +#define EX_ST(x,y) \ +98: x,y; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, __retl_o1; \ + .text; \ + .align 4; + +ENTRY(__clear_user) /* %o0=buf, %o1=len */ + brz,pn %o1, __clear_user_done + cmp %o1, 16 + bl,pn %icc, __clear_user_tiny + EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) + andcc %o0, 0x3, %g0 + be,pt %icc, 2f +1: EX_ST(stba %g0, [%o0 + 0x00] %asi) + add %o0, 1, %o0 + andcc %o0, 0x3, %g0 + bne,pn %icc, 1b + sub %o1, 1, %o1 +2: andcc %o0, 0x7, %g0 + be,pt %icc, 3f + EX_ST(stwa %g0, [%o0 + 0x00] %asi) + sub %o1, 4, %o1 + add %o0, 4, %o0 +3: and %o1, 0x38, %g1 + cmp %o1, 0x40 + andn %o1, 0x3f, %o4 + bl,pn %icc, 5f + and %o1, 0x7, %o1 + EX_ST(prefetcha [%o0 + 0x040] %asi, #n_writes) + EX_ST(prefetcha [%o0 + 0x080] %asi, #n_writes) + EX_ST(prefetcha [%o0 + 0x0c0] %asi, #n_writes) + EX_ST(prefetcha [%o0 + 0x100] %asi, #n_writes) + EX_ST(prefetcha [%o0 + 0x140] %asi, #n_writes) +4: EX_ST(prefetcha [%o0 + 0x180] %asi, #n_writes) + EX_ST(stxa %g0, [%o0 + 0x00] %asi) + EX_ST(stxa %g0, [%o0 + 0x08] %asi) + EX_ST(stxa %g0, [%o0 + 0x10] %asi) + EX_ST(stxa %g0, [%o0 + 0x18] %asi) + EX_ST(stxa %g0, [%o0 + 0x20] %asi) + EX_ST(stxa %g0, [%o0 + 0x28] %asi) + EX_ST(stxa %g0, [%o0 + 0x30] %asi) + EX_ST(stxa %g0, [%o0 + 0x38] %asi) + subcc %o4, 0x40, %o4 + bne,pt %icc, 4b + add %o0, 0x40, %o0 + brz,pn %g1, 6f + nop +5: EX_ST(stxa %g0, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %icc, 5b + add %o0, 0x8, %o0 +6: brz,pt %o1, __clear_user_done + nop +__clear_user_tiny: +1: EX_ST(stba %g0, [%o0 + 0x00] %asi) + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 +__clear_user_done: + retl + clr %o0 +ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S new file mode 100644 index 0000000000..66eda40fce --- /dev/null +++ b/arch/sparc/lib/checksum_32.S @@ -0,0 +1,457 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* checksum.S: Sparc optimized checksum code. + * + * Copyright(C) 1995 Linus Torvalds + * Copyright(C) 1995 Miguel de Icaza + * Copyright(C) 1996 David S. Miller + * Copyright(C) 1997 Jakub Jelinek + * + * derived from: + * Linux/Alpha checksum c-code + * Linux/ix86 inline checksum assembly + * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code) + * David Mosberger-Tang for optimized reference c-code + * BSD4.4 portable checksum routine + */ + +#include <linux/export.h> +#include <asm/errno.h> + +#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ + ldd [buf + offset + 0x00], t0; \ + ldd [buf + offset + 0x08], t2; \ + addxcc t0, sum, sum; \ + addxcc t1, sum, sum; \ + ldd [buf + offset + 0x10], t4; \ + addxcc t2, sum, sum; \ + addxcc t3, sum, sum; \ + ldd [buf + offset + 0x18], t0; \ + addxcc t4, sum, sum; \ + addxcc t5, sum, sum; \ + addxcc t0, sum, sum; \ + addxcc t1, sum, sum; + +#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \ + ldd [buf - offset - 0x08], t0; \ + ldd [buf - offset - 0x00], t2; \ + addxcc t0, sum, sum; \ + addxcc t1, sum, sum; \ + addxcc t2, sum, sum; \ + addxcc t3, sum, sum; + + /* Do end cruft out of band to get better cache patterns. */ +csum_partial_end_cruft: + be 1f ! caller asks %o1 & 0x8 + andcc %o1, 4, %g0 ! nope, check for word remaining + ldd [%o0], %g2 ! load two + addcc %g2, %o2, %o2 ! add first word to sum + addxcc %g3, %o2, %o2 ! add second word as well + add %o0, 8, %o0 ! advance buf ptr + addx %g0, %o2, %o2 ! add in final carry + andcc %o1, 4, %g0 ! check again for word remaining +1: be 1f ! nope, skip this code + andcc %o1, 3, %o1 ! check for trailing bytes + ld [%o0], %g2 ! load it + addcc %g2, %o2, %o2 ! add to sum + add %o0, 4, %o0 ! advance buf ptr + addx %g0, %o2, %o2 ! add in final carry + andcc %o1, 3, %g0 ! check again for trailing bytes +1: be 1f ! no trailing bytes, return + addcc %o1, -1, %g0 ! only one byte remains? + bne 2f ! at least two bytes more + subcc %o1, 2, %o1 ! only two bytes more? + b 4f ! only one byte remains + or %g0, %g0, %o4 ! clear fake hword value +2: lduh [%o0], %o4 ! get hword + be 6f ! jmp if only hword remains + add %o0, 2, %o0 ! advance buf ptr either way + sll %o4, 16, %o4 ! create upper hword +4: ldub [%o0], %o5 ! get final byte + sll %o5, 8, %o5 ! put into place + or %o5, %o4, %o4 ! coalese with hword (if any) +6: addcc %o4, %o2, %o2 ! add to sum +1: retl ! get outta here + addx %g0, %o2, %o0 ! add final carry into retval + + /* Also do alignment out of band to get better cache patterns. */ +csum_partial_fix_alignment: + cmp %o1, 6 + bl cpte - 0x4 + andcc %o0, 0x2, %g0 + be 1f + andcc %o0, 0x4, %g0 + lduh [%o0 + 0x00], %g2 + sub %o1, 2, %o1 + add %o0, 2, %o0 + sll %g2, 16, %g2 + addcc %g2, %o2, %o2 + srl %o2, 16, %g3 + addx %g0, %g3, %g2 + sll %o2, 16, %o2 + sll %g2, 16, %g3 + srl %o2, 16, %o2 + andcc %o0, 0x4, %g0 + or %g3, %o2, %o2 +1: be cpa + andcc %o1, 0xffffff80, %o3 + ld [%o0 + 0x00], %g2 + sub %o1, 4, %o1 + addcc %g2, %o2, %o2 + add %o0, 4, %o0 + addx %g0, %o2, %o2 + b cpa + andcc %o1, 0xffffff80, %o3 + + /* The common case is to get called with a nicely aligned + * buffer of size 0x20. Follow the code path for that case. + */ + .globl csum_partial + EXPORT_SYMBOL(csum_partial) +csum_partial: /* %o0=buf, %o1=len, %o2=sum */ + andcc %o0, 0x7, %g0 ! alignment problems? + bne csum_partial_fix_alignment ! yep, handle it + sethi %hi(cpte - 8), %g7 ! prepare table jmp ptr + andcc %o1, 0xffffff80, %o3 ! num loop iterations +cpa: be 3f ! 
none to do + andcc %o1, 0x70, %g1 ! clears carry flag too +5: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5) + CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5) + CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5) + CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5) + addx %g0, %o2, %o2 ! sink in final carry + subcc %o3, 128, %o3 ! detract from loop iters + bne 5b ! more to do + add %o0, 128, %o0 ! advance buf ptr + andcc %o1, 0x70, %g1 ! clears carry flag too +3: be cpte ! nope + andcc %o1, 0xf, %g0 ! anything left at all? + srl %g1, 1, %o4 ! compute offset + sub %g7, %g1, %g7 ! adjust jmp ptr + sub %g7, %o4, %g7 ! final jmp ptr adjust + jmp %g7 + %lo(cpte - 8) ! enter the table + add %o0, %g1, %o0 ! advance buf ptr +cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5) + CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5) + addx %g0, %o2, %o2 ! fetch final carry + andcc %o1, 0xf, %g0 ! anything left at all? +cpte: bne csum_partial_end_cruft ! yep, handle it + andcc %o1, 8, %g0 ! check how much +cpout: retl ! get outta here + mov %o2, %o0 ! return computed csum + +/* Work around cpp -rob */ +#define ALLOC #alloc +#define EXECINSTR #execinstr +#define EX(x,y) \ +98: x,y; \ + .section __ex_table,ALLOC; \ + .align 4; \ + .word 98b, cc_fault; \ + .text; \ + .align 4 + + /* This aligned version executes typically in 8.5 superscalar cycles, this + * is the best I can do. I say 8.5 because the final add will pair with + * the next ldd in the main unrolled loop. Thus the pipe is always full. + * If you change these macros (including order of instructions), + * please check the fixup code below as well. + */ +#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ + EX(ldd [src + off + 0x00], t0); \ + EX(ldd [src + off + 0x08], t2); \ + addxcc t0, sum, sum; \ + EX(ldd [src + off + 0x10], t4); \ + addxcc t1, sum, sum; \ + EX(ldd [src + off + 0x18], t6); \ + addxcc t2, sum, sum; \ + EX(std t0, [dst + off + 0x00]); \ + addxcc t3, sum, sum; \ + EX(std t2, [dst + off + 0x08]); \ + addxcc t4, sum, sum; \ + EX(std t4, [dst + off + 0x10]); \ + addxcc t5, sum, sum; \ + EX(std t6, [dst + off + 0x18]); \ + addxcc t6, sum, sum; \ + addxcc t7, sum, sum; + + /* 12 superscalar cycles seems to be the limit for this case, + * because of this we thus do all the ldd's together to get + * Viking MXCC into streaming mode. Ho hum... + */ +#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ + EX(ldd [src + off + 0x00], t0); \ + EX(ldd [src + off + 0x08], t2); \ + EX(ldd [src + off + 0x10], t4); \ + EX(ldd [src + off + 0x18], t6); \ + EX(st t0, [dst + off + 0x00]); \ + addxcc t0, sum, sum; \ + EX(st t1, [dst + off + 0x04]); \ + addxcc t1, sum, sum; \ + EX(st t2, [dst + off + 0x08]); \ + addxcc t2, sum, sum; \ + EX(st t3, [dst + off + 0x0c]); \ + addxcc t3, sum, sum; \ + EX(st t4, [dst + off + 0x10]); \ + addxcc t4, sum, sum; \ + EX(st t5, [dst + off + 0x14]); \ + addxcc t5, sum, sum; \ + EX(st t6, [dst + off + 0x18]); \ + addxcc t6, sum, sum; \ + EX(st t7, [dst + off + 0x1c]); \ + addxcc t7, sum, sum; + + /* Yuck, 6 superscalar cycles... 
*/ +#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \ + EX(ldd [src - off - 0x08], t0); \ + EX(ldd [src - off - 0x00], t2); \ + addxcc t0, sum, sum; \ + EX(st t0, [dst - off - 0x08]); \ + addxcc t1, sum, sum; \ + EX(st t1, [dst - off - 0x04]); \ + addxcc t2, sum, sum; \ + EX(st t2, [dst - off - 0x00]); \ + addxcc t3, sum, sum; \ + EX(st t3, [dst - off + 0x04]); + + /* Handle the end cruft code out of band for better cache patterns. */ +cc_end_cruft: + be 1f + andcc %o3, 4, %g0 + EX(ldd [%o0 + 0x00], %g2) + add %o1, 8, %o1 + addcc %g2, %g7, %g7 + add %o0, 8, %o0 + addxcc %g3, %g7, %g7 + EX(st %g2, [%o1 - 0x08]) + addx %g0, %g7, %g7 + andcc %o3, 4, %g0 + EX(st %g3, [%o1 - 0x04]) +1: be 1f + andcc %o3, 3, %o3 + EX(ld [%o0 + 0x00], %g2) + add %o1, 4, %o1 + addcc %g2, %g7, %g7 + EX(st %g2, [%o1 - 0x04]) + addx %g0, %g7, %g7 + andcc %o3, 3, %g0 + add %o0, 4, %o0 +1: be 1f + addcc %o3, -1, %g0 + bne 2f + subcc %o3, 2, %o3 + b 4f + or %g0, %g0, %o4 +2: EX(lduh [%o0 + 0x00], %o4) + add %o0, 2, %o0 + EX(sth %o4, [%o1 + 0x00]) + be 6f + add %o1, 2, %o1 + sll %o4, 16, %o4 +4: EX(ldub [%o0 + 0x00], %o5) + EX(stb %o5, [%o1 + 0x00]) + sll %o5, 8, %o5 + or %o5, %o4, %o4 +6: addcc %o4, %g7, %g7 +1: retl + addx %g0, %g7, %o0 + + /* Also, handle the alignment code out of band. */ +cc_dword_align: + cmp %g1, 16 + bge 1f + srl %g1, 1, %o3 +2: cmp %o3, 0 + be,a ccte + andcc %g1, 0xf, %o3 + andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) + be,a 2b + srl %o3, 1, %o3 +1: andcc %o0, 0x1, %g0 + bne ccslow + andcc %o0, 0x2, %g0 + be 1f + andcc %o0, 0x4, %g0 + EX(lduh [%o0 + 0x00], %g4) + sub %g1, 2, %g1 + EX(sth %g4, [%o1 + 0x00]) + add %o0, 2, %o0 + sll %g4, 16, %g4 + addcc %g4, %g7, %g7 + add %o1, 2, %o1 + srl %g7, 16, %g3 + addx %g0, %g3, %g4 + sll %g7, 16, %g7 + sll %g4, 16, %g3 + srl %g7, 16, %g7 + andcc %o0, 0x4, %g0 + or %g3, %g7, %g7 +1: be 3f + andcc %g1, 0xffffff80, %g0 + EX(ld [%o0 + 0x00], %g4) + sub %g1, 4, %g1 + EX(st %g4, [%o1 + 0x00]) + add %o0, 4, %o0 + addcc %g4, %g7, %g7 + add %o1, 4, %o1 + addx %g0, %g7, %g7 + b 3f + andcc %g1, 0xffffff80, %g0 + + /* Sun, you just can't beat me, you just can't. Stop trying, + * give up. I'm serious, I am going to kick the living shit + * out of you, game over, lights out. + */ + .align 8 + .globl __csum_partial_copy_sparc_generic + EXPORT_SYMBOL(__csum_partial_copy_sparc_generic) +__csum_partial_copy_sparc_generic: + /* %o0=src, %o1=dest, %g1=len, %g7=sum */ + xor %o0, %o1, %o4 ! get changing bits + andcc %o4, 3, %g0 ! check for mismatched alignment + bne ccslow ! better this than unaligned/fixups + andcc %o0, 7, %g0 ! need to align things? + bne cc_dword_align ! yes, we check for short lengths there + andcc %g1, 0xffffff80, %g0 ! can we use unrolled loop? +3: be 3f ! nope, less than one loop remains + andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundary? + be ccdbl + 4 ! 8 byte aligned, kick ass +5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + sub %g1, 128, %g1 ! detract from length + addx %g0, %g7, %g7 ! add in last carry bit + andcc %g1, 0xffffff80, %g0 ! more to csum? + add %o0, 128, %o0 ! advance src ptr + bne 5b ! we did not go negative, continue looping + add %o1, 128, %o1 ! advance dest ptr +3: andcc %g1, 0x70, %o2 ! can use table? +ccmerge:be ccte ! 
nope, go and check for end cruft + andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw) + srl %o2, 1, %o4 ! begin negative offset computation + sethi %hi(12f), %o5 ! set up table ptr end + add %o0, %o2, %o0 ! advance src ptr + sub %o5, %o4, %o5 ! continue table calculation + sll %o2, 1, %g2 ! constant multiplies are fun... + sub %o5, %g2, %o5 ! some more adjustments + jmp %o5 + %lo(12f) ! jump into it, duff style, wheee... + add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw) +cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5) + CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5) +12: addx %g0, %g7, %g7 + andcc %o3, 0xf, %g0 ! check for low bits set +ccte: bne cc_end_cruft ! something left, handle it out of band + andcc %o3, 8, %g0 ! begin checks for that code + retl ! return + mov %g7, %o0 ! give em the computed checksum +ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3) + sub %g1, 128, %g1 ! detract from length + addx %g0, %g7, %g7 ! add in last carry bit + andcc %g1, 0xffffff80, %g0 ! more to csum? + add %o0, 128, %o0 ! advance src ptr + bne ccdbl ! we did not go negative, continue looping + add %o1, 128, %o1 ! advance dest ptr + b ccmerge ! finish it off, above + andcc %g1, 0x70, %o2 ! can use table? (clears carry btw) + +ccslow: cmp %g1, 0 + mov 0, %g5 + bleu 4f + andcc %o0, 1, %o5 + be,a 1f + srl %g1, 1, %g4 + sub %g1, 1, %g1 + EX(ldub [%o0], %g5) + add %o0, 1, %o0 + EX(stb %g5, [%o1]) + srl %g1, 1, %g4 + add %o1, 1, %o1 +1: cmp %g4, 0 + be,a 3f + andcc %g1, 1, %g0 + andcc %o0, 2, %g0 + be,a 1f + srl %g4, 1, %g4 + EX(lduh [%o0], %o4) + sub %g1, 2, %g1 + srl %o4, 8, %g2 + sub %g4, 1, %g4 + EX(stb %g2, [%o1]) + add %o4, %g5, %g5 + EX(stb %o4, [%o1 + 1]) + add %o0, 2, %o0 + srl %g4, 1, %g4 + add %o1, 2, %o1 +1: cmp %g4, 0 + be,a 2f + andcc %g1, 2, %g0 + EX(ld [%o0], %o4) +5: srl %o4, 24, %g2 + srl %o4, 16, %g3 + EX(stb %g2, [%o1]) + srl %o4, 8, %g2 + EX(stb %g3, [%o1 + 1]) + add %o0, 4, %o0 + EX(stb %g2, [%o1 + 2]) + addcc %o4, %g5, %g5 + EX(stb %o4, [%o1 + 3]) + addx %g5, %g0, %g5 ! I am now to lazy to optimize this (question it + add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl + subcc %g4, 1, %g4 ! tricks + bne,a 5b + EX(ld [%o0], %o4) + sll %g5, 16, %g2 + srl %g5, 16, %g5 + srl %g2, 16, %g2 + andcc %g1, 2, %g0 + add %g2, %g5, %g5 +2: be,a 3f + andcc %g1, 1, %g0 + EX(lduh [%o0], %o4) + andcc %g1, 1, %g0 + srl %o4, 8, %g2 + add %o0, 2, %o0 + EX(stb %g2, [%o1]) + add %g5, %o4, %g5 + EX(stb %o4, [%o1 + 1]) + add %o1, 2, %o1 +3: be,a 1f + sll %g5, 16, %o4 + EX(ldub [%o0], %g2) + sll %g2, 8, %o4 + EX(stb %g2, [%o1]) + add %g5, %o4, %g5 + sll %g5, 16, %o4 +1: addcc %o4, %g5, %g5 + srl %g5, 16, %o4 + addx %g0, %o4, %g5 + orcc %o5, %g0, %g0 + be 4f + srl %g5, 8, %o4 + and %g5, 0xff, %g2 + and %o4, 0xff, %o4 + sll %g2, 8, %g2 + or %g2, %o4, %g5 +4: addcc %g7, %g5, %g7 + retl + addx %g0, %g7, %o0 + +/* We do these strange calculations for the csum_*_from_user case only, ie. + * we only bother with faults on loads... 
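The computed jump above ("jump into it, duff style") enters a run of CSUMCOPY_LASTCHUNK expansions part-way through, so the 0x10..0x70 byte remainder is handled by the same unrolled code as the main loop. The same trick in plain C is Duff's device; a minimal sketch that only copies, without the checksum accumulation (names are made up):

#include <stdio.h>
#include <string.h>

/* Classic Duff's device: switch into the middle of an unrolled copy
 * loop so the first pass handles count % 8 and every later pass does
 * a full 8 bytes.  The checksum code's table jump plays the same game
 * with 16-byte chunks.
 */
static void duff_copy(char *to, const char *from, size_t count)
{
    size_t n = (count + 7) / 8;

    if (count == 0)
        return;
    switch (count % 8) {
    case 0: do { *to++ = *from++;
    case 7:      *to++ = *from++;
    case 6:      *to++ = *from++;
    case 5:      *to++ = *from++;
    case 4:      *to++ = *from++;
    case 3:      *to++ = *from++;
    case 2:      *to++ = *from++;
    case 1:      *to++ = *from++;
            } while (--n > 0);
    }
}

int main(void)
{
    const char src[] = "jump into it, duff style";
    char dst[sizeof(src)] = { 0 };

    duff_copy(dst, src, sizeof(src));
    printf("%s\n", dst);              /* prints the copied string */
    return strcmp(src, dst);
}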
*/ + +cc_fault: + retl + clr %o0 diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S new file mode 100644 index 0000000000..32b626f3fe --- /dev/null +++ b/arch/sparc/lib/checksum_64.S @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* checksum.S: Sparc V9 optimized checksum code. + * + * Copyright(C) 1995 Linus Torvalds + * Copyright(C) 1995 Miguel de Icaza + * Copyright(C) 1996, 2000 David S. Miller + * Copyright(C) 1997 Jakub Jelinek + * + * derived from: + * Linux/Alpha checksum c-code + * Linux/ix86 inline checksum assembly + * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code) + * David Mosberger-Tang for optimized reference c-code + * BSD4.4 portable checksum routine + */ + +#include <linux/export.h> + .text + +csum_partial_fix_alignment: + /* We checked for zero length already, so there must be + * at least one byte. + */ + be,pt %icc, 1f + nop + ldub [%o0 + 0x00], %o4 + add %o0, 1, %o0 + sub %o1, 1, %o1 +1: andcc %o0, 0x2, %g0 + be,pn %icc, csum_partial_post_align + cmp %o1, 2 + blu,pn %icc, csum_partial_end_cruft + nop + lduh [%o0 + 0x00], %o5 + add %o0, 2, %o0 + sub %o1, 2, %o1 + ba,pt %xcc, csum_partial_post_align + add %o5, %o4, %o4 + + .align 32 + .globl csum_partial + .type csum_partial,#function + EXPORT_SYMBOL(csum_partial) +csum_partial: /* %o0=buff, %o1=len, %o2=sum */ + prefetch [%o0 + 0x000], #n_reads + clr %o4 + prefetch [%o0 + 0x040], #n_reads + brz,pn %o1, csum_partial_finish + andcc %o0, 0x3, %g0 + + /* We "remember" whether the lowest bit in the address + * was set in %g7. Because if it is, we have to swap + * upper and lower 8 bit fields of the sum we calculate. + */ + bne,pn %icc, csum_partial_fix_alignment + andcc %o0, 0x1, %g7 + +csum_partial_post_align: + prefetch [%o0 + 0x080], #n_reads + andncc %o1, 0x3f, %o3 + + prefetch [%o0 + 0x0c0], #n_reads + sub %o1, %o3, %o1 + brz,pn %o3, 2f + prefetch [%o0 + 0x100], #n_reads + + /* So that we don't need to use the non-pairing + * add-with-carry instructions we accumulate 32-bit + * values into a 64-bit register. At the end of the + * loop we fold it down to 32-bits and so on. 
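A stand-alone C sketch of the scheme described above: 32-bit words are summed into a 64-bit accumulator so no add-with-carry is needed, then the result is folded 64->32 and 32->16 in two rounds each, exactly as the assembly that follows does. The name and test driver are made up, and the odd-address byte swap and the caller-supplied seed are left out:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold_sketch(const uint32_t *buf, size_t nwords)
{
    uint64_t sum = 0;

    for (size_t i = 0; i < nwords; i++)
        sum += buf[i];                    /* plain 64-bit accumulation */

    sum = (sum >> 32) + (uint32_t)sum;    /* fold 64 -> 32, round 1 */
    sum = (sum >> 32) + (uint32_t)sum;    /* round 2 absorbs the carry */
    sum = (sum >> 16) + (sum & 0xffff);   /* fold 32 -> 16, round 1 */
    sum = (sum >> 16) + (sum & 0xffff);   /* round 2 absorbs the carry */
    return (uint16_t)sum;
}

int main(void)
{
    uint32_t words[] = { 0xffffffff, 0xffffffff, 0x00000001, 0x12345678 };

    printf("%04x\n", csum_fold_sketch(words, 4));
    return 0;
}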
+ */ + prefetch [%o0 + 0x140], #n_reads +1: lduw [%o0 + 0x00], %o5 + lduw [%o0 + 0x04], %g1 + lduw [%o0 + 0x08], %g2 + add %o4, %o5, %o4 + lduw [%o0 + 0x0c], %g3 + add %o4, %g1, %o4 + lduw [%o0 + 0x10], %o5 + add %o4, %g2, %o4 + lduw [%o0 + 0x14], %g1 + add %o4, %g3, %o4 + lduw [%o0 + 0x18], %g2 + add %o4, %o5, %o4 + lduw [%o0 + 0x1c], %g3 + add %o4, %g1, %o4 + lduw [%o0 + 0x20], %o5 + add %o4, %g2, %o4 + lduw [%o0 + 0x24], %g1 + add %o4, %g3, %o4 + lduw [%o0 + 0x28], %g2 + add %o4, %o5, %o4 + lduw [%o0 + 0x2c], %g3 + add %o4, %g1, %o4 + lduw [%o0 + 0x30], %o5 + add %o4, %g2, %o4 + lduw [%o0 + 0x34], %g1 + add %o4, %g3, %o4 + lduw [%o0 + 0x38], %g2 + add %o4, %o5, %o4 + lduw [%o0 + 0x3c], %g3 + add %o4, %g1, %o4 + prefetch [%o0 + 0x180], #n_reads + add %o4, %g2, %o4 + subcc %o3, 0x40, %o3 + add %o0, 0x40, %o0 + bne,pt %icc, 1b + add %o4, %g3, %o4 + +2: and %o1, 0x3c, %o3 + brz,pn %o3, 2f + sub %o1, %o3, %o1 +1: lduw [%o0 + 0x00], %o5 + subcc %o3, 0x4, %o3 + add %o0, 0x4, %o0 + bne,pt %icc, 1b + add %o4, %o5, %o4 + +2: + /* fold 64-->32 */ + srlx %o4, 32, %o5 + srl %o4, 0, %o4 + add %o4, %o5, %o4 + srlx %o4, 32, %o5 + srl %o4, 0, %o4 + add %o4, %o5, %o4 + + /* fold 32-->16 */ + sethi %hi(0xffff0000), %g1 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + +csum_partial_end_cruft: + /* %o4 has the 16-bit sum we have calculated so-far. */ + cmp %o1, 2 + blu,pt %icc, 1f + nop + lduh [%o0 + 0x00], %o5 + sub %o1, 2, %o1 + add %o0, 2, %o0 + add %o4, %o5, %o4 +1: brz,pt %o1, 1f + nop + ldub [%o0 + 0x00], %o5 + sub %o1, 1, %o1 + add %o0, 1, %o0 + sllx %o5, 8, %o5 + add %o4, %o5, %o4 +1: + /* fold 32-->16 */ + sethi %hi(0xffff0000), %g1 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + +1: brz,pt %g7, 1f + nop + + /* We started with an odd byte, byte-swap the result. */ + srl %o4, 8, %o5 + and %o4, 0xff, %g1 + sll %g1, 8, %g1 + or %o5, %g1, %o4 + +1: addcc %o2, %o4, %o2 + addc %g0, %o2, %o2 + +csum_partial_finish: + retl + srl %o2, 0, %o0 diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S new file mode 100644 index 0000000000..e63458194f --- /dev/null +++ b/arch/sparc/lib/clear_page.S @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* clear_page.S: UltraSparc optimized clear page. + * + * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com) + */ + +#include <linux/export.h> +#include <linux/pgtable.h> +#include <asm/visasm.h> +#include <asm/thread_info.h> +#include <asm/page.h> +#include <asm/spitfire.h> +#include <asm/head.h> + + /* What we used to do was lock a TLB entry into a specific + * TLB slot, clear the page with interrupts disabled, then + * restore the original TLB entry. This was great for + * disturbing the TLB as little as possible, but it meant + * we had to keep interrupts disabled for a long time. + * + * Now, we simply use the normal TLB loading mechanism, + * and this makes the cpu choose a slot all by itself. + * Then we do a normal TLB flush on exit. We need only + * disable preemption during the clear. + */ + + .text + + .globl _clear_page + EXPORT_SYMBOL(_clear_page) +_clear_page: /* %o0=dest */ + ba,pt %xcc, clear_page_common + clr %o4 + + /* This thing is pretty important, it shows up + * on the profiles via do_anonymous_page(). 
+ */ + .align 32 + .globl clear_user_page + EXPORT_SYMBOL(clear_user_page) +clear_user_page: /* %o0=dest, %o1=vaddr */ + lduw [%g6 + TI_PRE_COUNT], %o2 + sethi %hi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_SIZE), %o4 + + ldx [%g2 + %lo(PAGE_OFFSET)], %g2 + sethi %hi(PAGE_KERNEL_LOCKED), %g3 + + ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 + sub %o0, %g2, %g1 ! paddr + + and %o1, %o4, %o0 ! vaddr D-cache alias bit + + or %g1, %g3, %g1 ! TTE data + sethi %hi(TLBTEMP_BASE), %o3 + + add %o2, 1, %o4 + add %o0, %o3, %o0 ! TTE vaddr + + /* Disable preemption. */ + mov TLB_TAG_ACCESS, %g3 + stw %o4, [%g6 + TI_PRE_COUNT] + + /* Load TLB entry. */ + rdpr %pstate, %o4 + wrpr %o4, PSTATE_IE, %pstate + stxa %o0, [%g3] ASI_DMMU + stxa %g1, [%g0] ASI_DTLB_DATA_IN + sethi %hi(KERNBASE), %g1 + flush %g1 + wrpr %o4, 0x0, %pstate + + mov 1, %o4 + +clear_page_common: + VISEntryHalf + membar #StoreLoad | #StoreStore | #LoadStore + fzero %f0 + sethi %hi(PAGE_SIZE/64), %o1 + mov %o0, %g1 ! remember vaddr for tlbflush + fzero %f2 + or %o1, %lo(PAGE_SIZE/64), %o1 + faddd %f0, %f2, %f4 + fmuld %f0, %f2, %f6 + faddd %f0, %f2, %f8 + fmuld %f0, %f2, %f10 + + faddd %f0, %f2, %f12 + fmuld %f0, %f2, %f14 +1: stda %f0, [%o0 + %g0] ASI_BLK_P + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 0x40, %o0 + membar #Sync + VISExitHalf + + brz,pn %o4, out + nop + + stxa %g0, [%g1] ASI_DMMU_DEMAP + membar #Sync + stw %o2, [%g6 + TI_PRE_COUNT] + +out: retl + nop + diff --git a/arch/sparc/lib/cmpdi2.c b/arch/sparc/lib/cmpdi2.c new file mode 100644 index 0000000000..333367fe73 --- /dev/null +++ b/arch/sparc/lib/cmpdi2.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> + +#include "libgcc.h" + +word_type __cmpdi2(long long a, long long b) +{ + const DWunion au = { + .ll = a + }; + const DWunion bu = { + .ll = b + }; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + + return 1; +} + +EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S new file mode 100644 index 0000000000..e23e6a69ff --- /dev/null +++ b/arch/sparc/lib/copy_in_user.S @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* copy_in_user.S: Copy from userspace to userspace. + * + * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asi.h> + +#define XCC xcc + +#define EX(x,y,z) \ +98: x,y; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, z; \ + .text; \ + .align 4; + +#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8) +#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4) +#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1) + + .register %g2,#scratch + .register %g3,#scratch + + .text +__retl_o4_plus_8: + add %o4, %o2, %o4 + retl + add %o4, 8, %o0 +__retl_o2_plus_4: + retl + add %o2, 4, %o0 +__retl_o2_plus_1: + retl + add %o2, 1, %o0 + + .align 32 + + /* Don't try to get too fancy here, just nice and + * simple. This is predominantly used for well aligned + * small copies in the compat layer. It is also used + * to copy register windows around during thread cloning. 
+ */ + +ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */ + cmp %o2, 0 + be,pn %XCC, 85f + or %o0, %o1, %o3 + cmp %o2, 16 + bleu,a,pn %XCC, 80f + or %o3, %o2, %o3 + + /* 16 < len <= 64 */ + andcc %o3, 0x7, %g0 + bne,pn %XCC, 90f + nop + + andn %o2, 0x7, %o4 + and %o2, 0x7, %o2 +1: subcc %o4, 0x8, %o4 + EX_O4(ldxa [%o1] %asi, %o5) + EX_O4(stxa %o5, [%o0] %asi) + add %o1, 0x8, %o1 + bgu,pt %XCC, 1b + add %o0, 0x8, %o0 + andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + EX_O2_4(lduwa [%o1] %asi, %o5) + EX_O2_4(stwa %o5, [%o0] %asi) + add %o1, 0x4, %o1 + add %o0, 0x4, %o0 +1: cmp %o2, 0 + be,pt %XCC, 85f + nop + ba,pt %xcc, 90f + nop + +80: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + nop + +82: + subcc %o2, 4, %o2 + EX_O2_4(lduwa [%o1] %asi, %g1) + EX_O2_4(stwa %g1, [%o0] %asi) + add %o1, 4, %o1 + bgu,pt %XCC, 82b + add %o0, 4, %o0 + +85: retl + clr %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + EX_O2_1(lduba [%o1] %asi, %g1) + EX_O2_1(stba %g1, [%o0] %asi) + add %o1, 1, %o1 + bgu,pt %XCC, 90b + add %o0, 1, %o0 + retl + clr %o0 +ENDPROC(raw_copy_in_user) +EXPORT_SYMBOL(raw_copy_in_user) diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S new file mode 100644 index 0000000000..7a041f3ebc --- /dev/null +++ b/arch/sparc/lib/copy_page.S @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* clear_page.S: UltraSparc optimized copy page. + * + * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com) + */ + +#include <linux/export.h> +#include <asm/visasm.h> +#include <asm/thread_info.h> +#include <asm/page.h> +#include <linux/pgtable.h> +#include <asm/spitfire.h> +#include <asm/head.h> + + /* What we used to do was lock a TLB entry into a specific + * TLB slot, clear the page with interrupts disabled, then + * restore the original TLB entry. This was great for + * disturbing the TLB as little as possible, but it meant + * we had to keep interrupts disabled for a long time. + * + * Now, we simply use the normal TLB loading mechanism, + * and this makes the cpu choose a slot all by itself. + * Then we do a normal TLB flush on exit. We need only + * disable preemption during the clear. + */ + +#define DCACHE_SIZE (PAGE_SIZE * 2) + +#if (PAGE_SHIFT == 13) +#define PAGE_SIZE_REM 0x80 +#elif (PAGE_SHIFT == 16) +#define PAGE_SIZE_REM 0x100 +#else +#error Wrong PAGE_SHIFT specified +#endif + +#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \ + fsrc2 %reg0, %f48; fsrc2 %reg1, %f50; \ + fsrc2 %reg2, %f52; fsrc2 %reg3, %f54; \ + fsrc2 %reg4, %f56; fsrc2 %reg5, %f58; \ + fsrc2 %reg6, %f60; fsrc2 %reg7, %f62; + + .text + + .align 32 + .globl copy_user_page + .type copy_user_page,#function + EXPORT_SYMBOL(copy_user_page) +copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ + lduw [%g6 + TI_PRE_COUNT], %o4 + sethi %hi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_SIZE), %o3 + + ldx [%g2 + %lo(PAGE_OFFSET)], %g2 + sethi %hi(PAGE_KERNEL_LOCKED), %g3 + + ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 + sub %o0, %g2, %g1 ! dest paddr + + sub %o1, %g2, %g2 ! src paddr + + and %o2, %o3, %o0 ! vaddr D-cache alias bit + or %g1, %g3, %g1 ! dest TTE data + + or %g2, %g3, %g2 ! src TTE data + sethi %hi(TLBTEMP_BASE), %o3 + + sethi %hi(DCACHE_SIZE), %o1 + add %o0, %o3, %o0 ! dest TTE vaddr + + add %o4, 1, %o2 + add %o0, %o1, %o1 ! src TTE vaddr + + /* Disable preemption. */ + mov TLB_TAG_ACCESS, %g3 + stw %o2, [%g6 + TI_PRE_COUNT] + + /* Load TLB entries. 
*/ + rdpr %pstate, %o2 + wrpr %o2, PSTATE_IE, %pstate + stxa %o0, [%g3] ASI_DMMU + stxa %g1, [%g0] ASI_DTLB_DATA_IN + membar #Sync + stxa %o1, [%g3] ASI_DMMU + stxa %g2, [%g0] ASI_DTLB_DATA_IN + membar #Sync + wrpr %o2, 0x0, %pstate + +cheetah_copy_page_insn: + ba,pt %xcc, 9f + nop + +1: + VISEntryHalf + membar #StoreLoad | #StoreStore | #LoadStore + sethi %hi((PAGE_SIZE/64)-2), %o2 + mov %o0, %g1 + prefetch [%o1 + 0x000], #one_read + or %o2, %lo((PAGE_SIZE/64)-2), %o2 + prefetch [%o1 + 0x040], #one_read + prefetch [%o1 + 0x080], #one_read + prefetch [%o1 + 0x0c0], #one_read + ldd [%o1 + 0x000], %f0 + prefetch [%o1 + 0x100], #one_read + ldd [%o1 + 0x008], %f2 + prefetch [%o1 + 0x140], #one_read + ldd [%o1 + 0x010], %f4 + prefetch [%o1 + 0x180], #one_read + fsrc2 %f0, %f16 + ldd [%o1 + 0x018], %f6 + fsrc2 %f2, %f18 + ldd [%o1 + 0x020], %f8 + fsrc2 %f4, %f20 + ldd [%o1 + 0x028], %f10 + fsrc2 %f6, %f22 + ldd [%o1 + 0x030], %f12 + fsrc2 %f8, %f24 + ldd [%o1 + 0x038], %f14 + fsrc2 %f10, %f26 + ldd [%o1 + 0x040], %f0 +1: ldd [%o1 + 0x048], %f2 + fsrc2 %f12, %f28 + ldd [%o1 + 0x050], %f4 + fsrc2 %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x058], %f6 + fsrc2 %f0, %f16 + ldd [%o1 + 0x060], %f8 + fsrc2 %f2, %f18 + ldd [%o1 + 0x068], %f10 + fsrc2 %f4, %f20 + ldd [%o1 + 0x070], %f12 + fsrc2 %f6, %f22 + ldd [%o1 + 0x078], %f14 + fsrc2 %f8, %f24 + ldd [%o1 + 0x080], %f0 + prefetch [%o1 + 0x180], #one_read + fsrc2 %f10, %f26 + subcc %o2, 1, %o2 + add %o0, 0x40, %o0 + bne,pt %xcc, 1b + add %o1, 0x40, %o1 + + ldd [%o1 + 0x048], %f2 + fsrc2 %f12, %f28 + ldd [%o1 + 0x050], %f4 + fsrc2 %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x058], %f6 + fsrc2 %f0, %f16 + ldd [%o1 + 0x060], %f8 + fsrc2 %f2, %f18 + ldd [%o1 + 0x068], %f10 + fsrc2 %f4, %f20 + ldd [%o1 + 0x070], %f12 + fsrc2 %f6, %f22 + add %o0, 0x40, %o0 + ldd [%o1 + 0x078], %f14 + fsrc2 %f8, %f24 + fsrc2 %f10, %f26 + fsrc2 %f12, %f28 + fsrc2 %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + membar #Sync + VISExitHalf + ba,pt %xcc, 5f + nop + +9: + VISEntry + ldub [%g6 + TI_FAULT_CODE], %g3 + mov %o0, %g1 + cmp %g3, 0 + rd %asi, %g3 + be,a,pt %icc, 1f + wr %g0, ASI_BLK_P, %asi + wr %g0, ASI_BLK_COMMIT_P, %asi +1: ldda [%o1] ASI_BLK_P, %f0 + add %o1, 0x40, %o1 + ldda [%o1] ASI_BLK_P, %f16 + add %o1, 0x40, %o1 + sethi %hi(PAGE_SIZE), %o2 +1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14) + ldda [%o1] ASI_BLK_P, %f32 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f16, f18, f20, f22, f24, f26, f28, f30) + ldda [%o1] ASI_BLK_P, %f0 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f32, f34, f36, f38, f40, f42, f44, f46) + ldda [%o1] ASI_BLK_P, %f16 + stda %f48, [%o0] %asi + sub %o2, 0x40, %o2 + add %o1, 0x40, %o1 + cmp %o2, PAGE_SIZE_REM + bne,pt %xcc, 1b + add %o0, 0x40, %o0 +#if (PAGE_SHIFT == 16) + TOUCH(f0, f2, f4, f6, f8, f10, f12, f14) + ldda [%o1] ASI_BLK_P, %f32 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f16, f18, f20, f22, f24, f26, f28, f30) + ldda [%o1] ASI_BLK_P, %f0 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + membar #Sync + stda %f32, [%o0] %asi + add %o0, 0x40, %o0 + stda %f0, [%o0] %asi +#else + membar #Sync + stda %f0, [%o0] %asi + add %o0, 0x40, %o0 + stda %f16, [%o0] %asi +#endif + membar #Sync + wr %g3, 0x0, %asi + VISExit + +5: + stxa %g0, [%g1] ASI_DMMU_DEMAP + membar #Sync + + sethi %hi(DCACHE_SIZE), %g2 + stxa %g0, [%g1 + %g2] ASI_DMMU_DEMAP + 
membar #Sync + + retl + stw %o4, [%g6 + TI_PRE_COUNT] + + .size copy_user_page, .-copy_user_page + + .globl cheetah_patch_copy_page +cheetah_patch_copy_page: + sethi %hi(0x01000000), %o1 ! NOP + sethi %hi(cheetah_copy_page_insn), %o0 + or %o0, %lo(cheetah_copy_page_insn), %o0 + stw %o1, [%o0] + membar #StoreStore + flush %o0 + retl + nop diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S new file mode 100644 index 0000000000..7bb2ef6888 --- /dev/null +++ b/arch/sparc/lib/copy_user.S @@ -0,0 +1,394 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* copy_user.S: Sparc optimized copy_from_user and copy_to_user code. + * + * Copyright(C) 1995 Linus Torvalds + * Copyright(C) 1996 David S. Miller + * Copyright(C) 1996 Eddie C. Dost + * Copyright(C) 1996,1998 Jakub Jelinek + * + * derived from: + * e-mail between David and Eddie. + * + * Returns 0 if successful, otherwise count of bytes not copied yet + */ + +#include <linux/export.h> +#include <asm/ptrace.h> +#include <asm/asmmacro.h> +#include <asm/page.h> +#include <asm/thread_info.h> + +/* Work around cpp -rob */ +#define ALLOC #alloc +#define EXECINSTR #execinstr + +#define EX_ENTRY(l1, l2) \ + .section __ex_table,ALLOC; \ + .align 4; \ + .word l1, l2; \ + .text; + +#define EX(x,y,a,b) \ +98: x,y; \ + .section .fixup,ALLOC,EXECINSTR; \ + .align 4; \ +99: retl; \ + a, b, %o0; \ + EX_ENTRY(98b, 99b) + +#define EX2(x,y,c,d,e,a,b) \ +98: x,y; \ + .section .fixup,ALLOC,EXECINSTR; \ + .align 4; \ +99: c, d, e; \ + retl; \ + a, b, %o0; \ + EX_ENTRY(98b, 99b) + +#define EXO2(x,y) \ +98: x, y; \ + EX_ENTRY(98b, 97f) + +#define LD(insn, src, offset, reg, label) \ +98: insn [%src + (offset)], %reg; \ + .section .fixup,ALLOC,EXECINSTR; \ +99: ba label; \ + mov offset, %g5; \ + EX_ENTRY(98b, 99b) + +#define ST(insn, dst, offset, reg, label) \ +98: insn %reg, [%dst + (offset)]; \ + .section .fixup,ALLOC,EXECINSTR; \ +99: ba label; \ + mov offset, %g5; \ + EX_ENTRY(98b, 99b) + +/* Both these macros have to start with exactly the same insn */ +/* left: g7 + (g1 % 128) - offset */ +#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ + LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \ + LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \ + LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \ + LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \ + ST(st, dst, offset + 0x00, t0, bigchunk_fault) \ + ST(st, dst, offset + 0x04, t1, bigchunk_fault) \ + ST(st, dst, offset + 0x08, t2, bigchunk_fault) \ + ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \ + ST(st, dst, offset + 0x10, t4, bigchunk_fault) \ + ST(st, dst, offset + 0x14, t5, bigchunk_fault) \ + ST(st, dst, offset + 0x18, t6, bigchunk_fault) \ + ST(st, dst, offset + 0x1c, t7, bigchunk_fault) + +/* left: g7 + (g1 % 128) - offset */ +#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ + LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \ + LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \ + LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \ + LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \ + ST(std, dst, offset + 0x00, t0, bigchunk_fault) \ + ST(std, dst, offset + 0x08, t2, bigchunk_fault) \ + ST(std, dst, offset + 0x10, t4, bigchunk_fault) \ + ST(std, dst, offset + 0x18, t6, bigchunk_fault) + + .section .fixup,#alloc,#execinstr +bigchunk_fault: + sub %g7, %g5, %o0 + and %g1, 127, %g1 + retl + add %o0, %g1, %o0 + +/* left: offset + 16 + (g1 % 16) */ +#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ + LD(ldd, src, -(offset + 0x10), t0, 
lastchunk_fault) \ + LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \ + ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \ + ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \ + ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \ + ST(st, dst, -(offset + 0x04), t3, lastchunk_fault) + + .section .fixup,#alloc,#execinstr +lastchunk_fault: + and %g1, 15, %g1 + retl + sub %g1, %g5, %o0 + +/* left: o3 + (o2 % 16) - offset */ +#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \ + LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \ + LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \ + LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \ + LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \ + ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \ + ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \ + ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \ + ST(sth, dst, offset + 0x06, t3, halfchunk_fault) + +/* left: o3 + (o2 % 16) + offset + 2 */ +#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ + LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \ + LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \ + ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \ + ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault) + + .section .fixup,#alloc,#execinstr +halfchunk_fault: + and %o2, 15, %o2 + sub %o3, %g5, %o3 + retl + add %o2, %o3, %o0 + +/* left: offset + 2 + (o2 % 2) */ +#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \ + LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \ + LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \ + ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \ + ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault) + + .section .fixup,#alloc,#execinstr +last_shortchunk_fault: + and %o2, 1, %o2 + retl + sub %o2, %g5, %o0 + + .text + .align 4 + + .globl __copy_user_begin +__copy_user_begin: + + .globl __copy_user + EXPORT_SYMBOL(__copy_user) +dword_align: + andcc %o1, 1, %g0 + be 4f + andcc %o1, 2, %g0 + + EXO2(ldub [%o1], %g2) + add %o1, 1, %o1 + EXO2(stb %g2, [%o0]) + sub %o2, 1, %o2 + bne 3f + add %o0, 1, %o0 + + EXO2(lduh [%o1], %g2) + add %o1, 2, %o1 + EXO2(sth %g2, [%o0]) + sub %o2, 2, %o2 + b 3f + add %o0, 2, %o0 +4: + EXO2(lduh [%o1], %g2) + add %o1, 2, %o1 + EXO2(sth %g2, [%o0]) + sub %o2, 2, %o2 + b 3f + add %o0, 2, %o0 + +__copy_user: /* %o0=dst %o1=src %o2=len */ + xor %o0, %o1, %o4 +1: + andcc %o4, 3, %o5 +2: + bne cannot_optimize + cmp %o2, 15 + + bleu short_aligned_end + andcc %o1, 3, %g0 + + bne dword_align +3: + andcc %o1, 4, %g0 + + be 2f + mov %o2, %g1 + + EXO2(ld [%o1], %o4) + sub %g1, 4, %g1 + EXO2(st %o4, [%o0]) + add %o1, 4, %o1 + add %o0, 4, %o0 +2: + andcc %g1, 0xffffff80, %g7 + be 3f + andcc %o0, 4, %g0 + + be ldd_std + 4 +5: + MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) + subcc %g7, 128, %g7 + add %o1, 128, %o1 + bne 5b + add %o0, 128, %o0 +3: + andcc %g1, 0x70, %g7 + be copy_user_table_end + andcc %g1, 8, %g0 + + sethi %hi(copy_user_table_end), %o5 + srl %g7, 1, %o4 + add %g7, %o4, %o4 + add %o1, %g7, %o1 + sub %o5, %o4, %o5 + jmpl %o5 + %lo(copy_user_table_end), %g0 + add %o0, %g7, %o0 + + MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x20, 
g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) +copy_user_table_end: + be copy_user_last7 + andcc %g1, 4, %g0 + + EX(ldd [%o1], %g2, and %g1, 0xf) + add %o0, 8, %o0 + add %o1, 8, %o1 + EX(st %g2, [%o0 - 0x08], and %g1, 0xf) + EX2(st %g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4) +copy_user_last7: + be 1f + andcc %g1, 2, %g0 + + EX(ld [%o1], %g2, and %g1, 7) + add %o1, 4, %o1 + EX(st %g2, [%o0], and %g1, 7) + add %o0, 4, %o0 +1: + be 1f + andcc %g1, 1, %g0 + + EX(lduh [%o1], %g2, and %g1, 3) + add %o1, 2, %o1 + EX(sth %g2, [%o0], and %g1, 3) + add %o0, 2, %o0 +1: + be 1f + nop + + EX(ldub [%o1], %g2, add %g0, 1) + EX(stb %g2, [%o0], add %g0, 1) +1: + retl + clr %o0 + +ldd_std: + MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) + subcc %g7, 128, %g7 + add %o1, 128, %o1 + bne ldd_std + add %o0, 128, %o0 + + andcc %g1, 0x70, %g7 + be copy_user_table_end + andcc %g1, 8, %g0 + + sethi %hi(copy_user_table_end), %o5 + srl %g7, 1, %o4 + add %g7, %o4, %o4 + add %o1, %g7, %o1 + sub %o5, %o4, %o5 + jmpl %o5 + %lo(copy_user_table_end), %g0 + add %o0, %g7, %o0 + +cannot_optimize: + bleu short_end + cmp %o5, 2 + + bne byte_chunk + and %o2, 0xfffffff0, %o3 + + andcc %o1, 1, %g0 + be 10f + nop + + EXO2(ldub [%o1], %g2) + add %o1, 1, %o1 + EXO2(stb %g2, [%o0]) + sub %o2, 1, %o2 + andcc %o2, 0xfffffff0, %o3 + be short_end + add %o0, 1, %o0 +10: + MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5) + MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5) + subcc %o3, 0x10, %o3 + add %o1, 0x10, %o1 + bne 10b + add %o0, 0x10, %o0 + b 2f + and %o2, 0xe, %o3 + +byte_chunk: + MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3) + MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3) + subcc %o3, 0x10, %o3 + add %o1, 0x10, %o1 + bne byte_chunk + add %o0, 0x10, %o0 + +short_end: + and %o2, 0xe, %o3 +2: + sethi %hi(short_table_end), %o5 + sll %o3, 3, %o4 + add %o0, %o3, %o0 + sub %o5, %o4, %o5 + add %o1, %o3, %o1 + jmpl %o5 + %lo(short_table_end), %g0 + andcc %o2, 1, %g0 + MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3) + MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3) +short_table_end: + be 1f + nop + EX(ldub [%o1], %g2, add %g0, 1) + EX(stb %g2, [%o0], add %g0, 1) +1: + retl + clr %o0 + +short_aligned_end: + bne short_end + andcc %o2, 8, %g0 + + be 1f + andcc %o2, 4, %g0 + + EXO2(ld [%o1 + 0x00], %g2) + EXO2(ld [%o1 + 0x04], %g3) + add %o1, 8, %o1 + EXO2(st %g2, [%o0 + 0x00]) + EX(st %g3, [%o0 + 0x04], sub %o2, 4) + add %o0, 8, %o0 +1: + b copy_user_last7 + mov %o2, %g1 + + .section .fixup,#alloc,#execinstr + .align 4 +97: + retl + mov %o2, %o0 + + .globl __copy_user_end +__copy_user_end: diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S new file mode 100644 index 0000000000..f968e83bc9 --- /dev/null +++ b/arch/sparc/lib/csum_copy.S @@ -0,0 +1,315 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* csum_copy.S: 
Checksum+copy code for sparc64 + * + * Copyright (C) 2005 David S. Miller <davem@davemloft.net> + */ + +#include <linux/export.h> + +#ifdef __KERNEL__ +#define GLOBAL_SPARE %g7 +#else +#define GLOBAL_SPARE %g5 +#endif + +#ifndef EX_LD +#define EX_LD(x) x +#endif + +#ifndef EX_ST +#define EX_ST(x) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME csum_partial_copy_nocheck +#endif + + .register %g2, #scratch + .register %g3, #scratch + + .text + +90: + /* We checked for zero length already, so there must be + * at least one byte. + */ + be,pt %icc, 1f + nop + EX_LD(LOAD(ldub, %o0 + 0x00, %o4)) + add %o0, 1, %o0 + sub %o2, 1, %o2 + EX_ST(STORE(stb, %o4, %o1 + 0x00)) + add %o1, 1, %o1 +1: andcc %o0, 0x2, %g0 + be,pn %icc, 80f + cmp %o2, 2 + blu,pn %icc, 60f + nop + EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) + add %o0, 2, %o0 + sub %o2, 2, %o2 + EX_ST(STORE(sth, %o5, %o1 + 0x00)) + add %o1, 2, %o1 + ba,pt %xcc, 80f + add %o5, %o4, %o4 + + .globl FUNC_NAME + .type FUNC_NAME,#function + EXPORT_SYMBOL(FUNC_NAME) +FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ + LOAD(prefetch, %o0 + 0x000, #n_reads) + xor %o0, %o1, %g1 + mov -1, %o3 + clr %o4 + andcc %g1, 0x3, %g0 + bne,pn %icc, 95f + LOAD(prefetch, %o0 + 0x040, #n_reads) + + brz,pn %o2, 70f + andcc %o0, 0x3, %g0 + + /* We "remember" whether the lowest bit in the address + * was set in GLOBAL_SPARE. Because if it is, we have to swap + * upper and lower 8 bit fields of the sum we calculate. + */ + bne,pn %icc, 90b + andcc %o0, 0x1, GLOBAL_SPARE + +80: + LOAD(prefetch, %o0 + 0x080, #n_reads) + andncc %o2, 0x3f, %g3 + + LOAD(prefetch, %o0 + 0x0c0, #n_reads) + sub %o2, %g3, %o2 + brz,pn %g3, 2f + LOAD(prefetch, %o0 + 0x100, #n_reads) + + /* So that we don't need to use the non-pairing + * add-with-carry instructions we accumulate 32-bit + * values into a 64-bit register. At the end of the + * loop we fold it down to 32-bits and so on. 
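[Editorial aside, not part of the diff] The fold the comment above describes, in C, assuming a 64-bit accumulator that has been summing 32-bit words (names are illustrative):

static unsigned int fold_to_16bits(unsigned long long sum)
{
        /* 64 -> 32: add the two halves twice so the carry is absorbed */
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        /* 32 -> 16: same trick on 16-bit halves */
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned int)sum;
}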
+ */ + ba,pt %xcc, 1f + LOAD(prefetch, %o0 + 0x140, #n_reads) + + .align 32 +1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) + EX_LD(LOAD(lduw, %o0 + 0x04, %g1)) + EX_LD(LOAD(lduw, %o0 + 0x08, %g2)) + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x00)) + EX_LD(LOAD(lduw, %o0 + 0x0c, %o5)) + add %o4, %g1, %o4 + EX_ST(STORE(stw, %g1, %o1 + 0x04)) + EX_LD(LOAD(lduw, %o0 + 0x10, %g1)) + add %o4, %g2, %o4 + EX_ST(STORE(stw, %g2, %o1 + 0x08)) + EX_LD(LOAD(lduw, %o0 + 0x14, %g2)) + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x0c)) + EX_LD(LOAD(lduw, %o0 + 0x18, %o5)) + add %o4, %g1, %o4 + EX_ST(STORE(stw, %g1, %o1 + 0x10)) + EX_LD(LOAD(lduw, %o0 + 0x1c, %g1)) + add %o4, %g2, %o4 + EX_ST(STORE(stw, %g2, %o1 + 0x14)) + EX_LD(LOAD(lduw, %o0 + 0x20, %g2)) + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x18)) + EX_LD(LOAD(lduw, %o0 + 0x24, %o5)) + add %o4, %g1, %o4 + EX_ST(STORE(stw, %g1, %o1 + 0x1c)) + EX_LD(LOAD(lduw, %o0 + 0x28, %g1)) + add %o4, %g2, %o4 + EX_ST(STORE(stw, %g2, %o1 + 0x20)) + EX_LD(LOAD(lduw, %o0 + 0x2c, %g2)) + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x24)) + EX_LD(LOAD(lduw, %o0 + 0x30, %o5)) + add %o4, %g1, %o4 + EX_ST(STORE(stw, %g1, %o1 + 0x28)) + EX_LD(LOAD(lduw, %o0 + 0x34, %g1)) + add %o4, %g2, %o4 + EX_ST(STORE(stw, %g2, %o1 + 0x2c)) + EX_LD(LOAD(lduw, %o0 + 0x38, %g2)) + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x30)) + EX_LD(LOAD(lduw, %o0 + 0x3c, %o5)) + add %o4, %g1, %o4 + EX_ST(STORE(stw, %g1, %o1 + 0x34)) + LOAD(prefetch, %o0 + 0x180, #n_reads) + add %o4, %g2, %o4 + EX_ST(STORE(stw, %g2, %o1 + 0x38)) + subcc %g3, 0x40, %g3 + add %o0, 0x40, %o0 + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x3c)) + bne,pt %icc, 1b + add %o1, 0x40, %o1 + +2: and %o2, 0x3c, %g3 + brz,pn %g3, 2f + sub %o2, %g3, %o2 +1: EX_LD(LOAD(lduw, %o0 + 0x00, %o5)) + subcc %g3, 0x4, %g3 + add %o0, 0x4, %o0 + add %o4, %o5, %o4 + EX_ST(STORE(stw, %o5, %o1 + 0x00)) + bne,pt %icc, 1b + add %o1, 0x4, %o1 + +2: + /* fold 64-->32 */ + srlx %o4, 32, %o5 + srl %o4, 0, %o4 + add %o4, %o5, %o4 + srlx %o4, 32, %o5 + srl %o4, 0, %o4 + add %o4, %o5, %o4 + + /* fold 32-->16 */ + sethi %hi(0xffff0000), %g1 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + +60: + /* %o4 has the 16-bit sum we have calculated so-far. */ + cmp %o2, 2 + blu,pt %icc, 1f + nop + EX_LD(LOAD(lduh, %o0 + 0x00, %o5)) + sub %o2, 2, %o2 + add %o0, 2, %o0 + add %o4, %o5, %o4 + EX_ST(STORE(sth, %o5, %o1 + 0x00)) + add %o1, 0x2, %o1 +1: brz,pt %o2, 1f + nop + EX_LD(LOAD(ldub, %o0 + 0x00, %o5)) + sub %o2, 1, %o2 + add %o0, 1, %o0 + EX_ST(STORE(stb, %o5, %o1 + 0x00)) + sllx %o5, 8, %o5 + add %o1, 1, %o1 + add %o4, %o5, %o4 +1: + /* fold 32-->16 */ + sethi %hi(0xffff0000), %g1 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + srl %o4, 16, %o5 + andn %o4, %g1, %g2 + add %o5, %g2, %o4 + +1: brz,pt GLOBAL_SPARE, 1f + nop + + /* We started with an odd byte, byte-swap the result. 
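[Editorial aside, not part of the diff] The byte swap described here amounts to exchanging the two bytes of the already-folded 16-bit sum; a C sketch with an illustrative name:

static unsigned int swap_sum_bytes(unsigned int sum)
{
        /* sum is assumed to fit in 16 bits at this point */
        return ((sum & 0xff) << 8) | ((sum >> 8) & 0xff);
}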
*/ + srl %o4, 8, %o5 + and %o4, 0xff, %g1 + sll %g1, 8, %g1 + or %o5, %g1, %o4 + +1: addcc %o3, %o4, %o3 + addc %g0, %o3, %o3 + +70: + retl + srl %o3, 0, %o0 + +95: mov 0, GLOBAL_SPARE + brlez,pn %o2, 4f + andcc %o0, 1, %o5 + be,a,pt %icc, 1f + srl %o2, 1, %g1 + sub %o2, 1, %o2 + EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE)) + add %o0, 1, %o0 + EX_ST(STORE(stb, GLOBAL_SPARE, %o1)) + srl %o2, 1, %g1 + add %o1, 1, %o1 +1: brz,a,pn %g1, 3f + andcc %o2, 1, %g0 + andcc %o0, 2, %g0 + be,a,pt %icc, 1f + srl %g1, 1, %g1 + EX_LD(LOAD(lduh, %o0, %o4)) + sub %o2, 2, %o2 + srl %o4, 8, %g2 + sub %g1, 1, %g1 + EX_ST(STORE(stb, %g2, %o1)) + add %o4, GLOBAL_SPARE, GLOBAL_SPARE + EX_ST(STORE(stb, %o4, %o1 + 1)) + add %o0, 2, %o0 + srl %g1, 1, %g1 + add %o1, 2, %o1 +1: brz,a,pn %g1, 2f + andcc %o2, 2, %g0 + EX_LD(LOAD(lduw, %o0, %o4)) +5: srl %o4, 24, %g2 + srl %o4, 16, %g3 + EX_ST(STORE(stb, %g2, %o1)) + srl %o4, 8, %g2 + EX_ST(STORE(stb, %g3, %o1 + 1)) + add %o0, 4, %o0 + EX_ST(STORE(stb, %g2, %o1 + 2)) + addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE + EX_ST(STORE(stb, %o4, %o1 + 3)) + addc GLOBAL_SPARE, %g0, GLOBAL_SPARE + add %o1, 4, %o1 + subcc %g1, 1, %g1 + bne,a,pt %icc, 5b + EX_LD(LOAD(lduw, %o0, %o4)) + sll GLOBAL_SPARE, 16, %g2 + srl GLOBAL_SPARE, 16, GLOBAL_SPARE + srl %g2, 16, %g2 + andcc %o2, 2, %g0 + add %g2, GLOBAL_SPARE, GLOBAL_SPARE +2: be,a,pt %icc, 3f + andcc %o2, 1, %g0 + EX_LD(LOAD(lduh, %o0, %o4)) + andcc %o2, 1, %g0 + srl %o4, 8, %g2 + add %o0, 2, %o0 + EX_ST(STORE(stb, %g2, %o1)) + add GLOBAL_SPARE, %o4, GLOBAL_SPARE + EX_ST(STORE(stb, %o4, %o1 + 1)) + add %o1, 2, %o1 +3: be,a,pt %icc, 1f + sll GLOBAL_SPARE, 16, %o4 + EX_LD(LOAD(ldub, %o0, %g2)) + sll %g2, 8, %o4 + EX_ST(STORE(stb, %g2, %o1)) + add GLOBAL_SPARE, %o4, GLOBAL_SPARE + sll GLOBAL_SPARE, 16, %o4 +1: addcc %o4, GLOBAL_SPARE, GLOBAL_SPARE + srl GLOBAL_SPARE, 16, %o4 + addc %g0, %o4, GLOBAL_SPARE + brz,pt %o5, 4f + srl GLOBAL_SPARE, 8, %o4 + and GLOBAL_SPARE, 0xff, %g2 + and %o4, 0xff, %o4 + sll %g2, 8, %g2 + or %g2, %o4, GLOBAL_SPARE +4: addcc %o3, GLOBAL_SPARE, %o3 + addc %g0, %o3, %o0 + retl + srl %o0, 0, %o0 + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S new file mode 100644 index 0000000000..b0ba8d4dd4 --- /dev/null +++ b/arch/sparc/lib/csum_copy_from_user.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* csum_copy_from_user.S: Checksum+copy from userspace. + * + * Copyright (C) 2005 David S. Miller (davem@davemloft.net) + */ + +#define EX_LD(x) \ +98: x; \ + .section .fixup, "ax"; \ + .align 4; \ +99: retl; \ + mov 0, %o0; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + +#define FUNC_NAME csum_and_copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest + +#include "csum_copy.S" diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S new file mode 100644 index 0000000000..91ba36dbf7 --- /dev/null +++ b/arch/sparc/lib/csum_copy_to_user.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* csum_copy_to_user.S: Checksum+copy to userspace. + * + * Copyright (C) 2005 David S. 
Miller (davem@davemloft.net) + */ + +#define EX_ST(x) \ +98: x; \ + .section .fixup,"ax"; \ + .align 4; \ +99: retl; \ + mov 0, %o0; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + +#define FUNC_NAME csum_and_copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi + +#include "csum_copy.S" diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S new file mode 100644 index 0000000000..4ba901acd5 --- /dev/null +++ b/arch/sparc/lib/divdi3.S @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. + +This file is part of GNU CC. + + */ + +#include <linux/export.h> + .text + .align 4 + .globl __divdi3 +__divdi3: + save %sp,-104,%sp + cmp %i0,0 + bge .LL40 + mov 0,%l4 + mov -1,%l4 + sub %g0,%i1,%o0 + mov %o0,%o5 + subcc %g0,%o0,%g0 + sub %g0,%i0,%o0 + subx %o0,0,%o4 + mov %o4,%i0 + mov %o5,%i1 +.LL40: + cmp %i2,0 + bge .LL84 + mov %i3,%o4 + xnor %g0,%l4,%l4 + sub %g0,%i3,%o0 + mov %o0,%o3 + subcc %g0,%o0,%g0 + sub %g0,%i2,%o0 + subx %o0,0,%o2 + mov %o2,%i2 + mov %o3,%i3 + mov %i3,%o4 +.LL84: + cmp %i2,0 + bne .LL45 + mov %i1,%i3 + cmp %o4,%i0 + bleu .LL46 + mov %i3,%o1 + mov 32,%g1 + subcc %i0,%o4,%g0 +1: bcs 5f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + sub %i0,%o4,%i0 ! this kills msb of n + addx %i0,%i0,%i0 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %i0,%o4,%g0 + bcs 3f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + b 3f + sub %i0,%o4,%i0 ! this kills msb of n +4: sub %i0,%o4,%i0 +5: addxcc %i0,%i0,%i0 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb + sub %i0,%o4,%i0 +3: xnor %o1,0,%o1 + b .LL50 + mov 0,%o2 +.LL46: + cmp %o4,0 + bne .LL85 + mov %i0,%o2 + mov 1,%o0 + mov 0,%o1 + wr %g0, 0, %y + udiv %o0, %o1, %o0 + mov %o0,%o4 + mov %i0,%o2 +.LL85: + mov 0,%g3 + mov 32,%g1 + subcc %g3,%o4,%g0 +1: bcs 5f + addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb + sub %g3,%o4,%g3 ! this kills msb of n + addx %g3,%g3,%g3 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %g3,%o4,%g0 + bcs 3f + addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb + b 3f + sub %g3,%o4,%g3 ! this kills msb of n +4: sub %g3,%o4,%g3 +5: addxcc %g3,%g3,%g3 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb + sub %g3,%o4,%g3 +3: xnor %o2,0,%o2 + mov %g3,%i0 + mov %i3,%o1 + mov 32,%g1 + subcc %i0,%o4,%g0 +1: bcs 5f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + sub %i0,%o4,%i0 ! this kills msb of n + addx %i0,%i0,%i0 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %i0,%o4,%g0 + bcs 3f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + b 3f + sub %i0,%o4,%i0 ! this kills msb of n +4: sub %i0,%o4,%i0 +5: addxcc %i0,%i0,%i0 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o1,%o1,%o1 ! 
shift n1n0 and a 0-bit in lsb + sub %i0,%o4,%i0 +3: xnor %o1,0,%o1 + b .LL86 + mov %o1,%l1 +.LL45: + cmp %i2,%i0 + bleu .LL51 + sethi %hi(65535),%o0 + b .LL78 + mov 0,%o1 +.LL51: + or %o0,%lo(65535),%o0 + cmp %i2,%o0 + bgu .LL58 + mov %i2,%o1 + cmp %i2,256 + addx %g0,-1,%o0 + b .LL64 + and %o0,8,%o2 +.LL58: + sethi %hi(16777215),%o0 + or %o0,%lo(16777215),%o0 + cmp %i2,%o0 + bgu .LL64 + mov 24,%o2 + mov 16,%o2 +.LL64: + srl %o1,%o2,%o0 + sethi %hi(__clz_tab),%o1 + or %o1,%lo(__clz_tab),%o1 + ldub [%o0+%o1],%o0 + add %o0,%o2,%o0 + mov 32,%o1 + subcc %o1,%o0,%o3 + bne,a .LL72 + sub %o1,%o3,%o1 + cmp %i0,%i2 + bgu .LL74 + cmp %i3,%o4 + blu .LL78 + mov 0,%o1 +.LL74: + b .LL78 + mov 1,%o1 +.LL72: + sll %i2,%o3,%o2 + srl %o4,%o1,%o0 + or %o2,%o0,%i2 + sll %o4,%o3,%o4 + srl %i0,%o1,%o2 + sll %i0,%o3,%o0 + srl %i3,%o1,%o1 + or %o0,%o1,%i0 + sll %i3,%o3,%i3 + mov %i0,%o1 + mov 32,%g1 + subcc %o2,%i2,%g0 +1: bcs 5f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + sub %o2,%i2,%o2 ! this kills msb of n + addx %o2,%o2,%o2 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %o2,%i2,%g0 + bcs 3f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + b 3f + sub %o2,%i2,%o2 ! this kills msb of n +4: sub %o2,%i2,%o2 +5: addxcc %o2,%o2,%o2 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb + sub %o2,%i2,%o2 +3: xnor %o1,0,%o1 + mov %o2,%i0 + wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr + sra %o4,31,%g2 ! Do not move this insn + and %o1,%g2,%g2 ! Do not move this insn + andcc %g0,0,%g1 ! Do not move this insn + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,%o4,%g1 + mulscc %g1,0,%g1 + add %g1,%g2,%o0 + rd %y,%o2 + cmp %o0,%i0 + bgu,a .LL78 + add %o1,-1,%o1 + bne,a .LL50 + mov 0,%o2 + cmp %o2,%i3 + bleu .LL50 + mov 0,%o2 + add %o1,-1,%o1 +.LL78: + mov 0,%o2 +.LL50: + mov %o1,%l1 +.LL86: + mov %o2,%l0 + mov %l0,%i0 + mov %l1,%i1 + cmp %l4,0 + be .LL81 + sub %g0,%i1,%o0 + mov %o0,%l3 + subcc %g0,%o0,%g0 + sub %g0,%i0,%o0 + subx %o0,0,%l2 + mov %l2,%i0 + mov %l3,%i1 +.LL81: + ret + restore +EXPORT_SYMBOL(__divdi3) diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S new file mode 100644 index 0000000000..3a9ad8ffdf --- /dev/null +++ b/arch/sparc/lib/ffs.S @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/export.h> +#include <linux/linkage.h> + + .register %g2,#scratch + + .text + .align 32 + +ENTRY(ffs) + brnz,pt %o0, 1f + mov 1, %o1 + retl + clr %o0 + nop + nop +ENTRY(__ffs) + sllx %o0, 32, %g1 /* 1 */ + srlx %o0, 32, %g2 + + clr %o1 /* 2 */ + movrz %g1, %g2, %o0 + + movrz %g1, 32, %o1 /* 3 */ +1: clr %o2 + + sllx %o0, (64 - 16), %g1 /* 4 */ + srlx %o0, 16, %g2 + + movrz %g1, %g2, %o0 /* 5 */ + clr %o3 + + movrz %g1, 16, %o2 /* 6 */ + clr %o4 + + and %o0, 0xff, %g1 /* 7 */ + srlx %o0, 8, %g2 + + movrz %g1, %g2, %o0 /* 8 */ + clr %o5 + + movrz %g1, 8, %o3 /* 
9 */ + add %o2, %o1, %o2 + + and %o0, 0xf, %g1 /* 10 */ + srlx %o0, 4, %g2 + + movrz %g1, %g2, %o0 /* 11 */ + add %o2, %o3, %o2 + + movrz %g1, 4, %o4 /* 12 */ + + and %o0, 0x3, %g1 /* 13 */ + srlx %o0, 2, %g2 + + movrz %g1, %g2, %o0 /* 14 */ + add %o2, %o4, %o2 + + movrz %g1, 2, %o5 /* 15 */ + + and %o0, 0x1, %g1 /* 16 */ + + add %o2, %o5, %o2 /* 17 */ + xor %g1, 0x1, %g1 + + retl /* 18 */ + add %o2, %g1, %o0 +ENDPROC(ffs) +ENDPROC(__ffs) +EXPORT_SYMBOL(__ffs) +EXPORT_SYMBOL(ffs) + + .section .popc_6insn_patch, "ax" + .word ffs + brz,pn %o0, 98f + neg %o0, %g1 + xnor %o0, %g1, %o1 + popc %o1, %o0 +98: retl + nop + .word __ffs + neg %o0, %g1 + xnor %o0, %g1, %o1 + popc %o1, %o0 + retl + sub %o0, 1, %o0 + nop + .previous diff --git a/arch/sparc/lib/fls.S b/arch/sparc/lib/fls.S new file mode 100644 index 0000000000..ccf97fb7d8 --- /dev/null +++ b/arch/sparc/lib/fls.S @@ -0,0 +1,67 @@ +/* fls.S: SPARC default fls definition. + * + * SPARC default fls definition, which follows the same algorithm as + * in generic fls(). This function will be boot time patched on T4 + * and onward. + */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text + .register %g2, #scratch + .register %g3, #scratch +ENTRY(fls) + brz,pn %o0, 6f + mov 0, %o1 + sethi %hi(0xffff0000), %g3 + mov %o0, %g2 + andcc %o0, %g3, %g0 + be,pt %icc, 8f + mov 32, %o1 + sethi %hi(0xff000000), %g3 + andcc %g2, %g3, %g0 + bne,pt %icc, 3f + sethi %hi(0xf0000000), %g3 + sll %o0, 8, %o0 +1: + add %o1, -8, %o1 + sra %o0, 0, %o0 + mov %o0, %g2 +2: + sethi %hi(0xf0000000), %g3 +3: + andcc %g2, %g3, %g0 + bne,pt %icc, 4f + sethi %hi(0xc0000000), %g3 + sll %o0, 4, %o0 + add %o1, -4, %o1 + sra %o0, 0, %o0 + mov %o0, %g2 +4: + andcc %g2, %g3, %g0 + be,a,pt %icc, 7f + sll %o0, 2, %o0 +5: + xnor %g0, %o0, %o0 + srl %o0, 31, %o0 + sub %o1, %o0, %o1 +6: + jmp %o7 + 8 + sra %o1, 0, %o0 +7: + add %o1, -2, %o1 + ba,pt %xcc, 5b + sra %o0, 0, %o0 +8: + sll %o0, 16, %o0 + sethi %hi(0xff000000), %g3 + sra %o0, 0, %o0 + mov %o0, %g2 + andcc %g2, %g3, %g0 + bne,pt %icc, 2b + mov 16, %o1 + ba,pt %xcc, 1b + sll %o0, 8, %o0 +ENDPROC(fls) +EXPORT_SYMBOL(fls) diff --git a/arch/sparc/lib/fls64.S b/arch/sparc/lib/fls64.S new file mode 100644 index 0000000000..87005b67d3 --- /dev/null +++ b/arch/sparc/lib/fls64.S @@ -0,0 +1,61 @@ +/* fls64.S: SPARC default __fls definition. + * + * SPARC default __fls definition, which follows the same algorithm as + * in generic __fls(). This function will be boot time patched on T4 + * and onward. 
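[Editorial aside, not part of the diff] The generic shift-and-test algorithm these comments refer to looks roughly like this in C; fls() returns a 1-based bit index (0 when no bit is set), and __fls() is the 0-based 64-bit analogue:

static int fls_sketch(unsigned int x)
{
        int r = 32;

        if (!x)
                return 0;
        if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
        if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
        if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
        if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
        if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
        return r;
}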
+ */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text + .register %g2, #scratch + .register %g3, #scratch +ENTRY(__fls) + mov -1, %g2 + sllx %g2, 32, %g2 + and %o0, %g2, %g2 + brnz,pt %g2, 1f + mov 63, %g1 + sllx %o0, 32, %o0 + mov 31, %g1 +1: + mov -1, %g2 + sllx %g2, 48, %g2 + and %o0, %g2, %g2 + brnz,pt %g2, 2f + mov -1, %g2 + sllx %o0, 16, %o0 + add %g1, -16, %g1 +2: + mov -1, %g2 + sllx %g2, 56, %g2 + and %o0, %g2, %g2 + brnz,pt %g2, 3f + mov -1, %g2 + sllx %o0, 8, %o0 + add %g1, -8, %g1 +3: + sllx %g2, 60, %g2 + and %o0, %g2, %g2 + brnz,pt %g2, 4f + mov -1, %g2 + sllx %o0, 4, %o0 + add %g1, -4, %g1 +4: + sllx %g2, 62, %g2 + and %o0, %g2, %g2 + brnz,pt %g2, 5f + mov -1, %g3 + sllx %o0, 2, %o0 + add %g1, -2, %g1 +5: + mov 0, %g2 + sllx %g3, 63, %g3 + and %o0, %g3, %o0 + movre %o0, 1, %g2 + sub %g1, %g2, %g1 + jmp %o7+8 + sra %g1, 0, %o0 +ENDPROC(__fls) +EXPORT_SYMBOL(__fls) diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S new file mode 100644 index 0000000000..eebee59b06 --- /dev/null +++ b/arch/sparc/lib/hweight.S @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/export.h> +#include <linux/linkage.h> + + .text + .align 32 +ENTRY(__arch_hweight8) + sethi %hi(__sw_hweight8), %g1 + jmpl %g1 + %lo(__sw_hweight8), %g0 + nop +ENDPROC(__arch_hweight8) +EXPORT_SYMBOL(__arch_hweight8) + .section .popc_3insn_patch, "ax" + .word __arch_hweight8 + sllx %o0, 64-8, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight16) + sethi %hi(__sw_hweight16), %g1 + jmpl %g1 + %lo(__sw_hweight16), %g0 + nop +ENDPROC(__arch_hweight16) +EXPORT_SYMBOL(__arch_hweight16) + .section .popc_3insn_patch, "ax" + .word __arch_hweight16 + sllx %o0, 64-16, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight32) + sethi %hi(__sw_hweight32), %g1 + jmpl %g1 + %lo(__sw_hweight32), %g0 + nop +ENDPROC(__arch_hweight32) +EXPORT_SYMBOL(__arch_hweight32) + .section .popc_3insn_patch, "ax" + .word __arch_hweight32 + sllx %o0, 64-32, %g1 + retl + popc %g1, %o0 + .previous + +ENTRY(__arch_hweight64) + sethi %hi(__sw_hweight64), %g1 + jmpl %g1 + %lo(__sw_hweight64), %g0 + nop +ENDPROC(__arch_hweight64) +EXPORT_SYMBOL(__arch_hweight64) + .section .popc_3insn_patch, "ax" + .word __arch_hweight64 + retl + popc %o0, %o0 + nop + .previous diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c new file mode 100644 index 0000000000..f3a8cd491c --- /dev/null +++ b/arch/sparc/lib/iomap.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implement the sparc iomap interfaces + */ +#include <linux/pci.h> +#include <linux/module.h> +#include <asm/io.h> + +/* Create a virtual mapping cookie for an IO port range */ +void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return (void __iomem *) (unsigned long) port; +} + +void ioport_unmap(void __iomem *addr) +{ + /* Nothing to do */ +} +EXPORT_SYMBOL(ioport_map); +EXPORT_SYMBOL(ioport_unmap); + +#ifdef CONFIG_PCI +void pci_iounmap(struct pci_dev *dev, void __iomem * addr) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(pci_iounmap); +#endif diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S new file mode 100644 index 0000000000..7fa8fd4b79 --- /dev/null +++ b/arch/sparc/lib/ipcsum.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/export.h> +#include <linux/linkage.h> + + .text +ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */ + sub %o1, 4, %g7 + lduw [%o0 + 0x00], %o2 + lduw [%o0 + 0x04], %g2 + lduw [%o0 + 0x08], %g3 + addcc %g2, %o2, %o2 + lduw [%o0 + 0x0c], %g2 
+ addccc %g3, %o2, %o2 + lduw [%o0 + 0x10], %g3 + + addccc %g2, %o2, %o2 + addc %o2, %g0, %o2 +1: addcc %g3, %o2, %o2 + add %o0, 4, %o0 + addccc %o2, %g0, %o2 + subcc %g7, 1, %g7 + be,a,pt %icc, 2f + sll %o2, 16, %g2 + + lduw [%o0 + 0x10], %g3 + ba,pt %xcc, 1b + nop +2: addcc %o2, %g2, %g2 + srl %g2, 16, %o2 + addc %o2, %g0, %o2 + xnor %g0, %o2, %o2 + set 0xffff, %o1 + retl + and %o2, %o1, %o0 +ENDPROC(ip_fast_csum) +EXPORT_SYMBOL(ip_fast_csum) diff --git a/arch/sparc/lib/libgcc.h b/arch/sparc/lib/libgcc.h new file mode 100644 index 0000000000..79845c941b --- /dev/null +++ b/arch/sparc/lib/libgcc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include <asm/byteorder.h> + +typedef int word_type __attribute__ ((mode (__word__))); + +struct DWstruct { + int high, low; +}; + +typedef union +{ + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S new file mode 100644 index 0000000000..47a39f4384 --- /dev/null +++ b/arch/sparc/lib/locks.S @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * locks.S: SMP low-level lock primitives on Sparc. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au) + * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include <linux/export.h> +#include <asm/ptrace.h> +#include <asm/psr.h> +#include <asm/smp.h> +#include <asm/spinlock.h> + + .text + .align 4 + + /* Read/writer locks, as usual this is overly clever to make it + * as fast as possible. + */ + + /* caches... */ +___rw_read_enter_spin_on_wlock: + orcc %g2, 0x0, %g0 + be,a ___rw_read_enter + ldstub [%g1 + 3], %g2 + b ___rw_read_enter_spin_on_wlock + ldub [%g1 + 3], %g2 +___rw_read_try_spin_on_wlock: + andcc %g2, 0xff, %g0 + be,a ___rw_read_try + ldstub [%g1 + 3], %g2 + xnorcc %g2, 0x0, %o0 /* if g2 is ~0, set o0 to 0 and bugger off */ + bne,a ___rw_read_enter_spin_on_wlock + ld [%g1], %g2 + retl + mov %g4, %o7 +___rw_read_exit_spin_on_wlock: + orcc %g2, 0x0, %g0 + be,a ___rw_read_exit + ldstub [%g1 + 3], %g2 + b ___rw_read_exit_spin_on_wlock + ldub [%g1 + 3], %g2 +___rw_write_enter_spin_on_wlock: + orcc %g2, 0x0, %g0 + be,a ___rw_write_enter + ldstub [%g1 + 3], %g2 + b ___rw_write_enter_spin_on_wlock + ld [%g1], %g2 + + .globl ___rw_read_enter +EXPORT_SYMBOL(___rw_read_enter) +___rw_read_enter: + orcc %g2, 0x0, %g0 + bne,a ___rw_read_enter_spin_on_wlock + ldub [%g1 + 3], %g2 + ld [%g1], %g2 + add %g2, 1, %g2 + st %g2, [%g1] + retl + mov %g4, %o7 + + .globl ___rw_read_exit +EXPORT_SYMBOL(___rw_read_exit) +___rw_read_exit: + orcc %g2, 0x0, %g0 + bne,a ___rw_read_exit_spin_on_wlock + ldub [%g1 + 3], %g2 + ld [%g1], %g2 + sub %g2, 0x1ff, %g2 + st %g2, [%g1] + retl + mov %g4, %o7 + + .globl ___rw_read_try +EXPORT_SYMBOL(___rw_read_try) +___rw_read_try: + orcc %g2, 0x0, %g0 + bne ___rw_read_try_spin_on_wlock + ld [%g1], %g2 + add %g2, 1, %g2 + st %g2, [%g1] + set 1, %o1 + retl + mov %g4, %o7 + + .globl ___rw_write_enter +EXPORT_SYMBOL(___rw_write_enter) +___rw_write_enter: + orcc %g2, 0x0, %g0 + bne ___rw_write_enter_spin_on_wlock + ld [%g1], %g2 + andncc %g2, 0xff, %g0 + bne,a ___rw_write_enter_spin_on_wlock + stb %g0, [%g1 + 3] + retl + mov %g4, %o7 diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S new file mode 100644 index 0000000000..09bf581a0b --- /dev/null +++ b/arch/sparc/lib/lshrdi3.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
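[Editorial aside, not part of the diff] __lshrdi3, defined just below, performs a 64-bit logical right shift using only 32-bit operations; a minimal C sketch of the idea, with hi/lo standing in for the two register halves (illustrative names; shift counts of 64 or more are undefined as usual):

static void lshrdi3_sketch(unsigned int *hi, unsigned int *lo, unsigned int count)
{
        if (count == 0)
                return;
        if (count < 32) {
                *lo = (*lo >> count) | (*hi << (32 - count));
                *hi >>= count;
        } else {
                *lo = *hi >> (count - 32);
                *hi = 0;
        }
}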
+#include <linux/export.h> +#include <linux/linkage.h> + +ENTRY(__lshrdi3) + cmp %o2, 0 + be 3f + mov 0x20, %g2 + + sub %g2, %o2, %g2 + cmp %g2, 0 + bg 1f + srl %o0, %o2, %o4 + + clr %o4 + neg %g2 + b 2f + srl %o0, %g2, %o5 +1: + sll %o0, %g2, %g3 + srl %o1, %o2, %g2 + or %g2, %g3, %o5 +2: + mov %o4, %o0 + mov %o5, %o1 +3: + retl + nop +ENDPROC(__lshrdi3) +EXPORT_SYMBOL(__lshrdi3) diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S new file mode 100644 index 0000000000..f7f7910eb4 --- /dev/null +++ b/arch/sparc/lib/mcount.S @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com) + * + * This file implements mcount(), which is used to collect profiling data. + * This can also be tweaked for kernel stack overflow detection. + */ + +#include <linux/export.h> +#include <linux/linkage.h> + +/* + * This is the main variant and is called by C code. GCC's -pg option + * automatically instruments every C function with a call to this. + */ + + .text + .align 32 + .globl _mcount + .type _mcount,#function + EXPORT_SYMBOL(_mcount) + .globl mcount + .type mcount,#function +_mcount: +mcount: +#ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE + /* Do nothing, the retl/nop below is all we need. */ +#else + sethi %hi(ftrace_trace_function), %g1 + sethi %hi(ftrace_stub), %g2 + ldx [%g1 + %lo(ftrace_trace_function)], %g1 + or %g2, %lo(ftrace_stub), %g2 + cmp %g1, %g2 + be,pn %icc, 1f + mov %i7, %g3 + save %sp, -176, %sp + mov %g3, %o1 + jmpl %g1, %o7 + mov %i7, %o0 + ret + restore + /* not reached */ +1: +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + sethi %hi(ftrace_graph_return), %g1 + ldx [%g1 + %lo(ftrace_graph_return)], %g3 + cmp %g2, %g3 + bne,pn %xcc, 5f + sethi %hi(ftrace_graph_entry_stub), %g2 + sethi %hi(ftrace_graph_entry), %g1 + or %g2, %lo(ftrace_graph_entry_stub), %g2 + ldx [%g1 + %lo(ftrace_graph_entry)], %g1 + cmp %g1, %g2 + be,pt %xcc, 2f + nop +5: mov %i7, %g2 + mov %fp, %g3 + save %sp, -176, %sp + mov %g2, %l0 + ba,pt %xcc, ftrace_graph_caller + mov %g3, %l1 +#endif +2: +#endif +#endif + retl + nop + .size _mcount,.-_mcount + .size mcount,.-mcount + +#ifdef CONFIG_FUNCTION_TRACER + .globl ftrace_stub + .type ftrace_stub,#function +ftrace_stub: + retl + nop + .size ftrace_stub,.-ftrace_stub +#ifdef CONFIG_DYNAMIC_FTRACE + .globl ftrace_caller + .type ftrace_caller,#function +ftrace_caller: + mov %i7, %g2 + mov %fp, %g3 + save %sp, -176, %sp + mov %g2, %o1 + mov %g2, %l0 + mov %g3, %l1 + .globl ftrace_call +ftrace_call: + call ftrace_stub + mov %i7, %o0 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_call +ftrace_graph_call: + call ftrace_stub + nop +#endif + ret + restore +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .size ftrace_graph_call,.-ftrace_graph_call +#endif + .size ftrace_call,.-ftrace_call + .size ftrace_caller,.-ftrace_caller +#endif +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +ENTRY(ftrace_graph_caller) + mov %l0, %o0 + mov %i7, %o1 + call prepare_ftrace_return + mov %l1, %o2 + ret + restore %o0, -8, %i7 +END(ftrace_graph_caller) + +ENTRY(return_to_handler) + save %sp, -176, %sp + call ftrace_return_to_handler + mov %fp, %o0 + jmpl %o0 + 8, %g0 + restore +END(return_to_handler) +#endif diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S new file mode 100644 index 0000000000..c87e8000fe --- /dev/null +++ b/arch/sparc/lib/memcmp.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Sparc optimized memcmp code. 
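[Editorial aside, not part of the diff] The memcmp whose header begins here is, behaviourally, the classic byte-compare loop; a minimal C sketch:

static int memcmp_sketch(const void *a, const void *b, unsigned long n)
{
        const unsigned char *p = a, *q = b;

        while (n--) {
                int d = *p++ - *q++;

                if (d)
                        return d;
        }
        return 0;
}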
+ * + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 2000, 2008 David S. Miller (davem@davemloft.net) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asm.h> + + .text +ENTRY(memcmp) + cmp %o2, 0 +1: BRANCH32(be, pn, 2f) + nop + ldub [%o0], %g7 + ldub [%o1], %g3 + sub %o2, 1, %o2 + add %o0, 1, %o0 + add %o1, 1, %o1 + subcc %g7, %g3, %g3 + BRANCH32(be, pt, 1b) + cmp %o2, 0 + retl + mov %g3, %o0 +2: retl + mov 0, %o0 +ENDPROC(memcmp) +EXPORT_SYMBOL(memcmp) diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S new file mode 100644 index 0000000000..57b1ae0f59 --- /dev/null +++ b/arch/sparc/lib/memcpy.S @@ -0,0 +1,463 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* memcpy.S: Sparc optimized memcpy and memmove code + * Hand optimized from GNU libc's memcpy and memmove + * Copyright (C) 1991,1996 Free Software Foundation + * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi) + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include <linux/export.h> + +#define FUNC(x) \ + .globl x; \ + .type x,@function; \ + .align 4; \ +x: + +/* Both these macros have to start with exactly the same insn */ +#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ + ldd [%src + (offset) + 0x00], %t0; \ + ldd [%src + (offset) + 0x08], %t2; \ + ldd [%src + (offset) + 0x10], %t4; \ + ldd [%src + (offset) + 0x18], %t6; \ + st %t0, [%dst + (offset) + 0x00]; \ + st %t1, [%dst + (offset) + 0x04]; \ + st %t2, [%dst + (offset) + 0x08]; \ + st %t3, [%dst + (offset) + 0x0c]; \ + st %t4, [%dst + (offset) + 0x10]; \ + st %t5, [%dst + (offset) + 0x14]; \ + st %t6, [%dst + (offset) + 0x18]; \ + st %t7, [%dst + (offset) + 0x1c]; + +#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ + ldd [%src + (offset) + 0x00], %t0; \ + ldd [%src + (offset) + 0x08], %t2; \ + ldd [%src + (offset) + 0x10], %t4; \ + ldd [%src + (offset) + 0x18], %t6; \ + std %t0, [%dst + (offset) + 0x00]; \ + std %t2, [%dst + (offset) + 0x08]; \ + std %t4, [%dst + (offset) + 0x10]; \ + std %t6, [%dst + (offset) + 0x18]; + +#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ + ldd [%src - (offset) - 0x10], %t0; \ + ldd [%src - (offset) - 0x08], %t2; \ + st %t0, [%dst - (offset) - 0x10]; \ + st %t1, [%dst - (offset) - 0x0c]; \ + st %t2, [%dst - (offset) - 0x08]; \ + st %t3, [%dst - (offset) - 0x04]; + +#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \ + ldd [%src - (offset) - 0x10], %t0; \ + ldd [%src - (offset) - 0x08], %t2; \ + std %t0, [%dst - (offset) - 0x10]; \ + std %t2, [%dst - (offset) - 0x08]; + +#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ + ldub [%src - (offset) - 0x02], %t0; \ + ldub [%src - (offset) - 0x01], %t1; \ + stb %t0, [%dst - (offset) - 0x02]; \ + stb %t1, [%dst - (offset) - 0x01]; + + .text + .align 4 + +FUNC(memmove) +EXPORT_SYMBOL(memmove) + cmp %o0, %o1 + mov %o0, %g7 + bleu 9f + sub %o0, %o1, %o4 + + add %o1, %o2, %o3 + cmp %o3, %o0 + bleu 0f + andcc %o4, 3, %o5 + + add %o1, %o2, %o1 + add %o0, %o2, %o0 + sub %o1, 1, %o1 + sub %o0, 1, %o0 + +1: /* reverse_bytes */ + + ldub [%o1], %o4 + subcc %o2, 1, %o2 + stb %o4, [%o0] + sub %o1, 1, %o1 + bne 1b + sub %o0, 1, %o0 + + retl + mov %g7, %o0 + +/* NOTE: This code is executed just for the cases, + where %src (=%o1) & 3 is != 0. + We need to align it to 4. 
So, for (%src & 3) + 1 we need to do ldub,lduh + 2 lduh + 3 just ldub + so even if it looks weird, the branches + are correct here. -jj + */ +78: /* dword_align */ + + andcc %o1, 1, %g0 + be 4f + andcc %o1, 2, %g0 + + ldub [%o1], %g2 + add %o1, 1, %o1 + stb %g2, [%o0] + sub %o2, 1, %o2 + bne 3f + add %o0, 1, %o0 +4: + lduh [%o1], %g2 + add %o1, 2, %o1 + sth %g2, [%o0] + sub %o2, 2, %o2 + b 3f + add %o0, 2, %o0 + +FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ +EXPORT_SYMBOL(memcpy) + + sub %o0, %o1, %o4 + mov %o0, %g7 +9: + andcc %o4, 3, %o5 +0: + bne 86f + cmp %o2, 15 + + bleu 90f + andcc %o1, 3, %g0 + + bne 78b +3: + andcc %o1, 4, %g0 + + be 2f + mov %o2, %g1 + + ld [%o1], %o4 + sub %g1, 4, %g1 + st %o4, [%o0] + add %o1, 4, %o1 + add %o0, 4, %o0 +2: + andcc %g1, 0xffffff80, %g0 + be 3f + andcc %o0, 4, %g0 + + be 82f + 4 +5: + MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) + sub %g1, 128, %g1 + add %o1, 128, %o1 + cmp %g1, 128 + bge 5b + add %o0, 128, %o0 +3: + andcc %g1, 0x70, %g4 + be 80f + andcc %g1, 8, %g0 + + sethi %hi(80f), %o5 + srl %g4, 1, %o4 + add %g4, %o4, %o4 + add %o1, %g4, %o1 + sub %o5, %o4, %o5 + jmpl %o5 + %lo(80f), %g0 + add %o0, %g4, %o0 + +79: /* memcpy_table */ + + MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) + MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) + +80: /* memcpy_table_end */ + be 81f + andcc %g1, 4, %g0 + + ldd [%o1], %g2 + add %o0, 8, %o0 + st %g2, [%o0 - 0x08] + add %o1, 8, %o1 + st %g3, [%o0 - 0x04] + +81: /* memcpy_last7 */ + + be 1f + andcc %g1, 2, %g0 + + ld [%o1], %g2 + add %o1, 4, %o1 + st %g2, [%o0] + add %o0, 4, %o0 +1: + be 1f + andcc %g1, 1, %g0 + + lduh [%o1], %g2 + add %o1, 2, %o1 + sth %g2, [%o0] + add %o0, 2, %o0 +1: + be 1f + nop + + ldub [%o1], %g2 + stb %g2, [%o0] +1: + retl + mov %g7, %o0 + +82: /* ldd_std */ + MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) + MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) + subcc %g1, 128, %g1 + add %o1, 128, %o1 + cmp %g1, 128 + bge 82b + add %o0, 128, %o0 + + andcc %g1, 0x70, %g4 + be 84f + andcc %g1, 8, %g0 + + sethi %hi(84f), %o5 + add %o1, %g4, %o1 + sub %o5, %g4, %o5 + jmpl %o5 + %lo(84f), %g0 + add %o0, %g4, %o0 + +83: /* amemcpy_table */ + + MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5) + MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5) + +84: /* amemcpy_table_end */ + be 85f + andcc %g1, 4, %g0 + + ldd [%o1], %g2 + add %o0, 8, %o0 + std %g2, [%o0 - 0x08] + add %o1, 8, %o1 +85: /* amemcpy_last7 */ + be 1f + andcc %g1, 2, %g0 + + ld [%o1], %g2 + add %o1, 4, %o1 + st %g2, [%o0] + add %o0, 4, %o0 +1: + be 1f + andcc %g1, 1, %g0 + + lduh [%o1], %g2 + add %o1, 2, %o1 + sth %g2, [%o0] + add %o0, 2, %o0 +1: + be 1f + nop + + ldub [%o1], %g2 + stb %g2, 
[%o0] +1: + retl + mov %g7, %o0 + +86: /* non_aligned */ + cmp %o2, 6 + bleu 88f + nop + + save %sp, -96, %sp + andcc %i0, 3, %g0 + be 61f + andcc %i0, 1, %g0 + be 60f + andcc %i0, 2, %g0 + + ldub [%i1], %g5 + add %i1, 1, %i1 + stb %g5, [%i0] + sub %i2, 1, %i2 + bne 61f + add %i0, 1, %i0 +60: + ldub [%i1], %g3 + add %i1, 2, %i1 + stb %g3, [%i0] + sub %i2, 2, %i2 + ldub [%i1 - 1], %g3 + add %i0, 2, %i0 + stb %g3, [%i0 - 1] +61: + and %i1, 3, %g2 + and %i2, 0xc, %g3 + and %i1, -4, %i1 + cmp %g3, 4 + sll %g2, 3, %g4 + mov 32, %g2 + be 4f + sub %g2, %g4, %l0 + + blu 3f + cmp %g3, 0x8 + + be 2f + srl %i2, 2, %g3 + + ld [%i1], %i3 + add %i0, -8, %i0 + ld [%i1 + 4], %i4 + b 8f + add %g3, 1, %g3 +2: + ld [%i1], %i4 + add %i0, -12, %i0 + ld [%i1 + 4], %i5 + add %g3, 2, %g3 + b 9f + add %i1, -4, %i1 +3: + ld [%i1], %g1 + add %i0, -4, %i0 + ld [%i1 + 4], %i3 + srl %i2, 2, %g3 + b 7f + add %i1, 4, %i1 +4: + ld [%i1], %i5 + cmp %i2, 7 + ld [%i1 + 4], %g1 + srl %i2, 2, %g3 + bleu 10f + add %i1, 8, %i1 + + ld [%i1], %i3 + add %g3, -1, %g3 +5: + sll %i5, %g4, %g2 + srl %g1, %l0, %g5 + or %g2, %g5, %g2 + st %g2, [%i0] +7: + ld [%i1 + 4], %i4 + sll %g1, %g4, %g2 + srl %i3, %l0, %g5 + or %g2, %g5, %g2 + st %g2, [%i0 + 4] +8: + ld [%i1 + 8], %i5 + sll %i3, %g4, %g2 + srl %i4, %l0, %g5 + or %g2, %g5, %g2 + st %g2, [%i0 + 8] +9: + ld [%i1 + 12], %g1 + sll %i4, %g4, %g2 + srl %i5, %l0, %g5 + addcc %g3, -4, %g3 + or %g2, %g5, %g2 + add %i1, 16, %i1 + st %g2, [%i0 + 12] + add %i0, 16, %i0 + bne,a 5b + ld [%i1], %i3 +10: + sll %i5, %g4, %g2 + srl %g1, %l0, %g5 + srl %l0, 3, %g3 + or %g2, %g5, %g2 + sub %i1, %g3, %i1 + andcc %i2, 2, %g0 + st %g2, [%i0] + be 1f + andcc %i2, 1, %g0 + + ldub [%i1], %g2 + add %i1, 2, %i1 + stb %g2, [%i0 + 4] + add %i0, 2, %i0 + ldub [%i1 - 1], %g2 + stb %g2, [%i0 + 3] +1: + be 1f + nop + ldub [%i1], %g2 + stb %g2, [%i0 + 4] +1: + ret + restore %g7, %g0, %o0 + +88: /* short_end */ + + and %o2, 0xe, %o3 +20: + sethi %hi(89f), %o5 + sll %o3, 3, %o4 + add %o0, %o3, %o0 + sub %o5, %o4, %o5 + add %o1, %o3, %o1 + jmpl %o5 + %lo(89f), %g0 + andcc %o2, 1, %g0 + + MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3) + MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3) + +89: /* short_table_end */ + + be 1f + nop + + ldub [%o1], %g2 + stb %g2, [%o0] +1: + retl + mov %g7, %o0 + +90: /* short_aligned_end */ + bne 88b + andcc %o2, 8, %g0 + + be 1f + andcc %o2, 4, %g0 + + ld [%o1 + 0x00], %g2 + ld [%o1 + 0x04], %g3 + add %o1, 8, %o1 + st %g2, [%o0 + 0x00] + st %g3, [%o0 + 0x04] + add %o0, 8, %o0 +1: + b 81b + mov %o2, %g1 diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S new file mode 100644 index 0000000000..543dda7b9d --- /dev/null +++ b/arch/sparc/lib/memmove.S @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* memmove.S: Simple memmove implementation. + * + * Copyright (C) 1997, 2004 David S. 
Miller (davem@redhat.com) + * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz) + */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text +ENTRY(memmove) /* o0=dst o1=src o2=len */ + brz,pn %o2, 99f + mov %o0, %g1 + + cmp %o0, %o1 + bleu,pt %xcc, 2f + add %o1, %o2, %g7 + cmp %g7, %o0 + bleu,pt %xcc, memcpy + add %o0, %o2, %o5 + sub %g7, 1, %o1 + + sub %o5, 1, %o0 +1: ldub [%o1], %g7 + subcc %o2, 1, %o2 + sub %o1, 1, %o1 + stb %g7, [%o0] + bne,pt %icc, 1b + sub %o0, 1, %o0 +99: + retl + mov %g1, %o0 + + /* We can't just call memcpy for these memmove cases. On some + * chips the memcpy uses cache initializing stores and when dst + * and src are close enough, those can clobber the source data + * before we've loaded it in. + */ +2: or %o0, %o1, %g7 + or %o2, %g7, %g7 + andcc %g7, 0x7, %g0 + bne,pn %xcc, 4f + nop + +3: ldx [%o1], %g7 + add %o1, 8, %o1 + subcc %o2, 8, %o2 + add %o0, 8, %o0 + bne,pt %icc, 3b + stx %g7, [%o0 - 0x8] + ba,a,pt %xcc, 99b + +4: ldub [%o1], %g7 + add %o1, 1, %o1 + subcc %o2, 1, %o2 + add %o0, 1, %o0 + bne,pt %icc, 4b + stb %g7, [%o0 - 0x1] + ba,a,pt %xcc, 99b +ENDPROC(memmove) +EXPORT_SYMBOL(memmove) diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S new file mode 100644 index 0000000000..5386a3a200 --- /dev/null +++ b/arch/sparc/lib/memscan_32.S @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * memscan.S: Optimized memscan for the Sparc. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + */ + +#include <linux/export.h> + +/* In essence, this is just a fancy strlen. */ + +#define LO_MAGIC 0x01010101 +#define HI_MAGIC 0x80808080 + + .text + .align 4 + .globl __memscan_zero, __memscan_generic + .globl memscan +EXPORT_SYMBOL(__memscan_zero) +EXPORT_SYMBOL(__memscan_generic) +__memscan_zero: + /* %o0 = addr, %o1 = size */ + cmp %o1, 0 + bne,a 1f + andcc %o0, 3, %g0 + + retl + nop + +1: + be mzero_scan_word + sethi %hi(HI_MAGIC), %g2 + + ldsb [%o0], %g3 +mzero_still_not_word_aligned: + cmp %g3, 0 + bne 1f + add %o0, 1, %o0 + + retl + sub %o0, 1, %o0 + +1: + subcc %o1, 1, %o1 + bne,a 1f + andcc %o0, 3, %g0 + + retl + nop + +1: + bne,a mzero_still_not_word_aligned + ldsb [%o0], %g3 + + sethi %hi(HI_MAGIC), %g2 +mzero_scan_word: + or %g2, %lo(HI_MAGIC), %o3 + sethi %hi(LO_MAGIC), %g3 + or %g3, %lo(LO_MAGIC), %o2 +mzero_next_word: + ld [%o0], %g2 +mzero_next_word_preloaded: + sub %g2, %o2, %g2 +mzero_next_word_preloaded_next: + andcc %g2, %o3, %g0 + bne mzero_byte_zero + add %o0, 4, %o0 + +mzero_check_out_of_fuel: + subcc %o1, 4, %o1 + bg,a 1f + ld [%o0], %g2 + + retl + nop + +1: + b mzero_next_word_preloaded_next + sub %g2, %o2, %g2 + + /* Check every byte. 
*/ +mzero_byte_zero: + ldsb [%o0 - 4], %g2 + cmp %g2, 0 + bne mzero_byte_one + sub %o0, 4, %g3 + + retl + mov %g3, %o0 + +mzero_byte_one: + ldsb [%o0 - 3], %g2 + cmp %g2, 0 + bne,a mzero_byte_two_and_three + ldsb [%o0 - 2], %g2 + + retl + sub %o0, 3, %o0 + +mzero_byte_two_and_three: + cmp %g2, 0 + bne,a 1f + ldsb [%o0 - 1], %g2 + + retl + sub %o0, 2, %o0 + +1: + cmp %g2, 0 + bne,a mzero_next_word_preloaded + ld [%o0], %g2 + + retl + sub %o0, 1, %o0 + +mzero_found_it: + retl + sub %o0, 2, %o0 + +memscan: +__memscan_generic: + /* %o0 = addr, %o1 = c, %o2 = size */ + cmp %o2, 0 + bne,a 0f + ldub [%o0], %g2 + + b,a 2f +1: + ldub [%o0], %g2 +0: + cmp %g2, %o1 + be 2f + addcc %o2, -1, %o2 + bne 1b + add %o0, 1, %o0 +2: + retl + nop diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S new file mode 100644 index 0000000000..70a4f21057 --- /dev/null +++ b/arch/sparc/lib/memscan_64.S @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * memscan.S: Optimized memscan for Sparc64. + * + * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + */ + +#include <linux/export.h> + +#define HI_MAGIC 0x8080808080808080 +#define LO_MAGIC 0x0101010101010101 +#define ASI_PL 0x88 + + .text + .align 32 + .globl __memscan_zero, __memscan_generic + .type __memscan_zero,#function + .type __memscan_generic,#function + .globl memscan + EXPORT_SYMBOL(__memscan_zero) + EXPORT_SYMBOL(__memscan_generic) + +__memscan_zero: + /* %o0 = bufp, %o1 = size */ + brlez,pn %o1, szzero + andcc %o0, 7, %g0 + be,pt %icc, we_are_aligned + sethi %hi(HI_MAGIC), %o4 + ldub [%o0], %o5 +1: subcc %o1, 1, %o1 + brz,pn %o5, 10f + add %o0, 1, %o0 + + be,pn %xcc, szzero + andcc %o0, 7, %g0 + bne,a,pn %icc, 1b + ldub [%o0], %o5 +we_are_aligned: + ldxa [%o0] ASI_PL, %o5 + or %o4, %lo(HI_MAGIC), %o3 + sllx %o3, 32, %o4 + or %o4, %o3, %o3 + + srlx %o3, 7, %o2 +msloop: + sub %o1, 8, %o1 + add %o0, 8, %o0 + sub %o5, %o2, %o4 + xor %o4, %o5, %o4 + andcc %o4, %o3, %g3 + bne,pn %xcc, check_bytes + srlx %o4, 32, %g3 + + brgz,a,pt %o1, msloop + ldxa [%o0] ASI_PL, %o5 +check_bytes: + bne,a,pn %icc, 2f + andcc %o5, 0xff, %g0 + add %o0, -5, %g2 + ba,pt %xcc, 3f + srlx %o5, 32, %g7 + +2: srlx %o5, 8, %g7 + be,pn %icc, 1f + add %o0, -8, %g2 + andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + be,pn %icc, 1f + inc %g2 + andcc %g7, 0xff, %g0 + + srlx %g7, 8, %g7 + be,pn %icc, 1f + inc %g2 + andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + be,pn %icc, 1f + inc %g2 + andcc %g3, %o3, %g0 + + be,a,pn %icc, 2f + mov %o0, %g2 +3: andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + be,pn %icc, 1f + inc %g2 + andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + + be,pn %icc, 1f + inc %g2 + andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + be,pn %icc, 1f + inc %g2 + andcc %g7, 0xff, %g0 + srlx %g7, 8, %g7 + + be,pn %icc, 1f + inc %g2 +2: brgz,a,pt %o1, msloop + ldxa [%o0] ASI_PL, %o5 + inc %g2 +1: add %o0, %o1, %o0 + cmp %g2, %o0 + retl + + movle %xcc, %g2, %o0 +10: retl + sub %o0, 1, %o0 +szzero: retl + nop + +memscan: +__memscan_generic: + /* %o0 = addr, %o1 = c, %o2 = size */ + brz,pn %o2, 3f + add %o0, %o2, %o3 + ldub [%o0], %o5 + sub %g0, %o2, %o4 +1: + cmp %o5, %o1 + be,pn %icc, 2f + addcc %o4, 1, %o4 + bne,a,pt %xcc, 1b + ldub [%o3 + %o4], %o5 + retl + /* The delay slot is the same as the next insn, this is just to make it look more awful */ +2: + add %o3, %o4, %o0 + retl + sub %o0, 1, %o0 +3: + retl + nop diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S new file mode 100644 index 0000000000..a33419dbb4 
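[Editorial aside, not part of the diff] The LO_MAGIC/HI_MAGIC scan used by the two memscan variants above (strlen.S later in this series uses the same trick) tests a whole word for a possible zero byte with two arithmetic operations; false positives are possible, which is why the assembly re-checks every byte before declaring a hit. A 32-bit C sketch with illustrative names:

#define LO_MAGIC_SKETCH 0x01010101u
#define HI_MAGIC_SKETCH 0x80808080u

static int may_have_zero_byte(unsigned int w)
{
        /* non-zero result means "some byte in w might be zero" */
        return ((w - LO_MAGIC_SKETCH) & HI_MAGIC_SKETCH) != 0;
}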
--- /dev/null +++ b/arch/sparc/lib/memset.S @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code + * Copyright (C) 1991,1996 Free Software Foundation + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * + * Calls to memset returns initial %o0. Calls to bzero returns 0, if ok, and + * number of bytes not yet set if exception occurs and we were called as + * clear_user. + */ + +#include <linux/export.h> +#include <asm/ptrace.h> + +/* Work around cpp -rob */ +#define ALLOC #alloc +#define EXECINSTR #execinstr +#define EX(x,y,a,b) \ +98: x,y; \ + .section .fixup,ALLOC,EXECINSTR; \ + .align 4; \ +99: retl; \ + a, b, %o0; \ + .section __ex_table,ALLOC; \ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4 + +#define STORE(source, base, offset, n) \ +98: std source, [base + offset + n]; \ + .section .fixup,ALLOC,EXECINSTR; \ + .align 4; \ +99: ba 30f; \ + sub %o3, n - offset, %o3; \ + .section __ex_table,ALLOC; \ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + +#define STORE_LAST(source, base, offset, n) \ + EX(std source, [base - offset - n], \ + add %o1, offset + n); + +/* Please don't change these macros, unless you change the logic + * in the .fixup section below as well. + * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */ +#define ZERO_BIG_BLOCK(base, offset, source) \ + STORE(source, base, offset, 0x00); \ + STORE(source, base, offset, 0x08); \ + STORE(source, base, offset, 0x10); \ + STORE(source, base, offset, 0x18); \ + STORE(source, base, offset, 0x20); \ + STORE(source, base, offset, 0x28); \ + STORE(source, base, offset, 0x30); \ + STORE(source, base, offset, 0x38); + +#define ZERO_LAST_BLOCKS(base, offset, source) \ + STORE_LAST(source, base, offset, 0x38); \ + STORE_LAST(source, base, offset, 0x30); \ + STORE_LAST(source, base, offset, 0x28); \ + STORE_LAST(source, base, offset, 0x20); \ + STORE_LAST(source, base, offset, 0x18); \ + STORE_LAST(source, base, offset, 0x10); \ + STORE_LAST(source, base, offset, 0x08); \ + STORE_LAST(source, base, offset, 0x00); + + .text + .align 4 + + .globl __bzero_begin +__bzero_begin: + + .globl __bzero + .type __bzero,#function + .globl memset + EXPORT_SYMBOL(__bzero) + EXPORT_SYMBOL(memset) +memset: + mov %o0, %g1 + mov 1, %g4 + and %o1, 0xff, %g3 + sll %g3, 8, %g2 + or %g3, %g2, %g3 + sll %g3, 16, %g2 + or %g3, %g2, %g3 + b 1f + mov %o2, %o1 +3: + cmp %o2, 3 + be 2f + EX(stb %g3, [%o0], sub %o1, 0) + + cmp %o2, 2 + be 2f + EX(stb %g3, [%o0 + 0x01], sub %o1, 1) + + EX(stb %g3, [%o0 + 0x02], sub %o1, 2) +2: + sub %o2, 4, %o2 + add %o1, %o2, %o1 + b 4f + sub %o0, %o2, %o0 + +__bzero: + clr %g4 + mov %g0, %g3 +1: + cmp %o1, 7 + bleu 7f + andcc %o0, 3, %o2 + + bne 3b +4: + andcc %o0, 4, %g0 + + be 2f + mov %g3, %g2 + + EX(st %g3, [%o0], sub %o1, 0) + sub %o1, 4, %o1 + add %o0, 4, %o0 +2: + andcc %o1, 0xffffff80, %o3 ! 
Now everything is 8 aligned and o1 is len to run + be 9f + andcc %o1, 0x78, %o2 +10: + ZERO_BIG_BLOCK(%o0, 0x00, %g2) + subcc %o3, 128, %o3 + ZERO_BIG_BLOCK(%o0, 0x40, %g2) + bne 10b + add %o0, 128, %o0 + + orcc %o2, %g0, %g0 +9: + be 13f + andcc %o1, 7, %o1 + + srl %o2, 1, %o3 + set 13f, %o4 + sub %o4, %o3, %o4 + jmp %o4 + add %o0, %o2, %o0 + + ZERO_LAST_BLOCKS(%o0, 0x48, %g2) + ZERO_LAST_BLOCKS(%o0, 0x08, %g2) +13: + be 8f + andcc %o1, 4, %g0 + + be 1f + andcc %o1, 2, %g0 + + EX(st %g3, [%o0], and %o1, 7) + add %o0, 4, %o0 +1: + be 1f + andcc %o1, 1, %g0 + + EX(sth %g3, [%o0], and %o1, 3) + add %o0, 2, %o0 +1: + bne,a 8f + EX(stb %g3, [%o0], and %o1, 1) +8: + b 0f + nop +7: + be 13b + orcc %o1, 0, %g0 + + be 0f +8: + add %o0, 1, %o0 + subcc %o1, 1, %o1 + bne 8b + EX(stb %g3, [%o0 - 1], add %o1, 1) +0: + andcc %g4, 1, %g0 + be 5f + nop + retl + mov %g1, %o0 +5: + retl + clr %o0 + + .section .fixup,#alloc,#execinstr + .align 4 +30: + and %o1, 0x7f, %o1 + retl + add %o3, %o1, %o0 + + .globl __bzero_end +__bzero_end: diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S new file mode 100644 index 0000000000..7e1e8cd30a --- /dev/null +++ b/arch/sparc/lib/muldi3.S @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. + +This file is part of GNU CC. + + */ + +#include <linux/export.h> + .text + .align 4 + .globl __muldi3 +__muldi3: + save %sp, -104, %sp + wr %g0, %i1, %y + sra %i3, 0x1f, %g2 + and %i1, %g2, %g2 + andcc %g0, 0, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, %i3, %g1 + mulscc %g1, 0, %g1 + add %g1, %g2, %l2 + rd %y, %o1 + mov %o1, %l3 + mov %i1, %o0 + mov %i2, %o1 + umul %o0, %o1, %o0 + mov %o0, %l0 + mov %i0, %o0 + mov %i3, %o1 + umul %o0, %o1, %o0 + add %l0, %o0, %l0 + mov %l2, %i0 + add %l2, %l0, %i0 + ret + restore %g0, %l3, %o1 +EXPORT_SYMBOL(__muldi3) diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S new file mode 100644 index 0000000000..5bb4c122a2 --- /dev/null +++ b/arch/sparc/lib/multi3.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/export.h> +#include <linux/linkage.h> + + .text + .align 4 +ENTRY(__multi3) /* %o0 = u, %o1 = v */ + mov %o1, %g1 + srl %o3, 0, %o4 + mulx %o4, %g1, %o1 + srlx %g1, 0x20, %g3 + mulx %g3, %o4, %g7 + sllx %g7, 0x20, %o5 + srl %g1, 0, %o4 + sub %o1, %o5, %o5 + srlx %o5, 0x20, %o5 + addcc %g7, %o5, %g7 + srlx %o3, 0x20, %o5 + mulx %o4, %o5, %o4 + mulx %g3, %o5, %o5 + sethi %hi(0x80000000), %g3 + addcc %g7, %o4, %g7 + srlx %g7, 0x20, %g7 + add %g3, %g3, %g3 + movcc %xcc, %g0, %g3 + addcc %o5, %g7, %o5 + sllx %o4, 0x20, %o4 + add %o1, %o4, %o1 + add %o5, %g3, %g2 + mulx %g1, %o2, %g1 + add %g1, %g2, %g1 + mulx %o0, %o3, %o0 + retl + add %g1, %o0, %o0 +ENDPROC(__multi3) +EXPORT_SYMBOL(__multi3) diff --git a/arch/sparc/lib/strlen.S 
b/arch/sparc/lib/strlen.S new file mode 100644 index 0000000000..27478b3f16 --- /dev/null +++ b/arch/sparc/lib/strlen.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* strlen.S: Sparc optimized strlen code + * Hand optimized from GNU libc's strlen + * Copyright (C) 1991,1996 Free Software Foundation + * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net) + * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asm.h> + +#define LO_MAGIC 0x01010101 +#define HI_MAGIC 0x80808080 + + .text +ENTRY(strlen) + mov %o0, %o1 + andcc %o0, 3, %g0 + BRANCH32(be, pt, 9f) + sethi %hi(HI_MAGIC), %o4 + ldub [%o0], %o5 + BRANCH_REG_ZERO(pn, %o5, 11f) + add %o0, 1, %o0 + andcc %o0, 3, %g0 + BRANCH32(be, pn, 4f) + or %o4, %lo(HI_MAGIC), %o3 + ldub [%o0], %o5 + BRANCH_REG_ZERO(pn, %o5, 12f) + add %o0, 1, %o0 + andcc %o0, 3, %g0 + BRANCH32(be, pt, 5f) + sethi %hi(LO_MAGIC), %o4 + ldub [%o0], %o5 + BRANCH_REG_ZERO(pn, %o5, 13f) + add %o0, 1, %o0 + BRANCH32(ba, pt, 8f) + or %o4, %lo(LO_MAGIC), %o2 +9: + or %o4, %lo(HI_MAGIC), %o3 +4: + sethi %hi(LO_MAGIC), %o4 +5: + or %o4, %lo(LO_MAGIC), %o2 +8: + ld [%o0], %o5 +2: + sub %o5, %o2, %o4 + andcc %o4, %o3, %g0 + BRANCH32(be, pt, 8b) + add %o0, 4, %o0 + + /* Check every byte. */ + srl %o5, 24, %g7 + andcc %g7, 0xff, %g0 + BRANCH32(be, pn, 1f) + add %o0, -4, %o4 + srl %o5, 16, %g7 + andcc %g7, 0xff, %g0 + BRANCH32(be, pn, 1f) + add %o4, 1, %o4 + srl %o5, 8, %g7 + andcc %g7, 0xff, %g0 + BRANCH32(be, pn, 1f) + add %o4, 1, %o4 + andcc %o5, 0xff, %g0 + BRANCH32_ANNUL(bne, pt, 2b) + ld [%o0], %o5 + add %o4, 1, %o4 +1: + retl + sub %o4, %o1, %o0 +11: + retl + mov 0, %o0 +12: + retl + mov 1, %o0 +13: + retl + mov 2, %o0 +ENDPROC(strlen) +EXPORT_SYMBOL(strlen) diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S new file mode 100644 index 0000000000..387bbf6215 --- /dev/null +++ b/arch/sparc/lib/strncmp_32.S @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * strncmp.S: Hand optimized Sparc assembly of GCC output from GNU libc + * generic strncmp routine. 
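[Editorial aside, not part of the diff] The unrolled assembly below computes the same result as the generic byte-wise strncmp, roughly:

static int strncmp_sketch(const char *a, const char *b, unsigned long n)
{
        while (n--) {
                unsigned char ca = *a++, cb = *b++;

                if (ca != cb)
                        return ca - cb;
                if (!ca)
                        return 0;
        }
        return 0;
}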
+ */ + +#include <linux/export.h> +#include <linux/linkage.h> + + .text +ENTRY(strncmp) + mov %o0, %g3 + mov 0, %o3 + + cmp %o2, 3 + ble 7f + mov 0, %g2 + + sra %o2, 2, %o4 + ldub [%g3], %o3 + +0: + ldub [%o1], %g2 + add %g3, 1, %g3 + and %o3, 0xff, %o0 + + cmp %o0, 0 + be 8f + add %o1, 1, %o1 + + cmp %o0, %g2 + be,a 1f + ldub [%g3], %o3 + + retl + sub %o0, %g2, %o0 + +1: + ldub [%o1], %g2 + add %g3,1, %g3 + and %o3, 0xff, %o0 + + cmp %o0, 0 + be 8f + add %o1, 1, %o1 + + cmp %o0, %g2 + be,a 1f + ldub [%g3], %o3 + + retl + sub %o0, %g2, %o0 + +1: + ldub [%o1], %g2 + add %g3, 1, %g3 + and %o3, 0xff, %o0 + + cmp %o0, 0 + be 8f + add %o1, 1, %o1 + + cmp %o0, %g2 + be,a 1f + ldub [%g3], %o3 + + retl + sub %o0, %g2, %o0 + +1: + ldub [%o1], %g2 + add %g3, 1, %g3 + and %o3, 0xff, %o0 + + cmp %o0, 0 + be 8f + add %o1, 1, %o1 + + cmp %o0, %g2 + be 1f + add %o4, -1, %o4 + + retl + sub %o0, %g2, %o0 + +1: + + cmp %o4, 0 + bg,a 0b + ldub [%g3], %o3 + + b 7f + and %o2, 3, %o2 + +9: + ldub [%o1], %g2 + add %g3, 1, %g3 + and %o3, 0xff, %o0 + + cmp %o0, 0 + be 8f + add %o1, 1, %o1 + + cmp %o0, %g2 + be 7f + add %o2, -1, %o2 + +8: + retl + sub %o0, %g2, %o0 + +7: + cmp %o2, 0 + bg,a 9b + ldub [%g3], %o3 + + and %g2, 0xff, %o0 + retl + sub %o3, %o0, %o0 +ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S new file mode 100644 index 0000000000..76c1207ecf --- /dev/null +++ b/arch/sparc/lib/strncmp_64.S @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Sparc64 optimized strncmp code. + * + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/asi.h> + + .text +ENTRY(strncmp) + brlez,pn %o2, 3f + lduba [%o0] (ASI_PNF), %o3 +1: + add %o0, 1, %o0 + ldub [%o1], %o4 + brz,pn %o3, 2f + add %o1, 1, %o1 + cmp %o3, %o4 + bne,pn %icc, 2f + subcc %o2, 1, %o2 + bne,a,pt %xcc, 1b + ldub [%o0], %o3 +2: + retl + sub %o3, %o4, %o0 +3: + retl + clr %o0 +ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c new file mode 100644 index 0000000000..82c1cccb12 --- /dev/null +++ b/arch/sparc/lib/ucmpdi2.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> +#include "libgcc.h" + +word_type __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) + return 0; + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) + return 2; + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + return 1; +} +EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/sparc/lib/udivdi3.S b/arch/sparc/lib/udivdi3.S new file mode 100644 index 0000000000..7a1117ec76 --- /dev/null +++ b/arch/sparc/lib/udivdi3.S @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. + +This file is part of GNU CC. + + */ + + .text + .align 4 + .globl __udivdi3 +__udivdi3: + save %sp,-104,%sp + mov %i3,%o3 + cmp %i2,0 + bne .LL40 + mov %i1,%i3 + cmp %o3,%i0 + bleu .LL41 + mov %i3,%o1 + ! Inlined udiv_qrnnd + mov 32,%g1 + subcc %i0,%o3,%g0 +1: bcs 5f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + sub %i0,%o3,%i0 ! this kills msb of n + addx %i0,%i0,%i0 ! 
so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %i0,%o3,%g0 + bcs 3f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + b 3f + sub %i0,%o3,%i0 ! this kills msb of n +4: sub %i0,%o3,%i0 +5: addxcc %i0,%i0,%i0 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb + sub %i0,%o3,%i0 +3: xnor %o1,0,%o1 + ! End of inline udiv_qrnnd + b .LL45 + mov 0,%o2 +.LL41: + cmp %o3,0 + bne .LL77 + mov %i0,%o2 + mov 1,%o0 + mov 0,%o1 + wr %g0, 0, %y + udiv %o0, %o1, %o0 + mov %o0,%o3 + mov %i0,%o2 +.LL77: + mov 0,%o4 + ! Inlined udiv_qrnnd + mov 32,%g1 + subcc %o4,%o3,%g0 +1: bcs 5f + addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb + sub %o4,%o3,%o4 ! this kills msb of n + addx %o4,%o4,%o4 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %o4,%o3,%g0 + bcs 3f + addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb + b 3f + sub %o4,%o3,%o4 ! this kills msb of n +4: sub %o4,%o3,%o4 +5: addxcc %o4,%o4,%o4 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb + sub %o4,%o3,%o4 +3: xnor %o2,0,%o2 + ! End of inline udiv_qrnnd + mov %o4,%i0 + mov %i3,%o1 + ! Inlined udiv_qrnnd + mov 32,%g1 + subcc %i0,%o3,%g0 +1: bcs 5f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + sub %i0,%o3,%i0 ! this kills msb of n + addx %i0,%i0,%i0 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %i0,%o3,%g0 + bcs 3f + addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb + b 3f + sub %i0,%o3,%i0 ! this kills msb of n +4: sub %i0,%o3,%i0 +5: addxcc %i0,%i0,%i0 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb + sub %i0,%o3,%i0 +3: xnor %o1,0,%o1 + ! End of inline udiv_qrnnd + b .LL78 + mov %o1,%l1 +.LL40: + cmp %i2,%i0 + bleu .LL46 + sethi %hi(65535),%o0 + b .LL73 + mov 0,%o1 +.LL46: + or %o0,%lo(65535),%o0 + cmp %i2,%o0 + bgu .LL53 + mov %i2,%o1 + cmp %i2,256 + addx %g0,-1,%o0 + b .LL59 + and %o0,8,%o2 +.LL53: + sethi %hi(16777215),%o0 + or %o0,%lo(16777215),%o0 + cmp %o1,%o0 + bgu .LL59 + mov 24,%o2 + mov 16,%o2 +.LL59: + srl %o1,%o2,%o1 + sethi %hi(__clz_tab),%o0 + or %o0,%lo(__clz_tab),%o0 + ldub [%o1+%o0],%o0 + add %o0,%o2,%o0 + mov 32,%o1 + subcc %o1,%o0,%o2 + bne,a .LL67 + mov 32,%o0 + cmp %i0,%i2 + bgu .LL69 + cmp %i3,%o3 + blu .LL73 + mov 0,%o1 +.LL69: + b .LL73 + mov 1,%o1 +.LL67: + sub %o0,%o2,%o0 + sll %i2,%o2,%i2 + srl %o3,%o0,%o1 + or %i2,%o1,%i2 + sll %o3,%o2,%o3 + srl %i0,%o0,%o1 + sll %i0,%o2,%i0 + srl %i3,%o0,%o0 + or %i0,%o0,%i0 + sll %i3,%o2,%i3 + mov %i0,%o5 + mov %o1,%o4 + ! Inlined udiv_qrnnd + mov 32,%g1 + subcc %o4,%i2,%g0 +1: bcs 5f + addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb + sub %o4,%i2,%o4 ! this kills msb of n + addx %o4,%o4,%o4 ! so this cannot give carry + subcc %g1,1,%g1 +2: bne 1b + subcc %o4,%i2,%g0 + bcs 3f + addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb + b 3f + sub %o4,%i2,%o4 ! this kills msb of n +4: sub %o4,%i2,%o4 +5: addxcc %o4,%o4,%o4 + bcc 2b + subcc %g1,1,%g1 +! Got carry from n. Subtract next step to cancel this carry. + bne 4b + addcc %o5,%o5,%o5 ! shift n1n0 and a 0-bit in lsb + sub %o4,%i2,%o4 +3: xnor %o5,0,%o5 + ! End of inline udiv_qrnnd + mov %o4,%i0 + mov %o5,%o1 + ! Inlined umul_ppmm + wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr + sra %o3,31,%g2 ! Do not move this insn + and %o1,%g2,%g2 ! 
Do not move this insn + andcc %g0,0,%g1 ! Do not move this insn + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,%o3,%g1 + mulscc %g1,0,%g1 + add %g1,%g2,%o0 + rd %y,%o2 + cmp %o0,%i0 + bgu,a .LL73 + add %o1,-1,%o1 + bne,a .LL45 + mov 0,%o2 + cmp %o2,%i3 + bleu .LL45 + mov 0,%o2 + add %o1,-1,%o1 +.LL73: + mov 0,%o2 +.LL45: + mov %o1,%l1 +.LL78: + mov %o2,%l0 + mov %l0,%i0 + mov %l1,%i1 + ret + restore diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S new file mode 100644 index 0000000000..35461e3b2a --- /dev/null +++ b/arch/sparc/lib/xor.S @@ -0,0 +1,646 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sparc64/lib/xor.S + * + * High speed xor_block operation for RAID4/5 utilizing the + * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. + * + * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 2006 David S. Miller <davem@davemloft.net> + */ + +#include <linux/export.h> +#include <linux/linkage.h> +#include <asm/visasm.h> +#include <asm/asi.h> +#include <asm/dcu.h> +#include <asm/spitfire.h> + +/* + * Requirements: + * !(((long)dest | (long)sourceN) & (64 - 1)) && + * !(len & 127) && len >= 256 + */ + .text + + /* VIS versions. 
*/ +ENTRY(xor_vis_2) + rd %fprs, %o5 + andcc %o5, FPRS_FEF|FPRS_DU, %g0 + be,pt %icc, 0f + sethi %hi(VISenter), %g1 + jmpl %g1 + %lo(VISenter), %g7 + add %g7, 8, %g7 +0: wr %g0, FPRS_FEF, %fprs + rd %asi, %g1 + wr %g0, ASI_BLK_P, %asi + membar #LoadStore|#StoreLoad|#StoreStore + sub %o0, 128, %o0 + ldda [%o1] %asi, %f0 + ldda [%o2] %asi, %f16 + +2: ldda [%o1 + 64] %asi, %f32 + fxor %f0, %f16, %f16 + fxor %f2, %f18, %f18 + fxor %f4, %f20, %f20 + fxor %f6, %f22, %f22 + fxor %f8, %f24, %f24 + fxor %f10, %f26, %f26 + fxor %f12, %f28, %f28 + fxor %f14, %f30, %f30 + stda %f16, [%o1] %asi + ldda [%o2 + 64] %asi, %f48 + ldda [%o1 + 128] %asi, %f0 + fxor %f32, %f48, %f48 + fxor %f34, %f50, %f50 + add %o1, 128, %o1 + fxor %f36, %f52, %f52 + add %o2, 128, %o2 + fxor %f38, %f54, %f54 + subcc %o0, 128, %o0 + fxor %f40, %f56, %f56 + fxor %f42, %f58, %f58 + fxor %f44, %f60, %f60 + fxor %f46, %f62, %f62 + stda %f48, [%o1 - 64] %asi + bne,pt %xcc, 2b + ldda [%o2] %asi, %f16 + + ldda [%o1 + 64] %asi, %f32 + fxor %f0, %f16, %f16 + fxor %f2, %f18, %f18 + fxor %f4, %f20, %f20 + fxor %f6, %f22, %f22 + fxor %f8, %f24, %f24 + fxor %f10, %f26, %f26 + fxor %f12, %f28, %f28 + fxor %f14, %f30, %f30 + stda %f16, [%o1] %asi + ldda [%o2 + 64] %asi, %f48 + membar #Sync + fxor %f32, %f48, %f48 + fxor %f34, %f50, %f50 + fxor %f36, %f52, %f52 + fxor %f38, %f54, %f54 + fxor %f40, %f56, %f56 + fxor %f42, %f58, %f58 + fxor %f44, %f60, %f60 + fxor %f46, %f62, %f62 + stda %f48, [%o1 + 64] %asi + membar #Sync|#StoreStore|#StoreLoad + wr %g1, %g0, %asi + retl + wr %g0, 0, %fprs +ENDPROC(xor_vis_2) +EXPORT_SYMBOL(xor_vis_2) + +ENTRY(xor_vis_3) + rd %fprs, %o5 + andcc %o5, FPRS_FEF|FPRS_DU, %g0 + be,pt %icc, 0f + sethi %hi(VISenter), %g1 + jmpl %g1 + %lo(VISenter), %g7 + add %g7, 8, %g7 +0: wr %g0, FPRS_FEF, %fprs + rd %asi, %g1 + wr %g0, ASI_BLK_P, %asi + membar #LoadStore|#StoreLoad|#StoreStore + sub %o0, 64, %o0 + ldda [%o1] %asi, %f0 + ldda [%o2] %asi, %f16 + +3: ldda [%o3] %asi, %f32 + fxor %f0, %f16, %f48 + fxor %f2, %f18, %f50 + add %o1, 64, %o1 + fxor %f4, %f20, %f52 + fxor %f6, %f22, %f54 + add %o2, 64, %o2 + fxor %f8, %f24, %f56 + fxor %f10, %f26, %f58 + fxor %f12, %f28, %f60 + fxor %f14, %f30, %f62 + ldda [%o1] %asi, %f0 + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + add %o3, 64, %o3 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + subcc %o0, 64, %o0 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + stda %f48, [%o1 - 64] %asi + bne,pt %xcc, 3b + ldda [%o2] %asi, %f16 + + ldda [%o3] %asi, %f32 + fxor %f0, %f16, %f48 + fxor %f2, %f18, %f50 + fxor %f4, %f20, %f52 + fxor %f6, %f22, %f54 + fxor %f8, %f24, %f56 + fxor %f10, %f26, %f58 + fxor %f12, %f28, %f60 + fxor %f14, %f30, %f62 + membar #Sync + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + stda %f48, [%o1] %asi + membar #Sync|#StoreStore|#StoreLoad + wr %g1, %g0, %asi + retl + wr %g0, 0, %fprs +ENDPROC(xor_vis_3) +EXPORT_SYMBOL(xor_vis_3) + +ENTRY(xor_vis_4) + rd %fprs, %o5 + andcc %o5, FPRS_FEF|FPRS_DU, %g0 + be,pt %icc, 0f + sethi %hi(VISenter), %g1 + jmpl %g1 + %lo(VISenter), %g7 + add %g7, 8, %g7 +0: wr %g0, FPRS_FEF, %fprs + rd %asi, %g1 + wr %g0, ASI_BLK_P, %asi + membar #LoadStore|#StoreLoad|#StoreStore + sub %o0, 64, %o0 + ldda [%o1] %asi, %f0 + ldda [%o2] %asi, %f16 + +4: ldda [%o3] %asi, %f32 + fxor %f0, %f16, %f16 + fxor %f2, %f18, %f18 + add %o1, 64, %o1 + fxor %f4, 
%f20, %f20 + fxor %f6, %f22, %f22 + add %o2, 64, %o2 + fxor %f8, %f24, %f24 + fxor %f10, %f26, %f26 + fxor %f12, %f28, %f28 + fxor %f14, %f30, %f30 + ldda [%o4] %asi, %f48 + fxor %f16, %f32, %f32 + fxor %f18, %f34, %f34 + fxor %f20, %f36, %f36 + fxor %f22, %f38, %f38 + add %o3, 64, %o3 + fxor %f24, %f40, %f40 + fxor %f26, %f42, %f42 + fxor %f28, %f44, %f44 + fxor %f30, %f46, %f46 + ldda [%o1] %asi, %f0 + fxor %f32, %f48, %f48 + fxor %f34, %f50, %f50 + fxor %f36, %f52, %f52 + add %o4, 64, %o4 + fxor %f38, %f54, %f54 + fxor %f40, %f56, %f56 + fxor %f42, %f58, %f58 + subcc %o0, 64, %o0 + fxor %f44, %f60, %f60 + fxor %f46, %f62, %f62 + stda %f48, [%o1 - 64] %asi + bne,pt %xcc, 4b + ldda [%o2] %asi, %f16 + + ldda [%o3] %asi, %f32 + fxor %f0, %f16, %f16 + fxor %f2, %f18, %f18 + fxor %f4, %f20, %f20 + fxor %f6, %f22, %f22 + fxor %f8, %f24, %f24 + fxor %f10, %f26, %f26 + fxor %f12, %f28, %f28 + fxor %f14, %f30, %f30 + ldda [%o4] %asi, %f48 + fxor %f16, %f32, %f32 + fxor %f18, %f34, %f34 + fxor %f20, %f36, %f36 + fxor %f22, %f38, %f38 + fxor %f24, %f40, %f40 + fxor %f26, %f42, %f42 + fxor %f28, %f44, %f44 + fxor %f30, %f46, %f46 + membar #Sync + fxor %f32, %f48, %f48 + fxor %f34, %f50, %f50 + fxor %f36, %f52, %f52 + fxor %f38, %f54, %f54 + fxor %f40, %f56, %f56 + fxor %f42, %f58, %f58 + fxor %f44, %f60, %f60 + fxor %f46, %f62, %f62 + stda %f48, [%o1] %asi + membar #Sync|#StoreStore|#StoreLoad + wr %g1, %g0, %asi + retl + wr %g0, 0, %fprs +ENDPROC(xor_vis_4) +EXPORT_SYMBOL(xor_vis_4) + +ENTRY(xor_vis_5) + save %sp, -192, %sp + rd %fprs, %o5 + andcc %o5, FPRS_FEF|FPRS_DU, %g0 + be,pt %icc, 0f + sethi %hi(VISenter), %g1 + jmpl %g1 + %lo(VISenter), %g7 + add %g7, 8, %g7 +0: wr %g0, FPRS_FEF, %fprs + rd %asi, %g1 + wr %g0, ASI_BLK_P, %asi + membar #LoadStore|#StoreLoad|#StoreStore + sub %i0, 64, %i0 + ldda [%i1] %asi, %f0 + ldda [%i2] %asi, %f16 + +5: ldda [%i3] %asi, %f32 + fxor %f0, %f16, %f48 + fxor %f2, %f18, %f50 + add %i1, 64, %i1 + fxor %f4, %f20, %f52 + fxor %f6, %f22, %f54 + add %i2, 64, %i2 + fxor %f8, %f24, %f56 + fxor %f10, %f26, %f58 + fxor %f12, %f28, %f60 + fxor %f14, %f30, %f62 + ldda [%i4] %asi, %f16 + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + add %i3, 64, %i3 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + ldda [%i5] %asi, %f32 + fxor %f48, %f16, %f48 + fxor %f50, %f18, %f50 + add %i4, 64, %i4 + fxor %f52, %f20, %f52 + fxor %f54, %f22, %f54 + add %i5, 64, %i5 + fxor %f56, %f24, %f56 + fxor %f58, %f26, %f58 + fxor %f60, %f28, %f60 + fxor %f62, %f30, %f62 + ldda [%i1] %asi, %f0 + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + subcc %i0, 64, %i0 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + stda %f48, [%i1 - 64] %asi + bne,pt %xcc, 5b + ldda [%i2] %asi, %f16 + + ldda [%i3] %asi, %f32 + fxor %f0, %f16, %f48 + fxor %f2, %f18, %f50 + fxor %f4, %f20, %f52 + fxor %f6, %f22, %f54 + fxor %f8, %f24, %f56 + fxor %f10, %f26, %f58 + fxor %f12, %f28, %f60 + fxor %f14, %f30, %f62 + ldda [%i4] %asi, %f16 + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + ldda [%i5] %asi, %f32 + fxor %f48, %f16, %f48 + fxor %f50, %f18, %f50 + fxor %f52, %f20, %f52 + fxor %f54, %f22, %f54 + fxor %f56, %f24, %f56 + fxor %f58, %f26, %f58 + fxor %f60, %f28, %f60 + fxor %f62, %f30, 
%f62 + membar #Sync + fxor %f48, %f32, %f48 + fxor %f50, %f34, %f50 + fxor %f52, %f36, %f52 + fxor %f54, %f38, %f54 + fxor %f56, %f40, %f56 + fxor %f58, %f42, %f58 + fxor %f60, %f44, %f60 + fxor %f62, %f46, %f62 + stda %f48, [%i1] %asi + membar #Sync|#StoreStore|#StoreLoad + wr %g1, %g0, %asi + wr %g0, 0, %fprs + ret + restore +ENDPROC(xor_vis_5) +EXPORT_SYMBOL(xor_vis_5) + + /* Niagara versions. */ +ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ + ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ + ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ + ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ + prefetch [%i1 + 0x40], #one_read + ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ + ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ + ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ + ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ + prefetch [%i0 + 0x40], #n_writes + xor %o0, %i2, %o0 + xor %o1, %i3, %o1 + stxa %o0, [%i0 + 0x00] %asi + stxa %o1, [%i0 + 0x08] %asi + xor %o2, %i4, %o2 + xor %o3, %i5, %o3 + stxa %o2, [%i0 + 0x10] %asi + stxa %o3, [%i0 + 0x18] %asi + xor %o4, %g2, %o4 + xor %o5, %g3, %o5 + stxa %o4, [%i0 + 0x20] %asi + stxa %o5, [%i0 + 0x28] %asi + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x30] %asi + stxa %l3, [%i0 + 0x38] %asi + add %i0, 0x40, %i0 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %i1, 0x40, %i1 + membar #Sync + wr %g7, 0x0, %asi + ret + restore +ENDPROC(xor_niagara_2) +EXPORT_SYMBOL(xor_niagara_2) + +ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ + ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ + ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ + ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ + ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ + xor %g2, %i2, %g2 + xor %g3, %i3, %g3 + xor %o0, %g2, %o0 + xor %o1, %g3, %o1 + stxa %o0, [%i0 + 0x00] %asi + stxa %o1, [%i0 + 0x08] %asi + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ + ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ + xor %l0, %i4, %l0 + xor %l1, %i5, %l1 + xor %o2, %l0, %o2 + xor %o3, %l1, %o3 + stxa %o2, [%i0 + 0x10] %asi + stxa %o3, [%i0 + 0x18] %asi + ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ + ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ + ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + xor %g2, %i2, %g2 + xor %g3, %i3, %g3 + xor %o0, %g2, %o0 + xor %o1, %g3, %o1 + stxa %o0, [%i0 + 0x20] %asi + stxa %o1, [%i0 + 0x28] %asi + xor %l0, %i4, %l0 + xor %l1, %i5, %l1 + xor %o2, %l0, %o2 + xor %o3, %l1, %o3 + stxa %o2, [%i0 + 0x30] %asi + stxa %o3, [%i0 + 0x38] %asi + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l7, 0x40, %l7 + membar #Sync + wr %g7, 0x0, %asi + 
ret + restore +ENDPROC(xor_niagara_3) +EXPORT_SYMBOL(xor_niagara_3) + +ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + prefetch [%i4], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 + mov %i4, %l6 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ + ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ + ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x00] %asi + stxa %l1, [%i0 + 0x08] %asi + ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ + ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x10] %asi + stxa %l1, [%i0 + 0x18] %asi + ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ + ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x20] %asi + stxa %l1, [%i0 + 0x28] %asi + ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ + ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ + + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%l6 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x30] %asi + stxa %l1, [%i0 + 0x38] %asi + + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + add %l7, 0x40, %l7 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l6, 0x40, %l6 + membar #Sync + wr %g7, 0x0, %asi + ret + restore +ENDPROC(xor_niagara_4) +EXPORT_SYMBOL(xor_niagara_4) + +ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + prefetch [%i4], #one_read + prefetch [%i5], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 + mov %i4, %l6 + mov %i5, %l5 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ + ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ + ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ + ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x00] %asi + stxa %l3, [%i0 + 0x08] %asi + ldda [%l5 + 0x10] 
%asi, %l0 /* %l0/%l1 = src4 + 0x10 */ + ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x10] %asi + stxa %l3, [%i0 + 0x18] %asi + ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */ + ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x20] %asi + stxa %l3, [%i0 + 0x28] %asi + ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ + ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ + + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%l6 + 0x40], #one_read + prefetch [%l5 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x30] %asi + stxa %l3, [%i0 + 0x38] %asi + + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + add %l7, 0x40, %l7 + add %l6, 0x40, %l6 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l5, 0x40, %l5 + membar #Sync + wr %g7, 0x0, %asi + ret + restore +ENDPROC(xor_niagara_5) +EXPORT_SYMBOL(xor_niagara_5) |
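
A minimal C reference sketch (not part of the patch) of the two-source xor_block contract that xor_vis_2 and xor_niagara_2 above implement. The function name xor_ref_2 and the assert-based checks are illustrative assumptions; the alignment and length constraints simply restate the "Requirements" comment in the xor.S header (64-byte-aligned buffers, length a multiple of 128 and at least 256 bytes).

/* Illustrative reference model only; in the kernel these routines are
 * selected at runtime and invoked through the generic xor_block() layer.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void xor_ref_2(size_t bytes, uint64_t *dest, const uint64_t *src)
{
	size_t i;

	/* Contract stated in the xor.S header comment. */
	assert(!(((uintptr_t)dest | (uintptr_t)src) & (64 - 1)));
	assert(!(bytes & 127) && bytes >= 256);

	/* The optimized versions do this 64 bytes per iteration with
	 * block loads/stores; the result is the same byte-wise XOR. */
	for (i = 0; i < bytes / sizeof(uint64_t); i++)
		dest[i] ^= src[i];
}

The 3-, 4- and 5-source variants follow the same pattern, folding each additional source into dest within the same 64-byte window before storing it back.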