author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/x86/math-emu/round_Xsig.S
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/math-emu/round_Xsig.S')
-rw-r--r--  arch/x86/math-emu/round_Xsig.S | 142
1 file changed, 142 insertions, 0 deletions
diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S
new file mode 100644
index 000000000..126c40473
--- /dev/null
+++ b/arch/x86/math-emu/round_Xsig.S
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*---------------------------------------------------------------------------+
+ | round_Xsig.S |
+ | |
+ | Copyright (C) 1992,1993,1994,1995 |
+ | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
+ | Australia. E-mail billm@jacobi.maths.monash.edu.au |
+ | |
+ | Normalize and round a 12 byte quantity. |
+ | Call from C as: |
+ | int round_Xsig(Xsig *n) |
+ | |
+ | Normalize a 12 byte quantity. |
+ | Call from C as: |
+ | int norm_Xsig(Xsig *n) |
+ | |
+ | Each function returns the size of the shift (nr of bits). |
+ | |
+ +---------------------------------------------------------------------------*/
+ .file "round_Xsig.S"
+
+#include "fpu_emu.h"
+
+
+.text
+SYM_FUNC_START(round_Xsig)
+ pushl %ebp
+ movl %esp,%ebp
+	pushl	%ebx		/* Reserve some space: -4(%ebp) will hold the shift count */
+	pushl	%ebx		/* Save %ebx */
+ pushl %esi
+
+ movl PARAM1,%esi
+
+	movl	8(%esi),%edx	/* msw of the Xsig */
+	movl	4(%esi),%ebx	/* midw */
+	movl	(%esi),%eax	/* lsw */
+
+	movl	$0,-4(%ebp)	/* clear the shift count */
+
+ orl %edx,%edx /* ms bits */
+ js L_round /* Already normalized */
+ jnz L_shift_1 /* Shift left 1 - 31 bits */
+
+	movl	%ebx,%edx	/* msw was zero: shift left by */
+	movl	%eax,%ebx	/* one whole 32-bit word */
+	xorl	%eax,%eax
+	movl	$-32,-4(%ebp)	/* record the 32-bit shift */
+
+/* We need to shift left by 1 - 31 bits */
+L_shift_1:
+ bsrl %edx,%ecx /* get the required shift in %ecx */
+ subl $31,%ecx
+ negl %ecx
+ subl %ecx,-4(%ebp)
+	shld	%cl,%ebx,%edx	/* 96-bit left shift by %cl bits */
+	shld	%cl,%eax,%ebx
+	shl	%cl,%eax
+
+L_round:
+	testl	$0x80000000,%eax	/* round bit: bit 31 of the lsw */
+	jz	L_exit
+
+	addl	$1,%ebx			/* round the upper 64 bits up */
+	adcl	$0,%edx
+	jnz	L_exit			/* no carry out of the msw */
+
+	movl	$0x80000000,%edx	/* carry rippled out: re-normalize */
+	incl	-4(%ebp)		/* and bump the shift count by one */
+
+L_exit:
+ movl %edx,8(%esi)
+ movl %ebx,4(%esi)
+ movl %eax,(%esi)
+
+ movl -4(%ebp),%eax
+
+ popl %esi
+ popl %ebx
+ leave
+ RET
+SYM_FUNC_END(round_Xsig)
+
+
+
+SYM_FUNC_START(norm_Xsig)
+ pushl %ebp
+ movl %esp,%ebp
+	pushl	%ebx		/* Reserve some space: -4(%ebp) will hold the shift count */
+	pushl	%ebx		/* Save %ebx */
+ pushl %esi
+
+ movl PARAM1,%esi
+
+	movl	8(%esi),%edx	/* msw of the Xsig */
+	movl	4(%esi),%ebx	/* midw */
+	movl	(%esi),%eax	/* lsw */
+
+	movl	$0,-4(%ebp)	/* clear the shift count */
+
+ orl %edx,%edx /* ms bits */
+ js L_n_exit /* Already normalized */
+ jnz L_n_shift_1 /* Shift left 1 - 31 bits */
+
+	movl	%ebx,%edx	/* msw was zero: shift left by */
+	movl	%eax,%ebx	/* one whole 32-bit word */
+	xorl	%eax,%eax
+	movl	$-32,-4(%ebp)	/* record the first 32-bit shift */
+
+ orl %edx,%edx /* ms bits */
+ js L_n_exit /* Normalized now */
+ jnz L_n_shift_1 /* Shift left 1 - 31 bits */
+
+	movl	%ebx,%edx	/* midw was zero too: shift left by */
+	movl	%eax,%ebx	/* a second whole word */
+	xorl	%eax,%eax
+	addl	$-32,-4(%ebp)	/* total shift is now -64 bits */
+ jmp L_n_exit /* Might not be normalized,
+ but shift no more. */
+
+/* We need to shift left by 1 - 31 bits */
+L_n_shift_1:
+ bsrl %edx,%ecx /* get the required shift in %ecx */
+ subl $31,%ecx
+ negl %ecx
+ subl %ecx,-4(%ebp)
+ shld %cl,%ebx,%edx
+ shld %cl,%eax,%ebx
+ shl %cl,%eax
+
+L_n_exit:
+ movl %edx,8(%esi)
+ movl %ebx,4(%esi)
+ movl %eax,(%esi)
+
+ movl -4(%ebp),%eax
+
+ popl %esi
+ popl %ebx
+ leave
+ RET
+SYM_FUNC_END(norm_Xsig)
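
For reference, below is a minimal C sketch of the logic the round_Xsig assembly above implements, assuming the Xsig layout from fpu_emu.h (a 96-bit significand held as three 32-bit words, least significant first). The name round_Xsig_sketch and the stdint types are illustrative only; this is a behavioural model under those assumptions, not the kernel's implementation.

	#include <stdint.h>

	/* Assumed mirror of the Xsig layout from fpu_emu.h. */
	typedef struct {
		uint32_t lsw;	/* loaded from  (%esi) into %eax */
		uint32_t midw;	/* loaded from 4(%esi) into %ebx */
		uint32_t msw;	/* loaded from 8(%esi) into %edx */
	} Xsig;

	/* Behavioural sketch of round_Xsig(): shift left until bit 95
	   is set, round the top 64 bits on bit 31 of the low word, and
	   return the shift count (negative for left shifts, plus one
	   if rounding carried out of the top word).  Like the assembly,
	   it assumes the top 64 bits are not both zero. */
	static int round_Xsig_sketch(Xsig *n)
	{
		uint64_t hi = ((uint64_t)n->msw << 32) | n->midw;
		uint32_t lo = n->lsw;
		int count = 0;

		while (!(hi >> 63)) {		/* normalize */
			hi = (hi << 1) | (lo >> 31);
			lo <<= 1;
			count--;
		}
		if (lo & 0x80000000u) {		/* round bit set */
			hi++;
			if (hi == 0) {		/* carry out of the msw */
				hi = (uint64_t)1 << 63;
				count++;
			}
		}
		n->msw  = (uint32_t)(hi >> 32);
		n->midw = (uint32_t)hi;
		n->lsw  = lo;
		return count;
	}

norm_Xsig() performs the same normalization but skips the rounding step, and it gives up after shifting two whole words, so a zero significand cannot loop; otherwise both routines, like the sketch's loop, rely on the top 64 bits being nonzero (bsrl on a zero word is undefined).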