1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
|
; $Id: memmove.asm $
;; @file
; IPRT - No-CRT memmove - AMD64 & X86.
;
;
; Copyright (C) 2006-2019 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; The contents of this file may alternatively be used under the terms
; of the Common Development and Distribution License Version 1.0
; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
; VirtualBox OSE distribution, in which case the provisions of the
; CDDL are applicable instead of those of the GPL.
;
; You may elect to license modified versions of this file under the
; terms and conditions of either the GPL or the CDDL or both.
;
%include "iprt/asmdefs.mac"
BEGINCODE
;;
; @param pvDst gcc: rdi msc: rcx x86:[esp+4] wcall: eax
; @param pvSrc gcc: rsi msc: rdx x86:[esp+8] wcall: edx
; @param cb gcc: rdx msc: r8 x86:[esp+0ch] wcall: ebx
RT_NOCRT_BEGINPROC memmove
;
; void *RT_NOCRT(memmove)(void *pvDst, const void *pvSrc, size_t cb)
;
; Overlap-safe byte copy; returns pvDst. After the prolog, register
; roles are identical on every target: xDI = pvDst, xSI = pvSrc,
; xCX = cb, and xDX holds a copy of cb (only consumed by the disabled
; fast path below). The return value is staged in xAX.
;
; Prolog.
%ifdef RT_ARCH_AMD64
%ifdef ASM_CALL64_MSC
mov r10, rdi ; save - RDI is callee-saved in the Microsoft x64 ABI.
mov r11, rsi ; save - so is RSI; both are restored in the epilog.
mov rdi, rcx ; pvDst
mov rsi, rdx ; pvSrc
mov rcx, r8 ; cb
mov rdx, r8 ; keep a copy of cb for the tail handling (disabled path).
%else
mov rcx, rdx ; SysV: pvDst/pvSrc are already in rdi/rsi; cb -> rcx (rdx keeps cb).
%endif
mov rax, rdi ; save the return value
%else
push edi ; EDI/ESI are callee-saved in all 32-bit conventions used here.
push esi
%ifdef ASM_CALL32_WATCOM
mov edi, eax ; pvDst (eax is also already the Watcom return value).
mov esi, edx ; pvSrc
mov ecx, ebx ; cb
mov edx, ebx ; keep a copy of cb for the tail handling (disabled path).
%else
mov edi, [esp + 04h + 8] ; pvDst ('+ 8' compensates for the two pushes above).
mov esi, [esp + 08h + 8] ; pvSrc
mov ecx, [esp + 0ch + 8] ; cb
mov edx, ecx ; keep a copy of cb for the tail handling (disabled path).
mov eax, edi ; save the return value
%endif
%endif
;
; Decide which direction to perform the copy in.
; A forward copy is only safe when pvDst < pvSrc; if pvDst >= pvSrc an
; overlapping region must be copied backwards (last byte first), so
; everything else takes the .backward path below.
;
%if 1 ; keep it simple for now.
cmp xDI, xSI
jnb .backward ; unsigned compare: pvDst >= pvSrc -> copy backwards.
;
; Slow/simple forward copy.
;
cld ; DF=0: string ops increment xSI/xDI.
rep movsb
jmp .epilog
%else ; disabled - it seems to work, but play safe for now.
;sub xAX, xSI
;jnb .backward
cmp xDI, xSI
jnb .backward
;
; Fast forward copy.
;
.fast_forward:
cld
%ifdef RT_ARCH_AMD64
shr rcx, 3 ; qword count = cb / 8
rep movsq
%else
shr ecx, 2 ; dword count = cb / 4
rep movsd
%endif
; The remaining bytes. (xDX still holds the original cb; its low bits
; select which tail moves are needed.)
%ifdef RT_ARCH_AMD64
test dl, 4
jz .forward_dont_move_dword
movsd
%endif
.forward_dont_move_dword:
test dl, 2
jz .forward_dont_move_word
movsw
.forward_dont_move_word:
test dl, 1
jz .forward_dont_move_byte
movsb
.forward_dont_move_byte:
%endif ; disabled
;
; The epilog.
;
.epilog:
%ifdef RT_ARCH_AMD64
%ifdef ASM_CALL64_MSC
mov rdi, r10 ; restore the callee-saved registers saved in the prolog.
mov rsi, r11
%endif
%else
pop esi
pop edi
%endif
ret
;
; Slow/simple backward copy.
; Starts at the last byte of each buffer and walks down, which is the
; correct order when the destination overlaps above the source.
;
ALIGNCODE(16)
.backward:
;; @todo check if they overlap.
lea xDI, [xDI + xCX - 1] ; point xDI/xSI at the LAST byte of each buffer.
lea xSI, [xSI + xCX - 1]
std ; DF=1: string ops decrement xSI/xDI.
rep movsb
cld ; the ABI requires DF cleared on return.
jmp .epilog
ENDPROC RT_NOCRT(memmove)
|