// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

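// Note: aligned loads and stores are already atomic on s390x, which has
// a strong memory model. The Store routines below are therefore a plain
// store followed by SYNC, a serializing instruction that keeps the
// store ordered before all later memory operations.
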
// func Store(ptr *uint32, val uint32)
TEXT ·Store(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVWZ	val+8(FP), R3
	MOVW	R3, 0(R2)
	SYNC
	RET

// func Store8(ptr *uint8, val uint8)
TEXT ·Store8(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVB	val+8(FP), R3
	MOVB	R3, 0(R2)
	SYNC
	RET

// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC
	RET

// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
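// As the name says, this performs a raw pointer store with no GC write
// barrier; the caller is responsible for any barrier that is required.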
TEXT ·StorepNoWB(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC
	RET

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	CS	R4, R5, 0(R3)    // if (R4 == 0(R3)) then 0(R3) = R5
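	// On success CS sets condition code 0; on failure it loads the
	// current value at 0(R3) into R4 and sets condition code 1, which
	// the BNE below tests.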
	BNE	cas_fail
	MOVB	$1, ret+16(FP)
	RET
cas_fail:
	MOVB	$0, ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	CSG	R4, R5, 0(R3)    // if (R4 == 0(R3)) then 0(R3) = R5
	BNE	cas64_fail
	MOVB	$1, ret+24(FP)
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)
	RET

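// The following functions are trampolines: each signed/uintptr variant
// shares its memory layout with an unsigned counterpart above, so it
// can tail-jump (BR) directly to the shared implementation.
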
// func Casint32(ptr *int32, old, new int32) bool
TEXT ·Casint32(SB), NOSPLIT, $0-17
	BR	·Cas(SB)

// func Casint64(ptr *int64, old, new int64) bool
TEXT ·Casint64(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Casuintptr(ptr *uintptr, old, new uintptr) bool
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func CasRel(ptr *uint32, old, new uint32) bool
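// On s390x the plain Cas already has sequentially consistent ordering,
// which is at least as strong as the release semantics CasRel needs,
// so it is safe to alias.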
TEXT ·CasRel(SB), NOSPLIT, $0-17
	BR	·Cas(SB)

// func Loaduintptr(ptr *uintptr) uintptr
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Loaduint(ptr *uint) uint
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Storeint32(ptr *int32, new int32)
TEXT ·Storeint32(SB), NOSPLIT, $0-12
	BR	·Store(SB)

// func Storeint64(ptr *int64, new int64)
TEXT ·Storeint64(SB), NOSPLIT, $0-16
	BR	·Store64(SB)

// func Storeuintptr(ptr *uintptr, new uintptr)
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)

// func Loadint32(ptr *int32) int32
TEXT ·Loadint32(SB), NOSPLIT, $0-12
	BR	·Load(SB)

// func Loadint64(ptr *int64) int64
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Xaddint32(ptr *int32, delta int32) int32
TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	BR	·Xadd(SB)

// func Xaddint64(ptr *int64, delta int64) int64
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
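// The CS in the loop below, on failure, reloads the current memory
// value into R3, so each retry recomputes R3+delta from fresh data.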
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	MOVW	(R4), R3
repeat:
	ADD	R5, R3, R6
	CS	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	MOVD	(R4), R3
repeat:
	ADD	R5, R3, R6
	CSG	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
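// Atomically:
//	old = *ptr
//	*ptr = new
//	return old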
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R3
	MOVW	(R4), R6
repeat:
	CS	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
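// Atomically:
//	old = *ptr
//	*ptr = new
//	return old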
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R3
	MOVD	(R4), R6
repeat:
	CSG	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchgint32(ptr *int32, new int32) int32
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	BR	·Xchg(SB)

// func Xchgint64(ptr *int64, new int64) int64
TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)

// func Or8(ptr *uint8, val uint8)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes, so
	// we need to align ptr down to a word boundary and create a mask
	// containing val to OR with the entire word atomically.
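	// A Go-level sketch of the mask arithmetic below (an illustration
	// only; the unsafe conversions are hypothetical, not code from this
	// package):
	//
	//	shift := 24 - (uintptr(unsafe.Pointer(ptr))&3)*8 // bit offset of the byte (big endian)
	//	mask := uint32(val) << shift                     // val in the target byte, zeros elsewhere
	//	// LAO then ORs mask into the word at ptr&^3 atomically.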
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5 // R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3              // R3 = floor(ptr, 4) = ptr &^ 3
	SLW	R5, R4               // R4 = uint32(val) << R5
	LAO	R4, R6, 0(R3)        // R6 = *R3; *R3 |= R4; (atomic)
	RET

// func And8(ptr *uint8, val uint8)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes, so
	// we need to align ptr down to a word boundary and create a mask
	// containing val to AND with the entire word atomically.
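	// For example, if ptr&3 == 1 then R5 = 16 below and the mask
	// rotates from 0xffffffvv to 0xffvvffff: val lands in byte 1 of the
	// big-endian word and every other byte is 0xff, so the AND leaves
	// those bytes unchanged.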
	ORW	$~0xff, R4           // R4 = uint32(val) | 0xffffff00
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5 // R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3              // R3 = floor(ptr, 4) = ptr &^ 3
	RLL	R5, R4, R4           // R4 = rotl(R4, R5)
	LAN	R4, R6, 0(R3)        // R6 = *R3; *R3 &= R4; (atomic)
	RET

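// LAO and LAN below are interlocked-access instructions: they
// atomically combine the register operand into memory while returning
// the old word in the second register (here R6, which is unused).
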
// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAO	R4, R6, 0(R3)        // R6 = *R3; *R3 |= R4; (atomic)
	RET

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAN	R4, R6, 0(R3)        // R6 = *R3; *R3 &= R4; (atomic)
	RET