GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
#define OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP

#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"

// Note that the compare-and-swap instructions on System z perform
// a serialization function before the storage operand is fetched
// and again after the operation is completed.
//
// Used constraint modifiers:
//   =    write-only access: Value on entry to inline-assembler code irrelevant.
//   +    read/write access: Value on entry is used; on exit value is changed.
//        read-only  access: Value on entry is used and never changed.
//   &    early-clobber access: Might be modified before all read-only operands
//                              have been used.
//   a    address register operand (not GR0).
//   d    general register operand (including GR0).
//   Q    memory operand w/o index register.
//   0..9 operand reference (by operand position).
//        Used for operands that fill multiple roles. One example would be a
//        write-only operand receiving its initial value from a read-only operand.
//        Refer to cmpxchg(..) operand #0 and variable cmp_val for a real-life example.
//

// On System z, all store operations are atomic if the address where the data is
// stored is an integer multiple of the data length. Furthermore, all stores are
// ordered: a store which occurs conceptually before another store becomes visible
// to other CPUs before the other store becomes visible.

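// Illustrative consequence (a sketch, not part of the original file; the field
// names are made up): because aligned stores are atomic and ordered, a simple
// publish pattern needs no explicit store barrier on this platform.
//
//   _payload   = 42;    // aligned store, becomes visible to other CPUs first
//   _published = true;  // aligned store, becomes visible only afterwards
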
//------------
// Atomic::add
//------------
// These methods force the value in memory to be augmented by the passed increment.
// Both the memory value and the increment are treated as 32-bit signed binary integers.
// No overflow exceptions are recognized, and the condition code does not hold
// information about the value in memory.
//
// The value in memory is updated by using a compare-and-swap instruction. The
// instruction is retried as often as required.
//
// The return value of the method is the value that was successfully stored. By the
// time the caller regains control, the value in memory may have changed already.

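// Usage sketch (illustrative only; the counter name is made up, and the
// dest-first Atomic::add() entry point is assumed to match the PlatformAdd
// signatures below):
//
//   volatile int32_t _counter = 0;
//   int32_t updated  = Atomic::add(&_counter, 1);  // add_and_fetch: new value
//   int32_t previous = updated - 1;                // fetch_and_add semantics
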
// New atomic operations only include specific-operand-serialization, not full
// memory barriers. We can use the Fast-BCR-Serialization Facility for them.
inline void z196_fast_sync() {
  __asm__ __volatile__ ("bcr 14, 0" : : : "memory");
}

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

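// Worked example (a sketch; the values are made up): if *dest is 10 and
// add_value is 5, add_and_fetch() atomically stores 15 and returns 15, so
// fetch_and_add() returns 15 - 5 == 10, the value before the addition. The
// subtraction is race-free because it uses the returned register value
// instead of re-reading memory.
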
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  D old, upd;

  if (VM_Version::has_LoadAndALUAtomicV1()) {
    if (order == memory_order_conservative) { z196_fast_sync(); }
    __asm__ __volatile__ (
      "   LGFR     0,%[inc]                \n\t" // save increment
      "   LA       3,%[mem]                \n\t" // force data address into ARG2
//    "   LAA      %[upd],%[inc],%[mem]    \n\t" // increment and get old value
//    "   LAA      2,0,0(3)                \n\t" // actually coded instruction
      "   .byte    0xeb                    \n\t" // LAA main opcode
      "   .byte    0x20                    \n\t" // R1,R3
      "   .byte    0x30                    \n\t" // R2,disp1
      "   .byte    0x00                    \n\t" // disp2,disp3
      "   .byte    0x00                    \n\t" // disp4,disp5
      "   .byte    0xf8                    \n\t" // LAA minor opcode
      "   AR       2,0                     \n\t" // calc new value in register
      "   LR       %[upd],2                \n\t" // move to result register
      //---<  outputs  >---
      : [upd] "=&d" (upd)    // write-only, updated counter value
      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
      //---<  inputs  >---
      : [inc] "a"   (inc)    // read-only.
      //---<  clobbered  >---
      : "cc", "r0", "r2", "r3", "memory"
    );
    if (order == memory_order_conservative) { z196_fast_sync(); }
  } else {
    __asm__ __volatile__ (
      "   LLGF     %[old],%[mem]           \n\t" // get old value
      "0: LA       %[upd],0(%[inc],%[old]) \n\t" // calc result
      "   CS       %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
      "   JNE      0b                      \n\t" // no success? -> retry
      //---<  outputs  >---
      : [old] "=&a" (old)    // write-only, old counter value
      , [upd] "=&d" (upd)    // write-only, updated counter value
      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
      //---<  inputs  >---
      : [inc] "a"   (inc)    // read-only.
      //---<  clobbered  >---
      : "cc", "memory"
    );
  }

  return upd;
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D old, upd;

  if (VM_Version::has_LoadAndALUAtomicV1()) {
    if (order == memory_order_conservative) { z196_fast_sync(); }
    __asm__ __volatile__ (
      "   LGR      0,%[inc]                \n\t" // save increment
      "   LA       3,%[mem]                \n\t" // force data address into ARG2
//    "   LAAG     %[upd],%[inc],%[mem]    \n\t" // increment and get old value
//    "   LAAG     2,0,0(3)                \n\t" // actually coded instruction
      "   .byte    0xeb                    \n\t" // LAAG main opcode
      "   .byte    0x20                    \n\t" // R1,R3
      "   .byte    0x30                    \n\t" // R2,disp1
      "   .byte    0x00                    \n\t" // disp2,disp3
      "   .byte    0x00                    \n\t" // disp4,disp5
      "   .byte    0xe8                    \n\t" // LAAG minor opcode
      "   AGR      2,0                     \n\t" // calc new value in register
      "   LGR      %[upd],2                \n\t" // move to result register
      //---<  outputs  >---
      : [upd] "=&d" (upd)    // write-only, updated counter value
      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
      //---<  inputs  >---
      : [inc] "a"   (inc)    // read-only.
      //---<  clobbered  >---
      : "cc", "r0", "r2", "r3", "memory"
    );
    if (order == memory_order_conservative) { z196_fast_sync(); }
  } else {
    __asm__ __volatile__ (
      "   LG       %[old],%[mem]           \n\t" // get old value
      "0: LA       %[upd],0(%[inc],%[old]) \n\t" // calc result
      "   CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
      "   JNE      0b                      \n\t" // no success? -> retry
      //---<  outputs  >---
      : [old] "=&a" (old)    // write-only, old counter value
      , [upd] "=&d" (upd)    // write-only, updated counter value
      , [mem] "+Q"  (*dest)  // read/write, memory to be updated atomically
      //---<  inputs  >---
      : [inc] "a"   (inc)    // read-only.
      //---<  clobbered  >---
      : "cc", "memory"
    );
  }

  return upd;
}

//-------------
// Atomic::xchg
//-------------
// These methods force the value in memory to be replaced by the new value passed
// in as an argument.
//
// The value in memory is replaced by using a compare-and-swap instruction. The
// instruction is retried as often as required. This makes sure that the new
// value can be seen, at least for a very short period of time, by other CPUs.
//
// If we used a normal "load(old value), store(new value)" sequence,
// the new value could be lost unnoticed, due to a store(new value) from
// another thread.
//
// The return value is the (unchanged) value from memory as it was when the
// replacement succeeded.
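// Usage sketch (illustrative; the owner flag is made up, and the dest-first
// Atomic::xchg() entry point is assumed to match PlatformXchg below): hand over
// ownership by swapping in a marker and inspecting what was there before.
//
//   volatile intptr_t _owner = 0;
//   intptr_t prev = Atomic::xchg(&_owner, (intptr_t)1);
//   if (prev == 0) { /* we acquired ownership */ }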
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order unused) const {
  STATIC_ASSERT(4 == sizeof(T));
  T old;

  __asm__ __volatile__ (
    "   LLGF     %[old],%[mem]           \n\t" // get old value
    "0: CS       %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
    "   JNE      0b                      \n\t" // no success? -> retry
    //---<  outputs  >---
    : [old] "=&d" (old)      // write-only, prev value irrelevant
    , [mem] "+Q"  (*dest)    // read/write, memory to be updated atomically
    //---<  inputs  >---
    : [upd] "d"   (exchange_value) // read-only, value to be written to memory
    //---<  clobbered  >---
    : "cc", "memory"
  );

  return old;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order unused) const {
  STATIC_ASSERT(8 == sizeof(T));
  T old;

  __asm__ __volatile__ (
    "   LG       %[old],%[mem]           \n\t" // get old value
    "0: CSG      %[old],%[upd],%[mem]    \n\t" // try to xchg upd with mem
    "   JNE      0b                      \n\t" // no success? -> retry
    //---<  outputs  >---
    : [old] "=&d" (old)      // write-only, init from memory
    , [mem] "+Q"  (*dest)    // read/write, memory to be updated atomically
    //---<  inputs  >---
    : [upd] "d"   (exchange_value) // read-only, value to be written to memory
    //---<  clobbered  >---
    : "cc", "memory"
  );

  return old;
}

//----------------
// Atomic::cmpxchg
//----------------
// These methods compare the value in memory with a given compare value.
// If both values compare equal, the value in memory is replaced with
// the exchange value.
//
// The value in memory is compared and replaced by using a compare-and-swap
// instruction. The instruction is NOT retried (one shot only).
//
// The return value is the (unchanged) value from memory as it was when the
// compare-and-swap instruction completed. A successful exchange operation
// is indicated by (return value == compare_value). If unsuccessful, a new
// exchange value can be calculated based on the return value, which is the
// latest contents of the memory location.
//
// Inspecting the return value is the only way for the caller to determine
// if the compare-and-swap instruction was successful:
// - If return value and compare value compare equal, the compare-and-swap
//   instruction was successful and the value in memory was replaced by the
//   exchange value.
// - If return value and compare value compare unequal, the compare-and-swap
//   instruction was not successful. The value in memory was left unchanged.
//
// The s390 processors always fence before and after the CS/CSG instructions.
// Thus we ignore the memory ordering argument. The documentation says: "A
// serialization function is performed before the operand is fetched and again
// after the operation is completed."

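// Usage sketch (illustrative; names are made up, and the dest-first
// Atomic::cmpxchg()/Atomic::load() entry points are assumed): since cmpxchg is
// one shot here, callers needing retry semantics loop on the returned value.
//
//   volatile int32_t _state = 0;
//   int32_t cur = Atomic::load(&_state);
//   for (;;) {
//     int32_t prev = Atomic::cmpxchg(&_state, cur, cur + 1);
//     if (prev == cur) break;  // swap succeeded
//     cur = prev;              // lost the race; retry with the latest value
//   }
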
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T cmp_val,
                                                T xchg_val,
                                                atomic_memory_order unused) const {
  STATIC_ASSERT(4 == sizeof(T));
  T old;

  __asm__ __volatile__ (
    "   CS       %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
    // outputs
    : [old] "=&d" (old)      // Write-only, prev value irrelevant.
    , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
    // inputs
    : [upd] "d"   (xchg_val) // Read-only, value to be written to memory.
    , "0"   (cmp_val)        // Read-only, initial value for [old] (operand #0).
    // clobbered
    : "cc", "memory"
  );

  return old;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T cmp_val,
                                                T xchg_val,
                                                atomic_memory_order unused) const {
  STATIC_ASSERT(8 == sizeof(T));
  T old;

  __asm__ __volatile__ (
    "   CSG      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
    // outputs
    : [old] "=&d" (old)      // Write-only, prev value irrelevant.
    , [mem] "+Q"  (*dest)    // Read/write, memory to be updated atomically.
    // inputs
    : [upd] "d"   (xchg_val) // Read-only, value to be written to memory.
    , "0"   (cmp_val)        // Read-only, initial value for [old] (operand #0).
    // clobbered
    : "cc", "memory"
  );

  return old;
}

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }
};

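// Illustrative pairing (a sketch; field names are made up, and Atomic::load_acquire()
// is assumed to dispatch to PlatformOrderedLoad above): together with the
// store-ordering note near the top of this file, an acquire load that observes
// the publish flag may safely read the previously stored payload.
//
//   if (Atomic::load_acquire(&_published)) {
//     int32_t v = _payload;  // sees the 42 stored before the flag was set
//   }
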
#endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP