GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/utilities/copy.cpp

/*
 * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) {
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;

  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope. For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation. There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}
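
// Illustrative example (added commentary, not part of the original source):
// with from = 0x1000, to = 0x2008 and size = 16, the OR'd value is
// bits = 0x1000 | 0x2008 | 0x10 = 0x3018, which is divisible by
// sizeof(jlong) == 8, so the copy proceeds as two atomic 8-byte copies.
// Misaligning any one of the three arguments by a single byte would make
// bits odd and demote the whole copy to the non-atomic jbyte path.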

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != NULL, "address must not be NULL");
    assert(dst != NULL, "address must not be NULL");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: " SIZE_FORMAT, elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size);

    address src_end = (address)src + byte_count;

    if (dst <= src || dst >= src_end) {
      do_conjoint_swap<RIGHT,swap>(src, dst, byte_count, elem_size);
    } else {
      do_conjoint_swap<LEFT,swap>(src, dst, byte_count, elem_size);
    }
  }
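
  // Illustrative example (added commentary): a forward (RIGHT) copy is safe
  // unless dst falls inside the source range. With src = 0x1000 and
  // byte_count = 16, src_end = 0x1010; a destination of dst = 0x1008 lies
  // strictly inside (src, src_end), so the copy runs LEFT, starting at the
  // last element, so that overlapping source bytes are read before they are
  // overwritten. dst = 0x0ff8 or dst = 0x1010 would copy RIGHT.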

private:
  /**
   * Byte swap a 16-bit value
   */
  static uint16_t byte_swap(uint16_t x) {
    return (x << 8) | (x >> 8);
  }

  /**
   * Byte swap a 32-bit value
   */
  static uint32_t byte_swap(uint32_t x) {
    uint16_t lo = (uint16_t)x;
    uint16_t hi = (uint16_t)(x >> 16);

    return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
  }

  /**
   * Byte swap a 64-bit value
   */
  static uint64_t byte_swap(uint64_t x) {
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);

    return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
  }
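
  // Worked example (added commentary): byte_swap((uint32_t)0x11223344)
  // splits into lo = 0x3344 and hi = 0x1122, swaps each half to 0x4433 and
  // 0x2211, and recombines them as (0x4433 << 16) | 0x2211 = 0x44332211.
  // The 64-bit overload composes the 32-bit one in the same way.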

  enum CopyDirection {
    RIGHT, // lower -> higher address
    LEFT   // higher -> lower address
  };

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   * <is_src_aligned> - true if src argument is aligned to element size
   * <is_dst_aligned> - true if dst argument is aligned to element size
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool swap, bool is_src_aligned, bool is_dst_aligned>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    const char* cur_src;
    char* cur_dst;

    switch (D) {
    case RIGHT:
      cur_src = (const char*)src;
      cur_dst = (char*)dst;
      break;
    case LEFT:
      cur_src = (const char*)src + byte_count - sizeof(T);
      cur_dst = (char*)dst + byte_count - sizeof(T);
      break;
    }

    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
      T tmp;

      if (is_src_aligned) {
        tmp = *(T*)cur_src;
      } else {
        memcpy(&tmp, cur_src, sizeof(T));
      }

      if (swap) {
        tmp = byte_swap(tmp);
      }

      if (is_dst_aligned) {
        *(T*)cur_dst = tmp;
      } else {
        memcpy(cur_dst, &tmp, sizeof(T));
      }

      switch (D) {
      case RIGHT:
        cur_src += sizeof(T);
        cur_dst += sizeof(T);
        break;
      case LEFT:
        cur_src -= sizeof(T);
        cur_dst -= sizeof(T);
        break;
      }
    }
  }
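
  // Note (added commentary): the unaligned branches copy through a local
  // temporary with memcpy because dereferencing a misaligned T* is undefined
  // behavior in C++ and faults on some targets. Since is_src_aligned and
  // is_dst_aligned are template parameters, each instantiation keeps only
  // one branch, and compilers typically lower the fixed-size memcpy to a
  // single unaligned load or store where the hardware permits it.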

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection direction, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    if (is_aligned(src, sizeof(T))) {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,swap,true,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,swap,true,false>(src, dst, byte_count);
      }
    } else {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,swap,false,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,swap,false,false>(src, dst, byte_count);
      }
    }
  }
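
  // Note (added commentary): testing alignment once here and fanning out to
  // four template instantiations means the element loop above contains no
  // per-iteration alignment checks; the cost is some extra generated code
  // for each element type.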

  /**
   * Copy and byte swap elements
   *
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template <CopyDirection D, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    switch (elem_size) {
    case 2: do_conjoint_swap<uint16_t,D,swap>(src, dst, byte_count); break;
    case 4: do_conjoint_swap<uint32_t,D,swap>(src, dst, byte_count); break;
    case 8: do_conjoint_swap<uint64_t,D,swap>(src, dst, byte_count); break;
    default: guarantee(false, "do_conjoint_swap: Invalid elem_size " SIZE_FORMAT "\n", elem_size);
    }
  }
};

void Copy::conjoint_copy(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<false>(src, dst, byte_count, elem_size);
}

void Copy::conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<true>(src, dst, byte_count, elem_size);
}
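
// Illustrative usage sketch (added commentary; the buffers and values are
// hypothetical, not part of this file):
//
//   jint src[2] = { 0x11223344, 0x55667788 };
//   jint dst[2];
//   // Copy with byte swapping, e.g. when adjusting endianness:
//   Copy::conjoint_swap(src, dst, sizeof(src), sizeof(jint));
//   // dst[0] == 0x44332211, dst[1] == 0x88776655 (byte-swapped copy)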

// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      // Broadcast the byte to all eight byte lanes of the jlong.
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
    Copy::fill_to_bytes(dst, size, value);
  }
}
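
// Worked example (added commentary): filling 16 jlong-aligned bytes with
// value 0x5A first broadcasts the byte across the word,
//   0x5A -> 0x5A5A -> 0x5A5A5A5A -> 0x5A5A5A5A5A5A5A5A,
// and then issues two atomic 8-byte stores instead of sixteen byte stores.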