Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
wine-mirror
GitHub Repository: wine-mirror/wine
Path: blob/master/libs/compiler-rt/lib/builtins/emutls.c
12346 views
1
/* ===---------- emutls.c - Implements __emutls_get_address ---------------===
2
*
3
* The LLVM Compiler Infrastructure
4
*
5
* This file is dual licensed under the MIT and the University of Illinois Open
6
* Source Licenses. See LICENSE.TXT for details.
7
*
8
* ===----------------------------------------------------------------------===
9
*/
10
#include <stdint.h>
11
#include <stdlib.h>
12
#include <string.h>
13
14
#include "int_lib.h"
15
#include "int_util.h"
16
17
#ifdef __BIONIC__
18
/* There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation
19
to round 2. We need to delay deallocation because:
20
- Android versions older than M lack __cxa_thread_atexit_impl, so apps
21
use a pthread key destructor to call C++ destructors.
22
- Apps might use __thread/thread_local variables in pthread destructors.
23
We can't wait until the final two rounds, because jemalloc needs two rounds
24
after the final malloc/free call to free its thread-specific data (see
25
https://reviews.llvm.org/D46978#1107507). */
26
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 1
27
#else
28
#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0
29
#endif
30
31
/* Per-thread bookkeeping: one of these lives in each thread's TLS slot and
 * maps an emutls variable index to that thread's copy of the variable. */
typedef struct emutls_address_array {
    uintptr_t skip_destructor_rounds; /* pthread key destructor rounds still
                                         to be skipped before freeing */
    uintptr_t size; /* number of elements in the 'data' array */
    void* data[];   /* data[index-1] = this thread's object for that index,
                       or NULL if not allocated yet */
} emutls_address_array;
36
37
static void emutls_shutdown(emutls_address_array *array);
38
39
#ifndef _WIN32
40
41
#include <pthread.h>
42
43
static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
44
static pthread_key_t emutls_pthread_key;
45
46
typedef unsigned int gcc_word __attribute__((mode(word)));
47
typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
48
49
/* Default is not to use posix_memalign, so systems like Android
50
* can use thread local data without heavier POSIX memory allocators.
51
*/
52
#ifndef EMUTLS_USE_POSIX_MEMALIGN
53
#define EMUTLS_USE_POSIX_MEMALIGN 0
54
#endif
55
56
/* Allocate 'size' bytes aligned to 'align' (must be a power of 2).
 * Aborts the process on allocation failure.  On the malloc-based path the
 * raw malloc pointer is stashed in the word just before the returned base
 * so emutls_memalign_free() can recover and free it. */
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *base;
#if EMUTLS_USE_POSIX_MEMALIGN
    if (posix_memalign(&base, align, size) != 0)
        abort();
#else
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void*))
    char* object;
    if ((object = (char*)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
        abort();
    /* Round up past the saved-pointer slot to the requested alignment. */
    base = (void*)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES))
                    & ~(uintptr_t)(align - 1));
    /* Remember the malloc'ed address so the free path can find it. */
    ((void**)base)[-1] = object;
/* Keep the helper macro function-local instead of leaking it file-wide. */
#undef EXTRA_ALIGN_PTR_BYTES
#endif
    return base;
}
73
74
/* Release memory obtained from emutls_memalign_alloc(). */
static __inline void emutls_memalign_free(void *base) {
#if EMUTLS_USE_POSIX_MEMALIGN
    free(base);
#else
    /* The alloc path stored the original malloc address in the slot
     * immediately preceding 'base'; that is what must be freed. */
    void **slots = (void **)base;
    free(slots[-1]);
#endif
}
82
83
/* Publish 'value' as the calling thread's emutls address array. */
static __inline void emutls_setspecific(emutls_address_array *value) {
    void *opaque = (void *)value;
    pthread_setspecific(emutls_pthread_key, opaque);
}
86
87
static __inline emutls_address_array* emutls_getspecific() {
88
return (emutls_address_array*) pthread_getspecific(emutls_pthread_key);
89
}
90
91
static void emutls_key_destructor(void* ptr) {
92
emutls_address_array *array = (emutls_address_array*)ptr;
93
if (array->skip_destructor_rounds > 0) {
94
/* emutls is deallocated using a pthread key destructor. These
95
* destructors are called in several rounds to accommodate destructor
96
* functions that (re)initialize key values with pthread_setspecific.
97
* Delay the emutls deallocation to accommodate other end-of-thread
98
* cleanup tasks like calling thread_local destructors (e.g. the
99
* __cxa_thread_atexit fallback in libc++abi).
100
*/
101
array->skip_destructor_rounds--;
102
emutls_setspecific(array);
103
} else {
104
emutls_shutdown(array);
105
free(ptr);
106
}
107
}
108
109
static __inline void emutls_init(void) {
110
if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
111
abort();
112
}
113
114
static __inline void emutls_init_once(void) {
115
static pthread_once_t once = PTHREAD_ONCE_INIT;
116
pthread_once(&once, emutls_init);
117
}
118
119
static __inline void emutls_lock() {
120
pthread_mutex_lock(&emutls_mutex);
121
}
122
123
static __inline void emutls_unlock() {
124
pthread_mutex_unlock(&emutls_mutex);
125
}
126
127
#else /* _WIN32 */
128
129
#include <windows.h>
130
#include <malloc.h>
131
#include <stdio.h>
132
#include <assert.h>
133
134
static LPCRITICAL_SECTION emutls_mutex;
135
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
136
137
typedef uintptr_t gcc_word;
138
typedef void * gcc_pointer;
139
140
/* Print a human-readable description of the Windows error 'last_err' to
 * stderr; if the message cannot be formatted, fall back to printing the
 * caller-supplied 'hint' instead.  (Fixes the "Unkown" typo in the
 * fallback message.) */
static void win_error(DWORD last_err, const char *hint) {
    char *buffer = NULL;
    if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_FROM_SYSTEM |
                       FORMAT_MESSAGE_MAX_WIDTH_MASK,
                       NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
        fprintf(stderr, "Windows error: %s\n", buffer);
    } else {
        fprintf(stderr, "Unknown Windows error: %s\n", hint);
    }
    /* LocalFree(NULL) is a no-op, so this is safe on the fallback path. */
    LocalFree(buffer);
}
152
153
/* Report 'last_err' with context 'hint' (see win_error) and terminate. */
static __inline void win_abort(DWORD last_err, const char *hint) {
    win_error(last_err, hint);
    abort();
}
157
158
/* Aligned allocation on Windows; aborts the process on failure. */
static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *p = _aligned_malloc(size, align);
    if (p == NULL)
        win_abort(GetLastError(), "_aligned_malloc");
    return p;
}
164
165
/* Release memory obtained from emutls_memalign_alloc(). */
static __inline void emutls_memalign_free(void *base) {
    _aligned_free(base);
}
168
169
/* atexit handler: tear down the global critical section and the TLS index
 * created by emutls_init(), freeing the calling thread's emutls data too. */
static void emutls_exit(void) {
    if (emutls_mutex) {
        DeleteCriticalSection(emutls_mutex);
        _aligned_free(emutls_mutex);
        emutls_mutex = NULL;
    }
    if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
        /* Free this thread's objects before the index is released. */
        emutls_shutdown((emutls_address_array*)TlsGetValue(emutls_tls_index));
        TlsFree(emutls_tls_index);
        emutls_tls_index = TLS_OUT_OF_INDEXES;
    }
}
181
182
#pragma warning (push)
183
#pragma warning (disable : 4100)
184
/* One-time Windows initialization (InitOnceExecuteOnce callback):
 * allocate and initialize the global critical section and the TLS index,
 * and register emutls_exit() to run at process exit.
 * Returns TRUE on success, FALSE on failure.  p0/p1/p2 are unused. */
static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
    emutls_mutex =
        (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
    if (!emutls_mutex) {
        win_error(GetLastError(), "_aligned_malloc");
        return FALSE;
    }
    InitializeCriticalSection(emutls_mutex);

    emutls_tls_index = TlsAlloc();
    if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
        /* Capture the error code before emutls_exit() makes further
         * system calls that would overwrite this thread's last-error
         * value. */
        const DWORD err = GetLastError();
        emutls_exit();
        win_error(err, "TlsAlloc");
        return FALSE;
    }
    atexit(&emutls_exit);
    return TRUE;
}
201
202
/* Run emutls_init() exactly once per process.
 * NOTE(review): the return value of InitOnceExecuteOnce is ignored, so a
 * failed emutls_init() leaves emutls_mutex NULL and later lock/TLS calls
 * will fault -- confirm this "fail by crashing later" behavior is
 * intended. */
static __inline void emutls_init_once(void) {
    static INIT_ONCE once;
    InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
}
206
207
static __inline void emutls_lock() {
208
EnterCriticalSection(emutls_mutex);
209
}
210
211
static __inline void emutls_unlock() {
212
LeaveCriticalSection(emutls_mutex);
213
}
214
215
/* Store 'value' in the calling thread's emutls TLS slot; aborts the
 * process if TlsSetValue fails. */
static __inline void emutls_setspecific(emutls_address_array *value) {
    if (!TlsSetValue(emutls_tls_index, (LPVOID) value))
        win_abort(GetLastError(), "TlsSetValue");
}
219
220
static __inline emutls_address_array* emutls_getspecific() {
221
LPVOID value = TlsGetValue(emutls_tls_index);
222
if (value == NULL) {
223
const DWORD err = GetLastError();
224
if (err != ERROR_SUCCESS)
225
win_abort(err, "TlsGetValue");
226
}
227
return (emutls_address_array*) value;
228
}
229
230
/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
231
*/
232
#if !defined(__ATOMIC_RELEASE)
233
#include <intrin.h>
234
235
enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };
236
237
/* Emulated __atomic_load_n for MSVC builds: an interlocked OR with 0
 * returns the previous contents, which amounts to an atomic read with at
 * least acquire semantics.  Only __ATOMIC_ACQUIRE is supported. */
static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
    assert(type == __ATOMIC_ACQUIRE);
    // These return the previous value - but since we do an OR with 0,
    // it's equivalent to a plain load.
#ifdef _WIN64
    return InterlockedOr64(ptr, 0);
#else
    return InterlockedOr(ptr, 0);
#endif
}
247
248
/* Emulated __atomic_store_n for MSVC builds: InterlockedExchangePointer
 * performs an atomic store (with a full barrier, which subsumes the
 * required release semantics).  Only __ATOMIC_RELEASE is supported. */
static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
    assert(type == __ATOMIC_RELEASE);
    InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
}
252
253
#endif /* __ATOMIC_RELEASE */
254
255
#pragma warning (pop)
256
257
#endif /* _WIN32 */
258
259
static size_t emutls_num_object = 0; /* number of allocated TLS objects */
260
261
/* Free the allocated TLS data
262
*/
263
static void emutls_shutdown(emutls_address_array *array) {
264
if (array) {
265
uintptr_t i;
266
for (i = 0; i < array->size; ++i) {
267
if (array->data[i])
268
emutls_memalign_free(array->data[i]);
269
}
270
}
271
}
272
273
/* For every TLS variable xyz,
 * there is one __emutls_control variable named __emutls_v.xyz.
 * If xyz has non-zero initial value, __emutls_v.xyz's "value"
 * will point to __emutls_t.xyz, which has the initial value.
 */
typedef struct __emutls_control {
    /* Must use gcc_word here, instead of size_t, to match GCC. When
       gcc_word is larger than size_t, the upper extra bits are all
       zeros. We can use variables of size_t to operate on size and
       align. */
    gcc_word size;  /* size of the object in bytes */
    gcc_word align; /* alignment of the object in bytes */
    union {
        uintptr_t index; /* data[index-1] is the object address; 0 means
                            no index assigned yet (see emutls_get_index) */
        void* address;   /* object address, when in single thread env */
    } object;
    void* value; /* null or non-zero initial value for the object */
} __emutls_control;
291
292
/* Emulated TLS objects are always allocated at run-time. */
293
static __inline void *emutls_allocate_object(__emutls_control *control) {
294
/* Use standard C types, check with gcc's emutls.o. */
295
COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
296
COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void*));
297
298
size_t size = control->size;
299
size_t align = control->align;
300
void* base;
301
if (align < sizeof(void*))
302
align = sizeof(void*);
303
/* Make sure that align is power of 2. */
304
if ((align & (align - 1)) != 0)
305
abort();
306
307
base = emutls_memalign_alloc(align, size);
308
if (control->value)
309
memcpy(base, control->value, size);
310
else
311
memset(base, 0, size);
312
return base;
313
}
314
315
316
/* Returns control->object.index; set index if not allocated yet. */
static __inline uintptr_t emutls_get_index(__emutls_control *control) {
    /* Double-checked locking: this acquire load pairs with the release
     * store below, so a nonzero index read here can be used without
     * taking the mutex. */
    uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
    if (!index) {
        emutls_init_once();
        emutls_lock();
        /* Re-check under the lock: another thread may have won the race
         * and already assigned this variable an index. */
        index = control->object.index;
        if (!index) {
            /* Indices are 1-based; 0 means "not yet assigned". */
            index = ++emutls_num_object;
            __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
        }
        emutls_unlock();
    }
    return index;
}
331
332
/* Updates newly allocated thread local emutls_address_array.
 * Aborts if the (re)allocation that produced 'array' failed; otherwise
 * records the new element count and (re-)publishes the array in
 * thread-local storage. */
static __inline void emutls_check_array_set_size(emutls_address_array *array,
                                                 uintptr_t size) {
    if (array == NULL)
        abort();
    array->size = size;
    emutls_setspecific(array);
}
340
341
/* Returns the new 'data' array size, number of elements,
342
* which must be no smaller than the given index.
343
*/
344
static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) {
345
/* Need to allocate emutls_address_array with extra slots
346
* to store the header.
347
* Round up the emutls_address_array size to multiple of 16.
348
*/
349
uintptr_t header_words = sizeof(emutls_address_array) / sizeof(void *);
350
return ((index + header_words + 15) & ~((uintptr_t)15)) - header_words;
351
}
352
353
/* Returns the size in bytes required for an emutls_address_array with
354
* N number of elements for data field.
355
*/
356
static __inline uintptr_t emutls_asize(uintptr_t N) {
357
return N * sizeof(void *) + sizeof(emutls_address_array);
358
}
359
360
/* Returns the thread local emutls_address_array.
361
* Extends its size if necessary to hold address at index.
362
*/
363
static __inline emutls_address_array *
364
emutls_get_address_array(uintptr_t index) {
365
emutls_address_array* array = emutls_getspecific();
366
if (array == NULL) {
367
uintptr_t new_size = emutls_new_data_array_size(index);
368
array = (emutls_address_array*) malloc(emutls_asize(new_size));
369
if (array) {
370
memset(array->data, 0, new_size * sizeof(void*));
371
array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS;
372
}
373
emutls_check_array_set_size(array, new_size);
374
} else if (index > array->size) {
375
uintptr_t orig_size = array->size;
376
uintptr_t new_size = emutls_new_data_array_size(index);
377
array = (emutls_address_array*) realloc(array, emutls_asize(new_size));
378
if (array)
379
memset(array->data + orig_size, 0,
380
(new_size - orig_size) * sizeof(void*));
381
emutls_check_array_set_size(array, new_size);
382
}
383
return array;
384
}
385
386
void* __emutls_get_address(void* control) {
387
uintptr_t index = emutls_get_index(control);
388
emutls_address_array* array = emutls_get_address_array(index--);
389
if (array->data[index] == NULL)
390
array->data[index] = emutls_allocate_object(control);
391
return array->data[index];
392
}
393
394