GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/lib/pthread/pthread_create.c
/*
 * Copyright 2021 The Emscripten Authors. All rights reserved.
 * Emscripten is available under two separate licenses, the MIT license and the
 * University of Illinois/NCSA Open Source License. Both these licenses can be
 * found in the LICENSE file.
 */

#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "assert.h"
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <threads.h>
#include <unistd.h>
#include <emscripten/heap.h>
#include <emscripten/threading.h>

#define STACK_ALIGN __BIGGEST_ALIGNMENT__
#define TSD_ALIGN (sizeof(void*))

// Uncomment this line to enable tracing of thread creation and destruction:
// #define PTHREAD_DEBUG
#ifdef PTHREAD_DEBUG
#define dbg(fmt, ...) emscripten_dbgf(fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...)
#endif

// See musl's pthread_create.c

static void dummy_0() {}
weak_alias(dummy_0, __pthread_tsd_run_dtors);
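// (Weak-alias pattern: if pthread_key_create.c is linked in, its strong
// definition of __pthread_tsd_run_dtors overrides the no-op above, so TSD
// destructors only run when thread-specific data is actually used.)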

static void __run_cleanup_handlers() {
  pthread_t self = __pthread_self();
  while (self->cancelbuf) {
    void (*f)(void *) = self->cancelbuf->__f;
    void *x = self->cancelbuf->__x;
    self->cancelbuf = self->cancelbuf->__next;
    f(x);
  }
}

void __do_cleanup_push(struct __ptcb *cb) {
  struct pthread *self = __pthread_self();
  cb->__next = self->cancelbuf;
  self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb) {
  __pthread_self()->cancelbuf = cb->__next;
}
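
/* These two hooks back musl's pthread_cleanup_push/pop machinery. Roughly,
 * a typical caller looks like this (illustrative sketch, not part of this
 * file):
 *
 *   void* worker(void* arg) {
 *     pthread_cleanup_push(free, arg);  // run free(arg) on cancellation/exit
 *     // ... cancellable work ...
 *     pthread_cleanup_pop(1);           // pop and run the handler now
 *     return NULL;
 *   }
 */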

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f) {
  if (f && f->lock < 0) f->lock = 0;
}

static pid_t next_tid = 0;

// In case the stub syscall is not linked in
static int dummy_getpid(void) {
  return 42;
}
weak_alias(dummy_getpid, __syscall_getpid);

static int tl_lock_count;
static int tl_lock_waiters;

volatile int __thread_list_lock;

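// The thread-list lock is recursive: if the calling thread already holds it,
// __tl_lock just bumps tl_lock_count instead of deadlocking, and __tl_unlock
// only releases the lock once the count drops back to zero.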
void __tl_lock(void) {
  int tid = __pthread_self()->tid;
  int val = __thread_list_lock;
  if (val == tid) {
    tl_lock_count++;
    return;
  }
  while ((val = a_cas(&__thread_list_lock, 0, tid)))
    __wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void) {
  if (tl_lock_count) {
    tl_lock_count--;
    return;
  }
  a_store(&__thread_list_lock, 0);
  if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

void __tl_sync(pthread_t td) {
  a_barrier();
  int val = __thread_list_lock;
  if (!val) return;
  __wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
  if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

#define ROUND_UP(x, ALIGNMENT) (((x)+ALIGNMENT-1)&-ALIGNMENT)
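// ROUND_UP rounds x up to the next multiple of ALIGNMENT, which must be a
// power of two, e.g. ROUND_UP(13, 8) == 16 and ROUND_UP(16, 8) == 16.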

int __pthread_create(pthread_t* restrict res,
                     const pthread_attr_t* restrict attrp,
                     void* (*entry)(void*),
                     void* restrict arg) {
  // Note on LSAN: lsan intercepts/wraps calls to pthread_create so any
  // allocation we do here should be considered leaks.
  // See: lsan_interceptors.cpp.
  if (!res) {
    return EINVAL;
  }

  // Create threads with monotonically increasing TID starting with the main
  // thread which has TID == PID.
  if (!next_tid) {
    next_tid = getpid() + 1;
  }

  if (!libc.threaded) {
    for (FILE *f=*__ofl_lock(); f; f=f->next)
      init_file_lock(f);
    __ofl_unlock();
    init_file_lock(__stdin_used);
    init_file_lock(__stdout_used);
    init_file_lock(__stderr_used);
    libc.threaded = 1;
  }

  pthread_attr_t attr = { 0 };
  if (attrp && attrp != __ATTRP_C11_THREAD) attr = *attrp;
  if (!attr._a_stacksize) {
    attr._a_stacksize = __default_stacksize;
  }

  // Allocate memory for new thread. The layout of the thread block is
  // as follows. From low to high address:
  //
  // 1. pthread struct (sizeof struct pthread)
  // 2. tls data (__builtin_wasm_tls_size())
  // 3. tsd pointers (__pthread_tsd_size)
  // 4. stack (__default_stacksize AKA -sDEFAULT_PTHREAD_STACK_SIZE)
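  //
  // Regions 2-4 each have their own alignment requirement, so the size
  // computed below reserves 'align - 1' bytes of slack per region; the
  // actual start addresses are then rounded up with ROUND_UP as we go.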
  size_t size = sizeof(struct pthread);
  if (__builtin_wasm_tls_size()) {
    size += __builtin_wasm_tls_size() + __builtin_wasm_tls_align() - 1;
  }
  size += __pthread_tsd_size + TSD_ALIGN - 1;
  size_t zero_size = size;
  if (!attr._a_stackaddr) {
    size += attr._a_stacksize + STACK_ALIGN - 1;
  }

  // Allocate all the data for the new thread and zero-initialize all parts
  // except for the stack.
  unsigned char* block = emscripten_builtin_malloc(size);
  memset(block, 0, zero_size);

  uintptr_t offset = (uintptr_t)block;

  // 1. pthread struct
  struct pthread *new = (struct pthread*)offset;
  offset += sizeof(struct pthread);

  new->map_base = block;
  new->map_size = size;

  // The pthread struct has a field that points to itself - this is used as a
  // magic ID to detect whether the pthread_t structure is 'alive'.
  new->self = new;
  new->tid = next_tid++;

  // pthread struct robust_list head should point to itself.
  new->robust_list.head = &new->robust_list.head;

  new->locale = &libc.global_locale;
  if (attr._a_detach) {
    new->detach_state = DT_DETACHED;
  } else {
    new->detach_state = DT_JOINABLE;
  }
  new->stack_size = attr._a_stacksize;

  // 2. tls data
  if (__builtin_wasm_tls_size()) {
    offset = ROUND_UP(offset, __builtin_wasm_tls_align());
    new->tls_base = (void*)offset;
    offset += __builtin_wasm_tls_size();
  }

  // 3. tsd slots
  if (__pthread_tsd_size) {
    offset = ROUND_UP(offset, TSD_ALIGN);
    new->tsd = (void*)offset;
    offset += __pthread_tsd_size;
  }

  // 4. stack data
  // musl stores the top of the stack in pthread_t->stack (i.e. the high
  // end from which it grows down).
  if (attr._a_stackaddr) {
    new->stack = (void*)attr._a_stackaddr;
  } else {
    offset = ROUND_UP(offset + new->stack_size, STACK_ALIGN);
    new->stack = (void*)offset;
  }

  // Check that we didn't use more data than we allocated.
  assert(offset < (uintptr_t)block + size);

#ifndef NDEBUG
  _emscripten_thread_profiler_init(new);
#endif

  _emscripten_thread_mailbox_init(new);

  struct pthread *self = __pthread_self();
  dbg("start __pthread_create: new=%p new_end=%p stack=%p->%p "
      "stack_size=%zu tls_base=%p",
      new,
      new + 1,
      (char*)new->stack - new->stack_size,
      new->stack,
      new->stack_size,
      new->tls_base);

  // The thread may already be running, or even have exited, by the time the
  // __pthread_create_js call below returns, so insert it into the thread
  // list before the call.
  __tl_lock();

  new->next = self->next;
  new->prev = self;
  new->next->prev = new;
  new->prev->next = new;

  __tl_unlock();

  // Set libc.need_locks before calling __pthread_create_js since
  // by the time it returns the thread could be running and we
  // want libc.need_locks to be set from the moment it starts.
  if (!libc.threads_minus_1++) libc.need_locks = 1;

  // Publish the pthread_t value immediately, so that by the time
  // __pthread_create_js() is dispatched to a worker and the thread main runs,
  // the value is already visible for the new thread to examine.
  // Use __atomic_store_n() instead of a_store(), which only operates on
  // int-sized values, so the full pointer is stored atomically.
  __atomic_store_n(res, new, __ATOMIC_SEQ_CST);

  int rtn = __pthread_create_js(new, &attr, entry, arg);
  if (rtn != 0) {
    // Reset the pthread_t return value to zero (we assigned it above, so
    // clearing it here avoids leaking a stale pointer to the caller).
    __atomic_store_n(res, 0, __ATOMIC_SEQ_CST);

    if (!--libc.threads_minus_1) libc.need_locks = 0;

    // Undo the previous addition to the thread list.
    __tl_lock();

    new->next->prev = new->prev;
    new->prev->next = new->next;
    new->next = new->prev = new;

    __tl_unlock();

    return rtn;
  }

  dbg("done __pthread_create next=%p prev=%p new=%p",
      self->next,
      self->prev,
      new);

  return 0;
}

/*
 * Called from the JS main thread to free data associated with a thread
 * that is no longer running.
 */
void _emscripten_thread_free_data(pthread_t t) {
  // A thread can never free its own thread data.
  assert(t != pthread_self());
#ifndef NDEBUG
  if (t->profilerBlock) {
    emscripten_builtin_free(t->profilerBlock);
  }
#endif

  // Free the entire thread block (called map_base because musl normally
  // allocates this using mmap). This region includes the pthread structure
  // itself.
  unsigned char* block = t->map_base;
  dbg("_emscripten_thread_free_data thread=%p map_base=%p", t, block);
  // To aid in debugging, zero out the pthread struct (including the 'self'
  // magic ID) before freeing the block.
  memset(block, 0, sizeof(struct pthread));
  emscripten_builtin_free(block);
}

void _emscripten_thread_exit(void* result) {
  struct pthread *self = __pthread_self();
  assert(self);

  self->canceldisable = PTHREAD_CANCEL_DISABLE;
  self->cancelasync = PTHREAD_CANCEL_DEFERRED;
  self->result = result;

  _emscripten_thread_mailbox_shutdown(self);

  // Run any handlers registered with pthread_cleanup_push
  __run_cleanup_handlers();

  // Call into the musl function that runs destructors of all thread-specific
  // data.
  __pthread_tsd_run_dtors();

  __tl_lock();

  /* Process robust list in userspace to handle non-pshared mutexes
   * and the detached thread case where the robust list head will
   * be invalid when the kernel would process it. */
  __vm_lock();
  volatile void *volatile *rp;
  while ((rp = self->robust_list.head) && rp != &self->robust_list.head) {
    pthread_mutex_t *m = (void *)((char *)rp
                                  - offsetof(pthread_mutex_t, _m_next));
    int waiters = m->_m_waiters;
    int priv = (m->_m_type & 128) ^ 128;
    self->robust_list.pending = rp;
    self->robust_list.head = *rp;
    int cont = a_swap(&m->_m_lock, 0x40000000);
    self->robust_list.pending = 0;
    if (cont < 0 || waiters)
      __wake(&m->_m_lock, 1, priv);
  }
  __vm_unlock();
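  // (This follows musl's robust-mutex protocol: swapping 0x40000000 into
  // _m_lock marks the owner as dead so the next locker can observe
  // EOWNERDEAD, and 'priv' derives from the type's pshared bit (128) to
  // select a private vs. shared futex wake.)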

  if (!--libc.threads_minus_1) libc.need_locks = 0;

  self->next->prev = self->prev;
  self->prev->next = self->next;
  self->prev = self->next = self;

  __tl_unlock();

  if (emscripten_is_main_runtime_thread()) {
    exit(0);
    return;
  }

  // We are no longer hosting a pthread in this worker, so set __pthread_self
  // to NULL.
  __set_thread_state(NULL, 0, 0, 1);

  /* This atomic potentially competes with a concurrent pthread_detach
   * call; the loser is responsible for freeing thread resources. */
  int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

  if (state == DT_DETACHED) {
    _emscripten_thread_cleanup(self);
  } else {
    // Mark the thread as no longer running, so it can be joined.
    // Once we publish this, any threads that are waiting to join with us can
    // proceed and this worker can be recycled and used on another thread.
#ifdef EMSCRIPTEN_DYNAMIC_LINKING
    // When dynamic linking is enabled we need to keep track of zombie threads
    _emscripten_thread_exit_joinable(self);
#endif
    a_store(&self->detach_state, DT_EXITED);
    __wake(&self->detach_state, 1, 1); // Wake any joiner.
  }
}

// Mark as `no_sanitize("address")` since __pthread_exit destroys the current
// thread and runs its exit handlers. Without this, asan injects a call to
// __asan_handle_no_return before emscripten_unwind_to_js_event_loop, which
// seems to cause a crash later down the line.
__attribute__((no_sanitize("address")))
_Noreturn void __pthread_exit(void* retval) {
  _emscripten_thread_exit(retval);
  emscripten_unwind_to_js_event_loop();
}

weak_alias(__pthread_create, emscripten_builtin_pthread_create);
weak_alias(__pthread_create, pthread_create);
weak_alias(__pthread_exit, emscripten_builtin_pthread_exit);
weak_alias(__pthread_exit, pthread_exit);
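
/* For reference, a minimal sketch of how the entry points exported above are
 * exercised by user code (illustrative only, not part of this file; assumes
 * the program is built with `emcc -pthread`, which routes pthread_create and
 * pthread_exit to the implementations here):
 *
 *   #include <pthread.h>
 *   #include <stdio.h>
 *
 *   static void* worker(void* arg) {
 *     printf("hello from thread %p\n", (void*)pthread_self());
 *     return NULL;
 *   }
 *
 *   int main(void) {
 *     pthread_t t;
 *     if (pthread_create(&t, NULL, worker, NULL) != 0) return 1;
 *     pthread_join(t, NULL);
 *     return 0;
 *   }
 */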