GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp
//===-- tsan_go.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ThreadSanitizer runtime for Go language.
//
//===----------------------------------------------------------------------===//

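// Note: the __tsan_* functions defined below are the C entry points that the
// Go runtime's race detector support calls (on memory accesses, function
// entry/exit, goroutine start/end, allocations and synchronization events).
// Symbolization and Processor lookup are delegated back to the Go runtime
// through the go_runtime_cb callback registered in __tsan_init.
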
#include "tsan_rtl.h"
#include "tsan_symbolize.h"
#include "sanitizer_common/sanitizer_common.h"
#include <stdlib.h>

namespace __tsan {

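// The interceptor and dynamic-annotation initializers are no-ops: the Go
// build intercepts nothing, but the common TSan runtime still calls these
// hooks during Initialize().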
void InitializeInterceptors() {
}

void InitializeDynamicAnnotations() {
}

bool IsExpectedReport(uptr addr, uptr size) {
  return false;
}

void *Alloc(uptr sz) { return InternalAlloc(sz); }

void FreeImpl(void *p) { InternalFree(p); }

// Callback into Go.
static void (*go_runtime_cb)(uptr cmd, void *ctx);

enum {
  CallbackGetProc = 0,
  CallbackSymbolizeCode = 1,
  CallbackSymbolizeData = 2,
};

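// The callback is invoked with one of the Callback* commands above and a
// pointer to the matching context struct, which the Go side fills in. The
// command values are assumed to be kept in sync with the Go runtime's race
// detector glue.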
struct SymbolizeCodeContext {
  uptr pc;
  char *func;
  char *file;
  uptr line;
  uptr off;
  uptr res;
};

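// Symbolizes a PC by asking the Go runtime. Each callback invocation fills
// cbctx for one frame; for inlined frames the Go side returns the PC of the
// caller frame in cbctx.pc, and the loop chains SymbolizedStack entries until
// the outermost frame (cbctx.pc == addr) is reached or symbolization fails
// (cbctx.res == 0).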
SymbolizedStack *SymbolizeCode(uptr addr) {
  SymbolizedStack *first = SymbolizedStack::New(addr);
  SymbolizedStack *s = first;
  for (;;) {
    SymbolizeCodeContext cbctx;
    internal_memset(&cbctx, 0, sizeof(cbctx));
    cbctx.pc = addr;
    go_runtime_cb(CallbackSymbolizeCode, &cbctx);
    if (cbctx.res == 0)
      break;
    AddressInfo &info = s->info;
    info.module_offset = cbctx.off;
    info.function = internal_strdup(cbctx.func ? cbctx.func : "??");
    info.file = internal_strdup(cbctx.file ? cbctx.file : "-");
    info.line = cbctx.line;
    info.column = 0;

    if (cbctx.pc == addr) // outermost (non-inlined) function
      break;
    addr = cbctx.pc;
    // Allocate a stack entry for the parent of the inlined function.
    SymbolizedStack *s2 = SymbolizedStack::New(addr);
    s->next = s2;
    s = s2;
  }
  return first;
}

struct SymbolizeDataContext {
  uptr addr;
  uptr heap;
  uptr start;
  uptr size;
  char *name;
  char *file;
  uptr line;
  uptr res;
};

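// Classifies a data address by asking the Go runtime. Heap addresses are
// resolved through the metamap to recover the allocation size, goroutine and
// stack; anything else is reported as a global using the name/file/line
// provided by the Go side.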
ReportLocation *SymbolizeData(uptr addr) {
  SymbolizeDataContext cbctx;
  internal_memset(&cbctx, 0, sizeof(cbctx));
  cbctx.addr = addr;
  go_runtime_cb(CallbackSymbolizeData, &cbctx);
  if (!cbctx.res)
    return 0;
  if (cbctx.heap) {
    MBlock *b = ctx->metamap.GetBlock(cbctx.start);
    if (!b)
      return 0;
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = cbctx.start;
    loc->heap_chunk_size = b->siz;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    return loc;
  } else {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationGlobal;
    loc->global.name = internal_strdup(cbctx.name ? cbctx.name : "??");
    loc->global.file = internal_strdup(cbctx.file ? cbctx.file : "??");
    loc->global.line = cbctx.line;
    loc->global.start = cbctx.start;
    loc->global.size = cbctx.size;
    return loc;
  }
}

static ThreadState *main_thr;
static bool inited;

static Processor* get_cur_proc() {
  if (UNLIKELY(!inited)) {
    // Running Initialize().
    // We have not yet returned the Processor to Go, so we cannot ask it back.
    // Currently, Initialize() does not use the Processor, so return nullptr.
    return nullptr;
  }
  Processor *proc;
  go_runtime_cb(CallbackGetProc, &proc);
  return proc;
}

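// The Processor is not cached in ThreadState in the Go build; it is looked up
// through the Go runtime (CallbackGetProc) on every use, presumably because
// goroutines migrate between threads/Ps.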
Processor *ThreadState::proc() {
  return get_cur_proc();
}

extern "C" {

static ThreadState *AllocGoroutine() {
  auto *thr = (ThreadState *)Alloc(sizeof(ThreadState));
  internal_memset(thr, 0, sizeof(*thr));
  return thr;
}

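// Called once at startup by the Go runtime. Registers the callback into Go,
// allocates the ThreadState for the main goroutine, runs the common TSan
// initialization, and returns both the main ThreadState and its Processor.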
void __tsan_init(ThreadState **thrp, Processor **procp,
                 void (*cb)(uptr cmd, void *cb)) {
  go_runtime_cb = cb;
  ThreadState *thr = AllocGoroutine();
  main_thr = *thrp = thr;
  Initialize(thr);
  *procp = thr->proc1;
  inited = true;
}

void __tsan_fini() {
  // FIXME: Not necessarily thread 0.
  ThreadState *thr = main_thr;
  int res = Finalize(thr);
  exit(res);
}

void __tsan_map_shadow(uptr addr, uptr size) {
  MapShadow(addr, size);
}

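// Memory access hooks. The *_pc variants additionally record callpc as an
// extra caller frame (FuncEntry/FuncExit) around the access so that reports
// show where the access was made from.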
void __tsan_read(ThreadState *thr, void *addr, void *pc) {
  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
}

void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
  if (callpc != 0)
    FuncEntry(thr, callpc);
  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
  if (callpc != 0)
    FuncExit(thr);
}

void __tsan_write(ThreadState *thr, void *addr, void *pc) {
  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
}

void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
  if (callpc != 0)
    FuncEntry(thr, callpc);
  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
  if (callpc != 0)
    FuncExit(thr);
}

void __tsan_read_range(ThreadState *thr, void *addr, uptr size, uptr pc) {
  MemoryAccessRange(thr, (uptr)pc, (uptr)addr, size, false);
}

void __tsan_write_range(ThreadState *thr, void *addr, uptr size, uptr pc) {
  MemoryAccessRange(thr, (uptr)pc, (uptr)addr, size, true);
}

void __tsan_func_enter(ThreadState *thr, void *pc) {
  FuncEntry(thr, (uptr)pc);
}

void __tsan_func_exit(ThreadState *thr) {
  FuncExit(thr);
}

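// Heap hooks. __tsan_malloc registers the block in the metamap and resets the
// shadow for the range so that accesses to a previous use of the memory do
// not produce stale reports; __tsan_free drops the metadata for the range.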
void __tsan_malloc(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  CHECK(inited);
  if (thr && pc)
    ctx->metamap.AllocBlock(thr, pc, p, sz);
  MemoryResetRange(thr, pc, (uptr)p, sz);
}

void __tsan_free(uptr p, uptr sz) {
  ctx->metamap.FreeRange(get_cur_proc(), p, sz, false);
}

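// Goroutine lifecycle. __tsan_go_start allocates a fresh ThreadState for the
// new goroutine and registers it against the parent's creation PC;
// __tsan_go_end finishes the goroutine and frees its state.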
void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
  ThreadState *thr = AllocGoroutine();
  *pthr = thr;
  Tid goid = ThreadCreate(parent, (uptr)pc, 0, true);
  ThreadStart(thr, goid, 0, ThreadType::Regular);
}

void __tsan_go_end(ThreadState *thr) {
  ThreadFinish(thr);
  Free(thr);
}

void __tsan_proc_create(Processor **pproc) {
  *pproc = ProcCreate();
}

void __tsan_proc_destroy(Processor *proc) {
  ProcDestroy(proc);
}

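// Synchronization hooks: the Go runtime calls these around its
// synchronization operations (channels, mutexes, etc.) to establish
// happens-before edges between goroutines.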
void __tsan_acquire(ThreadState *thr, void *addr) {
  Acquire(thr, 0, (uptr)addr);
}

void __tsan_release_acquire(ThreadState *thr, void *addr) {
  ReleaseStoreAcquire(thr, 0, (uptr)addr);
}

void __tsan_release(ThreadState *thr, void *addr) {
  ReleaseStore(thr, 0, (uptr)addr);
}

void __tsan_release_merge(ThreadState *thr, void *addr) {
  Release(thr, 0, (uptr)addr);
}

void __tsan_finalizer_goroutine(ThreadState *thr) { AcquireGlobal(thr); }

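// Mutex hooks: write != 0 selects exclusive (writer) lock handling, 0 selects
// reader lock handling.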
void __tsan_mutex_before_lock(ThreadState *thr, uptr addr, uptr write) {
  if (write)
    MutexPreLock(thr, 0, addr);
  else
    MutexPreReadLock(thr, 0, addr);
}

void __tsan_mutex_after_lock(ThreadState *thr, uptr addr, uptr write) {
  if (write)
    MutexPostLock(thr, 0, addr);
  else
    MutexPostReadLock(thr, 0, addr);
}

void __tsan_mutex_before_unlock(ThreadState *thr, uptr addr, uptr write) {
  if (write)
    MutexUnlock(thr, 0, addr);
  else
    MutexReadUnlock(thr, 0, addr);
}

void __tsan_go_ignore_sync_begin(ThreadState *thr) {
  ThreadIgnoreSyncBegin(thr, 0);
}

void __tsan_go_ignore_sync_end(ThreadState *thr) { ThreadIgnoreSyncEnd(thr); }

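// Returns the number of race reports produced so far, presumably used by the
// Go runtime to decide the exit status when races were detected.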
void __tsan_report_count(u64 *pn) {
  Lock lock(&ctx->report_mtx);
  *pn = ctx->nreported;
}

} // extern "C"
} // namespace __tsan