GitHub Repository: freebsd/freebsd-src
Path: contrib/llvm-project/openmp/runtime/src/kmp_error.cpp (branch: main)

/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* this is not called "for"
                       because of lowering of
                       "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\"",
    "\"masked\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);
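
// The strings above are indexed by enum cons_type, so the table and the enum
// must stay in sync; __kmp_pragma() below asserts that any index it receives
// falls within cons_text_c_num.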

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}
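
// Growth is geometric with a constant bump: starting from MIN_STACK (100),
// successive capacities run 100 -> 300 -> 700 -> 1500 -> ... Because the old
// array is intentionally leaked (see the NOTE above) and each step more than
// doubles the capacity, the total leaked memory stays proportional to the
// final stack size.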

// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma
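
// For reference: ident->psource is a ";"-separated location string. Judging
// by the splits above (the first field is discarded), the layout appears to
// be something like ";file;func;line;...", so a psource of
// ";foo.c;main;12;..." would yield file="foo.c", func="main", line="12".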

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}
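
// The construct stack threads three separate chains through the same array:
// p_top points at the innermost "parallel" entry, w_top at the innermost
// worksharing entry, and s_top at the innermost synchronization entry, with
// each entry's .prev linking to the enclosing construct of the same kind.
// Slot 0 is a ct_none sentinel, so all three chains terminate at index 0.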

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
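
// For illustration (not part of the runtime): the check above rejects a
// worksharing construct closely nested inside another worksharing construct
// bound to the same parallel region, e.g.
//
//   #pragma omp parallel
//   {
//     #pragma omp for
//     for (int i = 0; i < n; ++i) {
//       #pragma omp single // error: worksharing inside worksharing
//       { /* ... */ }
//     }
//   }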

void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
      /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (we may not always find one, because of
         * interleaved critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in a CRITICAL which is inside a CRITICAL construct of the same
       * name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_masked || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}
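
// For illustration (not part of the runtime): the ct_critical branch above
// catches the self-deadlock where a thread re-enters a critical section
// whose lock it already holds, e.g.
//
//   #pragma omp critical(alpha)
//   {
//     #pragma omp critical(alpha) // error: same-name nesting => deadlock
//     { /* ... */ }
//   }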

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}
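
// Note: after the pop, w_top refers to the enclosing worksharing entry (or
// the ct_none sentinel at slot 0), so the return value tells the caller what
// worksharing context, if any, it is now in.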

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
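
// For illustration (not part of the runtime): __kmp_check_barrier rejects a
// barrier closely nested inside a worksharing or synchronization construct
// of the current parallel region, e.g.
//
//   #pragma omp parallel
//   {
//     #pragma omp critical
//     {
//       #pragma omp barrier // error: barrier inside a SYNC construct
//     }
//   }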