#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* this is not called "for" because of lowering of
                       "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);
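// cons_text_c maps each enum cons_type value to a printable construct name,
// and cons_text_c_num bounds the valid indices (checked in __kmp_pragma
// below). PUSH_MSG/POP_MSG build the KE_TRACE(100, ...) argument lists emitted
// when a construct is pushed onto or popped off a thread's construct stack.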
static void __kmp_check_null_func(void) { /* nothing to do */
}
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  // Copy the old entries into the new, larger stack.
  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}
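// __kmp_pragma formats a construct/source-location description for error
// messages by splitting ident->psource on ';' into file, routine, and line
// components. It returns heap memory; callers release it with
// KMP_INTERNAL_FREE (see __kmp_error_construct below).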
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s", ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma
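// __kmp_error_construct and __kmp_error_construct2 format the offending
// construct(s) via __kmp_pragma and abort through __kmp_fatal; the second
// variant additionally reports the conflicting construct already on the stack.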
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}
void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}
#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif // KMP_DEBUG
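// Push/check routines: each thread keeps a construct stack (th.th_cons) with
// separate top indices for parallel (p_top), worksharing (w_top), and sync
// (s_top) constructs, used to diagnose illegal OpenMP nesting.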
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
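// __kmp_check_sync validates nesting before a sync construct is pushed:
// "ordered" must bind to an ordered worksharing region, "critical" may not be
// nested inside a critical guarded by the same lock, and "master" / "reduce"
// may not appear inside a worksharing or sync region of the same parallel.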
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
/* we are not inside a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already owns the lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already owns the lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up the construct stack looking for a critical with the same name
         (lock) as the one we are entering */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a matching entry on the stack */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
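// Pop routines: each verifies that the construct being closed matches the top
// of its stack (otherwise a CnsDetectedEnd / CnsExpectedEnd error is issued),
// then unlinks the entry and restores the previous top index.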
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
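// __kmp_check_barrier rejects a barrier placed inside a worksharing or sync
// construct of the enclosing parallel region.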
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}