/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
                        // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
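
// Illustrative only (not part of the runtime): the hash mixes two shifted
// copies of the dependence address, so addresses that share a cache line can
// still land in different buckets. A minimal sketch with hypothetical
// addresses of two adjacent 8-byte variables:
//
//   kmp_intptr_t a = 0x7ffd1000; // &x (hypothetical)
//   kmp_intptr_t b = 0x7ffd1008; // &y, next double on the same line
//   // (a >> 6) == (b >> 6), but (a >> 2) and (b >> 2) differ in bit 1,
//   // so the XOR results differ and the two entries hash apart:
//   __kmp_dephash_hash(a, 97); // == (((a >> 6) ^ (a >> 2)) % 97)
//   __kmp_dephash_hash(b, 97); // a different bucket for these addresses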

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

#ifdef KMP_DEBUG
  h->nelements = 0;
  h->nconflicts = 0;
#endif
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}
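
// Illustrative only: the table header and its bucket array come from a single
// allocation, which is why `h + 1` is a valid bucket pointer. It points just
// past the kmp_dephash_t header, where the h_size bucket slots live:
//
//   | kmp_dephash_t | buckets[0] | buckets[1] | ... | buckets[h_size-1] |
//   ^               ^
//   h               (kmp_dephash_entry **)(h + 1) == h->buckets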

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1

static kmp_dephash_entry *
__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
// create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
#endif
  }
  return entry;
}
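
// Illustrative only: __kmp_dephash_find never fails; a miss allocates a fresh
// entry and pushes it onto the head of the bucket's chain, so callers can
// read and write through the result unconditionally. A minimal usage sketch
// with a hypothetical variable x:
//
//   kmp_dephash_entry_t *info =
//       __kmp_dephash_find(thread, hash, (kmp_intptr_t)&x);
//   kmp_depnode_t *last_out = info->last_out; // NULL on the first lookup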

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependencies
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
     */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
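
// Illustrative only: a first-party tool can observe these edges through the
// standard OMPT callback. A minimal sketch of the tool side, assuming the
// tool already obtained ompt_set_callback from its lookup function during
// ompt_start_tool initialization (names from the OMPT interface, not this
// file):
//
//   static void on_dependence(ompt_data_t *src_task_data,
//                             ompt_data_t *sink_task_data) {
//     // src blocks sink; task_data values were assigned at task creation
//   }
//   ompt_set_callback(ompt_callback_task_dependence,
//                     (ompt_callback_t)on_dependence);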

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
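
// Illustrative only: a worked example of how these overloads wire the graph.
// For the (hypothetical) program
//
//   #pragma omp task depend(out: x)  // task A
//   #pragma omp task depend(in: x)   // task B
//
// processing B's "in" dependence finds A's depnode as last_out for &x, takes
// that node's lock, appends B to A's dn.successors list, and reports one
// predecessor for B. The dn.task pointer is re-checked under the lock because
// a node whose task has already completed has it cleared, so no edge (and no
// predecessor count) is recorded for finished tasks.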

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before creating new list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before creating new list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // Save lock in node's array
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        // sort pointers in decreasing order to avoid potential livelock
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
      node->dn.mtx_num_locks++;
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
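
// Illustrative only: the mutexinoutset locks are kept sorted in decreasing
// pointer order so that every task acquires its mtx locks in the same global
// order, which prevents livelock between tasks sharing several mtx
// dependences. A worked example of the insertion loop above, with
// hypothetical lock addresses arriving as 0x30, then 0x50, then 0x10
// (MAX_MTX_DEPS >= 3):
//
//   mtx_locks: [0x30, NULL, NULL] // 0x30 beats NULL at m=0
//   mtx_locks: [0x50, 0x30, NULL] // 0x30 shifted right, 0x50 placed at m=0
//   mtx_locks: [0x50, 0x30, 0x10] // 0x10 placed at the first smaller slot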

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t *hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0;
}
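
// Illustrative only: a worked example of the npredecessors bootstrap above.
// Suppose linking finds 2 predecessors. The counter starts at -1, so a
// predecessor completing mid-processing decrements it to -2 without ever
// reaching 0, and cannot queue this task early. The final fetch_add then
// applies 2 (found) + 1 (cancelling the initial -1):
//
//   -1 + 3 == 2 // both predecessors still pending: task stays blocked
//   -2 + 3 == 1 // one finished meanwhile: one dependence still outstanding
//   -3 + 3 == 0 // all finished meanwhile: safe to queue immediately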

kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable.ptr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable.ptr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until task_end */
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependencies have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
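
// Illustrative only (not part of the runtime): a rough sketch of how a
// compiler might lower "#pragma omp task depend(in: x) depend(out: y)" into
// this entry point. The flag fields match the bitfields used in this file;
// new_task, loc, and gtid are assumed to come from the usual
// __kmpc_omp_task_alloc codegen:
//
//   kmp_depend_info_t deps[2] = {}; // zero-init so unused flag bits are clear
//   deps[0].base_addr = (kmp_intptr_t)&x;
//   deps[0].len = sizeof(x);
//   deps[0].flags.in = 1;  // depend(in: x)
//   deps[1].base_addr = (kmp_intptr_t)&y;
//   deps[1].len = sizeof(y);
//   deps[1].flags.out = 1; // depend(out: y)
//   __kmpc_omp_task_with_deps(&loc, gtid, new_task, 2, deps, 0, NULL);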

void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}
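
// Illustrative only: a sketch of when a compiler might emit this call. A
// dependent task guarded by "if(0)" must run immediately in the encountering
// thread, so its dependences are waited on up front. The begin_if0/
// complete_if0 pairing below is an assumption about the surrounding codegen,
// not something this file defines:
//
//   // #pragma omp task depend(in: x) if(0)
//   __kmpc_omp_wait_deps(&loc, gtid, 1, deps, 0, NULL); // block on x
//   __kmpc_omp_task_begin_if0(&loc, gtid, new_task);
//   task_entry(gtid, new_task); // run the undeferred task body
//   __kmpc_omp_task_complete_if0(&loc, gtid, new_task);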