LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? Keep a list of pre-allocated structures?
// Allocate in blocks? Re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = 0;
#endif

static void __kmp_init_node(kmp_depnode_t *node, bool on_stack) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
                        // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  // Init creates the first reference. Bit 0 indicates that this node
  // resides on the stack. The refcount is incremented and decremented in
  // steps of two, maintaining use of even numbers for heap nodes and odd
  // numbers for stack nodes.
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, on_stack ? 3 : 2);
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  __itt_sync_create(node, "OMP task dep node", NULL, 0);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_ADD(&node->dn.nrefs, 2);
  return node;
}

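// The parity trick above can be shown in isolation. The sketch below is a
// hypothetical standalone illustration, not part of the runtime: a single
// integer encodes both the reference count and the stack/heap origin of a
// node. Bit 0 is the on-stack flag; the remaining bits count references in
// steps of two, so the flag survives every increment and decrement.
#if 0
#include <cassert>

static int init_refs(bool on_stack) { return on_stack ? 3 : 2; }
static int add_ref(int refs) { return refs + 2; }     // parity is preserved
static int release_ref(int refs) { return refs - 2; } // parity is preserved

static void refcount_parity_demo() {
  int heap = init_refs(/*on_stack=*/false); // 2: one reference, even => heap
  int stack = init_refs(/*on_stack=*/true); // 3: one reference, odd => stack
  heap = add_ref(heap);   // 4: two references, still even
  stack = add_ref(stack); // 5: two references, still odd
  assert((heap & 1) == 0 && (stack & 1) == 1);
  // A stack node is referenced only by its owner once the count drops back
  // to 3, which is exactly what the waiting loops later in this file test.
  assert(release_ref(stack) == 3);
}
#endif
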
enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}

static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  h->last_all = current_dephash->last_all;

  // make sure buckets are properly initialized
  for (size_t i = 0; i < new_size; i++) {
    h->buckets[i] = NULL;
  }

  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
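
// The table grows through a fixed schedule of prime sizes rather than by
// doubling. A standalone sketch of the same policy (hypothetical, not part
// of the runtime): a resize is triggered by __kmp_dephash_find once the
// chained (conflicting) entries reach one per bucket on average, and each
// generation takes the next size from the table, capped at MAX_GEN.
#if 0
#include <cstddef>

static const size_t kSizes[] = {997,   2003,  4001,   8191,  16001,
                                32003, 64007, 131071, 270029};
static const size_t kMaxGen = 8;

// Returns the next table size, or 0 once growth stops. Mirrors the early
// return in __kmp_dephash_extend above; note that with kMaxGen == 8 the
// last entry of the size table is never reached.
static size_t next_size(size_t generation) {
  size_t gen = generation + 1;
  return gen >= kMaxGen ? 0 : kSizes[gen];
}

// Same load-factor test as __kmp_dephash_find below: extend only when the
// table is non-empty and averages at least one conflict per bucket.
static bool should_extend(size_t nelements, size_t nconflicts, size_t size) {
  return nelements != 0 && nconflicts / size >= 1;
}
#endif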

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->last_all = NULL;

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = NULL;

  return h;
}

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  size_t bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    if (!h->last_all) // no predecessor task with omp_all_memory dependence
      entry->last_out = NULL;
    else // else link the omp_all_memory depnode to the new entry
      entry->last_out = __kmp_node_ref(h->last_all);
    entry->last_set = NULL;
    entry->prev_set = NULL;
    entry->last_flag = 0;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#if OMPX_TASKGRAPH
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);
  if (source->dn.task && sink_task) {
    // Dependences between two tasks where one is inside the TDG and the
    // other is outside it are not supported.
    KMP_ASSERT(task_source->is_taskgraph == task_sink->is_taskgraph);
  }
  if (task_sink->is_taskgraph &&
      __kmp_tdg_is_recording(task_sink->tdg->tdg_status)) {
    kmp_node_info_t *source_info =
        &task_sink->tdg->record_map[task_source->td_tdg_task_id];
    bool exists = false;
    for (int i = 0; i < source_info->nsuccessors; i++) {
      if (source_info->successors[i] == task_sink->td_tdg_task_id) {
        exists = true;
        break;
      }
    }
    if (!exists) {
      if (source_info->nsuccessors >= source_info->successors_size) {
        // grow the successor list geometrically
        source_info->successors_size = 2 * source_info->successors_size;
        kmp_int32 *old_succ_ids = source_info->successors;
        kmp_int32 *new_succ_ids = (kmp_int32 *)__kmp_allocate(
            source_info->successors_size * sizeof(kmp_int32));
        source_info->successors = new_succ_ids;
        __kmp_free(old_succ_ids);
      }

      source_info->successors[source_info->nsuccessors] =
          task_sink->td_tdg_task_id;
      source_info->nsuccessors++;

      kmp_node_info_t *sink_info =
          &(task_sink->tdg->record_map[task_sink->td_tdg_task_id]);
      sink_info->npredecessors++;
    }
  }
#endif
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependences
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a = source, b = sink) in which
     task a blocks the execution of b; this is reported through the
     ompt_callback_task_dependence callback. */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
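
// A first-party tool can observe the edges reported above by registering for
// ompt_callback_task_dependence. A minimal sketch of such a tool follows
// (hypothetical and independent of this file; it only uses the documented
// OMPT interface from <omp-tools.h>):
#if 0
#include <omp-tools.h>
#include <stdio.h>

static void on_task_dependence(ompt_data_t *src_task_data,
                               ompt_data_t *sink_task_data) {
  // Called once per edge: the source task blocks the sink task.
  printf("dependence: %p -> %p\n", src_task_data->ptr, sink_task_data->ptr);
}

static int tool_initialize(ompt_function_lookup_t lookup,
                           int initial_device_num, ompt_data_t *tool_data) {
  ompt_set_callback_t set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  set_callback(ompt_callback_task_dependence,
               (ompt_callback_t)&on_task_dependence);
  return 1; // non-zero keeps the tool active
}

static void tool_finalize(ompt_data_t *tool_data) {}

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t result = {&tool_initialize, &tool_finalize,
                                            {0}};
  return &result;
}
#endif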

kmp_base_depnode_t *__kmpc_task_get_depnode(kmp_task_t *task) {
  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
  return td->td_depnode ? &(td->td_depnode->dn) : NULL;
}

kmp_depnode_list_t *__kmpc_task_get_successors(kmp_task_t *task) {
  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
  return td->td_depnode->dn.successors;
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
#if OMPX_TASKGRAPH
    kmp_tdg_status tdg_status = KMP_TDG_NONE;
    if (task) {
      kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
      if (td->is_taskgraph)
        tdg_status = KMP_TASK_TO_TASKDATA(task)->tdg->tdg_status;
      if (__kmp_tdg_is_recording(tdg_status))
        __kmp_track_dependence(gtid, dep, node, task);
    }
#endif
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        if (!dep->dn.successors || dep->dn.successors->node != node) {
#if OMPX_TASKGRAPH
          if (!(__kmp_tdg_is_recording(tdg_status)) && task)
#endif
            __kmp_track_dependence(gtid, dep, node, task);
          dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
          KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                        "%p\n",
                        gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                        KMP_TASK_TO_TASKDATA(task)));
          npredecessors++;
        }
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

// Add the edge 'sink' -> 'source' in the task dependency graph
static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
#if OMPX_TASKGRAPH
  kmp_tdg_status tdg_status = KMP_TDG_NONE;
  kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
  if (task) {
    if (td->is_taskgraph)
      tdg_status = KMP_TASK_TO_TASKDATA(task)->tdg->tdg_status;
    if (__kmp_tdg_is_recording(tdg_status) && sink->dn.task)
      __kmp_track_dependence(gtid, sink, source, task);
  }
#endif
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      if (!sink->dn.successors || sink->dn.successors->node != source) {
#if OMPX_TASKGRAPH
        if (!(__kmp_tdg_is_recording(tdg_status)) && task)
#endif
          __kmp_track_dependence(gtid, sink, source, task);
        sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
#if OMPX_TASKGRAPH
        if (__kmp_tdg_is_recording(tdg_status)) {
          kmp_taskdata_t *tdd = KMP_TASK_TO_TASKDATA(sink->dn.task);
          if (tdd->is_taskgraph) {
            if (tdd->td_flags.onced)
              // decrement npredecessors if sink->dn.task belongs to a
              // taskgraph and
              //  1) the task is reset to its initial state (by kmp_free_task)
              //     or
              //  2) the task is complete but not yet reset
              npredecessors--;
          }
        }
#endif
        npredecessors++;
      }
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
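
// Both overloads above use the same check / lock / re-check discipline on
// dn.task: the unlocked read cheaply skips predecessors that have already
// completed, and the second read under KMP_ACQUIRE_DEPNODE guards against a
// predecessor completing between the two reads. A generic sketch of the
// pattern (hypothetical; uses std::mutex instead of the runtime's depnode
// lock):
#if 0
#include <mutex>

struct Node {
  std::mutex lock;
  void *task; // becomes NULL once the task completes and releases its deps
};

static bool link_if_still_pending(Node *dep) {
  if (!dep->task) // fast path: already completed, nothing to link
    return false;
  std::lock_guard<std::mutex> guard(dep->lock);
  if (!dep->task) // re-check: it may have completed while we were locking
    return false;
  // ... safe to append to the successor list here ...
  return true;
}
#endif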

static inline kmp_int32
__kmp_process_dep_all(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *h,
                      bool dep_barrier, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d processing dep_all, "
                "dep_barrier = %d\n",
                gtid, dep_barrier));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;

  // process previous omp_all_memory node if any
  npredecessors +=
      __kmp_depnode_link_successor(gtid, thread, task, node, h->last_all);
  __kmp_node_deref(thread, h->last_all);
  if (!dep_barrier) {
    h->last_all = __kmp_node_ref(node);
  } else {
    // if this is a sync point in the serial sequence, then the previous
    // outputs are guaranteed to be completed after the execution of this
    // task so the previous output nodes can be cleared.
    h->last_all = NULL;
  }

  // process all regular dependences
  for (size_t i = 0; i < h->size; i++) {
    kmp_dephash_entry_t *info = h->buckets[i];
    if (!info) // skip empty slots in dephash
      continue;
    for (; info; info = info->next_in_bucket) {
      // for each entry the omp_all_memory works as an OUT dependence
      kmp_depnode_t *last_out = info->last_out;
      kmp_depnode_list_t *last_set = info->last_set;
      kmp_depnode_list_t *prev_set = info->prev_set;
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        info->last_out = NULL;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d found %d predecessors\n", gtid,
                npredecessors));
  return npredecessors;
}
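
// The dep_all path above serves the OpenMP 5.1 omp_all_memory reserved
// locator. A hedged user-level example of the construct it implements
// (standard OpenMP, nothing specific to this file):
#if 0
#include <omp.h>

void all_memory_demo(int *a, int *b) {
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: a[0])
    a[0] = 1;
    #pragma omp task depend(out: b[0])
    b[0] = 2;
    // Depends on every previously generated sibling task, whatever
    // addresses they listed: handled by __kmp_process_dep_all above.
    #pragma omp task depend(inout: omp_all_memory)
    { a[0] += b[0]; }
  }
}
#endif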

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependences : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_set = info->last_set;
    kmp_depnode_list_t *prev_set = info->prev_set;

    if (dep->flags.out) { // out or inout --> clean lists if any
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      }
    } else { // either IN or MTX or SET
      if (info->last_flag == 0 || info->last_flag == dep->flag) {
        // last_set either didn't exist or is of the same dep kind
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
        // link node as successor of all nodes in the prev_set if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, prev_set);
        if (dep_barrier) {
          // clean last_out and prev_set if any; don't touch last_set
          __kmp_node_deref(thread, last_out);
          info->last_out = NULL;
          __kmp_depnode_list_free(thread, prev_set);
          info->prev_set = NULL;
        }
      } else { // last_set is of different dep kind, make it prev_set
        // link node as successor of all nodes in the last_set
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        // clean last_out if any
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        // clean prev_set if any
        __kmp_depnode_list_free(thread, prev_set);
        if (!dep_barrier) {
          // move last_set to prev_set, new last_set will be allocated
          info->prev_set = last_set;
        } else {
          info->prev_set = NULL;
          info->last_flag = 0;
        }
        info->last_set = NULL;
      }
      // for dep_barrier last_flag value should remain:
      // 0 if last_set is empty, unchanged otherwise
      if (!dep_barrier) {
        info->last_flag = dep->flag; // store dep kind of the last_set
        info->last_set = __kmp_add_node(thread, info->last_set, node);
      }
      // check if we are processing MTX dependency
      if (dep->flag == KMP_DEP_MTX) {
        if (info->mtx_lock == NULL) {
          info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
          __kmp_init_lock(info->mtx_lock);
        }
        KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
        kmp_int32 m;
        // Save lock in node's array
        for (m = 0; m < MAX_MTX_DEPS; ++m) {
          // sort pointers in decreasing order to avoid potential livelock
          if (node->dn.mtx_locks[m] < info->mtx_lock) {
            KMP_DEBUG_ASSERT(!node->dn.mtx_locks[node->dn.mtx_num_locks]);
            for (int n = node->dn.mtx_num_locks; n > m; --n) {
              // shift right all lesser non-NULL pointers
              KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
              node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
            }
            node->dn.mtx_locks[m] = info->mtx_lock;
            break;
          }
        }
        KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
        node->dn.mtx_num_locks++;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
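
// The mutexinoutset branch above keeps each node's lock array sorted in
// decreasing address order, so every task acquires its mtx locks in one
// global order and no two tasks can each hold a lock the other one wants.
// A standalone sketch of the insertion step (hypothetical, not part of the
// runtime; the array must start NULL-filled, and NULL compares below any
// real pointer):
#if 0
#include <cassert>

enum { kMaxLocks = 4 };

static void insert_sorted(void *locks[kMaxLocks], int *num, void *lock) {
  assert(*num < kMaxLocks);
  int m;
  for (m = 0; m < kMaxLocks; ++m) {
    if (locks[m] < lock) { // first slot holding a smaller pointer (or NULL)
      for (int n = *num; n > m; --n)
        locks[n] = locks[n - 1]; // shift smaller pointers right
      locks[m] = lock;
      break;
    }
  }
  assert(m < kMaxLocks); // must have found a slot
  ++*num;
}
#endif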

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0, dep_all = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependences for task %p : %d "
                "possibly aliased dependences, %d non-aliased dependences : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0 &&
        dep_list[i].base_addr != (kmp_intptr_t)KMP_SIZE_T_MAX) {
      KMP_DEBUG_ASSERT(
          dep_list[i].flag == KMP_DEP_IN || dep_list[i].flag == KMP_DEP_OUT ||
          dep_list[i].flag == KMP_DEP_INOUT ||
          dep_list[i].flag == KMP_DEP_MTX || dep_list[i].flag == KMP_DEP_SET);
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          if (dep_list[i].flag != dep_list[j].flag) {
            // two different dependences on the same address work identically
            // to an OUT dependence
            dep_list[i].flag = KMP_DEP_OUT;
          }
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flag == KMP_DEP_MTX) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flag = KMP_DEP_OUT; // downgrade mutexinoutset to inout
        }
      }
    } else if (dep_list[i].flag == KMP_DEP_ALL ||
               dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) {
      // an omp_all_memory dependence can be marked by the compiler either as
      // (addr=0 && flag=0x80) (flag KMP_DEP_ALL) or as (addr=-1).
      // omp_all_memory overrides all other dependences if any
      dep_all = 1;
      break;
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependences
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  if (!dep_all) { // regular dependences
    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier,
                                             ndeps, dep_list, task);
    npredecessors += __kmp_process_deps<false>(
        gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
  } else { // omp_all_memory dependence
    npredecessors = __kmp_process_dep_all(gtid, node, *hash, dep_barrier, task);
  }

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0;
}
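
// The filtering loop above merges repeated list items that name the same
// address. A hedged user-level illustration (standard OpenMP, nothing
// specific to this file): both clauses on the first task name &x[0], and
// because the kinds differ the runtime merges them into an OUT dependence,
// per the loop above.
#if 0
#include <omp.h>

void duplicate_dep_demo(int *x) {
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(in: x[0]) depend(out: x[0]) // merged to OUT
    x[0] = 1;
    #pragma omp task depend(in: x[0]) // must wait for the task above
    (void)x[0];
  }
}
#endif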

kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPX_TASKGRAPH
  // record TDG with deps
  if (new_taskdata->is_taskgraph &&
      __kmp_tdg_is_recording(new_taskdata->tdg->tdg_status)) {
    kmp_tdg_info_t *tdg = new_taskdata->tdg;
    // extend record_map if needed
    if (new_taskdata->td_task_id >= tdg->map_size) {
      __kmp_acquire_bootstrap_lock(&tdg->graph_lock);
      if (new_taskdata->td_task_id >= tdg->map_size) {
        kmp_uint old_size = tdg->map_size;
        kmp_uint new_size = old_size * 2;
        kmp_node_info_t *old_record = tdg->record_map;
        kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate(
            new_size * sizeof(kmp_node_info_t));
        KMP_MEMCPY(new_record, tdg->record_map,
                   old_size * sizeof(kmp_node_info_t));
        tdg->record_map = new_record;

        __kmp_free(old_record);

        for (kmp_int i = old_size; i < new_size; i++) {
          kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate(
              __kmp_successors_size * sizeof(kmp_int32));
          new_record[i].task = nullptr;
          new_record[i].successors = successorsList;
          new_record[i].nsuccessors = 0;
          new_record[i].npredecessors = 0;
          new_record[i].successors_size = __kmp_successors_size;
          KMP_ATOMIC_ST_REL(&new_record[i].npredecessors_counter, 0);
        }
        // update the size at the end, so that other threads do not use
        // old_record while map_size is already updated
        tdg->map_size = new_size;
      }
      __kmp_release_bootstrap_lock(&tdg->graph_lock);
    }
    tdg->record_map[new_taskdata->td_tdg_task_id].task = new_task;
    tdg->record_map[new_taskdata->td_tdg_task_id].parent_task =
        new_taskdata->td_parent;
    KMP_ATOMIC_INC(&tdg->num_tasks);
  }
#endif
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame),
          &(new_taskdata->ompt_task_info.task_data),
          TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX)
        ompt_deps[i].dependence_type = ompt_dependence_type_out_all_memory;
      else if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
      else if (dep_list[i].flags.all)
        ompt_deps[i].dependence_type = ompt_dependence_type_out_all_memory;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_out_all_memory;
      else if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
      else if (noalias_dep_list[i].flags.all)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_out_all_memory;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until the end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial &&
           !(task_team && (task_team->tt.tt_found_proxy_tasks ||
                           task_team->tt.tt_hidden_helper_task_encountered));

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node, /*on_stack=*/false);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependences: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependences "
                  "for task (serialized) loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependences : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
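
// This entry point is what a compiler emits for a task construct that carries
// depend clauses. A hedged source-level example (standard OpenMP; the
// lowering named in the comment is an assumption about typical codegen, not a
// guarantee):
#if 0
#include <omp.h>

void task_with_deps_demo(int *a, int *b) {
  #pragma omp parallel
  #pragma omp single
  {
    // Roughly lowered to a task allocation followed by
    // __kmpc_omp_task_with_deps(loc, gtid, task, /*ndeps=*/1, deps, 0, NULL).
    #pragma omp task depend(out: a[0])
    a[0] = 42;
    #pragma omp task depend(in: a[0]) depend(out: b[0]) // waits for the first
    b[0] = a[0] + 1;
  }
}
#endif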

#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_taskwait_complete, NULL);
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */

void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  __kmpc_omp_taskwait_deps_51(loc_ref, gtid, ndeps, dep_list, ndeps_noalias,
                              noalias_dep_list, false);
}

/* __kmpc_omp_taskwait_deps_51: entry point for the OpenMP 5.1 taskwait
   construct with a nowait clause. The body that used to live in
   __kmpc_omp_wait_deps() is now in this function. */
void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid,
                                 kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                                 kmp_int32 ndeps_noalias,
                                 kmp_depend_info_t *noalias_dep_list,
                                 kmp_int32 has_no_wait) {
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(enter): T#%d loc=%p nowait#%d\n",
                gtid, loc_ref, has_no_wait));
  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no dependences to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  // this function represents a taskwait construct with depend clause
  // We signal 4 events:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame), taskwait_task_data,
          ompt_task_taskwait | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until the end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
    ompt_deps = NULL;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  // We can return immediately as:
  //  - dependences are not computed in serial teams (except with proxy tasks)
  //  - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore =
      ignore && thread->th.th_task_team != NULL &&
      thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE &&
      thread->th.th_task_team->tt.tt_hidden_helper_task_encountered == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node, /*on_stack=*/true);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */

    // There may still be references to this node here, due to task stealing.
    // Wait for them to be released.
    kmp_int32 nrefs;
    while ((nrefs = node.dn.nrefs) > 3) {
      KMP_DEBUG_ASSERT((nrefs & 1) == 1);
      KMP_YIELD(TRUE);
    }
    KMP_DEBUG_ASSERT(nrefs == 3);

    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32<false, false> flag(
      (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  // Wait until the last __kmp_release_deps is finished before we free the
  // current stack frame holding the "node" variable; once its nrefs count
  // reaches 3 (meaning 1, since bit zero of the refcount indicates a stack
  // rather than a heap address), we're sure nobody else can try to reference
  // it again.
  kmp_int32 nrefs;
  while ((nrefs = node.dn.nrefs) > 3) {
    KMP_DEBUG_ASSERT((nrefs & 1) == 1);
    KMP_YIELD(TRUE);
  }
  KMP_DEBUG_ASSERT(nrefs == 3);

#if OMPT_SUPPORT
  __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d finished waiting : "
                "loc=%p\n",
                gtid, loc_ref));
}
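
// A hedged user-level example of the construct served by the function above
// (depend on taskwait is OpenMP 5.0; the nowait form is 5.1):
#if 0
#include <omp.h>

void taskwait_deps_demo(int *x) {
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: x[0])
    x[0] = 7;
    // Waits only for sibling tasks with a dependence on x[0], not for all
    // child tasks the way a plain taskwait would.
    #pragma omp taskwait depend(in: x[0])
    (void)x[0];
  }
}
#endif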