@@ -1656,7 +1656,8 @@ kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
       (task_entry != (kmp_routine_entry_t)__kmp_taskloop_task)) {
     taskdata->is_taskgraph = 1;
     taskdata->tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
-    taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
+    taskdata->td_task_id = KMP_GEN_TASK_ID();
+    taskdata->td_tdg_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
   }
 #endif
   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
@@ -2019,11 +2020,11 @@ kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
       __kmp_tdg_is_recording(new_taskdata->tdg->tdg_status)) {
     kmp_tdg_info_t *tdg = new_taskdata->tdg;
     // extend the record_map if needed
-    if (new_taskdata->td_task_id >= new_taskdata->tdg->map_size) {
+    if (new_taskdata->td_tdg_task_id >= new_taskdata->tdg->map_size) {
       __kmp_acquire_bootstrap_lock(&tdg->graph_lock);
       // map_size could have been updated by another thread if recursive
       // taskloop
-      if (new_taskdata->td_task_id >= tdg->map_size) {
+      if (new_taskdata->td_tdg_task_id >= tdg->map_size) {
         kmp_uint old_size = tdg->map_size;
         kmp_uint new_size = old_size * 2;
         kmp_node_info_t *old_record = tdg->record_map;
@@ -2052,9 +2053,9 @@ kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
       __kmp_release_bootstrap_lock(&tdg->graph_lock);
     }
     // record a task
-    if (tdg->record_map[new_taskdata->td_task_id].task == nullptr) {
-      tdg->record_map[new_taskdata->td_task_id].task = new_task;
-      tdg->record_map[new_taskdata->td_task_id].parent_task =
+    if (tdg->record_map[new_taskdata->td_tdg_task_id].task == nullptr) {
+      tdg->record_map[new_taskdata->td_tdg_task_id].task = new_task;
+      tdg->record_map[new_taskdata->td_tdg_task_id].parent_task =
           new_taskdata->td_parent;
       KMP_ATOMIC_INC(&tdg->num_tasks);
     }
@@ -4681,14 +4682,11 @@ kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src
 
   // Initialize new task (only specific fields not affected by memcpy)
 #if OMPX_TASKGRAPH
-  if (!taskdata->is_taskgraph || taskloop_recur)
-    taskdata->td_task_id = KMP_GEN_TASK_ID();
-  else if (taskdata->is_taskgraph &&
-           __kmp_tdg_is_recording(taskdata_src->tdg->tdg_status))
-    taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
-#else
-  taskdata->td_task_id = KMP_GEN_TASK_ID();
+  if (taskdata->is_taskgraph && !taskloop_recur &&
+      __kmp_tdg_is_recording(taskdata_src->tdg->tdg_status))
+    taskdata->td_tdg_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
 #endif
+  taskdata->td_task_id = KMP_GEN_TASK_ID();
   if (task->shareds != NULL) { // need setup shareds pointer
     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
     task->shareds = &((char *)taskdata)[shareds_offset];
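
The patch separates two identifiers: td_task_id keeps the value produced by KMP_GEN_TASK_ID(), while the new td_tdg_task_id is the per-graph counter (KMP_ATOMIC_INC on __kmp_tdg_task_id) used only to index the taskgraph's record_map while recording, so recording no longer overwrites the regular task id. A minimal sketch of that index-and-double recording pattern is shown below; it uses simplified stand-in types and names (NodeInfo, TdgRecord, std::mutex), not the actual kmp_* structures or the bootstrap lock.

    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Simplified stand-ins for kmp_node_info_t / kmp_tdg_info_t (illustration only).
    struct NodeInfo {
      void *task = nullptr;
      void *parent_task = nullptr;
    };

    struct TdgRecord {
      std::vector<NodeInfo> record_map{16}; // initial map_size
      std::mutex graph_lock;                // plays the role of the bootstrap lock

      // Record a task under its per-graph id, doubling the map when the id
      // falls outside the current size, as __kmp_omp_task does above.
      void record(std::size_t tdg_task_id, void *task, void *parent) {
        if (tdg_task_id >= record_map.size()) {
          std::lock_guard<std::mutex> guard(graph_lock);
          // the map may have grown while we waited for the lock
          while (tdg_task_id >= record_map.size())
            record_map.resize(record_map.size() * 2);
        }
        if (record_map[tdg_task_id].task == nullptr) {
          record_map[tdg_task_id].task = task;
          record_map[tdg_task_id].parent_task = parent;
        }
      }
    };

Because td_tdg_task_id starts from the graph-local counter, it stays a dense index into record_map, which is what makes the simple doubling scheme above work.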