@@ -42,221 +42,6 @@ static kmp_tdg_info_t *__kmp_find_tdg(kmp_int32 tdg_id);
int __kmp_taskloop_task(int gtid, void *ptask);
#endif

-#ifdef BUILD_TIED_TASK_STACK
-
-// __kmp_trace_task_stack: print the tied tasks from the task stack in order
-// from top do bottom
-//
-// gtid: global thread identifier for thread containing stack
-// thread_data: thread data for task team thread containing stack
-// threshold: value above which the trace statement triggers
-// location: string identifying call site of this function (for trace)
-static void __kmp_trace_task_stack(kmp_int32 gtid,
-                                   kmp_thread_data_t *thread_data,
-                                   int threshold, char *location) {
-  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
-  kmp_taskdata_t **stack_top = task_stack->ts_top;
-  kmp_int32 entries = task_stack->ts_entries;
-  kmp_taskdata_t *tied_task;
-
-  KA_TRACE(
-      threshold,
-      ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
-       "first_block = %p, stack_top = %p\n",
-       location, gtid, entries, task_stack->ts_first_block, stack_top));
-
-  KMP_DEBUG_ASSERT(stack_top != NULL);
-  KMP_DEBUG_ASSERT(entries > 0);
-
-  while (entries != 0) {
-    KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
-    // fix up ts_top if we need to pop from previous block
-    if (entries & TASK_STACK_INDEX_MASK == 0) {
-      kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
-
-      stack_block = stack_block->sb_prev;
-      stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
-    }
-
-    // finish bookkeeping
-    stack_top--;
-    entries--;
-
-    tied_task = *stack_top;
-
-    KMP_DEBUG_ASSERT(tied_task != NULL);
-    KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
-
-    KA_TRACE(threshold,
-             ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
-              "stack_top=%p, tied_task=%p\n",
-              location, gtid, entries, stack_top, tied_task));
-  }
-  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
-
-  KA_TRACE(threshold,
-           ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
-            location, gtid));
-}
-
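Editor's note on the block-boundary test just deleted (it recurs in the push and pop routines below): in C and C++, == binds tighter than &, so entries & TASK_STACK_INDEX_MASK == 0 parses as entries & (TASK_STACK_INDEX_MASK == 0), which is always zero for any nonzero mask; modern compilers flag this pattern with -Wparentheses. A minimal standalone demonstration, using an assumed mask value (the real one would derive from TASK_STACK_BLOCK_SIZE):

```cpp
#include <cassert>

int main() {
  const unsigned TASK_STACK_INDEX_MASK = 0x1F; // assumed: block size 32
  unsigned entries = 32;                       // exactly one full block

  // Intended meaning: the low index bits are clear, i.e. a block boundary.
  assert((entries & TASK_STACK_INDEX_MASK) == 0);

  // As written: the comparison evaluates first, so the expression is
  // entries & 0, which can never be nonzero; the branch was dead code.
  assert((entries & (TASK_STACK_INDEX_MASK == 0)) == 0);
  return 0;
}
```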
-// __kmp_init_task_stack: initialize the task stack for the first time
-// after a thread_data structure is created.
-// It should not be necessary to do this again (assuming the stack works).
-//
-// gtid: global thread identifier of calling thread
-// thread_data: thread data for task team thread containing stack
-static void __kmp_init_task_stack(kmp_int32 gtid,
-                                  kmp_thread_data_t *thread_data) {
-  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
-  kmp_stack_block_t *first_block;
-
-  // set up the first block of the stack
-  first_block = &task_stack->ts_first_block;
-  task_stack->ts_top = (kmp_taskdata_t **)first_block;
-  memset((void *)first_block, '\0',
-         TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
-
-  // initialize the stack to be empty
-  task_stack->ts_entries = TASK_STACK_EMPTY;
-  first_block->sb_next = NULL;
-  first_block->sb_prev = NULL;
-}
-
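For readers skimming the deletion: the structure initialized above is a stack of tied-task pointers stored in fixed-size blocks chained into a doubly linked list, with the first block embedded in the per-thread data (which is why ts_top can be seeded by casting the block itself; sb_block is its first member). The real typedefs live in kmp.h; the following self-contained model is inferred from field usage in this file, and the block size and constants are assumptions:

```cpp
#include <cstddef>

// Illustrative stand-ins; the runtime's actual typedefs live in kmp.h.
struct kmp_taskdata; // opaque task descriptor

const std::size_t TASK_STACK_BLOCK_SIZE = 32; // assumed
const std::size_t TASK_STACK_INDEX_MASK = TASK_STACK_BLOCK_SIZE - 1;
const std::size_t TASK_STACK_EMPTY = 0;

struct kmp_stack_block {
  kmp_taskdata *sb_block[TASK_STACK_BLOCK_SIZE]; // slots for this block
  kmp_stack_block *sb_next;                      // toward newer blocks
  kmp_stack_block *sb_prev;                      // toward older blocks
};

struct kmp_task_stack {
  kmp_stack_block ts_first_block; // embedded, so never heap-freed
  kmp_taskdata **ts_top;          // next free slot (one past the top entry)
  std::size_t ts_entries;         // total entries across all blocks
};
```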
-// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
-//
-// gtid: global thread identifier for calling thread
-// thread_data: thread info for thread containing stack
-static void __kmp_free_task_stack(kmp_int32 gtid,
-                                  kmp_thread_data_t *thread_data) {
-  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
-  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
-
-  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
-  // free from the second block of the stack
-  while (stack_block != NULL) {
-    kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
-
-    stack_block->sb_next = NULL;
-    stack_block->sb_prev = NULL;
-    if (stack_block != &task_stack->ts_first_block) {
-      __kmp_thread_free(thread,
-                        stack_block); // free the block, if not the first
-    }
-    stack_block = next_block;
-  }
-  // initialize the stack to be empty
-  task_stack->ts_entries = 0;
-  task_stack->ts_top = NULL;
-}
-
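A further sign this #ifdef branch had bit-rotted: __kmp_thread_free(thread, stack_block) references a variable named thread that is declared nowhere in the function, whose only parameters are gtid and thread_data, so the file could not have compiled with BUILD_TIED_TASK_STACK defined. On the illustrative types above, a corrected sweep would look roughly like this (std::free stands in for the runtime's thread-aware deallocator):

```cpp
#include <cstdlib>

// Sketch only: free every heap-allocated overflow block while keeping
// the embedded first block, then reset the stack to its empty state.
void free_task_stack_sketch(kmp_task_stack *s) {
  kmp_stack_block *b = s->ts_first_block.sb_next; // skip the embedded block
  while (b != nullptr) {
    kmp_stack_block *next = b->sb_next;
    std::free(b); // real code would use __kmp_thread_free(thread, b)
    b = next;
  }
  s->ts_first_block.sb_next = nullptr;
  s->ts_entries = TASK_STACK_EMPTY;
  s->ts_top = nullptr;
}
```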
-// __kmp_push_task_stack: Push the tied task onto the task stack.
-// Grow the stack if necessary by allocating another block.
-//
-// gtid: global thread identifier for calling thread
-// thread: thread info for thread containing stack
-// tied_task: the task to push on the stack
-static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
-                                  kmp_taskdata_t *tied_task) {
-  // GEH - need to consider what to do if tt_threads_data not allocated yet
-  kmp_thread_data_t *thread_data =
-      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
-  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
-
-  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
-    return; // Don't push anything on stack if team or team tasks are serialized
-  }
-
-  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
-  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
-
-  KA_TRACE(20,
-           ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
-            gtid, thread, tied_task));
-  // Store entry
-  *(task_stack->ts_top) = tied_task;
-
-  // Do bookkeeping for next push
-  task_stack->ts_top++;
-  task_stack->ts_entries++;
-
-  if (task_stack->ts_entries & TASK_STACK_INDEX_MASK == 0) {
-    // Find beginning of this task block
-    kmp_stack_block_t *stack_block =
-        (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
-
-    // Check if we already have a block
-    if (stack_block->sb_next !=
-        NULL) { // reset ts_top to beginning of next block
-      task_stack->ts_top = &stack_block->sb_next->sb_block[0];
-    } else { // Alloc new block and link it up
-      kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
-          thread, sizeof(kmp_stack_block_t));
-
-      task_stack->ts_top = &new_block->sb_block[0];
-      stack_block->sb_next = new_block;
-      new_block->sb_prev = stack_block;
-      new_block->sb_next = NULL;
-
-      KA_TRACE(
-          30,
-          ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
-           gtid, tied_task, new_block));
-    }
-  }
-  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
-                tied_task));
-}
-
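The deleted push path grows the stack lazily: once a push fills the current block, it either hops to a next block that an earlier grow left linked in, or callocs and links a fresh one. Restated on the illustrative types, with the boundary test parenthesized as presumably intended:

```cpp
#include <cstdlib>

// Sketch of the push path: store, advance, and hop to the next block
// when the just-finished block is full.
void push_task_sketch(kmp_task_stack *s, kmp_taskdata *task) {
  *s->ts_top = task; // store entry
  s->ts_top++;
  s->ts_entries++;

  if ((s->ts_entries & TASK_STACK_INDEX_MASK) == 0) { // block boundary
    // ts_top sits one past the block's slot array; backing up by the
    // block size lands on the block's first slot, which is also the
    // block's own address (sb_block is the first member).
    kmp_stack_block *cur =
        (kmp_stack_block *)(s->ts_top - TASK_STACK_BLOCK_SIZE);
    if (cur->sb_next != nullptr) { // reuse a block kept from an earlier grow
      s->ts_top = &cur->sb_next->sb_block[0];
    } else { // allocate and link a new block
      kmp_stack_block *nb =
          (kmp_stack_block *)std::calloc(1, sizeof(kmp_stack_block));
      s->ts_top = &nb->sb_block[0];
      cur->sb_next = nb;
      nb->sb_prev = cur;
    }
  }
}
```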
-// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
-// the task, just check to make sure it matches the ending task passed in.
-//
-// gtid: global thread identifier for the calling thread
-// thread: thread info structure containing stack
-// tied_task: the task popped off the stack
-// ending_task: the task that is ending (should match popped task)
-static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
-                                 kmp_taskdata_t *ending_task) {
-  // GEH - need to consider what to do if tt_threads_data not allocated yet
-  kmp_thread_data_t *thread_data =
-      &thread->th.th_task_team->tt_threads_data[__kmp_tid_from_gtid(gtid)];
-  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
-  kmp_taskdata_t *tied_task;
-
-  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
-    // Don't pop anything from stack if team or team tasks are serialized
-    return;
-  }
-
-  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
-  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
-
-  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
-                thread));
-
-  // fix up ts_top if we need to pop from previous block
-  if (task_stack->ts_entries & TASK_STACK_INDEX_MASK == 0) {
-    kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
-
-    stack_block = stack_block->sb_prev;
-    task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
-  }
-
-  // finish bookkeeping
-  task_stack->ts_top--;
-  task_stack->ts_entries--;
-
-  tied_task = *(task_stack->ts_top);
-
-  KMP_DEBUG_ASSERT(tied_task != NULL);
-  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
-  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
-
-  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
-                tied_task));
-  return;
-}
-#endif /* BUILD_TIED_TASK_STACK */
-
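Pop mirrors push: when the entry count sits on a block boundary, ts_top first retreats to the end of the previous block, and the usual decrement then lands on the entry to verify against the ending task. (Note also that this function, as written, indexes th_task_team->tt_threads_data without the tt. member access the push path uses, one more artifact of never being built.) A sketch on the same illustrative types:

```cpp
#include <cassert>

// Sketch of the pop path: retreat across a block boundary if needed,
// then check that the popped entry matches the task that is ending.
void pop_task_sketch(kmp_task_stack *s, kmp_taskdata *ending_task) {
  assert(s->ts_entries > 0);

  if ((s->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    // ts_top currently points at the start of the next (possibly cached)
    // block; the last stored entry is at the end of the previous one.
    kmp_stack_block *cur = (kmp_stack_block *)s->ts_top;
    s->ts_top = &cur->sb_prev->sb_block[TASK_STACK_BLOCK_SIZE];
  }

  s->ts_top--;
  s->ts_entries--;
  assert(*s->ts_top == ending_task); // stack discipline must hold
}
```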
// returns 1 if new task is allowed to execute, 0 otherwise
// checks Task Scheduling constraint (if requested) and
// mutexinoutset dependencies if any
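With the stack machinery gone, tied-task ordering is enforced where it always was in practice: in the scheduling-constraint check this comment introduces. As a rough, hypothetical sketch of the shape of such a check (field names are stand-ins; the runtime's actual __kmp_task_is_allowed also covers the mutexinoutset dependencies and differs in detail), a tied candidate is schedulable on a thread only if the thread's deepest in-flight tied task is among the candidate's ancestors:

```cpp
// Hypothetical sketch of a tied-task scheduling-constraint check.
struct task_node {
  const task_node *parent;    // enclosing task
  const task_node *last_tied; // deepest suspended tied task on this thread
};

// A candidate may run here only if the thread's deepest suspended tied
// task is one of the candidate's ancestors (or there is none).
bool tsc_allows(const task_node *candidate, const task_node *current) {
  const task_node *anchor = current->last_tied;
  if (anchor == nullptr)
    return true;
  for (const task_node *a = candidate->parent; a != nullptr; a = a->parent)
    if (a == anchor)
      return true;
  return false;
}
```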
@@ -683,13 +468,6 @@ static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
  current_task->td_flags.executing = 0;

-  // Add task to stack if tied
-#ifdef BUILD_TIED_TASK_STACK
-  if (taskdata->td_flags.tiedness == TASK_TIED) {
-    __kmp_push_task_stack(gtid, thread, taskdata);
-  }
-#endif /* BUILD_TIED_TASK_STACK */
-
  // mark starting task as executing and as current task
  thread->th.th_current_task = taskdata;
@@ -1041,13 +819,6 @@ static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
  is_taskgraph = taskdata->is_taskgraph;
#endif

-  // Pop task from stack if tied
-#ifdef BUILD_TIED_TASK_STACK
-  if (taskdata->td_flags.tiedness == TASK_TIED) {
-    __kmp_pop_task_stack(gtid, thread, taskdata);
-  }
-#endif /* BUILD_TIED_TASK_STACK */
-
  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
    // untied task needs to check the counter so that the task structure is not
    // freed prematurely
@@ -3786,13 +3557,6 @@ static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
    thread_data->td.td_deque = NULL;
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
  }
-
-#ifdef BUILD_TIED_TASK_STACK
-  // GEH: Figure out what to do here for td_susp_tied_tasks
-  if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
-    __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
-  }
-#endif // BUILD_TIED_TASK_STACK
}

// __kmp_realloc_task_threads_data:
@@ -3849,14 +3613,7 @@ static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
      KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
                   (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));

-#ifdef BUILD_TIED_TASK_STACK
-      // GEH: Figure out if this is the right thing to do
-      for (i = maxthreads; i < nthreads; i++) {
-        kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
-        __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
-      }
-#endif // BUILD_TIED_TASK_STACK
-      // Install the new data and free the old data
+      // Install the new data and free the old data
      (*threads_data_p) = new_data;
      __kmp_free(old_data);
    } else {
@@ -3868,13 +3625,6 @@ static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
      // kmp_reap_task_team( ).
      *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
          nthreads * sizeof(kmp_thread_data_t));
-#ifdef BUILD_TIED_TASK_STACK
-      // GEH: Figure out if this is the right thing to do
-      for (i = 0; i < nthreads; i++) {
-        kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
-        __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
-      }
-#endif // BUILD_TIED_TASK_STACK
    }
    task_team->tt.tt_max_threads = nthreads;
  } else {