2 Copyright (c) 2005-2019 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
17 #ifndef __TBB_task_group_H
18 #define __TBB_task_group_H
20 #define __TBB_task_group_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
24 #include "tbb_exception.h"
25 #include "internal/_template_helpers.h"
26 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
27 #include "task_arena.h"
30 #if __TBB_TASK_GROUP_CONTEXT
35 template<typename F> class task_handle_task;
39 class structured_task_group;
40 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
41 class isolated_task_group;
// Pairs a user callable with a "scheduled" flag so a task_group can reject
// scheduling the same handle twice (eid_invalid_multiple_scheduling below).
// NOTE(review): this extract elides interior lines — the template header, the
// my_func/my_state member declarations, access specifiers, and the closing
// brace are not visible here.
45 class task_handle : internal::no_assign {
46 template<typename _F> friend class internal::task_handle_task;
47 friend class task_group;
48 friend class structured_task_group;
49 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
50 friend class isolated_task_group;
// Bit set in my_state once the handle has been handed to a scheduler.
53 static const intptr_t scheduled = 0x1;
// Marks the handle as scheduled; throws if it was already scheduled.
58 void mark_scheduled () {
59 // The check here is intentionally lax to avoid the impact of interlocked operation
// (plain read-modify-write, not atomic — a race here only weakens the
// double-scheduling diagnostic, per the comment above).
60 if ( my_state & scheduled )
61 internal::throw_exception( internal::eid_invalid_multiple_scheduling );
62 my_state |= scheduled;
// Copy-construct from the callable; state starts unscheduled (0).
65 task_handle( const F& f ) : my_func(f), my_state(0) {}
66 #if __TBB_CPP11_RVALUE_REF_PRESENT
// Move-construct from the callable when rvalue references are available.
67 task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {}
// Invokes the stored callable.
70 void operator() () const { my_func(); }
// Result of task_group wait operations. Enumerator lines are elided from this
// extract; `not_complete` is referenced later in this file (isolated_task_group
// wait paths) — the full enumerator list must be confirmed against the
// unelided header.
73 enum task_group_status {
// Adapter that lets a task_handle run as a tbb::task. Construction marks the
// handle scheduled, so double-submission is caught at spawn time.
// NOTE(review): the execute() body and closing brace are elided from this
// extract.
82 class task_handle_task : public task {
// Non-owning reference: the handle must outlive this task.
83 task_handle<F>& my_handle;
84 task* execute() __TBB_override {
// mark_scheduled() throws before the task is created if h was already used.
89 task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
// Common machinery for task_group / structured_task_group: owns a root
// empty_task (my_root) whose reference count tracks outstanding work, plus a
// bound task_group_context (my_context) for cancellation/exception propagation.
// NOTE(review): many interior lines (my_root declaration, some closing braces,
// #else/#endif lines, parts of wait()) are elided from this extract.
92 class task_group_base : internal::no_copy {
// RAII guard: holds +1 on a task's ref count for the guard's lifetime so
// concurrent waiters see the group as "work in progress".
93 class ref_count_guard : internal::no_copy {
96 ref_count_guard(task& t) : my_task(t) {
97 my_task.increment_ref_count();
// (destructor, elided here) releases the reference:
100 my_task.decrement_ref_count();
105 task_group_context my_context;
// Runs f on the calling thread, then waits for the whole group; exceptions
// from f are routed into the context rather than escaping immediately.
108 task_group_status internal_run_and_wait( F& f ) {
110 if ( !my_context.is_group_execution_cancelled() ) {
111 // We need to increase the reference count of the root task to notify waiters that
112 // this task group has some work in progress.
113 ref_count_guard guard(*my_root);
116 } __TBB_CATCH( ... ) {
// Defer the exception: it is re-thrown to the waiter by the context.
117 my_context.register_pending_exception();
// Allocates Task as an additional child of the root so wait_for_all() on the
// root covers it. Returns a raw task* — ownership passes to the scheduler.
122 template<typename Task, typename F>
123 task* prepare_task( __TBB_FORWARDING_REF(F) f ) {
124 return new( task::allocate_additional_child_of(*my_root) ) Task( internal::forward<F>(f) );
// traits are OR-ed into the default context traits (e.g. concurrent_wait,
// passed by the task_group subclass below).
128 task_group_base( uintptr_t traits = 0 )
129 : my_context(task_group_context::bound, task_group_context::default_traits | traits)
131 my_root = new( task::allocate_root(my_context) ) empty_task;
// ref_count 1 = "no outstanding children"; children add to it.
132 my_root->set_ref_count(1);
// Deliberately noexcept(false): a destroyed-but-never-waited group reports
// eid_missing_wait by throwing (unless we are already unwinding).
135 ~task_group_base() __TBB_NOEXCEPT(false) {
// ref_count > 1 means tasks were spawned but wait() was never called.
136 if( my_root->ref_count() > 1 ) {
137 #if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
138 bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
// (pre-C++17 fallback, #else elided in this extract)
140 bool stack_unwinding_in_progress = std::uncaught_exception();
142 // Always attempt to do proper cleanup to avoid inevitable memory corruption
143 // in case of missing wait (for the sake of better testability & debuggability)
144 if ( !is_canceling() )
// (cancel_group_execution call elided here)
147 my_root->wait_for_all();
148 } __TBB_CATCH (...) {
149 task::destroy(*my_root);
152 task::destroy(*my_root);
// Throwing from a destructor is only safe when no other exception is active.
153 if ( !stack_unwinding_in_progress )
154 internal::throw_exception( internal::eid_missing_wait );
// Normal path: no pending work, just release the root.
157 task::destroy(*my_root);
// Spawns the handle's callable as a child task; h must outlive execution.
162 void run( task_handle<F>& h ) {
163 task::spawn( *prepare_task< internal::task_handle_task<F> >(h) );
// Blocks until all tasks in the group finish; translates context state into
// a task_group_status (elided lines compute the return value).
166 task_group_status wait() {
168 my_root->wait_for_all();
169 } __TBB_CATCH( ... ) {
173 if ( my_context.is_group_execution_cancelled() ) {
174 // TODO: the reset method is not thread-safe. Ensure the correct behavior.
// True while cancellation of this group is in flight.
181 bool is_canceling() {
182 return my_context.is_group_execution_cancelled();
// (cancel(), header elided) requests cancellation of the whole group:
186 my_context.cancel_group_execution();
188 }; // class task_group_base
190 } // namespace internal
// Public task_group: fire-and-forget run() of functors/handles plus blocking
// wait()/run_and_wait(). Uses concurrent_wait context traits so multiple
// threads may wait on the same group.
// NOTE(review): interior lines (template headers, some braces, and the
// internal_run helper this class calls) are elided from this extract —
// internal_run is presumably defined in task_group_base; confirm in the
// unelided header.
192 class task_group : public internal::task_group_base {
194 task_group () : task_group_base( task_group_context::concurrent_wait ) {}
// Spawn a pre-built task_handle (marks it scheduled via task_handle_task).
198 void run( task_handle<F>& h ) {
199 internal_run< internal::task_handle_task<F> >( h );
202 using task_group_base::run;
205 #if __TBB_CPP11_RVALUE_REF_PRESENT
// Rvalue overload: strip<F> removes reference/cv so the functor is stored by
// value inside the spawned function_task.
208 task::spawn( *prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f)) );
// Lvalue/copy overload used when rvalue refs are unavailable (or for const&).
212 void run(const F& f) {
213 task::spawn( *prepare_task< internal::function_task<F> >(f) );
// Runs f on the calling thread and waits for the whole group.
218 task_group_status run_and_wait( const F& f ) {
219 return internal_run_and_wait<const F>( f );
222 // TODO: add task_handle rvalues support
224 task_group_status run_and_wait( task_handle<F>& h ) {
226 return internal_run_and_wait< task_handle<F> >( h );
228 }; // class task_group
// Deprecated restricted variant of task_group (marked __TBB_DEPRECATED).
// Unlike task_group, its wait() resets the root's ref count so the group can
// be reused after a wait.
// NOTE(review): interior lines (template headers, return statement of wait())
// are elided from this extract.
230 class __TBB_DEPRECATED structured_task_group : public internal::task_group_base {
232 // TODO: add task_handle rvalues support
234 task_group_status run_and_wait ( task_handle<F>& h ) {
236 return internal_run_and_wait< task_handle<F> >( h );
239 task_group_status wait() {
240 task_group_status res = task_group_base::wait();
// Re-arm the root (ref_count 1 = idle) so the group is reusable.
241 my_root->set_ref_count(1);
244 }; // class structured_task_group
246 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
248 using interface7::internal::delegate_base;
249 using interface7::internal::isolate_within_arena;
// Delegate functors handed to isolate_within_arena so that spawn/wait/
// run_and_wait execute inside a task-isolation region (keyed on the
// isolated_task_group's address, see this_isolation() below).
// NOTE(review): member declarations, access specifiers, and closing braces
// are elided from this extract.
251 class spawn_delegate : public delegate_base {
253 void operator()() const __TBB_override {
254 task::spawn(*task_to_spawn);
// Non-owning pointer: the task was allocated by prepare_task at the call site.
257 spawn_delegate(task* a_task) : task_to_spawn(a_task) {}
// Waits on a task_group and reports the result through the bound status ref
// (the operator() body that performs the wait is elided here).
260 class wait_delegate : public delegate_base {
261 void operator()() const __TBB_override {
// Out-parameter: written inside the isolation region, read by the caller.
266 task_group_status& status;
268 wait_delegate(task_group& a_group, task_group_status& tgs)
269 : tg(a_group), status(tgs) {}
// Extends wait_delegate to also run a functor (run_and_wait) in isolation.
273 class run_wait_delegate : public wait_delegate {
275 void operator()() const __TBB_override {
276 status = tg.run_and_wait( func );
279 run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
280 : wait_delegate(a_group, tgs), func(a_func) {}
282 } // namespace internal
// task_group whose spawn/wait operations all run under task isolation
// (isolate_within_arena), preventing tasks from outside the group being
// stolen into its wait loops. Preview feature, guarded above by
// TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION.
// NOTE(review): template headers, closing braces/parens, and return
// statements are elided from this extract.
284 class isolated_task_group : public task_group {
// The isolation tag is simply this object's address — unique per group.
285 intptr_t this_isolation() {
286 return reinterpret_cast<intptr_t>(this);
289 isolated_task_group () : task_group() {}
291 #if __TBB_CPP11_RVALUE_REF_PRESENT
// Rvalue run(): build the task, then spawn it via delegate inside isolation.
294 internal::spawn_delegate sd(
295 prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f))
297 internal::isolate_within_arena( sd, this_isolation() );
// Copy overload of run(), same isolation pattern.
301 void run(const F& f) {
302 internal::spawn_delegate sd( prepare_task< internal::function_task<F> >(f) );
303 internal::isolate_within_arena( sd, this_isolation() );
// run_and_wait(functor): delegate writes the status inside the isolation
// region; `result` is initialized to not_complete as a sentinel.
308 task_group_status run_and_wait( const F& f ) {
309 task_group_status result = not_complete;
310 internal::run_wait_delegate< const F > rwd( *this, f, result );
311 internal::isolate_within_arena( rwd, this_isolation() );
312 __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
316 // TODO: add task_handle rvalues support
// run(task_handle): same as task_group::run but spawned under isolation.
318 void run( task_handle<F>& h ) {
319 internal::spawn_delegate sd( prepare_task< internal::task_handle_task<F> >(h) );
320 internal::isolate_within_arena( sd, this_isolation() );
324 task_group_status run_and_wait ( task_handle<F>& h ) {
325 task_group_status result = not_complete;
326 internal::run_wait_delegate< task_handle<F> > rwd( *this, h, result );
327 internal::isolate_within_arena( rwd, this_isolation() );
328 __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
// wait(): plain wait_delegate run under isolation; same sentinel check.
332 task_group_status wait() {
333 task_group_status result = not_complete;
334 internal::wait_delegate wd( *this, result );
335 internal::isolate_within_arena( wd, this_isolation() );
336 __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
339 }; // class isolated_task_group
340 #endif // TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
// Free function: true if the task group the calling task belongs to is being
// cancelled. Delegates to the innermost task's cancellation flag.
// NOTE(review): the closing brace (and any `inline` specifier on the elided
// preceding line) is not visible in this extract.
343 bool is_current_task_group_canceling() {
344 return task::self().is_cancelled();
// Factory helpers that wrap a callable in a task_handle. The rvalue overload
// strips reference/cv qualifiers so the handle owns the functor by value.
// NOTE(review): template headers and closing braces are elided from this
// extract; only the rvalue overload is guarded by the C++11 check here — the
// const& overload at line 354 appears to sit past the #endif, but the elided
// lines would need to confirm that.
347 #if __TBB_CPP11_RVALUE_REF_PRESENT
349 task_handle< typename internal::strip<F>::type > make_task( F&& f ) {
350 return task_handle< typename internal::strip<F>::type >( std::forward<F>(f) );
// Copy overload used when rvalue references are unavailable.
354 task_handle<F> make_task( const F& f ) {
355 return task_handle<F>( f );
357 #endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
361 #endif /* __TBB_TASK_GROUP_CONTEXT */
363 #include "internal/_warning_suppress_disable_notice.h"
364 #undef __TBB_task_group_H_include_area
366 #endif /* __TBB_task_group_H */