2 Copyright (c) 2005-2019 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
21 #include "scheduler.h"
25 #include "tbb/task_scheduler_init.h"
27 #include "dynamic_link.h"
//------------------------------------------------------------------------
// governor
//------------------------------------------------------------------------
#if __TBB_SURVIVE_THREAD_SWITCH
// Support for interoperability with Intel(R) Cilk(TM) Plus.
#if _WIN32
#define CILKLIB_NAME "cilkrts20.dll"
#else
#define CILKLIB_NAME "libcilkrts.so"
#endif

//! Handler for interoperation with cilkrts library.
static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
                                                 struct __cilk_tbb_stack_op_thunk o);

//! Table describing how to link the handlers.
static const dynamic_link_descriptor CilkLinkTable[] = {
    DLD_NOWEAK(__cilkrts_watch_stack, watch_stack_handler)
};

//! Guards the one-time attempt to link against the cilkrts library.
static atomic<do_once_state> cilkrts_load_state;

//! Try to bind watch_stack_handler to __cilkrts_watch_stack in the already
//! loaded cilkrts library (DYNAMIC_LINK_GLOBAL: do not load it ourselves).
bool initialize_cilk_interop() {
    // Pinning can fail. This is a normal situation, and means that the current
    // thread does not use cilkrts and consequently does not need interop.
    return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL );
}
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
64 tbb_server* make_private_server( tbb_client& client );
67 void governor::acquire_resources () {
69 int status = theTLS.create(auto_terminate);
71 int status = theTLS.create();
74 handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
75 is_speculation_enabled = cpu_has_speculation();
76 is_rethrow_broken = gcc_rethrow_exception_broken();
79 void governor::release_resources () {
80 theRMLServerFactory.close();
81 destroy_process_mask();
83 if( __TBB_InitOnce::initialization_done() && theTLS.get() )
84 runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
86 int status = theTLS.destroy();
88 runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status));
92 rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
93 rml::tbb_server* server = NULL;
94 if( !UsePrivateRML ) {
95 ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
96 if( status != ::rml::factory::st_success ) {
98 runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
102 __TBB_ASSERT( UsePrivateRML, NULL );
103 server = rml::make_private_server( client );
105 __TBB_ASSERT( server, "Failed to create RML server" );
110 uintptr_t governor::tls_value_of( generic_scheduler* s ) {
111 __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
112 // LSB marks the scheduler initialized with arena
113 return uintptr_t(s) | uintptr_t((s && (s->my_arena || s->is_worker()))? 1 : 0);
116 void governor::assume_scheduler( generic_scheduler* s ) {
117 theTLS.set( tls_value_of(s) );
120 bool governor::is_set( generic_scheduler* s ) {
121 return theTLS.get() == tls_value_of(s);
124 void governor::sign_on(generic_scheduler* s) {
125 __TBB_ASSERT( is_set(NULL) && s, NULL );
126 assume_scheduler( s );
127 #if __TBB_SURVIVE_THREAD_SWITCH
128 if( watch_stack_handler ) {
129 __cilk_tbb_stack_op_thunk o;
130 o.routine = &stack_op_handler;
132 if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
133 // Failed to register with cilkrts, make sure we are clean
134 s->my_cilk_unwatch_thunk.routine = NULL;
138 s->my_cilk_state = generic_scheduler::cs_running;
139 #endif /* TBB_USE_ASSERT */
141 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
142 __TBB_ASSERT( is_set(s), NULL );
145 void governor::sign_off(generic_scheduler* s) {
146 suppress_unused_warning(s);
147 __TBB_ASSERT( is_set(s), "attempt to unregister a wrong scheduler instance" );
148 assume_scheduler(NULL);
149 #if __TBB_SURVIVE_THREAD_SWITCH
150 __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
152 (*ut.routine)(ut.data);
153 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
156 void governor::one_time_init() {
157 if( !__TBB_InitOnce::initialization_done() )
158 DoOneTimeInitializations();
159 #if __TBB_SURVIVE_THREAD_SWITCH
160 atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );
161 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
164 generic_scheduler* governor::init_scheduler_weak() {
166 __TBB_ASSERT( is_set(NULL), "TLS contains a scheduler?" );
167 generic_scheduler* s = generic_scheduler::create_master( NULL ); // without arena
168 s->my_auto_initialized = true;
172 generic_scheduler* governor::init_scheduler( int num_threads, stack_size_type stack_size, bool auto_init ) {
174 if ( uintptr_t v = theTLS.get() ) {
175 generic_scheduler* s = tls_scheduler_of( v );
176 if ( (v&1) == 0 ) { // TLS holds scheduler instance without arena
177 __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
178 __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
179 __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
180 s->attach_arena( market::create_arena( default_num_threads(), 1, 0 ), 0, /*is_master*/true );
181 __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
182 s->my_arena_slot->my_scheduler = s;
183 #if __TBB_TASK_GROUP_CONTEXT
184 s->my_arena->my_default_ctx = s->default_context(); // it also transfers implied ownership
186 // Mark the scheduler as fully initialized
187 assume_scheduler( s );
189 // Increment refcount only for explicit instances of task_scheduler_init.
190 if ( !auto_init ) s->my_ref_count += 1;
191 __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
194 // Create new scheduler instance with arena
195 if( num_threads == task_scheduler_init::automatic )
196 num_threads = default_num_threads();
197 arena *a = market::create_arena( num_threads, 1, stack_size );
198 generic_scheduler* s = generic_scheduler::create_master( a );
199 __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
200 __TBB_ASSERT( is_set(s), NULL );
201 s->my_auto_initialized = auto_init;
205 bool governor::terminate_scheduler( generic_scheduler* s, bool blocking ) {
207 __TBB_ASSERT( is_set(s), "Attempt to terminate non-local scheduler instance" );
208 if (0 == --(s->my_ref_count)) {
209 ok = s->cleanup_master( blocking );
210 __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
215 void governor::auto_terminate(void* arg){
216 generic_scheduler* s = tls_scheduler_of( uintptr_t(arg) ); // arg is equivalent to theTLS.get()
217 if( s && s->my_auto_initialized ) {
218 if( !--(s->my_ref_count) ) {
219 // If the TLS slot is already cleared by OS or underlying concurrency
220 // runtime, restore its value.
223 s->cleanup_master( /*blocking_terminate=*/false );
224 __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
229 void governor::print_version_info () {
231 PrintExtraVersionInfo( "RML", "private" );
233 PrintExtraVersionInfo( "RML", "shared" );
234 theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
236 #if __TBB_SURVIVE_THREAD_SWITCH
237 if( watch_stack_handler )
238 PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
239 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
242 void governor::initialize_rml_factory () {
243 ::rml::factory::status_type res = theRMLServerFactory.open();
244 UsePrivateRML = res != ::rml::factory::st_success;
#if __TBB_SURVIVE_THREAD_SWITCH
//! Callback invoked by cilkrts when a thread adopts, orphans, or releases a stack.
/** Keeps the thread's TLS slot in sync with the scheduler bound to the stack.
    data is the generic_scheduler registered in sign_on. Returns 0 (success). */
__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
    __TBB_ASSERT(data,NULL);
    generic_scheduler* s = static_cast<generic_scheduler*>(data);
#if TBB_USE_ASSERT
    void* current = local_scheduler_if_initialized();
#if _WIN32||_WIN64
    uintptr_t thread_id = GetCurrentThreadId();
#else
    uintptr_t thread_id = uintptr_t(pthread_self());
#endif
#endif /* TBB_USE_ASSERT */
    switch( op ) {
        case CILK_TBB_STACK_ADOPT: {
            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
#if TBB_USE_ASSERT
            if( current==s )
                runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
            s->my_cilk_state = generic_scheduler::cs_running;
#endif /* TBB_USE_ASSERT */
            assume_scheduler( s );
            break;
        }
        case CILK_TBB_STACK_ORPHAN: {
            __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
#if TBB_USE_ASSERT
            s->my_cilk_state = generic_scheduler::cs_limbo;
#endif /* TBB_USE_ASSERT */
            assume_scheduler(NULL);
            break;
        }
        case CILK_TBB_STACK_RELEASE: {
            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
#if TBB_USE_ASSERT
            s->my_cilk_state = generic_scheduler::cs_freed;
#endif /* TBB_USE_ASSERT */
            // After a release cilkrts must not call the unwatch thunk again.
            s->my_cilk_unwatch_thunk.routine = NULL;
            assume_scheduler(NULL);
            break;
        }
        default:
            __TBB_ASSERT(0, "invalid op");
    }
    return 0;
}
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
296 } // namespace internal
298 //------------------------------------------------------------------------
299 // task_scheduler_init
300 //------------------------------------------------------------------------
302 using namespace internal;
304 /** Left out-of-line for the sake of the backward binary compatibility **/
305 void task_scheduler_init::initialize( int number_of_threads ) {
306 initialize( number_of_threads, 0 );
309 void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
310 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
311 uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
313 thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
314 if( number_of_threads!=deferred ) {
315 __TBB_ASSERT_RELEASE( !my_scheduler, "task_scheduler_init already initialized" );
316 __TBB_ASSERT_RELEASE( number_of_threads==automatic || number_of_threads > 0,
317 "number_of_threads for task_scheduler_init must be automatic or positive" );
318 internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
319 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
320 if ( s->master_outermost_level() ) {
321 uintptr_t &vt = s->default_context()->my_version_and_traits;
322 uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
323 vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
324 : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
325 // Use least significant bit of the scheduler pointer to store previous mode.
326 // This is necessary when components compiled with different compilers and/or
327 // TBB versions initialize the
328 my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
331 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
334 __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
338 bool task_scheduler_init::internal_terminate( bool blocking ) {
339 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
340 uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
341 my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
342 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
343 generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
345 __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
346 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
347 if ( s->master_outermost_level() ) {
348 uintptr_t &vt = s->default_context()->my_version_and_traits;
349 vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
350 : vt & ~task_group_context::exact_exception;
352 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
353 return governor::terminate_scheduler(s, blocking);
356 void task_scheduler_init::terminate() {
357 internal_terminate(/*blocking_terminate=*/false);
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Blocking termination; optionally throws if workers cannot be joined.
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    bool ok = internal_terminate( /*blocking_terminate=*/true );
#if TBB_USE_EXCEPTIONS
    if( throwing && !ok )
        throw_exception( eid_blocking_thread_join_impossible );
#else
    suppress_unused_warning( throwing );
#endif
    return ok;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
373 int task_scheduler_init::default_num_threads() {
374 return governor::default_num_threads();