/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/
#ifndef __TBB_partitioner_H
#define __TBB_partitioner_H

#define __TBB_partitioner_H_include_area
#include "internal/_warning_suppress_enable_notice.h"
#ifndef __TBB_INITIAL_CHUNKS
// initial task divisions per thread
#define __TBB_INITIAL_CHUNKS 2
#endif
#ifndef __TBB_RANGE_POOL_CAPACITY
// maximum number of elements in the range pool
#define __TBB_RANGE_POOL_CAPACITY 8
#endif
#ifndef __TBB_INIT_DEPTH
// initial value for the depth of the range pool
#define __TBB_INIT_DEPTH 5
#endif
#ifndef __TBB_DEMAND_DEPTH_ADD
// when imbalance is found, the range splits this many times more
#define __TBB_DEMAND_DEPTH_ADD 1
#endif
#ifndef __TBB_STATIC_THRESHOLD
// necessary number of clocks for the work to be distributed among all tasks
#define __TBB_STATIC_THRESHOLD 40000
#endif
#if __TBB_DEFINE_MIC
#define __TBB_NONUNIFORM_TASK_CREATION 1
#ifdef __TBB_time_stamp
#define __TBB_USE_MACHINE_TIME_STAMPS 1
#define __TBB_task_duration() __TBB_STATIC_THRESHOLD
#endif // __TBB_time_stamp
#endif // __TBB_DEFINE_MIC
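
// Note (illustrative, not a supported public interface): the tuning knobs above are
// guarded by #ifndef, so a translation unit may override them by defining the macro
// before this header is first pulled in, e.g.:
//
//     #define __TBB_RANGE_POOL_CAPACITY 16   // a larger per-task range pool
//     #include "tbb/parallel_for.h"          // includes this header transitively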
#include "task.h"
#include "task_arena.h"
#include "aligned_space.h"
#include "atomic.h"
#include "internal/_template_helpers.h"
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244)
#endif

namespace tbb {

class auto_partitioner;
class simple_partitioner;
class static_partitioner;
class affinity_partitioner;

namespace interface9 {
    namespace internal {
        class affinity_partition_type;
    }
}
namespace internal { //< @cond INTERNAL
size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor();

//! Defines the entry point for the affinity partitioner into the tbb run-time library.
class affinity_partitioner_base_v3: no_copy {
    friend class tbb::affinity_partitioner;
    friend class tbb::interface9::internal::affinity_partition_type;
    //! Array that remembers affinities of tree positions to affinity_id.
    /** NULL if my_size==0. */
    affinity_id* my_array;
    //! Number of elements in my_array.
    size_t my_size;
    //! Zeros the fields.
    affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {}
    //! Deallocates my_array.
    ~affinity_partitioner_base_v3() {resize(0);}
    //! Resizes my_array.
    /** Retains values if the resulting size is the same. */
    void __TBB_EXPORTED_METHOD resize( unsigned factor );
};
//! Provides backward-compatible methods for partition objects without affinity.
class partition_type_base {
public:
    void set_affinity( task & ) {}
    void note_affinity( task::affinity_id ) {}
    task* continue_after_execute_range() {return NULL;}
    bool decide_whether_to_delay() {return false;}
    void spawn_or_delay( bool, task& b ) {
        task::spawn(b);
    }
};

template<typename Range, typename Body, typename Partitioner> class start_scan;

} //< namespace internal @endcond
namespace serial {
namespace interface9 {
template<typename Range, typename Body, typename Partitioner> class start_for;
}
}

namespace interface9 {
//! @cond INTERNAL
namespace internal {
using namespace tbb::internal;
template<typename Range, typename Body, typename Partitioner> class start_for;
template<typename Range, typename Body, typename Partitioner> class start_reduce;
template<typename Range, typename Body, typename Partitioner> class start_deterministic_reduce;
//! Join task node that contains a shared flag for stealing feedback
class flag_task: public task {
public:
    tbb::atomic<bool> my_child_stolen;
    flag_task() { my_child_stolen = false; }
    task* execute() __TBB_override { return NULL; }
    static void mark_task_stolen(task &t) {
        tbb::atomic<bool> &flag = static_cast<flag_task*>(t.parent())->my_child_stolen;
#if TBB_USE_THREADING_TOOLS
        // Threading tools respect the lock prefix but report a false-positive data race for a plain store
        flag.fetch_and_store<release>(true);
#else
        flag = true;
#endif //TBB_USE_THREADING_TOOLS
    }
    static bool is_peer_stolen(task &t) {
        return static_cast<flag_task*>(t.parent())->my_child_stolen;
    }
};
//! Depth is the relative depth of recursive division inside a range pool. Relative depth allows
//! unbounded absolute recursion depth for heavily unbalanced workloads whose ranges are
//! represented by a number that cannot fit into a machine word.
typedef unsigned char depth_t;
//! Range pool stores ranges of type T in a circular buffer with MaxCapacity
template <typename T, depth_t MaxCapacity>
class range_vector {
    depth_t my_head;
    depth_t my_tail;
    depth_t my_size;
    depth_t my_depth[MaxCapacity]; // relative depths of stored ranges
    tbb::aligned_space<T, MaxCapacity> my_pool;

public:
    //! initialize via the first range in the pool
    range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) {
        my_depth[0] = 0;
        new( static_cast<void *>(my_pool.begin()) ) T(elem);//TODO: std::move?
    }
    ~range_vector() {
        while( !empty() ) pop_back();
    }
    bool empty() const { return my_size == 0; }
    depth_t size() const { return my_size; }
    //! Populates the range pool with ranges up to the max depth or while divisible
    //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool, up to two 1/4 pieces
    void split_to_fill(depth_t max_depth) {
        while( my_size < MaxCapacity && is_divisible(max_depth) ) {
            depth_t prev = my_head;
            my_head = (my_head + 1) % MaxCapacity;
            new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move?
            my_pool.begin()[prev].~T(); // instead of assignment
            new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split
            my_depth[my_head] = ++my_depth[prev];
            my_size++;
        }
    }
    void pop_back() {
        __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size");
        my_pool.begin()[my_head].~T();
        my_size--;
        my_head = (my_head + MaxCapacity - 1) % MaxCapacity;
    }
    void pop_front() {
        __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size");
        my_pool.begin()[my_tail].~T();
        my_size--;
        my_tail = (my_tail + 1) % MaxCapacity;
    }
    T& back() {
        __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size");
        return my_pool.begin()[my_head];
    }
    T& front() {
        __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size");
        return my_pool.begin()[my_tail];
    }
    //! similarly to front(), returns the depth of the first range in the pool
    depth_t front_depth() {
        __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size");
        return my_depth[my_tail];
    }
    depth_t back_depth() {
        __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size");
        return my_depth[my_head];
    }
    bool is_divisible(depth_t max_depth) {
        return back_depth() < max_depth && back().is_divisible();
    }
};
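
// Illustration (informal walk-through of the loop above): starting from one stored
// range of depth 0, split_to_fill(2) repeatedly splits the back (head) element until
// the depth cap or the capacity is reached. For a hypothetical blocked_range<int>(0,100)
// with grainsize 1 the pool evolves as (front/tail listed first):
//
//     [0,100) d0                             -- initial state
//     [50,100) d1, [0,50) d1                 -- after the 1st 'inverse' split
//     [50,100) d1, [25,50) d2, [0,25) d2     -- after the 2nd split; d2 == max_depth, stop
//
// The 'inverse' split stores the right piece toward the front, so work_balance()
// below runs back() (the leftmost work) locally first while front(), the largest
// piece, is what gets offered to thieves.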
//! Provides default methods for partition objects and common algorithm blocks.
template <typename Partition>
struct partition_type_base {
    typedef split split_type;
    // decision makers
    void set_affinity( task & ) {}
    void note_affinity( task::affinity_id ) {}
    bool check_being_stolen(task &) { return false; } // part of old should_execute_range()
    bool check_for_demand(task &) { return false; }
    bool is_divisible() { return true; } // part of old should_execute_range()
    depth_t max_depth() { return 0; }
    void align_depth(depth_t) { }
    template <typename Range> split_type get_split() { return split(); }
    Partition& self() { return *static_cast<Partition*>(this); } // CRTP helper

    template<typename StartType, typename Range>
    void work_balance(StartType &start, Range &range) {
        start.run_body( range ); // simple partitioner always goes here
    }

    template<typename StartType, typename Range>
    void execute(StartType &start, Range &range) {
        // The algorithm in a few words ([] denotes calls to decision methods of the partitioner):
        // [If this task is stolen, adjust depth and divisions if necessary, set the flag].
        // If the range is divisible {
        //     Spread the work while [initial divisions are left];
        //     Create a trap task [if necessary].
        // }
        // If not divisible or [max depth is reached], execute; else do the range pool part.
        if ( range.is_divisible() ) {
            if ( self().is_divisible() ) {
                do { // split while both the range and the partition are divisible
                    typename Partition::split_type split_obj = self().template get_split<Range>();
                    start.offer_work( split_obj );
                } while ( range.is_divisible() && self().is_divisible() );
            }
        }
        self().work_balance(start, range);
    }
};
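
// Sketch (hypothetical type, for exposition only): a concrete partition type shadows
// the decision stubs of this CRTP base, and execute() picks the overrides up through
// self(). adaptive_mode below is the real in-tree instance of this pattern:
//
//     struct halving_partition : partition_type_base<halving_partition> {
//         size_t my_divisor;                               // remaining split budget
//         halving_partition() : my_divisor(16) {}
//         halving_partition(halving_partition& src, split) // splitting constructor
//             : my_divisor(src.my_divisor /= 2u) {}
//         bool is_divisible() { return my_divisor > 1; }   // shadows the base stub
//     };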
//! Provides the default splitting strategy for partition objects.
template <typename Partition>
struct adaptive_mode : partition_type_base<Partition> {
    typedef Partition my_partition;
    size_t my_divisor;
    // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves.
    // A task which has only one index must produce the right split without a reserved index, to prevent
    // the index from being overwritten in note_affinity() of the created (right) task.
    // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order).
    static const unsigned factor = 1;
    adaptive_mode() : my_divisor(tbb::internal::get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {}
    adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {}
    /*! Override do_split methods in order to specify the splitting strategy */
    size_t do_split(adaptive_mode &src, split) {
        return src.my_divisor /= 2u;
    }
};
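
// Behavior note: the initial my_divisor is get_initial_auto_partitioner_divisor()/4
// (a quantity presumably tied to the number of worker threads) scaled by the derived
// partitioner's 'factor', and each splitting constructor halves it on both sides:
//
//     // before the split:  src.my_divisor == D
//     // after adaptive_mode(src, split()): src and the new object each hold D/2
//
// so roughly log2(D) levels of eager splitting are granted before the range-pool and
// demand machinery below take over.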
//! A helper class to create a proportional_split object for a given type of Range.
/** If the Range has a static boolean constant 'is_splittable_in_proportion' set to 'true',
    the created object splits a provided value in an implementation-defined proportion;
    otherwise it represents an equal-size split. */
// TODO: check if this helper can be a nested class of proportional_mode.
template <typename Range, typename = void>
struct proportion_helper {
    static proportional_split get_split(size_t) { return proportional_split(1,1); }
};
template <typename Range>
struct proportion_helper<Range, typename enable_if<Range::is_splittable_in_proportion, void>::type> {
    static proportional_split get_split(size_t n) {
#if __TBB_NONUNIFORM_TASK_CREATION
        size_t right = (n + 2) / 3;
#else
        size_t right = n / 2;
#endif
        size_t left = n - right;
        return proportional_split(left, right);
    }
};
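
// Opt-in sketch (hypothetical range type): enable_if above detects a static constant
// on the Range, so a user-defined range advertises proportional splittability the same
// way the library ranges do:
//
//     struct my_range {
//         static const bool is_splittable_in_proportion = true;
//         bool is_divisible() const;
//         my_range( my_range& r, split );                  // equal split
//         my_range( my_range& r, proportional_split& p );  // split ~ p.left() : p.right()
//         // ... iteration interface ...
//     };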
//! Provides a proportional splitting strategy for partition objects
template <typename Partition>
struct proportional_mode : adaptive_mode<Partition> {
    typedef Partition my_partition;
    using partition_type_base<Partition>::self; // CRTP helper to get access to derived classes

    proportional_mode() : adaptive_mode<Partition>() {}
    proportional_mode(proportional_mode &src, split) : adaptive_mode<Partition>(src, split()) {}
    proportional_mode(proportional_mode &src, const proportional_split& split_obj) { self().my_divisor = do_split(src, split_obj); }
    size_t do_split(proportional_mode &src, const proportional_split& split_obj) {
#if __TBB_ENABLE_RANGE_FEEDBACK
        size_t portion = size_t(float(src.my_divisor) * float(split_obj.right())
                                / float(split_obj.left() + split_obj.right()) + 0.5f);
#else
        size_t portion = split_obj.right() * my_partition::factor;
#endif
        portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor);
#if __TBB_ENABLE_RANGE_FEEDBACK
        /** Corner case handling */
        if (!portion)
            portion = my_partition::factor;
        else if (portion == src.my_divisor)
            portion = src.my_divisor - my_partition::factor;
#endif
        src.my_divisor -= portion;
        return portion;
    }
    bool is_divisible() { // part of old should_execute_range()
        return self().my_divisor > my_partition::factor;
    }
    template <typename Range>
    proportional_split get_split() {
        // Create a proportion for the number of threads expected to handle "this" subrange
        return proportion_helper<Range>::get_split( self().my_divisor / my_partition::factor );
    }
};
static size_t get_initial_partition_head() {
    int current_index = tbb::this_task_arena::current_thread_index();
    if (current_index == tbb::task_arena::not_initialized)
        current_index = 0;
    return size_t(current_index);
}
//! Provides default linear indexing of the partitioner's sequence
template <typename Partition>
struct linear_affinity_mode : proportional_mode<Partition> {
    size_t my_head;
    size_t my_max_affinity;
    using proportional_mode<Partition>::self;
    linear_affinity_mode() : proportional_mode<Partition>(), my_head(get_initial_partition_head()),
                             my_max_affinity(self().my_divisor) {}
    linear_affinity_mode(linear_affinity_mode &src, split) : proportional_mode<Partition>(src, split())
        , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
    linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode<Partition>(src, split_obj)
        , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
    void set_affinity( task &t ) {
        if( self().my_divisor )
            t.set_affinity( affinity_id(my_head) + 1 );
    }
};
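
// Indexing sketch: each task owns my_divisor consecutive slots starting at my_head
// (modulo my_max_affinity). Because do_split() has already deducted the child's share
// from src.my_divisor when the constructors above run, the child starts right after
// the slots the parent keeps:
//
//     parent: my_head == 0, my_divisor == 8
//     after an equal split: parent keeps my_head 0 with my_divisor 4,
//                           child gets my_head (0 + 4) % my_max_affinity == 4, my_divisor 4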
/*! Determines the work-balance phase, implementing splitting & stealing actions */
template <typename Mode>
struct dynamic_grainsize_mode : Mode {
    using Mode::self;
#ifdef __TBB_USE_MACHINE_TIME_STAMPS
    tbb::internal::machine_tsc_t my_dst_tsc;
#endif
    enum {
        begin = 0,
        run,
        pass
    } my_delay;
    depth_t my_max_depth;
    static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;
    dynamic_grainsize_mode(): Mode()
#ifdef __TBB_USE_MACHINE_TIME_STAMPS
        , my_dst_tsc(0)
#endif
        , my_delay(begin)
        , my_max_depth(__TBB_INIT_DEPTH) {}
    dynamic_grainsize_mode(dynamic_grainsize_mode& p, split)
        : Mode(p, split())
#ifdef __TBB_USE_MACHINE_TIME_STAMPS
        , my_dst_tsc(0)
#endif
        , my_delay(pass)
        , my_max_depth(p.my_max_depth) {}
    dynamic_grainsize_mode(dynamic_grainsize_mode& p, const proportional_split& split_obj)
        : Mode(p, split_obj)
#ifdef __TBB_USE_MACHINE_TIME_STAMPS
        , my_dst_tsc(0)
#endif
        , my_delay(begin)
        , my_max_depth(p.my_max_depth) {}
    bool check_being_stolen(task &t) { // part of old should_execute_range()
        if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of the binary tree
            self().my_divisor = 1; // TODO: replace by an on-stack flag (partition_state's member)?
            if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task
#if __TBB_USE_OPTIONAL_RTTI
                // RTTI is available, check whether the cast is valid
                __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0);
                // correctness of the cast relies on avoiding the root task, for which:
                // - the initial value of my_divisor != 0 (protected by a separate assertion)
                // - is_stolen_task() always returns false.
#endif
                flag_task::mark_task_stolen(t);
                if( !my_max_depth ) my_max_depth++;
                my_max_depth += __TBB_DEMAND_DEPTH_ADD;
                return true;
            }
        }
        return false;
    }
    depth_t max_depth() { return my_max_depth; }
    void align_depth(depth_t base) {
        __TBB_ASSERT(base <= my_max_depth, 0);
        my_max_depth -= base;
    }
    template<typename StartType, typename Range>
    void work_balance(StartType &start, Range &range) {
        if( !range.is_divisible() || !self().max_depth() ) {
            start.run_body( range ); // a simple partitioner always goes here
        }
        else { // do the range pool
            internal::range_vector<Range, range_pool_size> range_pool(range);
            do {
                range_pool.split_to_fill(self().max_depth()); // fill the range pool
                if( self().check_for_demand( start ) ) {
                    if( range_pool.size() > 1 ) {
                        start.offer_work( range_pool.front(), range_pool.front_depth() );
                        range_pool.pop_front();
                        continue;
                    }
                    if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task
                        continue; // note: the next split_to_fill() should split the range at least once
                }
                start.run_body( range_pool.back() );
                range_pool.pop_back();
            } while( !range_pool.empty() && !start.is_cancelled() );
        }
    }
    bool check_for_demand( task &t ) {
        if( pass == my_delay ) {
            if( self().my_divisor > 1 ) // produce affinitized tasks while they have a slot in the array
                return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more
            else if( self().my_divisor && my_max_depth ) { // make a balancing task
                self().my_divisor = 0; // once for each task; depth will be decreased in align_depth()
                return true;
            }
            else if( flag_task::is_peer_stolen(t) ) {
                my_max_depth += __TBB_DEMAND_DEPTH_ADD;
                return true;
            }
        } else if( begin == my_delay ) {
#ifndef __TBB_USE_MACHINE_TIME_STAMPS
            my_delay = pass;
#else
            my_dst_tsc = __TBB_time_stamp() + __TBB_task_duration();
            my_delay = run;
        } else if( run == my_delay ) {
            if( __TBB_time_stamp() < my_dst_tsc ) {
                __TBB_ASSERT(my_max_depth > 0, NULL);
                my_max_depth--; // increase granularity since tasks seem to have too little work
                return false;
            }
            my_delay = pass;
            return true;
#endif // __TBB_USE_MACHINE_TIME_STAMPS
        }
        return false;
    }
};
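
// Pacing note: my_delay is the small state machine consulted by check_for_demand():
//
//     begin -> run : stamp my_dst_tsc with "now + task duration" (time stamps enabled)
//     run   -> pass: until the stamp expires, answer "no demand" and shrink
//                    my_max_depth so executed chunks get coarser
//     pass         : actually answer demand: affinitized tasks, a balancing task,
//                    or extra depth when a peer task was stolen
//
// Without machine time stamps, begin falls through to pass on the first call.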
class auto_partition_type: public dynamic_grainsize_mode<adaptive_mode<auto_partition_type> > {
public:
    auto_partition_type( const auto_partitioner& )
        : dynamic_grainsize_mode<adaptive_mode<auto_partition_type> >() {
        my_divisor *= __TBB_INITIAL_CHUNKS;
    }
    auto_partition_type( auto_partition_type& src, split)
        : dynamic_grainsize_mode<adaptive_mode<auto_partition_type> >(src, split()) {}
    bool is_divisible() { // part of old should_execute_range()
        if( my_divisor > 1 ) return true;
        if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead
            // keep the same fragmentation while splitting for the local task pool
            my_max_depth--;
            my_divisor = 0; // decrease max_depth once per task
            return true;
        } else return false;
    }
    bool check_for_demand(task &t) {
        if( flag_task::is_peer_stolen(t) ) {
            my_max_depth += __TBB_DEMAND_DEPTH_ADD;
            return true;
        } else return false;
    }
};
class simple_partition_type: public partition_type_base<simple_partition_type> {
public:
    simple_partition_type( const simple_partitioner& ) {}
    simple_partition_type( const simple_partition_type&, split ) {}
    //! simplified algorithm
    template<typename StartType, typename Range>
    void execute(StartType &start, Range &range) {
        split_type split_obj = split(); // start.offer_work accepts split_type as a reference
        while( range.is_divisible() )
            start.offer_work( split_obj );
        start.run_body( range );
    }
};
class static_partition_type : public linear_affinity_mode<static_partition_type> {
public:
    typedef proportional_split split_type;
    static_partition_type( const static_partitioner& )
        : linear_affinity_mode<static_partition_type>() {}
    static_partition_type( static_partition_type& p, split )
        : linear_affinity_mode<static_partition_type>(p, split()) {}
    static_partition_type( static_partition_type& p, const proportional_split& split_obj )
        : linear_affinity_mode<static_partition_type>(p, split_obj) {}
};
class affinity_partition_type : public dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> > {
    static const unsigned factor_power = 4; // TODO: get a unified formula based on the number of computing units
    tbb::internal::affinity_id* my_array;
public:
    static const unsigned factor = 1 << factor_power; // number of slots in the affinity array per task
    typedef proportional_split split_type;
    affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& ap )
        : dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> >() {
        __TBB_ASSERT( (factor&(factor-1))==0, "factor must be a power of two" );
        ap.resize(factor);
        my_array = ap.my_array;
        my_max_depth = factor_power + 1;
        __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 );
    }
    affinity_partition_type(affinity_partition_type& p, split)
        : dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> >(p, split())
        , my_array(p.my_array) {}
    affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj)
        : dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> >(p, split_obj)
        , my_array(p.my_array) {}
    void set_affinity( task &t ) {
        if( self().my_divisor ) {
            if( !my_array[my_head] )
                // TODO: consider new ideas with my_array for both affinity and static partitioners, then code reuse
                t.set_affinity( affinity_id(my_head / factor + 1) );
            else
                t.set_affinity( my_array[my_head] );
        }
    }
    void note_affinity( task::affinity_id id ) {
        if( self().my_divisor )
            my_array[my_head] = id;
    }
};
//! Backward-compatible partition for auto and affinity partition objects.
class old_auto_partition_type: public tbb::internal::partition_type_base {
    size_t num_chunks;
    static const size_t VICTIM_CHUNKS = 4;
public:
    bool should_execute_range(const task &t) {
        if( num_chunks<VICTIM_CHUNKS && t.is_stolen_task() )
            num_chunks = VICTIM_CHUNKS;
        return num_chunks==1;
    }
    old_auto_partition_type( const auto_partitioner& )
        : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
    old_auto_partition_type( const affinity_partitioner& )
        : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
    old_auto_partition_type( old_auto_partition_type& pt, split ) {
        num_chunks = pt.num_chunks = (pt.num_chunks+1u) / 2u;
    }
};

} // namespace interfaceX::internal
//! @endcond
} // namespace interfaceX
//! A simple partitioner
/** Divides the range until it is no longer divisible.
    @ingroup algorithms */
class simple_partitioner {
public:
    simple_partitioner() {}
private:
    template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
    // backward compatibility
    class partition_type: public internal::partition_type_base {
    public:
        bool should_execute_range(const task& ) {return false;}
        partition_type( const simple_partitioner& ) {}
        partition_type( const partition_type&, split ) {}
    };
    // the new implementation just extends the existing interface
    typedef interface9::internal::simple_partition_type task_partition_type;

    // TODO: consider making split_type public
    typedef interface9::internal::simple_partition_type::split_type split_type;
};
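
// Usage sketch (assumes the usual tbb/parallel_for.h and tbb/blocked_range.h headers;
// process() stands for the user's loop body):
//
//     tbb::parallel_for(
//         tbb::blocked_range<size_t>(0, n, /*grainsize=*/1000),
//         [](const tbb::blocked_range<size_t>& r) {
//             for( size_t i = r.begin(); i != r.end(); ++i )
//                 process(i);
//         },
//         tbb::simple_partitioner() );
//
// With simple_partitioner the grainsize alone controls chunking: the range is split
// all the way down, so each executed chunk of a blocked_range holds between
// grainsize/2 and grainsize iterations.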
//! An auto partitioner
/** The range is initially divided into several large chunks.
    Chunks are further subdivided into smaller pieces if demand is detected and they are divisible.
    @ingroup algorithms */
class auto_partitioner {
public:
    auto_partitioner() {}
private:
    template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
    // backward compatibility
    typedef interface9::internal::old_auto_partition_type partition_type;
    // the new implementation just extends the existing interface
    typedef interface9::internal::auto_partition_type task_partition_type;

    // TODO: consider making split_type public
    typedef interface9::internal::auto_partition_type::split_type split_type;
};
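
// Usage sketch: auto_partitioner is the library default, so these two calls behave
// identically:
//
//     tbb::parallel_for( range, body );
//     tbb::parallel_for( range, body, tbb::auto_partitioner() );
//
// The grainsize of the range still bounds the smallest chunk, but beyond the initial
// division the chunk size adapts to stealing feedback rather than to a fixed depth.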
//! A static partitioner
class static_partitioner {
public:
    static_partitioner() {}
private:
    template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_deterministic_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
    // backward compatibility
    typedef interface9::internal::old_auto_partition_type partition_type;
    // the new implementation just extends the existing interface
    typedef interface9::internal::static_partition_type task_partition_type;

    // TODO: consider making split_type public
    typedef interface9::internal::static_partition_type::split_type split_type;
};
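
// Usage sketch: static_partitioner deals the range out once, in roughly equal
// contiguous chunks pinned to threads, with little or no dynamic load balancing
// afterwards; it suits uniform, predictable iteration costs:
//
//     tbb::parallel_for( tbb::blocked_range<size_t>(0, n), body,
//                        tbb::static_partitioner() );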
//! An affinity partitioner
class affinity_partitioner: internal::affinity_partitioner_base_v3 {
public:
    affinity_partitioner() {}

private:
    template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
    // backward compatibility - for parallel_scan only
    typedef interface9::internal::old_auto_partition_type partition_type;
    // the new implementation just extends the existing interface
    typedef interface9::internal::affinity_partition_type task_partition_type;

    // TODO: consider making split_type public
    typedef interface9::internal::affinity_partition_type::split_type split_type;
};
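
// Usage sketch: unlike the stateless partitioners above, affinity_partitioner carries
// state (the affinity array) between calls and is passed by non-const reference, so
// the same long-lived object must be reused across invocations that traverse the same
// data for the replay of affinities to pay off:
//
//     static tbb::affinity_partitioner ap;            // must outlive the loop below
//     for( int step = 0; step < num_steps; ++step )   // num_steps: user's iteration count
//         tbb::parallel_for( tbb::blocked_range<size_t>(0, n), body, ap );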
} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warning 4244 is back

#undef __TBB_INITIAL_CHUNKS
#undef __TBB_RANGE_POOL_CAPACITY
#undef __TBB_INIT_DEPTH

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_partitioner_H_include_area

#endif /* __TBB_partitioner_H */