Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
parallel_reduce.h
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_parallel_reduce_H
#define __TBB_parallel_reduce_H

#define __TBB_parallel_reduce_H_include_area

#include <new>
#include "task.h"
#include "aligned_space.h"
#include "partitioner.h"
#include "tbb_profiling.h"

namespace tbb {

namespace interface9 {
namespace internal {

    using namespace tbb::internal;

    /** Values for reduction_context. */
    enum {
        root_task, left_child, right_child
    };

    /** Represented as a char, not an enum, for compactness. */
    typedef char reduction_context;

    //! Task type used to combine the partial results of parallel_reduce.
    /** @ingroup algorithms */
    template<typename Body>
    class finish_reduce: public flag_task {
        bool has_right_zombie;
        const reduction_context my_context;
        //! Pointer to body, or NULL if the left child has not yet finished.
        Body* my_body;
        aligned_space<Body> zombie_space;
        finish_reduce( reduction_context context_ ) :
            has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
            my_context(context_),
            my_body(NULL)
        {
        }
        ~finish_reduce() {
            if( has_right_zombie )
                zombie_space.begin()->~Body();
        }
        task* execute() __TBB_override {
            if( has_right_zombie ) {
                // Right child was stolen.
                Body* s = zombie_space.begin();
                my_body->join( *s );
                // Body::join() won't be called if cancelled; destruction is deferred to the destructor.
            }
            if( my_context==left_child )
                itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body );
            return NULL;
        }
        template<typename Range,typename Body_, typename Partitioner>
        friend class start_reduce;
    };

    //! allocate right task with new parent
    void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes);

    //! Task type used to split the work of parallel_reduce.
    /** @ingroup algorithms */
    template<typename Range, typename Body, typename Partitioner>
    class start_reduce: public task {
        typedef finish_reduce<Body> finish_type;
        Body* my_body;
        Range my_range;
        typename Partitioner::task_partition_type my_partition;
        reduction_context my_context;
        task* execute() __TBB_override;
        //! Update affinity info, if any.
        void note_affinity( affinity_id id ) __TBB_override {
            my_partition.note_affinity( id );
        }
        template<typename Body_>
        friend class finish_reduce;

public:
        //! Constructor used for root task.
        start_reduce( const Range& range, Body* body, Partitioner& partitioner ) :
            my_body(body),
            my_range(range),
            my_partition(partitioner),
            my_context(root_task)
        {
        }
        //! Splitting constructor used to generate children.
        /** parent_ becomes left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) :
            my_body(parent_.my_body),
            my_range(parent_.my_range, split_obj),
            my_partition(parent_.my_partition, split_obj),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            parent_.my_context = left_child;
        }
        //! Construct right child from the given range as response to the demand.
        /** parent_ remains left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, const Range& r, depth_t d ) :
            my_body(parent_.my_body),
            my_range(r),
            my_partition(parent_.my_partition, split()),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            my_partition.align_depth( d ); // TODO: move into constructor of partitioner
            parent_.my_context = left_child;
        }
        static void run( const Range& range, Body& body, Partitioner& partitioner ) {
            if( !range.empty() ) {
#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
                task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) );
#else
                // A bound context prevents exceptions thrown from the body from affecting nesting or sibling
                // algorithms, and allows users to handle exceptions safely by wrapping parallel_reduce in a try-block.
                task_group_context context(PARALLEL_REDUCE);
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
            }
        }
#if __TBB_TASK_GROUP_CONTEXT
        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
            if( !range.empty() )
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
        }
#endif /* __TBB_TASK_GROUP_CONTEXT */
        //! Run body for range.
        void run_body( Range &r ) { (*my_body)( r ); }

        //! Spawn right task; serves as callback for partitioner.
        // TODO: remove code duplication from 'offer_work' methods
        void offer_work(typename Partitioner::split_type& split_obj) {
            task *tasks[2];
            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));
            new((void*)tasks[0]) finish_type(my_context);
            new((void*)tasks[1]) start_reduce(*this, split_obj);
            spawn(*tasks[1]);
        }
        //! Spawn right task; serves as callback for partitioner.
        void offer_work(const Range& r, depth_t d = 0) {
            task *tasks[2];
            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));
            new((void*)tasks[0]) finish_type(my_context);
            new((void*)tasks[1]) start_reduce(*this, r, d);
            spawn(*tasks[1]);
        }
    };

    //! Allocate right task with new parent.
    // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined
    inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) {
        tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes);
        start_reduce_task->set_parent(tasks[0]);
        tasks[0]->set_ref_count(2);
        tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes);
    }

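    // In the scheme above, tasks[0] becomes the new finish (continuation) task:
    // it is spliced in as the parent of the running left task, and its reference
    // count of 2 ensures it executes only after both the left task and the right
    // child allocated into tasks[1] have completed.
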
    template<typename Range, typename Body, typename Partitioner>
    task* start_reduce<Range,Body,Partitioner>::execute() {
        my_partition.check_being_stolen( *this );
        if( my_context==right_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2???
                my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split());
                parent_ptr->has_right_zombie = true;
            }
        } else __TBB_ASSERT(my_context==root_task,NULL); // because a left leaf spawns right leaves without recycling
        my_partition.execute(*this, my_range);
        if( my_context==left_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL);
            itt_store_word_with_release(parent_ptr->my_body, my_body );
        }
        return NULL;
    }
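
    // A sketch of the synchronization protocol implemented by execute() above:
    //  - A right child checks whether its left sibling has already published its
    //    body into the shared finish_reduce. If not, the right child was stolen
    //    and may run concurrently with the left one, so it constructs a split
    //    copy of the body (the "zombie") in zombie_space and sets has_right_zombie.
    //  - A left child publishes its body pointer with a release store;
    //    finish_reduce::execute() then joins the zombie into that body, and the
    //    zombie is destroyed in ~finish_reduce() even if cancellation skips join().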

    //! Task type used to combine the partial results of parallel_deterministic_reduce.
    /** @ingroup algorithms */
    template<typename Body>
    class finish_deterministic_reduce: public task {
        Body &my_left_body;
        Body my_right_body;

        finish_deterministic_reduce( Body &body ) :
            my_left_body( body ),
            my_right_body( body, split() )
        {
        }
        task* execute() __TBB_override {
            my_left_body.join( my_right_body );
            return NULL;
        }
        template<typename Range,typename Body_, typename Partitioner>
        friend class start_deterministic_reduce;
    };
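
    // Note: my_right_body is always created by splitting my_left_body, whether or
    // not the right subtask is ultimately stolen; combined with the fixed join
    // order in execute(), this makes the shape of the reduction tree, and hence
    // the result of non-associative operations, independent of thread scheduling.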

    //! Task type used to split the work of parallel_deterministic_reduce.
    /** @ingroup algorithms */
    template<typename Range, typename Body, typename Partitioner>
    class start_deterministic_reduce: public task {
        typedef finish_deterministic_reduce<Body> finish_type;
        Body &my_body;
        Range my_range;
        typename Partitioner::task_partition_type my_partition;
        task* execute() __TBB_override;

        //! Constructor used for root task.
        start_deterministic_reduce( const Range& range, Body& body, Partitioner& partitioner ) :
            my_body( body ),
            my_range( range ),
            my_partition( partitioner )
        {
        }
        //! Splitting constructor used to generate children.
        /** parent_ becomes left child.  Newly constructed object is right child. */
        start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c, typename Partitioner::split_type& split_obj ) :
            my_body( c.my_right_body ),
            my_range( parent_.my_range, split_obj ),
            my_partition( parent_.my_partition, split_obj )
        {
        }

public:
        static void run( const Range& range, Body& body, Partitioner& partitioner ) {
            if( !range.empty() ) {
#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
                task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,body,partitioner) );
#else
                // A bound context prevents exceptions thrown from the body from affecting nesting or sibling
                // algorithms, and allows users to handle exceptions safely by wrapping parallel_deterministic_reduce
                // in a try-block.
                task_group_context context(PARALLEL_REDUCE);
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) );
#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
            }
        }
#if __TBB_TASK_GROUP_CONTEXT
        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
            if( !range.empty() )
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) );
        }
#endif /* __TBB_TASK_GROUP_CONTEXT */

        // Spawn right task; serves as callback for partitioner.
        void offer_work( typename Partitioner::split_type& split_obj) {
            task* tasks[2];
            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_deterministic_reduce), sizeof(finish_type));
            new((void*)tasks[0]) finish_type(my_body);
            new((void*)tasks[1]) start_deterministic_reduce(*this, *static_cast<finish_type*>(tasks[0]), split_obj);
            spawn(*tasks[1]);
        }

        // Run body for range.
        void run_body( Range &r ) { my_body(r); }
    };

    template<typename Range, typename Body, typename Partitioner>
    task* start_deterministic_reduce<Range,Body,Partitioner>::execute() {
        my_partition.execute(*this, my_range);
        return NULL;
    }
} // namespace internal
} // namespace interface9

namespace internal {
    using interface9::internal::start_reduce;
    using interface9::internal::start_deterministic_reduce;

    //! Auxiliary class for parallel_reduce; for internal use only.
    /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body"
        using given \ref parallel_reduce_lambda_req "anonymous function objects". **/
    /** @ingroup algorithms */
    template<typename Range, typename Value, typename RealBody, typename Reduction>
    class lambda_reduce_body {

//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced
//       (might require some performance measurements)

        const Value&     identity_element;
        const RealBody&  my_real_body;
        const Reduction& my_reduction;
        Value            my_value;
        lambda_reduce_body& operator= ( const lambda_reduce_body& other );
    public:
        lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )
            : identity_element(identity)
            , my_real_body(body)
            , my_reduction(reduction)
            , my_value(identity)
        { }
        lambda_reduce_body( const lambda_reduce_body& other )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.my_value)
        { }
        lambda_reduce_body( lambda_reduce_body& other, tbb::split )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.identity_element)
        { }
        void operator()(Range& range) {
            my_value = my_real_body(range, const_cast<const Value&>(my_value));
        }
        void join( lambda_reduce_body& rhs ) {
            my_value = my_reduction(const_cast<const Value&>(my_value), const_cast<const Value&>(rhs.my_value));
        }
        Value result() const {
            return my_value;
        }
    };
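
    // Note: the splitting constructor above resets my_value to identity_element,
    // so the identity passed to the functional form of parallel_reduce must be a
    // true identity for the reduction; otherwise the result would depend on how
    // many times the range happens to be split.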

} // namespace internal

// Requirements on Range concept are documented in blocked_range.h

/** \page parallel_reduce_body_req Requirements on parallel_reduce body
    Class \c Body implementing the concept of parallel_reduce body must define:
    - \code Body::Body( Body&, split ); \endcode         Splitting constructor. Must be able to run concurrently
                                                         with operator() and method \c join.
    - \code Body::~Body(); \endcode                      Destructor.
    - \code void Body::operator()( Range& r ); \endcode  Function call operator applying body to range \c r
                                                         and accumulating the result.
    - \code void Body::join( Body& b ); \endcode         Join results. The result in \c b should be merged into
                                                         the result of \c this.
**/

/** \name parallel_reduce
    See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/
//@{

//! Parallel iteration with reduction and default partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body ) {
    internal::start_reduce<Range,Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );
}
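
//! Example (illustrative only, not part of the library): summing an array with
//! the imperative form above. The names \c SumBody and \c parallel_sum are
//! hypothetical. The body supplies operator(), a splitting constructor, and
//! join(), as required by \ref parallel_reduce_body_req "parallel_reduce Body".
/**
    \code
    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    struct SumBody {
        const float* my_a;
        float my_sum;
        SumBody( const float a[] ) : my_a(a), my_sum(0) {}
        SumBody( SumBody& other, tbb::split ) : my_a(other.my_a), my_sum(0) {}
        void operator()( const tbb::blocked_range<size_t>& r ) {
            for( size_t i=r.begin(); i!=r.end(); ++i )
                my_sum += my_a[i];   // accumulate this subrange
        }
        void join( SumBody& rhs ) { my_sum += rhs.my_sum; }  // merge partial sums
    };

    float parallel_sum( const float a[], size_t n ) {
        SumBody body(a);
        tbb::parallel_reduce( tbb::blocked_range<size_t>(0,n), body );
        return body.my_sum;
    }
    \endcode
**/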

//! Parallel iteration with reduction and simple_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and auto_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and static_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const static_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and affinity_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner );
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, default partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, task_group_context& context ) {
    internal::start_reduce<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER(), context );
}

//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, static_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const static_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner, context );
}
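
//! Example (illustrative): one possible use of the context overloads above is to
//! run a reduction in an isolated context, so that cancellation of the enclosing
//! task group does not cancel this algorithm. \c body and \c n are assumed to be
//! defined as in the earlier examples.
/**
    \code
    tbb::task_group_context ctx( tbb::task_group_context::isolated );
    tbb::parallel_reduce( tbb::blocked_range<size_t>(0,n), body, ctx );
    \endcode
**/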
#endif /* __TBB_TASK_GROUP_CONTEXT */

/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)
    Given an identity value of type \c Value, the \c RealBody must be callable as
    \code Value RealBody::operator()( const Range& r, const Value& x ) const \endcode
    accumulating subrange \c r into \c x and returning the result, and the \c Reduction must be callable as
    \code Value Reduction::operator()( const Value& x, const Value& y ) const \endcode
    combining two partial results.
**/

//! Parallel iteration with reduction and default partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
                          ::run(range, body, __TBB_DEFAULT_PARTITIONER() );
    return body.result();
}
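
//! Example (illustrative; \c parallel_sum is a hypothetical name): the same sum
//! as the imperative example, in functional form. The identity, the accumulating
//! function, and the reduction are passed separately; std::plus<float>() is one
//! possible Reduction.
/**
    \code
    #include <functional> // std::plus

    float parallel_sum( const float a[], size_t n ) {
        return tbb::parallel_reduce(
            tbb::blocked_range<size_t>(0,n),
            0.f,                                        // identity element
            [=]( const tbb::blocked_range<size_t>& r, float init ) -> float {
                for( size_t i=r.begin(); i!=r.end(); ++i )
                    init += a[i];                       // accumulate this subrange
                return init;
            },
            std::plus<float>() );                       // combine partial results
    }
    \endcode
**/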

//! Parallel iteration with reduction and simple_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run(range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and auto_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and static_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const static_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const static_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and affinity_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, default partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
                          ::run( range, body, __TBB_DEFAULT_PARTITIONER(), context );
    return body.result();
}

//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, static_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const static_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const static_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

//@}

//! Parallel iteration with deterministic reduction and default simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, simple_partitioner());
}

//! Parallel iteration with deterministic reduction and simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, partitioner);
}

//! Parallel iteration with deterministic reduction and static partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) {
    internal::start_deterministic_reduce<Range, Body, const static_partitioner>::run(range, body, partitioner);
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, simple_partitioner(), context);
}

//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, partitioner, context);
}

//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) {
    internal::start_deterministic_reduce<Range, Body, const static_partitioner>::run(range, body, partitioner, context);
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

/** \name parallel_deterministic_reduce
    See also requirements on \ref range_req "Range" and \ref parallel_reduce_lambda_req "parallel_reduce anonymous function objects". **/
//@{

// TODO: consider making static_partitioner the default
//! Parallel iteration with deterministic reduction and default simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner());
}
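
//! Note: unlike parallel_reduce, the deterministic form performs the same set of
//! splits and joins on every run, independent of task stealing, so reductions
//! over non-associative operations such as floating-point addition produce
//! run-to-run reproducible results for a given partitioning.
//!
//! Example (illustrative; \c a and \c n as in the earlier sketches):
/**
    \code
    float total = tbb::parallel_deterministic_reduce(
        tbb::blocked_range<size_t>(0,n),
        0.f,
        [=]( const tbb::blocked_range<size_t>& r, float init ) -> float {
            for( size_t i=r.begin(); i!=r.end(); ++i )
                init += a[i];
            return init;
        },
        std::plus<float>() );
    \endcode
**/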

//! Parallel iteration with deterministic reduction and simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const simple_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>, const simple_partitioner>
                          ::run(range, body, partitioner);
    return body.result();
}

//! Parallel iteration with deterministic reduction and static partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const static_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const static_partitioner>
                          ::run(range, body, partitioner);
    return body.result();
}
#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                                     task_group_context& context ) {
    return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner(), context);
}

//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                                     const simple_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const simple_partitioner>
                          ::run(range, body, partitioner, context);
    return body.result();
}

//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                                     const static_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const static_partitioner>
                          ::run(range, body, partitioner, context);
    return body.result();
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
//@}

} // namespace tbb

#undef __TBB_parallel_reduce_H_include_area

#endif /* __TBB_parallel_reduce_H */