Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:12:58

0001 /*
0002     Copyright (c) 2005-2020 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_parallel_reduce_H
0018 #define __TBB_parallel_reduce_H
0019 
0020 #define __TBB_parallel_reduce_H_include_area
0021 #include "internal/_warning_suppress_enable_notice.h"
0022 
0023 #include <new>
0024 #include "task.h"
0025 #include "aligned_space.h"
0026 #include "partitioner.h"
0027 #include "tbb_profiling.h"
0028 
0029 namespace tbb {
0030 
0031 namespace interface9 {
0032 //! @cond INTERNAL
0033 namespace internal {
0034 
0035     using namespace tbb::internal;
0036 
    /** Values for reduction_context: a task's position in the reduction tree,
        used by execute() to decide how partial results propagate. */
    enum {
        root_task, left_child, right_child
    };

    /** Represented as a char, not enum, for compactness. */
    typedef char reduction_context;
0044 
    //! Task type used to combine the partial results of parallel_reduce.
    /** Serves as the continuation (parent) of a left/right start_reduce pair;
        when both children finish, execute() joins the right partial result
        (if any) into the left body.
        @ingroup algorithms */
    template<typename Body>
    class finish_reduce: public flag_task {
        //! True if the right child placed a split-off Body in zombie_space that must be joined and destroyed.
        bool has_right_zombie;
        //! Position of this node in the reduction tree (root_task/left_child/right_child).
        const reduction_context my_context;
        //! Pointer to body, or NULL if the left child has not yet finished.
        Body* my_body;
        //! Raw storage for the right child's "zombie" Body; constructed lazily in start_reduce::execute.
        aligned_space<Body> zombie_space;
        finish_reduce( reduction_context context_ ) :
            has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
            my_context(context_),
            my_body(NULL)
        {
        }
        ~finish_reduce() {
            // Zombie destruction deferred to here: Body::join() (and hence
            // execute()) may be skipped on cancellation, but the destructor always runs.
            if( has_right_zombie )
                zombie_space.begin()->~Body();
        }
        task* execute() __TBB_override {
            if( has_right_zombie ) {
                // Right child was stolen.
                Body* s = zombie_space.begin();
                my_body->join( *s );
                // Body::join() won't be called if canceled. Defer destruction to destructor
            }
            // If this node is itself a left child, publish the joined body to
            // the grandparent finish_reduce with release semantics.
            if( my_context==left_child )
                itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body );
            return NULL;
        }
        template<typename Range,typename Body_, typename Partitioner>
        friend class start_reduce;
    };
0078 
    //! Allocate the right child task together with a new finish-task parent.
    /** Declared here so the start_reduce template can call it; defined inline below. */
    void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes);
0081 
    //! Task type used to split the work of parallel_reduce.
    /** Recursively splits my_range under control of the partitioner; partial
        results from right subtrees are merged by finish_reduce continuations.
        @ingroup algorithms */
    template<typename Range, typename Body, typename Partitioner>
    class start_reduce: public task {
        typedef finish_reduce<Body> finish_type;
        Body* my_body;    // shared with the left sibling until execute() splits off a private copy
        Range my_range;
        typename Partitioner::task_partition_type my_partition;
        reduction_context my_context;
        task* execute() __TBB_override;
        //! Update affinity info, if any
        void note_affinity( affinity_id id ) __TBB_override {
            my_partition.note_affinity( id );
        }
        template<typename Body_>
        friend class finish_reduce;

public:
        //! Constructor used for root task
        start_reduce( const Range& range, Body* body, Partitioner& partitioner ) :
            my_body(body),
            my_range(range),
            my_partition(partitioner),
            my_context(root_task)
        {
        }
        //! Splitting constructor used to generate children.
        /** parent_ becomes left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) :
            my_body(parent_.my_body),
            my_range(parent_.my_range, split_obj),
            my_partition(parent_.my_partition, split_obj),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            parent_.my_context = left_child;  // demote the parent; it keeps the left half
        }
        //! Construct right child from the given range as response to the demand.
        /** parent_ remains left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, const Range& r, depth_t d ) :
            my_body(parent_.my_body),
            my_range(r),
            my_partition(parent_.my_partition, split()),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            my_partition.align_depth( d ); // TODO: move into constructor of partitioner
            parent_.my_context = left_child;
        }
        //! Spawn the root task and wait for completion; no-op for an empty range.
        static void run( const Range& range, Body& body, Partitioner& partitioner ) {
            if( !range.empty() ) {
#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
                task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) );
#else
                // Bound context prevents exceptions from body to affect nesting or sibling algorithms,
                // and allows users to handle exceptions safely by wrapping parallel_for in the try-block.
                task_group_context context(PARALLEL_REDUCE);
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
            }
        }
#if __TBB_TASK_GROUP_CONTEXT
        //! Same as above, but runs inside a user-supplied task_group_context.
        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
            if( !range.empty() )
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
        }
#endif /* __TBB_TASK_GROUP_CONTEXT */
        //! Run body for range
        void run_body( Range &r ) { (*my_body)( r ); }

        //! spawn right task, serves as callback for partitioner
        // TODO: remove code duplication from 'offer_work' methods
        void offer_work(typename Partitioner::split_type& split_obj) {
            task *tasks[2];  // tasks[0]: finish continuation, tasks[1]: right child
            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));
            new((void*)tasks[0]) finish_type(my_context);
            new((void*)tasks[1]) start_reduce(*this, split_obj);
            spawn(*tasks[1]);
        }
        //! spawn right task, serves as callback for partitioner
        void offer_work(const Range& r, depth_t d = 0) {
            task *tasks[2];
            allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_reduce), sizeof(finish_type));
            new((void*)tasks[0]) finish_type(my_context);
            new((void*)tasks[1]) start_reduce(*this, r, d);
            spawn(*tasks[1]);
        }
    };
0170 
    //! allocate right task with new parent
    // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined
    inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) {
        // tasks[0] is the finish task, spliced in as the new parent (continuation) of start_reduce_task.
        tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes);
        start_reduce_task->set_parent(tasks[0]);
        tasks[0]->set_ref_count(2);  // waits on both children: start_reduce_task and tasks[1]
        tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes);
    }
0179 
    //! Execute one reduction task: lazily split the body if stolen, process the
    //! range, then publish the body pointer to the parent if this is a left child.
    template<typename Range, typename Body, typename Partitioner>
    task* start_reduce<Range,Body,Partitioner>::execute() {
        my_partition.check_being_stolen( *this );
        if( my_context==right_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            // If the left sibling has not yet published its body (acquire load sees NULL),
            // this right child runs concurrently and needs its own split-off Body,
            // constructed in the parent's zombie_space.
            if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2???
                my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split());
                parent_ptr->has_right_zombie = true;
            }
        } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling
        my_partition.execute(*this, my_range);
        if( my_context==left_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL);
            // Release store pairs with the acquire load above in the right sibling.
            itt_store_word_with_release(parent_ptr->my_body, my_body );
        }
        return NULL;
    }
0198 
    //! Task type used to combine the partial results of parallel_deterministic_reduce.
    /** Unlike finish_reduce, the right body is split eagerly at construction,
        so the join always happens and the reduction tree shape is deterministic.
        @ingroup algorithms */
    template<typename Body>
    class finish_deterministic_reduce: public task {
        Body &my_left_body;   // left child's body; receives the joined result
        Body my_right_body;   // split-off copy that the right child accumulates into

        finish_deterministic_reduce( Body &body ) :
            my_left_body( body ),
            my_right_body( body, split() )
        {
        }
        task* execute() __TBB_override {
            // Always left.join(right) — the deterministic order.
            my_left_body.join( my_right_body );
            return NULL;
        }
        template<typename Range,typename Body_, typename Partitioner>
        friend class start_deterministic_reduce;
    };
0218 
0219     //! Task type used to split the work of parallel_deterministic_reduce.
0220     /** @ingroup algorithms */
0221     template<typename Range, typename Body, typename Partitioner>
0222     class start_deterministic_reduce: public task {
0223         typedef finish_deterministic_reduce<Body> finish_type;
0224         Body &my_body;
0225         Range my_range;
0226         typename Partitioner::task_partition_type my_partition;
0227         task* execute() __TBB_override;
0228 
0229         //! Constructor used for root task
0230         start_deterministic_reduce( const Range& range, Body& body, Partitioner& partitioner ) :
0231             my_body( body ),
0232             my_range( range ),
0233             my_partition( partitioner )
0234         {
0235         }
0236         //! Splitting constructor used to generate children.
0237         /** parent_ becomes left child.  Newly constructed object is right child. */
0238         start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c, typename Partitioner::split_type& split_obj ) :
0239             my_body( c.my_right_body ),
0240             my_range( parent_.my_range, split_obj ),
0241             my_partition( parent_.my_partition, split_obj )
0242         {
0243         }
0244 
0245 public:
0246         static void run( const Range& range, Body& body, Partitioner& partitioner ) {
0247             if( !range.empty() ) {
0248 #if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
0249                 task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body,partitioner) );
0250 #else
0251                 // Bound context prevents exceptions from body to affect nesting or sibling algorithms,
0252                 // and allows users to handle exceptions safely by wrapping parallel_for in the try-block.
0253                 task_group_context context(PARALLEL_REDUCE);
0254                 task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) );
0255 #endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
0256             }
0257         }
0258 #if __TBB_TASK_GROUP_CONTEXT
0259         static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
0260             if( !range.empty() )
0261                 task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) );
0262         }
0263 #endif /* __TBB_TASK_GROUP_CONTEXT */
0264 
0265         void offer_work( typename Partitioner::split_type& split_obj) {
0266             task* tasks[2];
0267             allocate_sibling(static_cast<task*>(this), tasks, sizeof(start_deterministic_reduce), sizeof(finish_type));
0268             new((void*)tasks[0]) finish_type(my_body);
0269             new((void*)tasks[1]) start_deterministic_reduce(*this, *static_cast<finish_type*>(tasks[0]), split_obj);
0270             spawn(*tasks[1]);
0271         }
0272 
0273         void run_body( Range &r ) { my_body(r); }
0274     };
0275 
    //! Delegate to the partitioner, which calls back run_body()/offer_work().
    template<typename Range, typename Body, typename Partitioner>
    task* start_deterministic_reduce<Range,Body, Partitioner>::execute() {
        my_partition.execute(*this, my_range);
        return NULL;
    }
0281 } // namespace internal
0282 //! @endcond
0283 } //namespace interfaceX
0284 
0285 //! @cond INTERNAL
0286 namespace internal {
0287     using interface9::internal::start_reduce;
0288     using interface9::internal::start_deterministic_reduce;
    //! Auxiliary class for parallel_reduce; for internal use only.
    /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body"
        using given \ref parallel_reduce_lambda_req "anonymous function objects".
     **/
    /** @ingroup algorithms */
    template<typename Range, typename Value, typename RealBody, typename Reduction>
    class lambda_reduce_body {

//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced
//       (might require some performance measurements)

        const Value&     identity_element;  // value a freshly split body starts from
        const RealBody&  my_real_body;      // accumulator: (Range, Value) -> Value
        const Reduction& my_reduction;      // combiner: (Value, Value) -> Value
        Value            my_value;          // partial result accumulated so far
        lambda_reduce_body& operator= ( const lambda_reduce_body& other );  // non-assignable (declared, never defined)
    public:
        lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )
            : identity_element(identity)
            , my_real_body(body)
            , my_reduction(reduction)
            , my_value(identity)
        { }
        lambda_reduce_body( const lambda_reduce_body& other )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.my_value)
        { }
        //! Splitting constructor: the new body restarts from the identity element.
        lambda_reduce_body( lambda_reduce_body& other, tbb::split )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.identity_element)
        { }
        void operator()(Range& range) {
            // const_cast ensures the user functor receives the accumulator as const.
            my_value = my_real_body(range, const_cast<const Value&>(my_value));
        }
        void join( lambda_reduce_body& rhs ) {
            my_value = my_reduction(const_cast<const Value&>(my_value), const_cast<const Value&>(rhs.my_value));
        }
        //! Final reduction result (by value).
        Value result() const {
            return my_value;
        }
    };
0334 
0335 } // namespace internal
0336 //! @endcond
0337 
0338 // Requirements on Range concept are documented in blocked_range.h
0339 
0340 /** \page parallel_reduce_body_req Requirements on parallel_reduce body
0341     Class \c Body implementing the concept of parallel_reduce body must define:
0342     - \code Body::Body( Body&, split ); \endcode        Splitting constructor.
0343                                                         Must be able to run concurrently with operator() and method \c join
0344     - \code Body::~Body(); \endcode                     Destructor
0345     - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r
0346                                                         and accumulating the result
0347     - \code void Body::join( Body& b ); \endcode        Join results.
0348                                                         The result in \c b should be merged into the result of \c this
0349 **/
0350 
0351 /** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)
0352     TO BE DOCUMENTED
0353 **/
0354 
0355 /** \name parallel_reduce
0356     See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/
0357 //@{
0358 
//! Parallel iteration with reduction and default partitioner.
/** The Body accumulates in place; the result is read from the body by the caller.
    @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body ) {
    internal::start_reduce<Range,Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );
}

//! Parallel iteration with reduction and simple_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and auto_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and static_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const static_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and affinity_partitioner
/** NOTE: unlike the partitioners above, affinity_partitioner is taken by
    non-const reference — presumably because it records state reused by later
    calls; see partitioner.h to confirm.
    @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner );
}
0393 
#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, default partitioner and user-supplied context.
/** These overloads mirror the five above, adding an explicit task_group_context
    argument that is forwarded to start_reduce::run.
    @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, task_group_context& context ) {
    internal::start_reduce<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER(), context );
}

//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, static_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const static_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner, context );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
0430 
/** parallel_reduce overloads that work with anonymous function objects
    (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/

//! Parallel iteration with reduction and default partitioner.
/** Wraps (identity, real_body, reduction) in a lambda_reduce_body adaptor and
    returns the accumulated result by value.
    @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
                          ::run(range, body, __TBB_DEFAULT_PARTITIONER() );
    return body.result();
}

//! Parallel iteration with reduction and simple_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run(range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and auto_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and static_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const static_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const static_partitioner>
                                        ::run( range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and affinity_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                                        ::run( range, body, partitioner );
    return body.result();
}
0487 
#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, default partitioner and user-supplied context.
/** Lambda-style overloads mirroring the five above, with an explicit
    task_group_context forwarded to start_reduce::run.
    @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
                          ::run( range, body, __TBB_DEFAULT_PARTITIONER(), context );
    return body.result();
}

//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, static_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const static_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const static_partitioner>
                                        ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                                        ::run( range, body, partitioner, context );
    return body.result();
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
0544 
//! Parallel iteration with deterministic reduction and default simple partitioner.
/** Deterministic reduction supports only simple_partitioner and
    static_partitioner (no auto/affinity overloads exist below).
    @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, simple_partitioner());
}

//! Parallel iteration with deterministic reduction and simple partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, partitioner);
}

//! Parallel iteration with deterministic reduction and static partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) {
    internal::start_deterministic_reduce<Range, Body, const static_partitioner>::run(range, body, partitioner);
}
0565 
#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context.
/** Context-taking variants of the three deterministic overloads above.
    @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) {
    internal::start_deterministic_reduce<Range,Body, const simple_partitioner>::run( range, body, simple_partitioner(), context );
}

//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
    internal::start_deterministic_reduce<Range, Body, const simple_partitioner>::run(range, body, partitioner, context);
}

//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) {
    internal::start_deterministic_reduce<Range, Body, const static_partitioner>::run(range, body, partitioner, context);
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
0588 
/** parallel_reduce overloads that work with anonymous function objects
    (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/

//! Parallel iteration with deterministic reduction and default simple partitioner.
// TODO: consider making static_partitioner the default
/** Delegates to the simple_partitioner overload below.
    @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner());
}

//! Parallel iteration with deterministic reduction and simple partitioner.
/** Wraps (identity, real_body, reduction) in a lambda_reduce_body adaptor and
    returns the accumulated result by value.
    @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const simple_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>, const simple_partitioner>
                          ::run(range, body, partitioner);
    return body.result();
}

//! Parallel iteration with deterministic reduction and static partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const static_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const static_partitioner>
        ::run(range, body, partitioner);
    return body.result();
}
0619 #if __TBB_TASK_GROUP_CONTEXT
0620 //! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context.
0621 /** @ingroup algorithms **/
0622 template<typename Range, typename Value, typename RealBody, typename Reduction>
0623 Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
0624     task_group_context& context ) {
0625     return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner(), context);
0626 }
0627 
0628 //! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context.
0629 /** @ingroup algorithms **/
0630 template<typename Range, typename Value, typename RealBody, typename Reduction>
0631 Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
0632     const simple_partitioner& partitioner, task_group_context& context ) {
0633     internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
0634     internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const simple_partitioner>
0635         ::run(range, body, partitioner, context);
0636     return body.result();
0637 }
0638 
0639 //! Parallel iteration with deterministic reduction, static partitioner and user-supplied context.
0640 /** @ingroup algorithms **/
0641 template<typename Range, typename Value, typename RealBody, typename Reduction>
0642 Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
0643     const static_partitioner& partitioner, task_group_context& context ) {
0644     internal::lambda_reduce_body<Range, Value, RealBody, Reduction> body(identity, real_body, reduction);
0645     internal::start_deterministic_reduce<Range, internal::lambda_reduce_body<Range, Value, RealBody, Reduction>, const static_partitioner>
0646         ::run(range, body, partitioner, context);
0647     return body.result();
0648 }
0649 #endif /* __TBB_TASK_GROUP_CONTEXT */
0650 //@}
0651 
0652 } // namespace tbb
0653 
0654 #include "internal/_warning_suppress_disable_notice.h"
0655 #undef __TBB_parallel_reduce_H_include_area
0656 
0657 #endif /* __TBB_parallel_reduce_H */