/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"
#include "detail/_intrusive_list_node.h"
#include "detail/_task_handle.h"

#include "profiling.h"

#include <type_traits>

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress warning: structure was padded due to alignment specifier
    #pragma warning(push)
    #pragma warning(disable:4324)
#endif

namespace tbb {
namespace detail {

namespace d1 {
class delegate_base;
class task_arena_base;
class task_group_context;
class task_group_base;
}

namespace r1 {
// Forward declarations
class tbb_exception_ptr;
class cancellation_disseminator;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}

namespace d2 {

namespace {
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}

template<typename F>
class function_task : public task_handle_task {
    // TODO: apply empty base optimization here
    const F m_func;

private:
    d1::task* execute(d1::execution_data& ed) override {
        __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
        task* res = task_ptr_or_nullptr(m_func);
        finalize(&ed);
        return res;
    }
    d1::task* cancel(d1::execution_data& ed) override {
        finalize(&ed);
        return nullptr;
    }
public:
    template<typename FF>
    function_task(FF&& f, d1::wait_context& wo, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
        : task_handle_task{wo, ctx, alloc},
          m_func(std::forward<FF>(f)) {}
};

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){
        task_handle th = std::forward<F>(f)();
        return task_handle_accessor::release(th);
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){
        std::forward<F>(f)();
        return nullptr;
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        using is_void_t = std::is_void<decltype(std::forward<F>(f)())>;

        return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
    }
}
#else
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        std::forward<F>(f)();
        return nullptr;
    }
}  // namespace
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
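
// The overload set above dispatches on the functor's return type: a functor
// returning void yields no bypass task (nullptr), while, under the preview
// extension, a functor returning a task_handle releases the underlying task
// so the dispatcher executes it next. A minimal sketch of what that enables
// (assuming __TBB_PREVIEW_TASK_GROUP_EXTENSIONS is enabled and second_stage()
// is a hypothetical user function):
//
//     tbb::task_group tg;
//     tg.run([&tg]() -> tbb::task_handle {
//         // ... first stage of the work ...
//         return tg.defer([] { second_stage(); }); // executed as the next task
//     });
//     tg.wait();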
} // namespace d2

namespace d1 {

// This structure is left here for the backward compatibility check
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during task execution. Intercepting an exception results
    in generating an internal cancellation request (which is processed in exactly the
    same way as an external one).

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. The arrows designate
    the direction of cancellation propagation. If a task in a cancellation group is cancelled,
    all the other tasks in this group and in the groups bound to it (as children) get cancelled too.
**/
class task_group_context : no_copy {
public:
    enum traits_type {
        fp_settings     = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits  = 0
    };
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    std::uint64_t my_cpu_ctl_env;

    //! Specifies whether cancellation was requested for this task group.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Versioning for run-time checks and behavioral traits of the context.
    enum class task_group_context_version : std::uint8_t {
        unused = 1       // ensure that new versions, if any, will not clash with previously used ones
    };
    task_group_context_version my_version;

    //! The context traits.
    struct context_traits {
        bool fp_settings        : 1;
        bool concurrent_wait    : 1;
        bool bound              : 1;
        bool reserved1          : 1;
        bool reserved2          : 1;
        bool reserved3          : 1;
        bool reserved4          : 1;
        bool reserved5          : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    static constexpr std::uint8_t may_have_children = 1;
    //! The context internal state (currently only may_have_children).
    std::atomic<std::uint8_t> my_may_have_children;

    enum class state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead,
        proxy = std::uint8_t(-1) // the context is not the real one, but a proxy of another one
    };

    //! The synchronization machine state to manage lifetime.
    std::atomic<state> my_state;

    union {
        //! Pointer to the context of the parent cancellation group. nullptr for isolated contexts.
        task_group_context* my_parent;

        //! Pointer to the actual context that 'this' context is a proxy of.
        task_group_context* my_actual_context;
    };

    //! Thread data instance that registered this context in its list.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Pointer to the container storing the exception being propagated across this task group.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
        "backward compatibility check");

    //! Used to set and maintain the stack stitching point for Intel Performance Tools.
    void* my_itt_caller;

    //! Description of the algorithm for scheduler-based instrumentation.
    string_resource_index my_name;

    char padding[max_nfs_size
        - sizeof(std::uint64_t)                          // my_cpu_ctl_env
        - sizeof(std::atomic<std::uint32_t>)             // my_cancellation_requested
        - sizeof(std::uint8_t)                           // my_version
        - sizeof(context_traits)                         // my_traits
        - sizeof(std::atomic<std::uint8_t>)              // my_may_have_children
        - sizeof(std::atomic<state>)                     // my_state
        - sizeof(task_group_context*)                    // my_parent
        - sizeof(r1::context_list*)                      // my_context_list
        - sizeof(intrusive_list_node)                    // my_node
        - sizeof(std::atomic<r1::tbb_exception_ptr*>)    // my_exception
        - sizeof(void*)                                  // my_itt_caller
        - sizeof(string_resource_index)                  // my_name
    ];

    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::unused}, my_name{name}
    {
        my_traits = t; // GCC 4.8 issues a missing-field-initializers warning for list-initialization of the bit-field struct
        r1::initialize(*this);
    }

    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::unused}
        , my_state{state::proxy}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "The passed pointer must not be null.");
        my_name = actual_context->my_name;

        // No need to initialize 'this' context, as it acts as a proxy for my_actual_context,
        // whose initialization is a user-side responsibility.
    }

    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
        return ct;
    }

    bool is_proxy() const {
        return my_state.load(std::memory_order_relaxed) == state::proxy;
    }

    task_group_context& actual_context() noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

    const task_group_context& actual_context() const noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the currently executing task. Cancellation
        requests passed to the parent context are propagated to all the contexts
        bound to it. Similarly, a priority change is propagated from the parent context
        to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but such contexts have
        limited utility. Normally, when an exception occurs in an algorithm that has nested
        ones running, it is desirable to have all the nested algorithms cancelled
        as well. Such behavior requires nested algorithms to use bound contexts.

        There is one case where using an isolated context is beneficial: an external
        thread. That is, if a particular algorithm is invoked directly from an external
        thread (not from a TBB task), supplying it with an explicitly created isolated
        context will result in a faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of the task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object. **/

    task_group_context(kind_type relation_with_parent = bound,
                       std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}

    // Custom constructor for instrumentation of oneTBB algorithms
    task_group_context(string_resource_index name)
        : task_group_context(make_traits(bound, default_traits), name) {}

    // Do not introduce any logic on the user side since it might break state propagation assumptions
    ~task_group_context() {
        // When 'this' serves as a proxy, the initialization does not happen - nor should the
        // destruction.
        if (!is_proxy())
        {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void reset() {
        r1::reset(actual_context());
    }

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not-yet-cancelled
        context, true will be returned by one and only one invocation. **/
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }

    //! Returns true if the context has received a cancellation request.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif
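
    // A usage sketch for capture_fp_settings() above (a sketch, assuming
    // __TBB_FP_CONTEXT is enabled on the target platform):
    //
    //     tbb::task_group_context ctx;
    //     std::fesetround(FE_UPWARD); // change the current FPU settings (<cfenv>)
    //     ctx.capture_fp_settings();  // tasks run under ctx will use these settings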

    //! Returns the user-visible context traits
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // TODO: clean up friends
    friend class r1::cancellation_disseminator;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class task_group_base;
}; // class task_group_context

static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");
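
// A sketch of the cancellation behavior described above: a task_group
// constructed with an explicit context runs its tasks in that context, so a
// cancellation request on the context cancels the group's tasks as well.
//
//     tbb::task_group_context ctx;
//     tbb::task_group tg(ctx);
//     tg.run([] { /* cooperative work */ });
//     ctx.cancel_group_execution();         // request cancellation
//     tbb::task_group_status s = tg.wait(); // tbb::canceled if the request
//                                           // arrived before completion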

enum task_group_status {
    not_complete,
    complete,
    canceled
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif

template<typename F>
class function_task : public task {
    const F m_func;
    wait_context& m_wait_ctx;
    small_object_allocator m_allocator;

    void finalize(const execution_data& ed) {
        // Take a local reference so that 'this' is not accessed after destruction.
        wait_context& wo = m_wait_ctx;
        // Copy the allocator to the stack
        auto allocator = m_allocator;
        // Destroy the user functor before releasing the wait context.
        this->~function_task();
        wo.release();

        allocator.deallocate(this, ed);
    }
    task* execute(execution_data& ed) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize(ed);
        return res;
    }
    task* cancel(execution_data& ed) override {
        finalize(ed);
        return nullptr;
    }
public:
    function_task(const F& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(f)
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}

    function_task(F&& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(std::move(f))
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}
};

template <typename F>
class function_stack_task : public task {
    const F& m_func;
    wait_context& m_wait_ctx;

    void finalize() {
        m_wait_ctx.release();
    }
    task* execute(execution_data&) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize();
        return res;
    }
    task* cancel(execution_data&) override {
        finalize();
        return nullptr;
    }
public:
    function_stack_task(const F& f, wait_context& wo) : m_func(f), m_wait_ctx(wo) {}
};

class task_group_base : no_copy {
protected:
    wait_context m_wait_ctx;
    task_group_context m_context;

    template<typename F>
    task_group_status internal_run_and_wait(const F& f) {
        function_stack_task<F> t{ f, m_wait_ctx };
        m_wait_ctx.reserve();
        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(t, context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    task_group_status internal_run_and_wait(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(*acs::release(h), context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    template<typename F>
    task* prepare_task(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f), m_wait_ctx, alloc);
    }

    task_group_context& context() noexcept {
        return m_context.actual_context();
    }

    template<typename F>
    d2::task_handle prepare_task_handle(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        using function_task_t = d2::function_task<typename std::decay<F>::type>;
        d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f), m_wait_ctx, context(), alloc);

        return d2::task_handle_accessor::construct(function_task_p);
    }

public:
    task_group_base(uintptr_t traits = 0)
        : m_wait_ctx(0)
        , m_context(task_group_context::bound, task_group_context::default_traits | traits)
    {}

    task_group_base(task_group_context& ctx)
        : m_wait_ctx(0)
        , m_context(&ctx)
    {}

    ~task_group_base() noexcept(false) {
        if (m_wait_ctx.continue_execution()) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt to do proper cleanup to avoid inevitable memory corruption
            // in case of missing wait (for the sake of better testability & debuggability)
            if (!context().is_group_execution_cancelled())
                cancel();
            d1::wait(m_wait_ctx, context());
            if (!stack_unwinding_in_progress)
                throw_exception(exception_id::missing_wait);
        }
    }

    task_group_status wait() {
        bool cancellation_status = false;
        try_call([&] {
            d1::wait(m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = m_context.is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    void cancel() {
        context().cancel_group_execution();
    }
}; // class task_group_base

class task_group : public task_group_base {
public:
    task_group() : task_group_base(task_group_context::concurrent_wait) {}
    task_group(task_group_context& ctx) : task_group_base(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn(*prepare_task(std::forward<F>(f)), context());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn(*acs::release(h), context());
    }

    template<typename F>
    d2::task_handle defer(F&& f) {
        return prepare_task_handle(std::forward<F>(f));
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        return internal_run_and_wait(f);
    }

    task_group_status run_and_wait(d2::task_handle&& h) {
        return internal_run_and_wait(std::move(h));
    }
}; // class task_group
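
// A minimal usage sketch for task_group: the classic parallel Fibonacci,
// spawning two subtasks and waiting for both.
//
//     int fib(int n) {
//         if (n < 2) return n;
//         int x, y;
//         tbb::task_group g;
//         g.run([&] { x = fib(n - 1); }); // spawn a task and return immediately
//         g.run([&] { y = fib(n - 2); }); // spawn another task
//         g.wait();                       // wait for both tasks to complete
//         return x + y;
//     }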

#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class spawn_delegate : public delegate_base {
    task* task_to_spawn;
    task_group_context& context;
    bool operator()() const override {
        spawn(*task_to_spawn, context);
        return true;
    }
public:
    spawn_delegate(task* a_task, task_group_context& ctx)
        : task_to_spawn(a_task), context(ctx)
    {}
};

class wait_delegate : public delegate_base {
    bool operator()() const override {
        status = tg.wait();
        return true;
    }
protected:
    task_group& tg;
    task_group_status& status;
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};

template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    bool operator()() const override {
        status = tg.run_and_wait(func);
        return true;
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};

class isolated_task_group : public task_group {
    intptr_t this_isolation() {
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group() : task_group() {}

    isolated_task_group(task_group_context& ctx) : task_group(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn_delegate sd(acs::release(h), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        task_group_status result = not_complete;
        run_wait_delegate<const F> rwd(*this, f, result);
        r1::isolate_within_arena(rwd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }

    task_group_status wait() {
        task_group_status result = not_complete;
        wait_delegate wd(*this, result);
        r1::isolate_within_arena(wd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }
}; // class isolated_task_group
#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP
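
// isolated_task_group routes every spawn and wait through
// r1::isolate_within_arena, keyed on the group object's address, so a thread
// blocked in wait() does not pick up unrelated tasks from the surrounding
// arena. A sketch, assuming TBB_PREVIEW_ISOLATED_TASK_GROUP is defined before
// this header is included:
//
//     tbb::isolated_task_group itg;
//     itg.run([] { /* work that must not interleave with outer tasks */ });
//     itg.wait();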

inline bool is_current_task_group_canceling() {
    task_group_context* ctx = current_context();
    return ctx ? ctx->is_group_execution_cancelled() : false;
}
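
// A sketch of cooperative cancellation polling inside a long-running task
// (process() is a hypothetical unit of work):
//
//     tbb::task_group tg;
//     tg.run([] {
//         for (int i = 0; i < 1000000; ++i) {
//             if (tbb::is_current_task_group_canceling())
//                 return; // stop early once cancellation is requested
//             process(i);
//         }
//     });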

} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::task_group_context;
using detail::d1::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d1::isolated_task_group;
#endif

using detail::d1::task_group_status;
using detail::d1::not_complete;
using detail::d1::complete;
using detail::d1::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

using detail::d2::task_handle;
}

} // namespace tbb

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning(pop) // 4324 warning
#endif

#endif // __TBB_task_group_H