/*
    Copyright (c) 2005-2024 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"
#include "detail/_intrusive_list_node.h"
#include "detail/_task_handle.h"

#include "profiling.h"

#include <type_traits>

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress warning: structure was padded due to alignment specifier
    #pragma warning(push)
    #pragma warning(disable:4324)
#endif

namespace tbb {
namespace detail {

namespace d1 {
class delegate_base;
class task_arena_base;
class task_group_context;
}

namespace r1 {
// Forward declarations
class tbb_exception_ptr;
class cancellation_disseminator;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}

namespace d2 {

namespace {
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}

template<typename F>
class function_task : public task_handle_task {
    // TODO: apply empty base optimization here
    const F m_func;

private:
    d1::task* execute(d1::execution_data& ed) override {
        __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
        task* res = task_ptr_or_nullptr(m_func);
        finalize(&ed);
        return res;
    }
    d1::task* cancel(d1::execution_data& ed) override {
        finalize(&ed);
        return nullptr;
    }
public:
    template<typename FF>
    function_task(FF&& f, d1::wait_tree_vertex_interface* vertex, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
        : task_handle_task{vertex, ctx, alloc},
          m_func(std::forward<FF>(f)) {}
};
#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
    // A body returning a task_handle: release the underlying task so the
    // scheduler can execute it next (task bypass).
    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){
        task_handle th = std::forward<F>(f)();
        return task_handle_accessor::release(th);
    }

    // A body returning void: execute it and return no bypass task.
    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){
        std::forward<F>(f)();
        return nullptr;
    }

    // Dispatch on whether the body's invocation result is void.
    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        using is_void_t = std::is_void<
            decltype(std::forward<F>(f)())
            >;

        return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
    }
}
#else
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        std::forward<F>(f)();
        return nullptr;
    }
}  // namespace
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
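
// Usage sketch: under the preview extension, task_ptr_or_nullptr() lets a
// task body return a task_handle whose task is executed next via scheduler
// bypass, while a void body simply yields nullptr. A minimal illustration,
// assuming TBB_PREVIEW_TASK_GROUP_EXTENSIONS is defined before the public
// header is included:
//
//     #define TBB_PREVIEW_TASK_GROUP_EXTENSIONS 1
//     #include <oneapi/tbb/task_group.h>
//
//     void bypass_example(tbb::task_group& tg) {
//         tg.run([&tg]() -> tbb::task_handle {
//             // ... first stage of work ...
//             return tg.defer([] { /* second stage, executed next via bypass */ });
//         });
//         tg.wait();
//     }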
} // namespace d2

namespace d1 {

// This structure is left here for backward compatibility checks
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code as well as
    unhandled exceptions intercepted during task execution. Intercepting an exception
    results in an internal cancellation request, which is processed in exactly the
    same way as an external one.

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    the direction of cancellation propagation. If a task in a cancellation group is
    cancelled, all the other tasks in this group and in the groups bound to it (as
    children) get cancelled too.
**/
class task_group_context : no_copy {
public:
    enum traits_type {
        fp_settings     = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits  = 0
    };
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    std::uint64_t my_cpu_ctl_env;

    //! Specifies whether cancellation was requested for this task group.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Versioning for run-time checks and behavioral traits of the context.
    enum class task_group_context_version : std::uint8_t {
        unused = 1       // ensure that new versions, if any, will not clash with previously used ones
    };
    task_group_context_version my_version;

    //! The context traits.
    struct context_traits {
        bool fp_settings        : 1;
        bool concurrent_wait    : 1;
        bool bound              : 1;
        bool reserved1          : 1;
        bool reserved2          : 1;
        bool reserved3          : 1;
        bool reserved4          : 1;
        bool reserved5          : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    static constexpr std::uint8_t may_have_children = 1;
    //! The context internal state (currently only may_have_children).
    std::atomic<std::uint8_t> my_may_have_children;

    enum class state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead,
        proxy = std::uint8_t(-1) // the context is not the real one, but a proxy for another one
    };

    //! The synchronization machine state to manage lifetime.
    std::atomic<state> my_state;

    union {
        //! Pointer to the context of the parent cancellation group. nullptr for isolated contexts.
        task_group_context* my_parent;

        //! Pointer to the actual context that 'this' context is a proxy of.
        task_group_context* my_actual_context;
    };

    //! Thread data instance that registered this context in its list.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Pointer to the container storing the exception being propagated across this task group.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
        "backward compatibility check");

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    void* my_itt_caller;

    //! Description of algorithm for scheduler based instrumentation.
    string_resource_index my_name;

    char padding[max_nfs_size
        - sizeof(std::uint64_t)                          // my_cpu_ctl_env
        - sizeof(std::atomic<std::uint32_t>)             // my_cancellation_requested
        - sizeof(std::uint8_t)                           // my_version
        - sizeof(context_traits)                         // my_traits
        - sizeof(std::atomic<std::uint8_t>)              // my_may_have_children
        - sizeof(std::atomic<state>)                     // my_state
        - sizeof(task_group_context*)                    // my_parent
        - sizeof(r1::context_list*)                      // my_context_list
        - sizeof(intrusive_list_node)                    // my_node
        - sizeof(std::atomic<r1::tbb_exception_ptr*>)    // my_exception
        - sizeof(void*)                                  // my_itt_caller
        - sizeof(string_resource_index)                  // my_name
    ];

    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::unused}, my_name{name}
    {
        my_traits = t; // GCC 4.8 issues a missing-field-initializers warning for list initialization of the bit-field struct
        r1::initialize(*this);
    }

    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::unused}
        , my_state{state::proxy}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing.");
        my_name = actual_context->my_name;

        // No need to initialize 'this' context: it acts as a proxy for my_actual_context,
        // whose initialization is the user's responsibility.
    }

    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
        return ct;
    }

    bool is_proxy() const {
        return my_state.load(std::memory_order_relaxed) == state::proxy;
    }

    task_group_context& actual_context() noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

    const task_group_context& actual_context() const noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the currently executing task. Cancellation
        requests passed to the parent context are propagated to all the contexts
        bound to it. Similarly, a priority change is propagated from the parent
        context to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but such contexts have
        limited utility. Normally, when an exception occurs in an algorithm that has
        nested ones running, it is desirable to have all the nested algorithms cancelled
        as well. Such behavior requires nested algorithms to use bound contexts.

        One case where isolated contexts are beneficial is an external thread:
        if a particular algorithm is invoked directly from an external thread
        (not from a TBB task), supplying it with an explicitly created isolated
        context results in a faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object. **/

    task_group_context(kind_type relation_with_parent = bound,
                       std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}

    // Custom constructor for instrumentation of oneTBB algorithms
    task_group_context(string_resource_index name)
        : task_group_context(make_traits(bound, default_traits), name) {}

    // Do not introduce any logic on the user side since it might break state propagation assumptions
    ~task_group_context() {
        // When 'this' serves as a proxy, the initialization does not happen - nor should the
        // destruction.
        if (!is_proxy())
        {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void reset() {
        r1::reset(actual_context());
    }

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not-yet-cancelled
        context, true will be returned by one and only one invocation. **/
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }

    //! Returns true if the context received a cancellation request.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif
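
    // Usage sketch: capture_fp_settings() snapshots the calling thread's FPU
    // control state (e.g. the rounding mode) into the context so that tasks
    // run under it use the same settings. A minimal illustration, assuming
    // __TBB_FP_CONTEXT is enabled and <cfenv> is available:
    //
    //     #include <cfenv>
    //     #include <oneapi/tbb/task_group.h>
    //
    //     void fp_example() {
    //         tbb::task_group_context ctx;
    //         std::fesetround(FE_TOWARDZERO); // configure this thread's FPU state
    //         ctx.capture_fp_settings();      // snapshot it into the context
    //         tbb::task_group tg(ctx);        // tasks run with the captured settings
    //         tg.run([] { /* FP-sensitive work */ });
    //         tg.wait();
    //     }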

    //! Returns the user-visible context traits
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // TODO: cleanup friends
    friend class r1::cancellation_disseminator;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class d2::task_group_base;
}; // class task_group_context

static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");
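
// Usage sketch: a bound context (the default) joins the cancellation tree of
// the surrounding task, while an isolated context opts out of cancellation
// propagation. A minimal illustration assuming the public header; long_work()
// is a hypothetical placeholder:
//
//     tbb::task_group_context isolated_ctx(tbb::task_group_context::isolated);
//     tbb::task_group inner(isolated_ctx); // opts out of outer cancellation
//     tbb::task_group outer;               // bound to the surrounding context
//     outer.run([&] { inner.run([] { long_work(); }); });
//     outer.cancel();   // cancels outer's tasks; 'inner' keeps running
//     outer.wait();
//     inner.wait();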

inline bool is_current_task_group_canceling() {
    task_group_context* ctx = current_context();
    return ctx ? ctx->is_group_execution_cancelled() : false;
}
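
// Usage sketch: long-running task bodies can poll
// is_current_task_group_canceling() to stop early once any group in their
// cancellation chain is cancelled. A minimal illustration assuming the
// public <oneapi/tbb/task_group.h> header:
//
//     tbb::task_group tg;
//     tg.run([] {
//         for (int i = 0; i < 1000000; ++i) {
//             if (tbb::is_current_task_group_canceling())
//                 return;              // stop cooperatively
//             // ... one unit of work ...
//         }
//     });
//     tg.cancel();                     // request cancellation
//     tg.wait();                       // returns tbb::canceled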

} // namespace d1

namespace d2 {

enum task_group_status {
    not_complete,
    complete,
    canceled
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif

template <typename F>
class function_stack_task : public d1::task {
    const F& m_func;
    d1::wait_tree_vertex_interface* m_wait_tree_vertex;

    void finalize() {
        m_wait_tree_vertex->release();
    }
    task* execute(d1::execution_data&) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize();
        return res;
    }
    task* cancel(d1::execution_data&) override {
        finalize();
        return nullptr;
    }
public:
    function_stack_task(const F& f, d1::wait_tree_vertex_interface* vertex) : m_func(f), m_wait_tree_vertex(vertex) {
        m_wait_tree_vertex->reserve();
    }
};

class task_group_base : no_copy {
protected:
    d1::wait_context_vertex m_wait_vertex;
    d1::task_group_context m_context;

    template<typename F>
    task_group_status internal_run_and_wait(const F& f) {
        function_stack_task<F> t{ f, r1::get_thread_reference_vertex(&m_wait_vertex) };

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(t, context(), m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    task_group_status internal_run_and_wait(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(*acs::release(h), context(), m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    template<typename F>
    d1::task* prepare_task(F&& f) {
        d1::small_object_allocator alloc{};
        return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f),
            r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc);
    }

    d1::task_group_context& context() noexcept {
        return m_context.actual_context();
    }

    template<typename F>
    d2::task_handle prepare_task_handle(F&& f) {
        d1::small_object_allocator alloc{};
        using function_task_t = d2::function_task<typename std::decay<F>::type>;
        d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f),
            r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc);

        return d2::task_handle_accessor::construct(function_task_p);
    }

public:
    task_group_base(uintptr_t traits = 0)
        : m_wait_vertex(0)
        , m_context(d1::task_group_context::bound, d1::task_group_context::default_traits | traits)
    {}

    task_group_base(d1::task_group_context& ctx)
        : m_wait_vertex(0)
        , m_context(&ctx)
    {}

    ~task_group_base() noexcept(false) {
        if (m_wait_vertex.continue_execution()) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt to do proper cleanup to avoid inevitable memory corruption
            // in case of missing wait (for the sake of better testability & debuggability)
            if (!context().is_group_execution_cancelled())
                cancel();
            d1::wait(m_wait_vertex.get_context(), context());
            if (!stack_unwinding_in_progress)
                throw_exception(exception_id::missing_wait);
        }
    }

    task_group_status wait() {
        bool cancellation_status = false;
        try_call([&] {
            d1::wait(m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = m_context.is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    void cancel() {
        context().cancel_group_execution();
    }
}; // class task_group_base

class task_group : public task_group_base {
public:
    task_group() : task_group_base(d1::task_group_context::concurrent_wait) {}
    task_group(d1::task_group_context& ctx) : task_group_base(ctx) {}

    template<typename F>
    void run(F&& f) {
        d1::spawn(*prepare_task(std::forward<F>(f)), context());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        d1::spawn(*acs::release(h), context());
    }

    template<typename F>
    d2::task_handle defer(F&& f) {
        return prepare_task_handle(std::forward<F>(f));
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        return internal_run_and_wait(f);
    }

    task_group_status run_and_wait(d2::task_handle&& h) {
        return internal_run_and_wait(std::move(h));
    }
}; // class task_group
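
// Usage sketch: the basic task_group workflow. run() spawns work immediately,
// defer() creates a task_handle that is scheduled later via run(), and wait()
// (or run_and_wait()) must be called before the group is destroyed. A minimal
// illustration assuming the public header:
//
//     #include <oneapi/tbb/task_group.h>
//     #include <utility>
//
//     int x = 0, y = 0;
//     tbb::task_group tg;
//     tg.run([&] { x = 42; });                 // spawn immediately
//     tbb::task_handle h = tg.defer([&] { y = 7; });
//     tg.run(std::move(h));                    // schedule the deferred task
//     tbb::task_group_status s = tg.wait();    // tbb::complete or tbb::canceled
//     // here s == tbb::complete, x == 42, y == 7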

#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class spawn_delegate : public d1::delegate_base {
    d1::task* task_to_spawn;
    d1::task_group_context& context;
    bool operator()() const override {
        spawn(*task_to_spawn, context);
        return true;
    }
public:
    spawn_delegate(d1::task* a_task, d1::task_group_context& ctx)
        : task_to_spawn(a_task), context(ctx)
    {}
};

class wait_delegate : public d1::delegate_base {
    bool operator()() const override {
        status = tg.wait();
        return true;
    }
protected:
    task_group& tg;
    task_group_status& status;
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};

template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    bool operator()() const override {
        status = tg.run_and_wait(func);
        return true;
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};

class isolated_task_group : public task_group {
    intptr_t this_isolation() {
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group() : task_group() {}

    isolated_task_group(d1::task_group_context& ctx) : task_group(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn_delegate sd(acs::release(h), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        task_group_status result = not_complete;
        run_wait_delegate<const F> rwd(*this, f, result);
        r1::isolate_within_arena(rwd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }

    task_group_status wait() {
        task_group_status result = not_complete;
        wait_delegate wd(*this, result);
        r1::isolate_within_arena(wd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }
}; // class isolated_task_group
#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP
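
// Usage sketch: isolated_task_group spawns and waits inside an isolation
// region keyed off the group's address, so a thread blocked in wait() does
// not pick up unrelated arena tasks while waiting. A minimal illustration of
// this preview feature, assuming TBB_PREVIEW_ISOLATED_TASK_GROUP is defined
// before the public header is included:
//
//     #define TBB_PREVIEW_ISOLATED_TASK_GROUP 1
//     #include <oneapi/tbb/task_group.h>
//
//     tbb::isolated_task_group itg;
//     itg.run([] { /* isolated work */ });
//     itg.wait();   // only tasks from this group are executed here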
} // namespace d2
} // namespace detail

inline namespace v1 {
using detail::d1::task_group_context;
using detail::d2::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d2::isolated_task_group;
#endif

using detail::d2::task_group_status;
using detail::d2::not_complete;
using detail::d2::complete;
using detail::d2::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

using detail::d2::task_handle;
}

} // namespace tbb

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning(pop) // 4324 warning
#endif

#endif // __TBB_task_group_H