Warning: file /include/oneapi/tbb/task_group.h was not indexed,
or was modified since the last indexing (in which case cross-reference links may be missing, inaccurate, or erroneous).
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #ifndef __TBB_task_group_H
0018 #define __TBB_task_group_H
0019
0020 #include "detail/_config.h"
0021 #include "detail/_namespace_injection.h"
0022 #include "detail/_assert.h"
0023 #include "detail/_utils.h"
0024 #include "detail/_template_helpers.h"
0025 #include "detail/_exception.h"
0026 #include "detail/_task.h"
0027 #include "detail/_small_object_pool.h"
0028 #include "detail/_intrusive_list_node.h"
0029 #include "detail/_task_handle.h"
0030
0031 #include "profiling.h"
0032
0033 #include <type_traits>
0034
0035 #if _MSC_VER && !defined(__INTEL_COMPILER)
0036
0037 #pragma warning(push)
0038 #pragma warning(disable:4324)
0039 #endif
0040
0041 namespace tbb {
0042 namespace detail {
0043
namespace d1 {
// Forward declarations of the d1 (header-implementation) layer types used below.
class delegate_base;
class task_arena_base;
class task_group_context;
class task_group_base;
}

namespace r1 {

// Opaque runtime-side (r1) types; their definitions live inside the TBB binary.
class tbb_exception_ptr;
class cancellation_disseminator;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

// Entry points exported from the TBB runtime library.
TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

// Lifecycle and cancellation operations on task_group_context, implemented in the runtime.
TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}
0074
0075 namespace d2 {
0076
namespace {
// Forward declaration so function_task::execute can call it; the definition
// (which depends on __TBB_PREVIEW_TASK_GROUP_EXTENSIONS) appears below.
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}
0081
0082 template<typename F>
0083 class function_task : public task_handle_task {
0084
0085 const F m_func;
0086
0087 private:
0088 d1::task* execute(d1::execution_data& ed) override {
0089 __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
0090 task* res = task_ptr_or_nullptr(m_func);
0091 finalize(&ed);
0092 return res;
0093 }
0094 d1::task* cancel(d1::execution_data& ed) override {
0095 finalize(&ed);
0096 return nullptr;
0097 }
0098 public:
0099 template<typename FF>
0100 function_task(FF&& f, d1::wait_context& wo, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
0101 : task_handle_task{wo, ctx, alloc},
0102 m_func(std::forward<FF>(f)) {}
0103 };
0104
#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
// Overload chosen when f() returns a value: the result is taken as a
// task_handle and unwrapped into the raw task pointer to run next.
template<typename F>
d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){
    task_handle th = std::forward<F>(f)();
    return task_handle_accessor::release(th);
}

// Overload chosen when f() returns void: no continuation task is produced.
template<typename F>
d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){
    std::forward<F>(f)();
    return nullptr;
}

// Invokes the task body and returns the next task to execute (preview
// extensions allow a body to return a task_handle), or nullptr.
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f){
    using is_void_t = std::is_void<
        decltype(std::forward<F>(f)())
    >;

    return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
}
}
#else
namespace {
// Extensions disabled: invoke the body, ignore any result, no continuation.
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f){
    std::forward<F>(f)();
    return nullptr;
}
}
#endif
0137 }
0138
0139 namespace d1 {
0140
0141
// Doubly-linked list node with atomic links. Its layout must match
// intrusive_list_node (enforced by a static_assert inside task_group_context).
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
//! Used to form groups of tasks for cancellation and exception handling.
/** NOTE(review): the member layout is ABI-sensitive -- the trailing padding
    arithmetic and the static_assert on the total size (below the class) lock
    it in. Do not reorder, add, or resize members. */
class task_group_context : no_copy {
public:
    //! User-visible trait bits accepted by the constructor and reported by traits().
    enum traits_type {
        fp_settings = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits = 0
    };
    //! Relation of this context to its parent cancellation group.
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Storage for platform FP settings captured via capture_fp_settings().
    std::uint64_t my_cpu_ctl_env;

    //! Nonzero once cancellation has been requested for this context.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Version tag; kept only for binary-layout compatibility.
    enum class task_group_context_version : std::uint8_t {
        unused = 1
    };
    task_group_context_version my_version;

    //! Trait flags packed into a single byte (size checked right below).
    struct context_traits {
        bool fp_settings : 1;
        bool concurrent_wait : 1;
        bool bound : 1;
        bool reserved1 : 1;
        bool reserved2 : 1;
        bool reserved3 : 1;
        bool reserved4 : 1;
        bool reserved5 : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    static constexpr std::uint8_t may_have_children = 1;

    // NOTE(review): maintained by the runtime (r1) side; never written in this header.
    std::atomic<std::uint8_t> my_may_have_children;

    //! Lifetime states; state::proxy marks a thin wrapper around another context.
    enum class state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead,
        proxy = std::uint8_t(-1)
    };

    //! Current state; when it equals state::proxy, my_actual_context is the active union member.
    std::atomic<state> my_state;

    union {
        // Parent cancellation-group context for a real context.
        // NOTE(review): managed by the runtime -- meaning inferred from kind_type; confirm in r1.
        task_group_context* my_parent;

        // Underlying context when this object is a proxy (my_state == state::proxy).
        task_group_context* my_actual_context;
    };

    //! Runtime-owned link to the thread's context list.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Intrusive-list hook used by the runtime to chain contexts.
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Exception captured for this group, if any; set by the runtime.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
        "backward compatibility check");

    //! ITT instrumentation cookie.
    void* my_itt_caller;

    //! Description of this context, used for profiling/tracing.
    string_resource_index my_name;

    //! Pads the object so that sizeof(task_group_context) stays fixed (see the
    //! static_assert after the class) regardless of member sizes.
    char padding[max_nfs_size
        - sizeof(std::uint64_t)
        - sizeof(std::atomic<std::uint32_t>)
        - sizeof(std::uint8_t)
        - sizeof(context_traits)
        - sizeof(std::atomic<std::uint8_t>)
        - sizeof(std::atomic<state>)
        - sizeof(task_group_context*)
        - sizeof(r1::context_list*)
        - sizeof(intrusive_list_node)
        - sizeof(std::atomic<r1::tbb_exception_ptr*>)
        - sizeof(void*)
        - sizeof(string_resource_index)
    ];

    //! Constructs a real (non-proxy) context and registers it with the runtime.
    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::unused}, my_name{name}
    {
        my_traits = t;
        r1::initialize(*this);
    }

    //! Constructs a proxy that forwards every operation to *actual_context.
    //! A proxy is never registered with the runtime (see the destructor).
    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::unused}
        , my_state{state::proxy}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing.");
        // Mirror the underlying context's name; other members stay untouched
        // because a proxy delegates everything to the actual context.
        my_name = actual_context->my_name;
    }

    //! Translates the public (kind, trait bits) pair into the packed bitfield form.
    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
        return ct;
    }

    //! True if this object merely forwards to another context.
    bool is_proxy() const {
        return my_state.load(std::memory_order_relaxed) == state::proxy;
    }

    //! Returns the context operations should actually be applied to.
    task_group_context& actual_context() noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

    //! Const overload of actual_context().
    const task_group_context& actual_context() const noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

public:
    //! Creates a context, bound to its parent group by default.
    task_group_context(kind_type relation_with_parent = bound,
        std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}

    //! Creates a bound context carrying a profiling name.
    task_group_context(string_resource_index name )
        : task_group_context(make_traits(bound, default_traits), name) {}

    ~task_group_context() {
        // Proxies were never registered with the runtime, so only real
        // contexts need to be torn down there.
        if (!is_proxy())
        {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context so it can be reused after the
    //! associated task tree completed or was cancelled.
    // NOTE(review): r1::reset is not shown here -- thread-safety of reuse must
    // be guaranteed by the caller; see the wait paths in task_group_base.
    void reset() {
        r1::reset(actual_context());
    }

    //! Requests cancellation of all tasks in this group (and, per the bound
    //! relation, its subordinate groups -- handled by the runtime).
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }

    //! Returns whether cancellation was requested for this context.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the caller's FP control settings into the context so the
    //! runtime can apply them to tasks executed within it.
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif

    //! Reports the user-visible trait bits (fp_settings / concurrent_wait).
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // Runtime components that manipulate the private state directly.
    friend class r1::cancellation_disseminator;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class task_group_base;
};
0419
// Locks in the ABI: the padding arithmetic inside the class must make the
// context exactly this historical size.
static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");

//! Result of a task_group wait / run_and_wait operation.
enum task_group_status {
    not_complete,  // used as a sentinel while a wait is still in flight
    complete,      // all tasks finished, no cancellation observed
    canceled       // the group's cancellation was requested
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif
0433
0434 template<typename F>
0435 class function_task : public task {
0436 const F m_func;
0437 wait_context& m_wait_ctx;
0438 small_object_allocator m_allocator;
0439
0440 void finalize(const execution_data& ed) {
0441
0442 wait_context& wo = m_wait_ctx;
0443
0444 auto allocator = m_allocator;
0445
0446 this->~function_task();
0447 wo.release();
0448
0449 allocator.deallocate(this, ed);
0450 }
0451 task* execute(execution_data& ed) override {
0452 task* res = d2::task_ptr_or_nullptr(m_func);
0453 finalize(ed);
0454 return res;
0455 }
0456 task* cancel(execution_data& ed) override {
0457 finalize(ed);
0458 return nullptr;
0459 }
0460 public:
0461 function_task(const F& f, wait_context& wo, small_object_allocator& alloc)
0462 : m_func(f)
0463 , m_wait_ctx(wo)
0464 , m_allocator(alloc) {}
0465
0466 function_task(F&& f, wait_context& wo, small_object_allocator& alloc)
0467 : m_func(std::move(f))
0468 , m_wait_ctx(wo)
0469 , m_allocator(alloc) {}
0470 };
0471
0472 template <typename F>
0473 class function_stack_task : public task {
0474 const F& m_func;
0475 wait_context& m_wait_ctx;
0476
0477 void finalize() {
0478 m_wait_ctx.release();
0479 }
0480 task* execute(execution_data&) override {
0481 task* res = d2::task_ptr_or_nullptr(m_func);
0482 finalize();
0483 return res;
0484 }
0485 task* cancel(execution_data&) override {
0486 finalize();
0487 return nullptr;
0488 }
0489 public:
0490 function_stack_task(const F& f, wait_context& wo) : m_func(f), m_wait_ctx(wo) {}
0491 };
0492
0493 class task_group_base : no_copy {
0494 protected:
0495 wait_context m_wait_ctx;
0496 task_group_context m_context;
0497
0498 template<typename F>
0499 task_group_status internal_run_and_wait(const F& f) {
0500 function_stack_task<F> t{ f, m_wait_ctx };
0501 m_wait_ctx.reserve();
0502 bool cancellation_status = false;
0503 try_call([&] {
0504 execute_and_wait(t, context(), m_wait_ctx, context());
0505 }).on_completion([&] {
0506
0507 cancellation_status = context().is_group_execution_cancelled();
0508 context().reset();
0509 });
0510 return cancellation_status ? canceled : complete;
0511 }
0512
0513 task_group_status internal_run_and_wait(d2::task_handle&& h) {
0514 __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");
0515
0516 using acs = d2::task_handle_accessor;
0517 __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");
0518
0519 bool cancellation_status = false;
0520 try_call([&] {
0521 execute_and_wait(*acs::release(h), context(), m_wait_ctx, context());
0522 }).on_completion([&] {
0523
0524 cancellation_status = context().is_group_execution_cancelled();
0525 context().reset();
0526 });
0527 return cancellation_status ? canceled : complete;
0528 }
0529
0530 template<typename F>
0531 task* prepare_task(F&& f) {
0532 m_wait_ctx.reserve();
0533 small_object_allocator alloc{};
0534 return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f), m_wait_ctx, alloc);
0535 }
0536
0537 task_group_context& context() noexcept {
0538 return m_context.actual_context();
0539 }
0540
0541 template<typename F>
0542 d2::task_handle prepare_task_handle(F&& f) {
0543 m_wait_ctx.reserve();
0544 small_object_allocator alloc{};
0545 using function_task_t = d2::function_task<typename std::decay<F>::type>;
0546 d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f), m_wait_ctx, context(), alloc);
0547
0548 return d2::task_handle_accessor::construct(function_task_p);
0549 }
0550
0551 public:
0552 task_group_base(uintptr_t traits = 0)
0553 : m_wait_ctx(0)
0554 , m_context(task_group_context::bound, task_group_context::default_traits | traits)
0555 {}
0556
0557 task_group_base(task_group_context& ctx)
0558 : m_wait_ctx(0)
0559 , m_context(&ctx)
0560 {}
0561
0562 ~task_group_base() noexcept(false) {
0563 if (m_wait_ctx.continue_execution()) {
0564 #if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
0565 bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
0566 #else
0567 bool stack_unwinding_in_progress = std::uncaught_exception();
0568 #endif
0569
0570
0571 if (!context().is_group_execution_cancelled())
0572 cancel();
0573 d1::wait(m_wait_ctx, context());
0574 if (!stack_unwinding_in_progress)
0575 throw_exception(exception_id::missing_wait);
0576 }
0577 }
0578
0579 task_group_status wait() {
0580 bool cancellation_status = false;
0581 try_call([&] {
0582 d1::wait(m_wait_ctx, context());
0583 }).on_completion([&] {
0584
0585 cancellation_status = m_context.is_group_execution_cancelled();
0586 context().reset();
0587 });
0588 return cancellation_status ? canceled : complete;
0589 }
0590
0591 void cancel() {
0592 context().cancel_group_execution();
0593 }
0594 };
0595
0596 class task_group : public task_group_base {
0597 public:
0598 task_group() : task_group_base(task_group_context::concurrent_wait) {}
0599 task_group(task_group_context& ctx) : task_group_base(ctx) {}
0600
0601 template<typename F>
0602 void run(F&& f) {
0603 spawn(*prepare_task(std::forward<F>(f)), context());
0604 }
0605
0606 void run(d2::task_handle&& h) {
0607 __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");
0608
0609 using acs = d2::task_handle_accessor;
0610 __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");
0611
0612 spawn(*acs::release(h), context());
0613 }
0614
0615 template<typename F>
0616 d2::task_handle defer(F&& f) {
0617 return prepare_task_handle(std::forward<F>(f));
0618
0619 }
0620
0621 template<typename F>
0622 task_group_status run_and_wait(const F& f) {
0623 return internal_run_and_wait(f);
0624 }
0625
0626 task_group_status run_and_wait(d2::task_handle&& h) {
0627 return internal_run_and_wait(std::move(h));
0628 }
0629 };
0630
0631 #if TBB_PREVIEW_ISOLATED_TASK_GROUP
0632 class spawn_delegate : public delegate_base {
0633 task* task_to_spawn;
0634 task_group_context& context;
0635 bool operator()() const override {
0636 spawn(*task_to_spawn, context);
0637 return true;
0638 }
0639 public:
0640 spawn_delegate(task* a_task, task_group_context& ctx)
0641 : task_to_spawn(a_task), context(ctx)
0642 {}
0643 };
0644
0645 class wait_delegate : public delegate_base {
0646 bool operator()() const override {
0647 status = tg.wait();
0648 return true;
0649 }
0650 protected:
0651 task_group& tg;
0652 task_group_status& status;
0653 public:
0654 wait_delegate(task_group& a_group, task_group_status& tgs)
0655 : tg(a_group), status(tgs) {}
0656 };
0657
0658 template<typename F>
0659 class run_wait_delegate : public wait_delegate {
0660 F& func;
0661 bool operator()() const override {
0662 status = tg.run_and_wait(func);
0663 return true;
0664 }
0665 public:
0666 run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
0667 : wait_delegate(a_group, tgs), func(a_func) {}
0668 };
0669
0670 class isolated_task_group : public task_group {
0671 intptr_t this_isolation() {
0672 return reinterpret_cast<intptr_t>(this);
0673 }
0674 public:
0675 isolated_task_group() : task_group() {}
0676
0677 isolated_task_group(task_group_context& ctx) : task_group(ctx) {}
0678
0679 template<typename F>
0680 void run(F&& f) {
0681 spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
0682 r1::isolate_within_arena(sd, this_isolation());
0683 }
0684
0685 void run(d2::task_handle&& h) {
0686 __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");
0687
0688 using acs = d2::task_handle_accessor;
0689 __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");
0690
0691 spawn_delegate sd(acs::release(h), context());
0692 r1::isolate_within_arena(sd, this_isolation());
0693 }
0694
0695 template<typename F>
0696 task_group_status run_and_wait( const F& f ) {
0697 task_group_status result = not_complete;
0698 run_wait_delegate<const F> rwd(*this, f, result);
0699 r1::isolate_within_arena(rwd, this_isolation());
0700 __TBB_ASSERT(result != not_complete, "premature exit from wait?");
0701 return result;
0702 }
0703
0704 task_group_status wait() {
0705 task_group_status result = not_complete;
0706 wait_delegate wd(*this, result);
0707 r1::isolate_within_arena(wd, this_isolation());
0708 __TBB_ASSERT(result != not_complete, "premature exit from wait?");
0709 return result;
0710 }
0711 };
0712 #endif
0713
0714 inline bool is_current_task_group_canceling() {
0715 task_group_context* ctx = current_context();
0716 return ctx ? ctx->is_group_execution_cancelled() : false;
0717 }
0718
0719 }
0720 }
0721
// Public API surface: lift the implementation-namespace names into tbb::v1
// (which is inlined into namespace tbb).
inline namespace v1 {
using detail::d1::task_group_context;
using detail::d1::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d1::isolated_task_group;
#endif

using detail::d1::task_group_status;
using detail::d1::not_complete;
using detail::d1::complete;
using detail::d1::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

using detail::d2::task_handle;
}
0739
0740 }
0741
0742 #if _MSC_VER && !defined(__INTEL_COMPILER)
0743 #pragma warning(pop)
0744 #endif
0745
0746 #endif