#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"
#include "detail/_intrusive_list_node.h"
#include "detail/_task_handle.h"

#include "profiling.h"

#include <type_traits>

#if _MSC_VER && !defined(__INTEL_COMPILER)
// Warning C4324: structure was padded due to alignment specifier
#pragma warning(push)
#pragma warning(disable:4324)
#endif
namespace tbb {
namespace detail {

namespace d1 {
class delegate_base;
class task_arena_base;
class task_group_context;
}

namespace r1 {

class tbb_exception_ptr;
class cancellation_disseminator;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

// Entry points into the TBB runtime binary.
TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}
namespace d2 {

// Forward declaration: referenced by the friend declaration in d1::task_group_context below.
class task_group_base;

namespace {
// Defined below; dispatches on whether the callable returns void or a task to run next.
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}

//! Task object that stores and invokes a copy of the user functor.
template<typename F>
class function_task : public task_handle_task {

    const F m_func;

private:
    d1::task* execute(d1::execution_data& ed) override {
        __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
        task* res = task_ptr_or_nullptr(m_func);
        finalize(&ed);
        return res;
    }
    d1::task* cancel(d1::execution_data& ed) override {
        finalize(&ed);
        return nullptr;
    }
public:
    template<typename FF>
    function_task(FF&& f, d1::wait_tree_vertex_interface* vertex, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
        : task_handle_task{vertex, ctx, alloc},
          m_func(std::forward<FF>(f)) {}
};

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
// The callable returns a task_handle: release the underlying task so the
// scheduler can execute it next (scheduler bypass).
template<typename F>
d1::task* task_ptr_or_nullptr_impl(std::false_type /*is_void*/, F&& f){
    task_handle th = std::forward<F>(f)();
    return task_handle_accessor::release(th);
}

// The callable returns void: nothing to bypass to.
template<typename F>
d1::task* task_ptr_or_nullptr_impl(std::true_type /*is_void*/, F&& f){
    std::forward<F>(f)();
    return nullptr;
}

template<typename F>
d1::task* task_ptr_or_nullptr(F&& f){
    using is_void_t = std::is_void<
        decltype(std::forward<F>(f)())
    >;

    return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
}
}
#else
namespace {
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f){
    std::forward<F>(f)();
    return nullptr;
}
}
#endif
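
// Illustrative note (not part of the library code): with
// __TBB_PREVIEW_TASK_GROUP_EXTENSIONS enabled, a functor passed to a
// task_group may return a tbb::task_handle; the returned task is then
// executed next through scheduler bypass, as implemented above. A minimal
// sketch, assuming the preview macro is defined; the lambdas are hypothetical:
//
//   tbb::task_group tg;
//   tg.run([&tg]() -> tbb::task_handle {
//       // ... first stage of the work ...
//       return tg.defer([] { /* second stage, run via bypass */ });
//   });
//   tg.wait();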
} // namespace d2

namespace d1 {

//! Doubly-linked list node used to link task_group_context objects into a context list.
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};

//! Used to form groups of tasks.
/** The context services explicit cancellation requests from user code and
    unhandled exceptions intercepted during task execution. A context may be
    bound to a parent context so that cancellation propagates from parent
    groups to child groups. **/
class task_group_context : no_copy {
public:
    enum traits_type {
        fp_settings = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits = 0
    };
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Space for platform-specific FPU settings.
    std::uint64_t my_cpu_ctl_env;

    //! Specifies whether cancellation was requested for this task group.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Versioning tag kept for binary compatibility.
    enum class task_group_context_version : std::uint8_t {
        unused = 1
    };
    task_group_context_version my_version;

    //! Behavioral traits of the context, fixed at construction time.
    struct context_traits {
        bool fp_settings : 1;
        bool concurrent_wait : 1;
        bool bound : 1;
        bool reserved1 : 1;
        bool reserved2 : 1;
        bool reserved3 : 1;
        bool reserved4 : 1;
        bool reserved5 : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    //! Bit mask value stored in my_may_have_children.
    static constexpr std::uint8_t may_have_children = 1;

    //! Set once tasks (and hence child contexts) may have been attached to this context.
    std::atomic<std::uint8_t> my_may_have_children;

    //! Lifetime state of the context.
    enum class state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead,
        proxy = std::uint8_t(-1)
    };

    std::atomic<state> my_state;

    union {
        //! Pointer to the context of the parent cancellation group.
        task_group_context* my_parent;

        //! Pointer to the underlying context when this object is a proxy (my_state == state::proxy).
        task_group_context* my_actual_context;
    };

    //! Thread-specific list this context is registered in, used for cancellation propagation.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Intrusive list node used to link this context into the context list
    //! without additional memory allocation.
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Pointer to the exception being propagated across this task group, if any.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
                  "backward compatibility check");

    //! Stack-stitching handle used by profiling and tracing tools (ITT).
    void* my_itt_caller;

    //! Name of the context, reported to profiling tools.
    string_resource_index my_name;

    //! Padding that fills the object up to max_nfs_size (one cache line).
    char padding[max_nfs_size
                 - sizeof(std::uint64_t)                        // my_cpu_ctl_env
                 - sizeof(std::atomic<std::uint32_t>)           // my_cancellation_requested
                 - sizeof(std::uint8_t)                         // my_version
                 - sizeof(context_traits)                       // my_traits
                 - sizeof(std::atomic<std::uint8_t>)            // my_may_have_children
                 - sizeof(std::atomic<state>)                   // my_state
                 - sizeof(task_group_context*)                  // my_parent / my_actual_context
                 - sizeof(r1::context_list*)                    // my_context_list
                 - sizeof(intrusive_list_node)                  // my_node
                 - sizeof(std::atomic<r1::tbb_exception_ptr*>)  // my_exception
                 - sizeof(void*)                                // my_itt_caller
                 - sizeof(string_resource_index)                // my_name
    ];

    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::unused}, my_name{name}
    {
        my_traits = t;
        r1::initialize(*this);
    }

    //! Constructs a proxy that forwards all operations to an existing context.
    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::unused}
        , my_state{state::proxy}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing.");
        my_name = actual_context->my_name;

        // The proxy only forwards to *my_actual_context, so r1::initialize is
        // intentionally not called for it.
    }

    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
        return ct;
    }

    bool is_proxy() const {
        return my_state.load(std::memory_order_relaxed) == state::proxy;
    }

    //! Returns the context all operations apply to: either *this or, for a
    //! proxy, the underlying context.
    task_group_context& actual_context() noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

    const task_group_context& actual_context() const noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

public:
    //! Default & binding constructor.
    /** By default the context is created bound: it is attached to the context
        of the innermost running task when the first task associated with this
        context is spawned, so cancellation propagates from the parent group.
        Pass `isolated` to make the context independent of its parent. **/
    task_group_context(kind_type relation_with_parent = bound,
                       std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}

    //! Constructor tagging the context with a name for profiling tools.
    task_group_context(string_resource_index name)
        : task_group_context(make_traits(bound, default_traits), name) {}

    ~task_group_context() {
        // A proxy does not own the underlying context and was never
        // registered with the runtime, so only real contexts are destroyed.
        if (!is_proxy()) {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context after the task tree it was
    //! associated with is completed.
    /** The method assumes that all the tasks that used to be associated with
        this context have already finished; calling it while the context is
        still in use somewhere in the task hierarchy leads to undefined
        behavior. **/
    void reset() {
        r1::reset(actual_context());
    }

    //! Initiates cancellation of all tasks in this cancellation group and its
    //! subordinate groups.
    /** Returns false if cancellation has already been requested for this
        group, true otherwise. **/
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }

    //! Returns true if the group this context is associated with has received
    //! a cancellation request.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** The method assumes that all the tasks that used to be associated with
        this context have already finished; calling it while the context is
        still in use somewhere in the task hierarchy leads to undefined
        behavior. **/
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif

    //! Returns the user-visible traits of this context.
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // The runtime implementation needs direct access to the fields.
    friend class r1::cancellation_disseminator;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class d2::task_group_base;
};

static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");
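
// Illustrative sketch (not part of this header): a task_group_context can be
// shared with a parallel algorithm and used to cancel it early. A minimal
// sketch, assuming <oneapi/tbb/parallel_for.h> is included; `n` and `probe`
// are hypothetical:
//
//   tbb::task_group_context ctx;
//   tbb::parallel_for(0, n, [&](int i) {
//       if (probe(i))
//           ctx.cancel_group_execution(); // cancels the remaining iterations
//   }, ctx);
//   bool stopped_early = ctx.is_group_execution_cancelled();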

//! Returns true if the context of the task currently executed by the calling
//! thread has received a cancellation request.
inline bool is_current_task_group_canceling() {
    task_group_context* ctx = current_context();
    return ctx ? ctx->is_group_execution_cancelled() : false;
}

} // namespace d1

namespace d2 {

//! Status returned by task_group wait operations.
enum task_group_status {
    not_complete,
    complete,
    canceled
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif

//! Task that invokes a functor residing on the caller's stack (used by
//! run_and_wait, where the caller outlives the task).
template <typename F>
class function_stack_task : public d1::task {
    const F& m_func;
    d1::wait_tree_vertex_interface* m_wait_tree_vertex;

    void finalize() {
        m_wait_tree_vertex->release();
    }
    task* execute(d1::execution_data&) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize();
        return res;
    }
    task* cancel(d1::execution_data&) override {
        finalize();
        return nullptr;
    }
public:
    function_stack_task(const F& f, d1::wait_tree_vertex_interface* vertex) : m_func(f), m_wait_tree_vertex(vertex) {
        m_wait_tree_vertex->reserve();
    }
};

class task_group_base : no_copy {
protected:
    d1::wait_context_vertex m_wait_vertex;
    d1::task_group_context m_context;

    template<typename F>
    task_group_status internal_run_and_wait(const F& f) {
        function_stack_task<F> t{ f, r1::get_thread_reference_vertex(&m_wait_vertex) };

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(t, context(), m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    task_group_status internal_run_and_wait(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(*acs::release(h), context(), m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    //! Allocates a function_task holding a copy of the functor.
    template<typename F>
    d1::task* prepare_task(F&& f) {
        d1::small_object_allocator alloc{};
        return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f),
            r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc);
    }

    d1::task_group_context& context() noexcept {
        return m_context.actual_context();
    }

    template<typename F>
    d2::task_handle prepare_task_handle(F&& f) {
        d1::small_object_allocator alloc{};
        using function_task_t = d2::function_task<typename std::decay<F>::type>;
        d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f),
            r1::get_thread_reference_vertex(&m_wait_vertex), context(), alloc);

        return d2::task_handle_accessor::construct(function_task_p);
    }

public:
    task_group_base(uintptr_t traits = 0)
        : m_wait_vertex(0)
        , m_context(d1::task_group_context::bound, d1::task_group_context::default_traits | traits)
    {}

    //! Constructs the group as a proxy over a user-provided context.
    task_group_base(d1::task_group_context& ctx)
        : m_wait_vertex(0)
        , m_context(&ctx)
    {}

    ~task_group_base() noexcept(false) {
        if (m_wait_vertex.continue_execution()) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // The group is being destroyed with tasks still pending: cancel
            // them, wait for completion, and report the missing wait() call
            // unless an exception is already propagating.
            if (!context().is_group_execution_cancelled())
                cancel();
            d1::wait(m_wait_vertex.get_context(), context());
            if (!stack_unwinding_in_progress)
                throw_exception(exception_id::missing_wait);
        }
    }

    task_group_status wait() {
        bool cancellation_status = false;
        try_call([&] {
            d1::wait(m_wait_vertex.get_context(), context());
        }).on_completion([&] {
            cancellation_status = m_context.is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    void cancel() {
        context().cancel_group_execution();
    }
};
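
// Illustrative note: per the destructor above, destroying a group that still
// has outstanding tasks cancels them, waits for completion, and then reports
// the missing wait() call (a minimal sketch):
//
//   {
//       tbb::task_group tg;
//       tg.run([] { /* ... */ });
//   } // ~task_group: cancels, waits, then throws the missing_wait exception
//     // (unless stack unwinding is already in progress)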

//! A collection of tasks that can be spawned and waited on as a group.
class task_group : public task_group_base {
public:
    task_group() : task_group_base(d1::task_group_context::concurrent_wait) {}
    task_group(d1::task_group_context& ctx) : task_group_base(ctx) {}

    //! Spawns a task computing f() and returns immediately.
    template<typename F>
    void run(F&& f) {
        d1::spawn(*prepare_task(std::forward<F>(f)), context());
    }

    //! Spawns a deferred task previously created by defer().
    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        d1::spawn(*acs::release(h), context());
    }

    //! Creates a task computing f() without scheduling it.
    template<typename F>
    d2::task_handle defer(F&& f) {
        return prepare_task_handle(std::forward<F>(f));
    }

    //! Executes f() on the calling thread, then waits for the whole group.
    template<typename F>
    task_group_status run_and_wait(const F& f) {
        return internal_run_and_wait(f);
    }

    task_group_status run_and_wait(d2::task_handle&& h) {
        return internal_run_and_wait(std::move(h));
    }
};
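
// Illustrative usage (a minimal sketch; work_a/work_b are hypothetical):
//
//   tbb::task_group tg;
//   tg.run([] { work_a(); });                         // runs asynchronously
//   tbb::task_handle h = tg.defer([] { work_b(); });  // created, not yet scheduled
//   tg.run(std::move(h));                             // now scheduled
//   if (tg.wait() == tbb::complete) {
//       // both tasks finished and the group was not cancelled
//   }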

#if TBB_PREVIEW_ISOLATED_TASK_GROUP
//! Delegate that spawns a task from inside an isolation region.
class spawn_delegate : public d1::delegate_base {
    d1::task* task_to_spawn;
    d1::task_group_context& context;
    bool operator()() const override {
        spawn(*task_to_spawn, context);
        return true;
    }
public:
    spawn_delegate(d1::task* a_task, d1::task_group_context& ctx)
        : task_to_spawn(a_task), context(ctx)
    {}
};

//! Delegate that waits on a task_group from inside an isolation region.
class wait_delegate : public d1::delegate_base {
    bool operator()() const override {
        status = tg.wait();
        return true;
    }
protected:
    task_group& tg;
    task_group_status& status;
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};

template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    bool operator()() const override {
        status = tg.run_and_wait(func);
        return true;
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};

//! A task_group whose tasks run in an isolation region, so that threads
//! waiting on this group do not pick up unrelated tasks.
class isolated_task_group : public task_group {
    intptr_t this_isolation() {
        // The object's own address serves as a unique isolation tag.
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group() : task_group() {}

    isolated_task_group(d1::task_group_context& ctx) : task_group(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn_delegate sd(acs::release(h), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        task_group_status result = not_complete;
        run_wait_delegate<const F> rwd(*this, f, result);
        r1::isolate_within_arena(rwd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }

    task_group_status wait() {
        task_group_status result = not_complete;
        wait_delegate wd(*this, result);
        r1::isolate_within_arena(wd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }
};
#endif
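
// Illustrative sketch (requires TBB_PREVIEW_ISOLATED_TASK_GROUP; the lambda
// body is hypothetical): a thread waiting on an isolated_task_group only
// executes tasks spawned within this group's isolation region, rather than
// stealing unrelated work while it waits.
//
//   tbb::isolated_task_group itg;
//   itg.run([] { /* work that must not interleave with outer tasks */ });
//   itg.wait();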
} // namespace d2
} // namespace detail

inline namespace v1 {
using detail::d1::task_group_context;
using detail::d2::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d2::isolated_task_group;
#endif

using detail::d2::task_group_status;
using detail::d2::not_complete;
using detail::d2::complete;
using detail::d2::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

using detail::d2::task_handle;
} // namespace v1

} // namespace tbb

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning(pop)
#endif

#endif // __TBB_task_group_H