Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-05-03 08:13:42

0001 // -*- C++ -*-
0002 //===----------------------------------------------------------------------===//
0003 //
0004 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0005 // See https://llvm.org/LICENSE.txt for license information.
0006 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0007 //
0008 //===----------------------------------------------------------------------===//
0009 
0010 #ifndef _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H
0011 #define _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H
0012 
0013 #include <__cxx03/__assert>
0014 #include <__cxx03/__config>
0015 #include <__cxx03/__stop_token/atomic_unique_lock.h>
0016 #include <__cxx03/__stop_token/intrusive_list_view.h>
0017 #include <__cxx03/__thread/id.h>
0018 #include <__cxx03/atomic>
0019 #include <__cxx03/cstdint>
0020 
0021 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
0022 #  pragma GCC system_header
0023 #endif
0024 
0025 _LIBCPP_BEGIN_NAMESPACE_STD
0026 
0027 #if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)
0028 
// Type-erased base for stop_callback registrations. The concrete stop_callback
// supplies a plain function pointer (__callback_fn_) that knows how to invoke
// the stored callable; __invoke() forwards `this` to it. Nodes are linked into
// __stop_state's intrusive callback list via __intrusive_node_base.
struct __stop_callback_base : __intrusive_node_base<__stop_callback_base> {
  using __callback_fn_t = void(__stop_callback_base*) noexcept;
  _LIBCPP_HIDE_FROM_ABI explicit __stop_callback_base(__callback_fn_t* __callback_fn) : __callback_fn_(__callback_fn) {}

  // Run the registered callback, passing this node so the callee can downcast
  // to its concrete type.
  _LIBCPP_HIDE_FROM_ABI void __invoke() noexcept { __callback_fn_(this); }

  __callback_fn_t* __callback_fn_;
  // Set to true (release) by the stop-requesting thread once the callback has
  // finished running; stop_callback's destructor waits on it when the callback
  // is concurrently executing on another thread.
  atomic<bool> __completed_ = false;
  // While the callback is being invoked, points to a stack flag owned by the
  // invoking thread. A callback that destroys its own stop_callback sets the
  // flag so the invoker knows not to touch this (now destroyed) node again.
  bool* __destroyed_        = nullptr;
};
0039 
// Shared state behind stop_source / stop_token / stop_callback. Three facts are
// packed into one atomic word (__state_): the "stop requested" flag, a lock bit
// guarding the intrusive callback list, and a counter of associated stop_source
// objects. Object lifetime is managed separately through __ref_count_ and
// __intrusive_shared_ptr.
class __stop_state {
  static constexpr uint32_t __stop_requested_bit        = 1;
  static constexpr uint32_t __callback_list_locked_bit  = 1 << 1;
  static constexpr uint32_t __stop_source_counter_shift = 2;

  // The "stop_source counter" is not used for lifetime reference counting.
  // When the number of stop_sources reaches 0, the remaining stop_tokens'
  // stop_possible will return false. We need this counter to track this.
  //
  // The "callback list locked" bit implements the atomic_unique_lock to
  // guard the operations on the callback list
  //
  //       31 - 2          |  1                   |    0           |
  //  stop_source counter  | callback list locked | stop_requested |
  atomic<uint32_t> __state_ = 0;

  // Reference count for stop_token + stop_callback + stop_source
  // When the counter reaches zero, the state is destroyed
  // It is used by __intrusive_shared_ptr, but it is stored here for better layout
  atomic<uint32_t> __ref_count_ = 0;

  using __state_t            = uint32_t;
  using __callback_list_lock = __atomic_unique_lock<__state_t, __callback_list_locked_bit>;
  using __callback_list      = __intrusive_list_view<__stop_callback_base>;

  // Registered callbacks; only accessed while holding __callback_list_locked_bit.
  __callback_list __callback_list_;
  // Id of the thread whose __request_stop succeeded; read by __remove_callback
  // to detect a callback destroying its own stop_callback from within itself.
  __thread_id __requesting_thread_;

public:
  _LIBCPP_HIDE_FROM_ABI __stop_state() noexcept = default;

  // Called when a stop_source becomes associated with this state.
  _LIBCPP_HIDE_FROM_ABI void __increment_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) <= static_cast<__state_t>(~(1 << __stop_source_counter_shift)),
        "stop_source's counter reaches the maximum. Incrementing the counter will overflow");
    __state_.fetch_add(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  // We are not destroying the object after counter decrements to zero, nor do we have
  // operations depend on the ordering of decrementing the counter. relaxed is enough.
  _LIBCPP_HIDE_FROM_ABI void __decrement_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) >= static_cast<__state_t>(1 << __stop_source_counter_shift),
        "stop_source's counter is 0. Decrementing the counter will underflow");
    __state_.fetch_sub(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  // Returns true if a stop has already been requested on this state.
  _LIBCPP_HIDE_FROM_ABI bool __stop_requested() const noexcept {
    // acquire because [thread.stoptoken.intro] A call to request_stop that returns true
    // synchronizes with a call to stop_requested on an associated stop_token or stop_source
    // object that returns true.
    // request_stop's compare_exchange_weak has release which syncs with this acquire
    return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;
  }

  // Returns true if a stop was requested or at least one stop_source is still
  // associated (i.e. a stop could still be requested in the future).
  _LIBCPP_HIDE_FROM_ABI bool __stop_possible_for_stop_token() const noexcept {
    // [stoptoken.mem] false if "a stop request was not made and there are no associated stop_source objects"
    // Todo: Can this be std::memory_order_relaxed as the standard does not say anything except not to introduce data
    // race?
    // NOTE(review): the local below is misspelled ("__curent_state"); consider renaming to
    // __current_state in a follow-up — kept as-is here since this is a comment-only pass.
    __state_t __curent_state = __state_.load(std::memory_order_acquire);
    return ((__curent_state & __stop_requested_bit) != 0) || ((__curent_state >> __stop_source_counter_shift) != 0);
  }

  // Atomically sets the stop-requested bit and, if this call is the one that
  // made the request (returns true), invokes every registered callback. The
  // list lock is dropped around each invocation so other callbacks can still
  // be removed concurrently while one is running.
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __request_stop() noexcept {
    auto __cb_list_lock = __try_lock_for_request_stop();
    if (!__cb_list_lock.__owns_lock()) {
      // Someone else already requested the stop.
      return false;
    }
    __requesting_thread_ = this_thread::get_id();

    while (!__callback_list_.__empty()) {
      auto __cb = __callback_list_.__pop_front();

      // allow other callbacks to be removed while invoking the current callback
      __cb_list_lock.__unlock();

      bool __destroyed   = false;
      __cb->__destroyed_ = &__destroyed;

      __cb->__invoke();

      // __cb's invoke function could potentially delete itself. We need to check before accessing __cb's member
      if (!__destroyed) {
        // needs to set __destroyed_ pointer to nullptr, otherwise it points to a local variable
        // which is to be destroyed at the end of the loop
        __cb->__destroyed_ = nullptr;

        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        // this release syncs with the acquire in the remove_callback
        __cb->__completed_.store(true, std::memory_order_release);
        __cb->__completed_.notify_all();
      }

      __cb_list_lock.__lock();
    }

    return true;
  }

  // Registers __cb to be run on stop request. Returns false (without storing
  // the node) when the callback was either invoked synchronously (stop already
  // requested) or can never be invoked (no stop_source remains).
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __add_callback(__stop_callback_base* __cb) noexcept {
    // If it is already stop_requested. Do not try to request it again.
    const auto __give_up_trying_to_lock_condition = [__cb](__state_t __state) {
      if ((__state & __stop_requested_bit) != 0) {
        // already stop requested, synchronously run the callback and no need to lock the list again
        __cb->__invoke();
        return true;
      }
      // no stop source. no need to lock the list to add the callback as it can never be invoked
      return (__state >> __stop_source_counter_shift) == 0;
    };

    __callback_list_lock __cb_list_lock(__state_, __give_up_trying_to_lock_condition);

    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }

    __callback_list_.__push_front(__cb);

    return true;
    // unlock here: [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of
    // that callback.
    // Note: this release syncs with the acquire in request_stop's __try_lock_for_request_stop
  }

  // called by the destructor of stop_callback
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __remove_callback(__stop_callback_base* __cb) noexcept {
    __callback_list_lock __cb_list_lock(__state_);

    // under below condition, the request_stop call just popped __cb from the list and could execute it now
    bool __potentially_executing_now = __cb->__prev_ == nullptr && !__callback_list_.__is_head(__cb);

    if (__potentially_executing_now) {
      auto __requested_thread = __requesting_thread_;
      __cb_list_lock.__unlock();

      if (std::this_thread::get_id() != __requested_thread) {
        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        __cb->__completed_.wait(false, std::memory_order_acquire);
      } else {
        // The destructor of stop_callback runs on the same thread as the thread that invokes the callback.
        // The callback is potentially invoking its own destructor. Set the flag to avoid accessing destroyed
        // members on the invoking side
        if (__cb->__destroyed_) {
          *__cb->__destroyed_ = true;
        }
      }
    } else {
      // Not being executed: safe to simply unlink the node under the lock.
      __callback_list_.__remove(__cb);
    }
  }

private:
  // Attempts to acquire the callback-list lock while simultaneously setting
  // the stop-requested bit; the returned lock owns nothing if a stop was
  // already requested.
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI __callback_list_lock __try_lock_for_request_stop() noexcept {
    // If it is already stop_requested, do not try to request stop or lock the list again.
    const auto __lock_fail_condition = [](__state_t __state) { return (__state & __stop_requested_bit) != 0; };

    // set locked and requested bit at the same time
    const auto __after_lock_state = [](__state_t __state) {
      return __state | __callback_list_locked_bit | __stop_requested_bit;
    };

    // acq because [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of that
    //     callback. We are going to invoke the callback after getting the lock, acquire so that we can see the
    //     registration of a callback (and other writes that happen-before the add_callback)
    //     Note: the rel (unlock) in the add_callback syncs with this acq
    // rel because [thread.stoptoken.intro] A call to request_stop that returns true synchronizes with a call
    //     to stop_requested on an associated stop_token or stop_source object that returns true.
    //     We need to make sure that all writes (including user code) before request_stop will be made visible
    //     to the threads that are waiting for `stop_requested == true`
    //     Note: this rel syncs with the acq in `stop_requested`
    const auto __locked_ordering = std::memory_order_acq_rel;

    return __callback_list_lock(__state_, __lock_fail_condition, __after_lock_state, __locked_ordering);
  }

  // __intrusive_shared_ptr_traits needs access to the private __ref_count_.
  template <class _Tp>
  friend struct __intrusive_shared_ptr_traits;
};
0221 
template <class _Tp>
struct __intrusive_shared_ptr_traits;

// Tells __intrusive_shared_ptr where __stop_state keeps its reference count
// (the counter lives inside __stop_state itself for better layout; access is
// granted via the friend declaration in __stop_state).
template <>
struct __intrusive_shared_ptr_traits<__stop_state> {
  _LIBCPP_HIDE_FROM_ABI static atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) {
    return __state.__ref_count_;
  }
};
0231 
0232 #endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)
0233 
0234 _LIBCPP_END_NAMESPACE_STD
0235 
0236 #endif // _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H