// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H
#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H

#include <__bit/popcount.h>
#include <__config>
#include <atomic>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20

// This class implements an RAII unique lock without a mutex. It operates on
// std::atomic<State>, where State contains a lock bit and may contain other
// data, and LockedBit is the State value with only the lock bit set,
// e.g. 1 << 2.
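//
// For example (an assumed layout, not something this header mandates):
//
//   using __state_t = uint32_t;              // hypothetical state type
//   constexpr __state_t __locked = 1 << 2;   // the LockedBit template argument
//
// Locking sets bit 2 and unlocking clears it; all other bits of the state
// word are free to carry user data such as flags or a counter.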
template <class _State, _State _LockedBit>
class _LIBCPP_AVAILABILITY_SYNC __atomic_unique_lock {
  static_assert(std::__libcpp_popcount(static_cast<unsigned long long>(_LockedBit)) == 1,
                "LockedBit must be an integer where only one bit is set");

  std::atomic<_State>& __state_;
  bool __is_locked_;

public:
  _LIBCPP_HIDE_FROM_ABI explicit __atomic_unique_lock(std::atomic<_State>& __state) noexcept
      : __state_(__state), __is_locked_(true) {
    __lock();
  }

  template <class _Pred>
  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(std::atomic<_State>& __state, _Pred&& __give_up_locking) noexcept
      : __state_(__state), __is_locked_(false) {
    __is_locked_ = __lock_impl(__give_up_locking, __set_locked_bit, std::memory_order_acquire);
  }

  template <class _Pred, class _UnaryFunction>
  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(
      std::atomic<_State>& __state,
      _Pred&& __give_up_locking,
      _UnaryFunction&& __state_after_lock,
      std::memory_order __locked_ordering) noexcept
      : __state_(__state), __is_locked_(false) {
    __is_locked_ = __lock_impl(__give_up_locking, __state_after_lock, __locked_ordering);
  }

  __atomic_unique_lock(const __atomic_unique_lock&)            = delete;
  __atomic_unique_lock(__atomic_unique_lock&&)                 = delete;
  __atomic_unique_lock& operator=(const __atomic_unique_lock&) = delete;
  __atomic_unique_lock& operator=(__atomic_unique_lock&&)      = delete;

  _LIBCPP_HIDE_FROM_ABI ~__atomic_unique_lock() {
    if (__is_locked_) {
      __unlock();
    }
  }

  _LIBCPP_HIDE_FROM_ABI bool __owns_lock() const noexcept { return __is_locked_; }

  _LIBCPP_HIDE_FROM_ABI void __lock() noexcept {
    const auto __never_give_up_locking = [](_State) { return false; };
    // std::memory_order_acquire, so that all the read operations after taking the lock
    // observe up-to-date values.
    __lock_impl(__never_give_up_locking, __set_locked_bit, std::memory_order_acquire);
    __is_locked_ = true;
  }

  _LIBCPP_HIDE_FROM_ABI void __unlock() noexcept {
    // Clear the _LockedBit. `memory_order_release` because we need to make sure all the
    // write operations before calling `__unlock` are made visible to other threads.
    __state_.fetch_and(static_cast<_State>(~_LockedBit), std::memory_order_release);
    __state_.notify_all();
    __is_locked_ = false;
  }
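
  // A worked example of the ordering guarantee above (a hypothetical caller
  // and a hypothetical shared variable `__data`, not part of this header).
  // Thread A writes shared data under the lock and unlocks; thread B then
  // locks and reads:
  //
  //   // Thread A                     // Thread B
  //   __data = 42;                    __lock();              // acquire
  //   __unlock();  // release         assert(__data == 42);  // guaranteed
  //
  // The release in __unlock synchronizes with the acquire in __lock, so B's
  // read of __data is guaranteed to observe A's write.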
0087 
0088 private:
0089   template <class _Pred, class _UnaryFunction>
0090   _LIBCPP_HIDE_FROM_ABI bool
0091   __lock_impl(_Pred&& __give_up_locking, // while trying to lock the state, if the predicate returns true, give up
0092                                          // locking and return
0093               _UnaryFunction&& __state_after_lock,
0094               std::memory_order __locked_ordering) noexcept {
0095     // At this stage, until we exit the inner while loop, other than the atomic state, we are not reading any order
0096     // dependent values that is written on other threads, or writing anything that needs to be seen on other threads.
0097     // Therefore `memory_order_relaxed` is enough.
0098     _State __current_state = __state_.load(std::memory_order_relaxed);
0099     do {
0100       while (true) {
0101         if (__give_up_locking(__current_state)) {
0102           // user provided early return condition. fail to lock
0103           return false;
0104         } else if ((__current_state & _LockedBit) != 0) {
0105           // another thread has locked the state, we need to wait
0106           __state_.wait(__current_state, std::memory_order_relaxed);
0107           // when it is woken up by notifyAll or spuriously, the __state_
0108           // might have changed. reload the state
0109           // Note that the new state's _LockedBit may or may not equal to 0
0110           __current_state = __state_.load(std::memory_order_relaxed);
0111         } else {
0112           // at least for now, it is not locked. we can try `compare_exchange_weak` to lock it.
0113           // Note that the variable `__current_state`'s lock bit has to be 0 at this point.
0114           break;
0115         }
0116       }
0117     } while (!__state_.compare_exchange_weak(
0118         __current_state, // if __state_ has the same value of __current_state, lock bit must be zero before exchange and
0119                          // we are good to lock/exchange and return. If _state has a different value, because other
0120                          // threads locked it between the `break` statement above and this statement, exchange will fail
0121                          // and go back to the inner while loop above.
0122         __state_after_lock(__current_state), // state after lock. Usually it should be __current_state | _LockedBit.
0123                                              // Some use cases need to set other bits at the same time as an atomic
0124                                              // operation therefore we accept a function
0125         __locked_ordering,        // sucessful exchange order. Usually it should be std::memory_order_acquire.
0126                                   // Some use cases need more strict ordering therefore we accept it as a parameter
0127         std::memory_order_relaxed // fail to exchange order. We don't need any ordering as we are going back to the
0128                                   // inner while loop
0129         ));
0130     return true;
0131   }
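  // A note on the exchange above: `compare_exchange_weak` suffices (rather than
  // `compare_exchange_strong`) because it already runs in a loop; a spurious
  // failure simply re-enters the inner while loop, and the weak form can be
  // cheaper on LL/SC architectures.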

  _LIBCPP_HIDE_FROM_ABI static constexpr auto __set_locked_bit = [](_State __state) { return __state | _LockedBit; };
};
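
// A minimal usage sketch (a hypothetical caller with an assumed bit layout,
// not part of this header):
//
//   std::atomic<uint32_t> __state{0};
//   {
//     __atomic_unique_lock<uint32_t, 1u << 2> __lk(__state); // blocks until the bit is free
//     // ... read/write data protected by the lock bit ...
//   } // the destructor clears the lock bit and calls notify_all()
//
//   // The predicate overload gives up instead of waiting, e.g. when an
//   // assumed "closed" bit (1u << 1 here) is already set:
//   __atomic_unique_lock<uint32_t, 1u << 2> __try_lk(
//       __state, [](uint32_t __s) { return (__s & (1u << 1)) != 0; });
//   if (__try_lk.__owns_lock()) {
//     // locked; released automatically on scope exit
//   }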

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H