//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//  Most users requiring mutual exclusion should use Mutex.
//  SpinLock is provided for use in two situations:
//   - for use by Abseil internal code that Mutex itself depends on
//   - for async signal safety (see below)

// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is
// async-signal-safe. If a spinlock is used within a signal handler, all code
// that acquires the lock must ensure that the signal cannot arrive while the
// lock is held. Typically, this is done by blocking the signal.
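// For example (an illustrative sketch, not part of this header; `lock` and
// SIGUSR1 are placeholders), non-handler code could block the signal around
// its critical section with POSIX pthread_sigmask:
//
//   sigset_t set, old;
//   sigemptyset(&set);
//   sigaddset(&set, SIGUSR1);  // the signal whose handler also takes `lock`
//   pthread_sigmask(SIG_BLOCK, &set, &old);
//   lock.Lock();
//   // ... critical section ...
//   lock.Unlock();
//   pthread_sigmask(SIG_SETMASK, &old, nullptr);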
//
// Threads waiting on a SpinLock may be woken in an arbitrary order.

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <atomic>
#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/thread_annotations.h"

namespace tcmalloc {
namespace tcmalloc_internal {

class AllocationGuardSpinLockHolder;

}  // namespace tcmalloc_internal
}  // namespace tcmalloc

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructor that allows non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use this.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
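  //
  // Example (an illustrative sketch; `g_lock` is a placeholder name):
  //
  //   ABSL_CONST_INIT static absl::base_internal::SpinLock g_lock(
  //       absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);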
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // For global SpinLock instances, prefer a trivial destructor when possible:
  // a defaulted but non-trivial destructor causes an extra static initializer
  // in some build configurations.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful.  If the lock was not acquired, false is
  // returned.  If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
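  //
  // Example (illustrative only):
  //
  //   if (lock.TryLock()) {
  //     // ... critical section ...
  //     lock.Unlock();
  //   } else {
  //     // Contended: skip the work or fall back without blocking.
  //   }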
  ABSL_MUST_USE_RESULT inline bool TryLock()
      ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  ABSL_MUST_USE_RESULT inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Return immediately if this thread holds the SpinLock exclusively.
  // Otherwise, report an error by crashing with a diagnostic.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Encode the number of cycles elapsed between wait_start_time and
  // wait_end_time into a lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract the number of wait cycles from a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to the protected methods above.  Use for testing only.
  friend struct SpinLockTest;
  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  //        This is set by the lock holder to indicate how long it waited on
  //        the lock before eventually acquiring it. The number of cycles is
  //        encoded as a 29-bit unsigned int, or in the case that the current
  //        holder did not wait but another waiter is queued, the LSB
  //        (kSpinLockSleeper) is set. The implementation does not explicitly
  //        track the number of queued waiters beyond this. It must always be
  //        assumed that waiters may exist if the current holder was required to
  //        queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
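  //
  // For example (an illustrative decoding, not from the source): a lockword_
  // value of 0xB is kSpinLockHeld | kSpinLockCooperative | kSpinLockSleeper:
  // the lock is held, uses cooperative scheduling, and a waiter queued even
  // though the current holder recorded no wait time of its own.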

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  // Returns true if this lock uses cooperative scheduling.
  bool IsCooperative() const {
    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  }

  // Attempts to acquire the lock, encoding wait_cycles into lockword_ on
  // success.  See the comment on the definition below.
  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  // Out-of-line slow paths for contended Lock() and Unlock().
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  // Spins waiting for the lock to become free; returns the last observed
  // value of lockword_.
  uint32_t SpinLoop();

  // Fast-path acquisition attempt shared by Lock() and TryLock().
  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
//
// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
class [[nodiscard]] SpinLockHolder;
#else
class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
#endif

class ABSL_SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};
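
// Example of scope-based locking (an illustrative sketch; `g_lock` is a
// placeholder for a SpinLock the caller owns):
//
//   void Fn() {
//     absl::base_internal::SpinLockHolder h(&g_lock);
//     // ... critical section protected by g_lock ...
//   }  // g_lock is released when h goes out of scope.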

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
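
// Example registration (illustrative only; `MySpinLockProfiler` is a
// placeholder name):
//
//   void MySpinLockProfiler(const void* lock, int64_t wait_cycles) {
//     // Record the contention event, e.g. bump a counter or histogram.
//   }
//   ...
//   absl::base_internal::RegisterSpinLockProfiler(&MySpinLockProfiler);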

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// Returns the last observed value of lockword_.  If
// (result & kSpinLockHeld) == 0, then *this was successfully locked;
// otherwise the lock was not acquired.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_