// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in situations where Mutex is not appropriate,
// e.g. inside low-level code that Mutex itself depends on.
//
// SpinLock is async signal safe. If used within a signal handler, all lock
// holders must also block the signal even outside the signal handler.
0029 #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
0030 #define ABSL_BASE_INTERNAL_SPINLOCK_H_
0031
0032 #include <atomic>
0033 #include <cstdint>
0034
0035 #include "absl/base/attributes.h"
0036 #include "absl/base/const_init.h"
0037 #include "absl/base/dynamic_annotations.h"
0038 #include "absl/base/internal/low_level_scheduling.h"
0039 #include "absl/base/internal/raw_logging.h"
0040 #include "absl/base/internal/scheduling_mode.h"
0041 #include "absl/base/internal/tsan_mutex_interface.h"
0042 #include "absl/base/thread_annotations.h"
0043
0044 namespace tcmalloc {
0045 namespace tcmalloc_internal {
0046
0047 class AllocationGuardSpinLockHolder;
0048
0049 }
0050 }
0051
0052 namespace absl {
0053 ABSL_NAMESPACE_BEGIN
0054 namespace base_internal {
0055
// A simple spinlock with support for recording contention (wait cycles) in
// the lock word itself.  Intended for low-level internal use; most code
// should prefer absl::Mutex.
class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 public:
  // Default constructor: the lock starts out free and in cooperative
  // scheduling mode (kSpinLockCooperative bit set in the lock word).
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructor allowing the scheduling mode (cooperative vs. kernel-only)
  // to be chosen at runtime.  Defined out of line.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances.  constexpr so that
  // ABSL_CONST_INIT globals get constant (no-dynamic) initialization.
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // The destructor is trivial unless the TSan interface is available, in
  // which case it must notify TSan that the mutex is going away.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquires this SpinLock, blocking (in SlowLock) until it is available.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Attempts to acquire this SpinLock without blocking.  Returns true if the
  // lock was acquired, false otherwise.  The result must not be ignored:
  // dropping it would leak the lock state.
  ABSL_MUST_USE_RESULT inline bool TryLock()
      ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Releases this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    // Clear everything except the cooperative bit; the exchange returns the
    // pre-release word so we can inspect the state we are giving up.
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      // Scheduling was disabled when this lock was acquired; re-enable it.
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contention profile info and speed the wakeup of any waiter.
      // A nonzero wait-cycles field indicates how long a thread spent
      // waiting for the lock, and/or that a waiter may be queued.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determines whether the lock is currently held.  A relaxed load: when
  // held by the calling thread this is always accurate; observations of
  // other threads' holds are inherently racy.
  ABSL_MUST_USE_RESULT inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

  // Crashes with a FATAL log if the lock is not held.  Note this only checks
  // that *some* thread holds the lock, not which one.
  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
    if (!IsHeld()) {
      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
    }
  }

 protected:
  // These should not be exported except for testing.

  // Stores the number of cycles between wait_start_time and wait_end_time
  // in a lock-word-compatible value (the kWaitTimeMask bits).
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extracts the number of wait cycles from a lock value.
  static int64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to the protected methods above; for testing / tcmalloc.
  friend struct SpinLockTest;
  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;

 private:
  // lockword_ layout:
  //
  //  bit[0]    (kSpinLockHeld)               lock is held
  //  bit[1]    (kSpinLockCooperative)        lock uses cooperative scheduling
  //  bit[2]    (kSpinLockDisabledScheduling) holder disabled rescheduling
  //                                          while acquiring the lock
  //  bit[3..]  (kWaitTimeMask)               encoded wait time, set by the
  //            holder to record how long it waited before acquiring the
  //            lock; kSpinLockSleeper is its low bit and marks that a waiter
  //            may be queued even if the holder did not wait.  See
  //            EncodeWaitCycles/DecodeWaitCycles for the encoding.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // All bits not used by the three state flags record wait time.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  // Returns true if this lock was created in cooperative mode.
  bool IsCooperative() const {
    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  // Fast-path acquisition attempt; returns true if the lock was acquired.
  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};
0203
0204
0205
0206
0207
0208
0209 #if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
0210 class [[nodiscard]] SpinLockHolder;
0211 #else
0212 class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
0213 #endif
0214
0215 class ABSL_SCOPED_LOCKABLE SpinLockHolder {
0216 public:
0217 inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
0218 : lock_(l) {
0219 l->Lock();
0220 }
0221 inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
0222
0223 SpinLockHolder(const SpinLockHolder&) = delete;
0224 SpinLockHolder& operator=(const SpinLockHolder&) = delete;
0225
0226 private:
0227 SpinLock* lock_;
0228 };
0229
0230
0231
0232
0233
0234
0235
0236
// Registers a hook for profiling lock contention.  The callback receives an
// opaque handle to the contended lock and the number of cycles spent
// waiting.  NOTE(review): declaration only — registration semantics (single
// vs. multiple registration, thread-safety) are defined in the .cc; confirm
// there before relying on them.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
0239
0240
0241
0242
0243
0244
0245
// Attempts to take the lock via a single compare-and-swap, recording
// `wait_cycles` in the lock word on success.  If (result & kSpinLockHeld)
// == 0, the lock was successfully acquired; otherwise the result is the
// last observed value of lockword_ and the lock was not acquired.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    // Already held by someone; report the observed word unchanged.
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure the thread is not
    // reschedulable before taking the lock; remember whether we disabled
    // rescheduling so Unlock() can restore it.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    // CAS failed: we did not take the lock, so undo any scheduling change.
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  // On CAS success this still holds the pre-acquire value, whose held bit
  // is clear — which is exactly how callers detect success.
  return lock_value;
}
0270
0271 }
0272 ABSL_NAMESPACE_END
0273 }
0274
0275 #endif