// File indexing completed on 2025-01-30 09:35:34

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H
#define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/futex.hpp>

namespace boost {
namespace fibers {
namespace detail {

0028 class spinlock_ttas_adaptive_futex {
0029 private:
0030 template< typename FBSplk >
0031 friend class spinlock_rtm;
0032
0033 std::atomic< std::int32_t > value_{ 0 };
0034 std::atomic< std::int32_t > retries_{ 0 };
0035
0036 public:
0037 spinlock_ttas_adaptive_futex() = default;
0038
0039 spinlock_ttas_adaptive_futex( spinlock_ttas_adaptive_futex const&) = delete;
0040 spinlock_ttas_adaptive_futex & operator=( spinlock_ttas_adaptive_futex const&) = delete;
0041
0042 void lock() noexcept {
0043 static thread_local std::minstd_rand generator{ std::random_device{}() };
0044 std::int32_t collisions = 0, retries = 0, expected = 0;
0045 const std::int32_t prev_retries = retries_.load( std::memory_order_relaxed);
0046 const std::int32_t max_relax_retries = (std::min)(
0047 static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_SLEEP0), 2 * prev_retries + 10);
0048 const std::int32_t max_sleep_retries = (std::min)(
0049 static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_YIELD), 2 * prev_retries + 10);
0050
0051 while ( retries++ < BOOST_FIBERS_RETRY_THRESHOLD) {
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062 if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
0063 #if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
0064 if ( max_relax_retries > retries) {
0065
0066
0067
0068
0069
0070 cpu_relax();
0071 } else if ( max_sleep_retries > retries) {
0072
0073
0074
0075
0076 static constexpr std::chrono::microseconds us0{ 0 };
0077 std::this_thread::sleep_for( us0);
0078 } else {
0079
0080
0081
0082 std::this_thread::yield();
0083 }
0084 #else
0085
0086
0087
0088 std::this_thread::yield();
0089 #endif
0090 } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire) ) {
0091
0092
0093
0094 std::uniform_int_distribution< std::int32_t > distribution{
0095 0, static_cast< std::int32_t >( 1) << (std::min)(collisions, static_cast< std::int32_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
0096 const std::int32_t z = distribution( generator);
0097 ++collisions;
0098 for ( std::int32_t i = 0; i < z; ++i) {
0099
0100
0101 cpu_relax();
0102 }
0103 } else {
0104
0105 retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
0106 return;
0107 }
0108 }
0109
0110
0111 if ( 2 != expected) {
0112 expected = value_.exchange( 2, std::memory_order_acquire);
0113 }
0114 while ( 0 != expected) {
0115 futex_wait( & value_, 2);
0116 expected = value_.exchange( 2, std::memory_order_acquire);
0117 }
0118
0119 retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
0120 }
0121
0122 bool try_lock() noexcept {
0123 std::int32_t expected = 0;
0124 return value_.compare_exchange_strong( expected, 1, std::memory_order_acquire);
0125 }
0126
0127 void unlock() noexcept {
0128 if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) {
0129 value_.store( 0, std::memory_order_release);
0130 futex_wake( & value_);
0131 }
0132 }
0133 };

}}}

#endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H