// File indexing completed on 2025-01-18 09:53:20
#ifndef BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
#define BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED

#include <boost/core/yield_primitives.hpp>
#include <atomic>
#include <cstdint>

namespace boost{
namespace unordered{
namespace detail{
namespace foa{
0017 class rw_spinlock
0018 {
0019 private:
0020
0021
0022
0023
0024
0025 static constexpr std::uint32_t locked_exclusive_mask = 1u << 31;
0026 static constexpr std::uint32_t writer_pending_mask = 1u << 30;
0027 static constexpr std::uint32_t reader_lock_count_mask = writer_pending_mask - 1;
0028
0029 std::atomic<std::uint32_t> state_ = {};
0030
0031 private:
0032
0033
0034
0035
0036 static void yield( unsigned k ) noexcept
0037 {
0038 unsigned const sleep_every = 1024;
0039
0040 k %= sleep_every;
0041
0042 if( k < 5 )
0043 {
0044
0045
0046
0047
0048
0049 unsigned const pause_count = 1u << k;
0050
0051 for( unsigned i = 0; i < pause_count; ++i )
0052 {
0053 boost::core::sp_thread_pause();
0054 }
0055 }
0056 else if( k < sleep_every - 1 )
0057 {
0058
0059
0060
0061 boost::core::sp_thread_yield();
0062 }
0063 else
0064 {
0065
0066
0067
0068 boost::core::sp_thread_sleep();
0069 }
0070 }
0071
0072 public:
0073
0074 bool try_lock_shared() noexcept
0075 {
0076 std::uint32_t st = state_.load( std::memory_order_relaxed );
0077
0078 if( st >= reader_lock_count_mask )
0079 {
0080
0081 return false;
0082 }
0083
0084 std::uint32_t newst = st + 1;
0085 return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
0086 }
0087
0088 void lock_shared() noexcept
0089 {
0090 for( unsigned k = 0; ; ++k )
0091 {
0092 std::uint32_t st = state_.load( std::memory_order_relaxed );
0093
0094 if( st < reader_lock_count_mask )
0095 {
0096 std::uint32_t newst = st + 1;
0097 if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
0098 }
0099
0100 yield( k );
0101 }
0102 }
0103
0104 void unlock_shared() noexcept
0105 {
0106
0107
0108 state_.fetch_sub( 1, std::memory_order_release );
0109
0110
0111
0112 }
0113
0114 bool try_lock() noexcept
0115 {
0116 std::uint32_t st = state_.load( std::memory_order_relaxed );
0117
0118 if( st & locked_exclusive_mask )
0119 {
0120
0121 return false;
0122 }
0123
0124 if( st & reader_lock_count_mask )
0125 {
0126
0127 return false;
0128 }
0129
0130 std::uint32_t newst = locked_exclusive_mask;
0131 return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
0132 }
0133
0134 void lock() noexcept
0135 {
0136 for( unsigned k = 0; ; ++k )
0137 {
0138 std::uint32_t st = state_.load( std::memory_order_relaxed );
0139
0140 if( st & locked_exclusive_mask )
0141 {
0142
0143 }
0144 else if( ( st & reader_lock_count_mask ) == 0 )
0145 {
0146
0147
0148 std::uint32_t newst = locked_exclusive_mask;
0149 if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
0150 }
0151 else if( st & writer_pending_mask )
0152 {
0153
0154 }
0155 else
0156 {
0157
0158
0159 std::uint32_t newst = st | writer_pending_mask;
0160 state_.compare_exchange_weak( st, newst, std::memory_order_relaxed, std::memory_order_relaxed );
0161 }
0162
0163 yield( k );
0164 }
0165 }
0166
0167 void unlock() noexcept
0168 {
0169
0170 state_.store( 0, std::memory_order_release );
0171 }
0172 };

} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */

#endif // BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED