Back to home page

EIC code displayed by LXR (the Linux Cross Referencer source-code browser)

 
 

    


File indexing completed on 2025-01-18 09:53:20

0001 #ifndef BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
0002 #define BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
0003 
0004 // Copyright 2023 Peter Dimov
0005 // Distributed under the Boost Software License, Version 1.0.
0006 // https://www.boost.org/LICENSE_1_0.txt
0007 
0008 #include <boost/core/yield_primitives.hpp>
0009 #include <atomic>
0010 #include <cstdint>
0011 
0012 namespace boost{
0013 namespace unordered{
0014 namespace detail{
0015 namespace foa{
0016 
0017 class rw_spinlock
0018 {
0019 private:
0020 
0021     // bit 31: locked exclusive
0022     // bit 30: writer pending
0023     // bit 29..0: reader lock count
0024 
0025     static constexpr std::uint32_t locked_exclusive_mask = 1u << 31; // 0x8000'0000
0026     static constexpr std::uint32_t writer_pending_mask = 1u << 30; // 0x4000'0000
0027     static constexpr std::uint32_t reader_lock_count_mask = writer_pending_mask - 1; // 0x3FFF'FFFF
0028 
0029     std::atomic<std::uint32_t> state_ = {};
0030 
0031 private:
0032 
0033     // Effects: Provides a hint to the implementation that the current thread
0034     //          has been unable to make progress for k+1 iterations.
0035 
0036     static void yield( unsigned k ) noexcept
0037     {
0038         unsigned const sleep_every = 1024; // see below
0039 
0040         k %= sleep_every;
0041 
0042         if( k < 5 )
0043         {
0044             // Intel recommendation from the Optimization Reference Manual
0045             // Exponentially increase number of PAUSE instructions each
0046             // iteration until reaching a maximum which is approximately
0047             // one timeslice long (2^4 == 16 in our case)
0048 
0049             unsigned const pause_count = 1u << k;
0050 
0051             for( unsigned i = 0; i < pause_count; ++i )
0052             {
0053                 boost::core::sp_thread_pause();
0054             }
0055         }
0056         else if( k < sleep_every - 1 )
0057         {
0058             // Once the maximum number of PAUSE instructions is reached,
0059             // we switch to yielding the timeslice immediately
0060 
0061             boost::core::sp_thread_yield();
0062         }
0063         else
0064         {
0065             // After `sleep_every` iterations of no progress, we sleep,
0066             // to avoid a deadlock if a lower priority thread has the lock
0067 
0068             boost::core::sp_thread_sleep();
0069         }
0070     }
0071 
0072 public:
0073 
0074     bool try_lock_shared() noexcept
0075     {
0076         std::uint32_t st = state_.load( std::memory_order_relaxed );
0077 
0078         if( st >= reader_lock_count_mask )
0079         {
0080             // either bit 31 set, bit 30 set, or reader count is max
0081             return false;
0082         }
0083 
0084         std::uint32_t newst = st + 1;
0085         return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
0086     }
0087 
0088     void lock_shared() noexcept
0089     {
0090         for( unsigned k = 0; ; ++k )
0091         {
0092             std::uint32_t st = state_.load( std::memory_order_relaxed );
0093 
0094             if( st < reader_lock_count_mask )
0095             {
0096                 std::uint32_t newst = st + 1;
0097                 if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
0098             }
0099 
0100             yield( k );
0101         }
0102     }
0103 
0104     void unlock_shared() noexcept
0105     {
0106         // pre: locked shared, not locked exclusive
0107 
0108         state_.fetch_sub( 1, std::memory_order_release );
0109 
0110         // if the writer pending bit is set, there's a writer waiting
0111         // let it acquire the lock; it will clear the bit on unlock
0112     }
0113 
0114     bool try_lock() noexcept
0115     {
0116         std::uint32_t st = state_.load( std::memory_order_relaxed );
0117 
0118         if( st & locked_exclusive_mask )
0119         {
0120             // locked exclusive
0121             return false;
0122         }
0123 
0124         if( st & reader_lock_count_mask )
0125         {
0126             // locked shared
0127             return false;
0128         }
0129 
0130         std::uint32_t newst = locked_exclusive_mask;
0131         return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
0132     }
0133 
0134     void lock() noexcept
0135     {
0136         for( unsigned k = 0; ; ++k )
0137         {
0138             std::uint32_t st = state_.load( std::memory_order_relaxed );
0139 
0140             if( st & locked_exclusive_mask )
0141             {
0142                 // locked exclusive, spin
0143             }
0144             else if( ( st & reader_lock_count_mask ) == 0 )
0145             {
0146                 // not locked exclusive, not locked shared, try to lock
0147 
0148                 std::uint32_t newst = locked_exclusive_mask;
0149                 if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
0150             }
0151             else if( st & writer_pending_mask )
0152             {
0153                 // writer pending bit already set, nothing to do
0154             }
0155             else
0156             {
0157                 // locked shared, set writer pending bit
0158 
0159                 std::uint32_t newst = st | writer_pending_mask;
0160                 state_.compare_exchange_weak( st, newst, std::memory_order_relaxed, std::memory_order_relaxed );
0161             }
0162 
0163             yield( k );
0164         }
0165     }
0166 
0167     void unlock() noexcept
0168     {
0169         // pre: locked exclusive, not locked shared
0170         state_.store( 0, std::memory_order_release );
0171     }
0172 };
0173 
0174 } /* namespace foa */
0175 } /* namespace detail */
0176 } /* namespace unordered */
0177 } /* namespace boost */
0178 
0179 #endif // BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED