//          Copyright Oliver Kowalke 2016.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//          http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H
#define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <random>
#include <thread>

#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/detail/cpu_relax.hpp>
#include <boost/fiber/detail/spinlock_status.hpp>

// based on information from:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
// https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors

namespace boost {
namespace fibers {
namespace detail {

class spinlock_ttas_adaptive {
private:
    template< typename FBSplk >
    friend class spinlock_rtm;

    std::atomic< spinlock_status >              state_{ spinlock_status::unlocked };
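    // adaptive hint: moving average of the spin count that previous lock()
    // calls needed before acquiring the lock (updated at the end of lock())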
    std::atomic< std::size_t >                  retries_{ 0 };

public:
    spinlock_ttas_adaptive() = default;

    spinlock_ttas_adaptive( spinlock_ttas_adaptive const&) = delete;
    spinlock_ttas_adaptive & operator=( spinlock_ttas_adaptive const&) = delete;

    void lock() noexcept {
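        // per-thread PRNG: seeded once per thread, so drawing the random
        // backoff below needs no synchronization between threads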
        static thread_local std::minstd_rand generator{ std::random_device{}() };
        std::size_t collisions = 0;
        for (;;) {
            std::size_t retries = 0;
            const std::size_t prev_retries = retries_.load( std::memory_order_relaxed);
            const std::size_t max_relax_retries = (std::min)(
                    static_cast< std::size_t >( BOOST_FIBERS_SPIN_BEFORE_SLEEP0), 2 * prev_retries + 10);
            const std::size_t max_sleep_retries = (std::min)(
                    static_cast< std::size_t >( BOOST_FIBERS_SPIN_BEFORE_YIELD), 2 * prev_retries + 10);
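            // e.g. if the previous average was 5 spins, both caps evaluate to
            // min(configured limit, 2 * 5 + 10) == min(configured limit, 20)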
            // avoid using multiple pause instructions for a delay of a specific cycle count:
            // the delay of cpu_relax() (PAUSE on Intel) depends on the processor family,
            // so the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test shared variable 'state_':
            // first access to 'state_' -> cache miss
            // successive accesses to 'state_' -> cache hit
            // if 'state_' was released by another fiber,
            // the cached 'state_' is invalidated -> cache miss
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( max_relax_retries > retries) {
                    ++retries;
                    // give the CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                } else if ( max_sleep_retries > retries) {
                    ++retries;
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring-3 to ring-0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) lets this_thread give up the remaining part of its
                    // time slice, if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                } else {
                    // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                    // but only to another thread on the same processor;
                    // instead of busy-waiting constantly, the thread only checks whether other useful work is pending
                    std::this_thread::yield();
                }
#else
                std::this_thread::yield();
#endif
            }
            // test-and-set shared variable 'state_'
            // every time 'state_' is signaled over the bus, even if the test fails
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock is now contended
                // -> utilize the 'Binary Exponential Backoff' algorithm
                // std::minstd_rand (a linear_congruential_engine) is a random number engine
                // based on a linear congruential generator (LCG)
                std::uniform_int_distribution< std::size_t > distribution{
                    0, static_cast< std::size_t >( 1) << (std::min)(collisions, static_cast< std::size_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
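                // e.g. once 3 collisions have been recorded, z is drawn uniformly
                // from [0, 1 << 3] == [0, 8] (bounds inclusive)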
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevents pipeline stalls
                    cpu_relax();
                }
            } else {
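                // update the adaptive hint as an exponential moving average:
                // new = old + (observed - old) / 8, i.e. 1/8 weight on the new sample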
                retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
                // success, thread has acquired the lock
                break;
            }
        }
    }

    bool try_lock() noexcept {
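        // single test-and-set: succeeds only if the previous value was 'unlocked';
        // acquire ordering makes the previous holder's writes visible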
        return spinlock_status::unlocked == state_.exchange( spinlock_status::locked, std::memory_order_acquire);
    }

    void unlock() noexcept {
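        // release ordering publishes this critical section's writes to the
        // next thread that acquires the lock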
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }
};
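
// Usage sketch (illustrative, not part of the original header): the class
// provides lock()/try_lock()/unlock() and therefore meets the Lockable
// requirements, so it composes with the standard lock wrappers; the local
// variable names below are hypothetical.
//
//   boost::fibers::detail::spinlock_ttas_adaptive splk;
//   {
//       std::unique_lock< boost::fibers::detail::spinlock_ttas_adaptive > lk{ splk };
//       // ... critical section ...
//   }   // unlock() runs on scope exit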

}}}

#endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_H