Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-30 09:35:34

0001 
0002 //          Copyright Oliver Kowalke 2016.
0003 // Distributed under the Boost Software License, Version 1.0.
0004 //    (See accompanying file LICENSE_1_0.txt or copy at
0005 //          http://www.boost.org/LICENSE_1_0.txt)
0006 
0007 #ifndef BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H
0008 #define BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H
0009 
0010 #include <algorithm>
0011 #include <atomic>
0012 #include <cmath>
0013 #include <random>
0014 #include <thread>
0015 
0016 #include <boost/fiber/detail/config.hpp>
0017 #include <boost/fiber/detail/cpu_relax.hpp>
0018 #include <boost/fiber/detail/futex.hpp>
0019 
// based on information from:
0021 // https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
0022 // https://software.intel.com/en-us/articles/long-duration-spin-wait-loops-on-hyper-threading-technology-enabled-intel-processors
0023 
0024 namespace boost {
0025 namespace fibers {
0026 namespace detail {
0027 
0028 class spinlock_ttas_adaptive_futex {
0029 private:
0030     template< typename FBSplk >
0031     friend class spinlock_rtm;
0032 
0033     std::atomic< std::int32_t >                 value_{ 0 };
0034     std::atomic< std::int32_t >                 retries_{ 0 };
0035 
0036 public:
0037     spinlock_ttas_adaptive_futex() = default;
0038 
0039     spinlock_ttas_adaptive_futex( spinlock_ttas_adaptive_futex const&) = delete;
0040     spinlock_ttas_adaptive_futex & operator=( spinlock_ttas_adaptive_futex const&) = delete;
0041 
0042     void lock() noexcept {
0043         static thread_local std::minstd_rand generator{ std::random_device{}() };
0044         std::int32_t collisions = 0, retries = 0, expected = 0;
0045         const std::int32_t prev_retries = retries_.load( std::memory_order_relaxed);
0046         const std::int32_t max_relax_retries = (std::min)(
0047                 static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_SLEEP0), 2 * prev_retries + 10);
0048         const std::int32_t max_sleep_retries = (std::min)(
0049                 static_cast< std::int32_t >( BOOST_FIBERS_SPIN_BEFORE_YIELD), 2 * prev_retries + 10);
0050         // after max. spins or collisions suspend via futex
0051         while ( retries++ < BOOST_FIBERS_RETRY_THRESHOLD) {
0052             // avoid using multiple pause instructions for a delay of a specific cycle count
0053             // the delay of cpu_relax() (pause on Intel) depends on the processor family
0054             // the cycle count can not guaranteed from one system to the next
0055             // -> check the shared variable 'value_' in between each cpu_relax() to prevent
0056             //    unnecessarily long delays on some systems
0057             // test shared variable 'status_'
0058             // first access to 'value_' -> chache miss
0059             // sucessive acccess to 'value_' -> cache hit
0060             // if 'value_' was released by other fiber
0061             // cached 'value_' is invalidated -> cache miss
0062             if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
0063 #if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
0064                 if ( max_relax_retries > retries) {
0065                     // give CPU a hint that this thread is in a "spin-wait" loop
0066                     // delays the next instruction's execution for a finite period of time (depends on processor family)
0067                     // the CPU is not under demand, parts of the pipeline are no longer being used
0068                     // -> reduces the power consumed by the CPU
0069                     // -> prevent pipeline stalls
0070                     cpu_relax();
0071                 } else if ( max_sleep_retries > retries) {
0072                     // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
0073                     // combined with an expensive ring3 to ring 0 transition costing about 1000 cycles
0074                     // std::this_thread::sleep_for( 0us) lets give up this_thread the remaining part of its time slice
0075                     // if and only if a thread of equal or greater priority is ready to run
0076                     static constexpr std::chrono::microseconds us0{ 0 };
0077                     std::this_thread::sleep_for( us0);
0078                 } else {
0079                     // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
0080                     // but only to another thread on the same processor
0081                     // instead of constant checking, a thread only checks if no other useful work is pending
0082                     std::this_thread::yield();
0083                 }
0084 #else
0085                 // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
0086                 // but only to another thread on the same processor
0087                 // instead of constant checking, a thread only checks if no other useful work is pending
0088                 std::this_thread::yield();
0089 #endif
0090             } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire) ) {
0091                 // spinlock now contended
0092                 // utilize 'Binary Exponential Backoff' algorithm
0093                 // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
0094                 std::uniform_int_distribution< std::int32_t > distribution{
0095                     0, static_cast< std::int32_t >( 1) << (std::min)(collisions, static_cast< std::int32_t >( BOOST_FIBERS_CONTENTION_WINDOW_THRESHOLD)) };
0096                 const std::int32_t z = distribution( generator);
0097                 ++collisions;
0098                 for ( std::int32_t i = 0; i < z; ++i) {
0099                     // -> reduces the power consumed by the CPU
0100                     // -> prevent pipeline stalls
0101                     cpu_relax();
0102                 }
0103             } else {
0104                 // success, lock acquired
0105                 retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
0106                 return;
0107             }
0108         }
0109         // failure, lock not acquired
0110         // pause via futex
0111         if ( 2 != expected) {
0112             expected = value_.exchange( 2, std::memory_order_acquire);
0113         }
0114         while ( 0 != expected) {
0115             futex_wait( & value_, 2);
0116             expected = value_.exchange( 2, std::memory_order_acquire);
0117         }
0118         // success, lock acquired
0119         retries_.store( prev_retries + (retries - prev_retries) / 8, std::memory_order_relaxed);
0120     }
0121 
0122     bool try_lock() noexcept {
0123         std::int32_t expected = 0;
0124         return value_.compare_exchange_strong( expected, 1, std::memory_order_acquire);
0125     }
0126 
0127     void unlock() noexcept {
0128         if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) {
0129             value_.store( 0, std::memory_order_release);
0130             futex_wake( & value_);
0131         }
0132     }
0133 };
0134 
0135 }}}
0136 
0137 #endif // BOOST_FIBERS_SPINLOCK_TTAS_ADAPTIVE_FUTEX_H