/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2011 Helge Bahmann
 * Copyright (c) 2013-2014, 2020 Andrey Semashev
 */
/*!
 * \file   atomic/detail/lock_pool.hpp
 *
 * This header contains the declaration of the lock pool used to emulate atomic ops.
 */

#ifndef BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_

#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/link.hpp>
#include <boost/atomic/detail/intptr.hpp>
#if defined(BOOST_WINDOWS)
#include <boost/winapi/thread.hpp>
#elif defined(BOOST_HAS_NANOSLEEP)
#include <time.h>
#else
#include <unistd.h>
#endif
#include <boost/atomic/detail/header.hpp>

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

namespace boost {
namespace atomics {
namespace detail {

BOOST_FORCEINLINE void wait_some() BOOST_NOEXCEPT
{
#if defined(BOOST_WINDOWS)
    boost::winapi::SwitchToThread();
#elif defined(BOOST_HAS_NANOSLEEP)
    // Do not use sched_yield or pthread_yield here: at least on Linux, they do not block the thread if there are no
    // other runnable threads on the current CPU. A proper sleep is guaranteed to block the thread, which allows other
    // threads to migrate to this CPU and complete the work we're waiting for.
    struct ::timespec ts = {};
    ts.tv_sec = 0;
    ts.tv_nsec = 1000;
    ::nanosleep(&ts, NULL);
#else
    ::usleep(1);
#endif
}

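// Illustrative sketch, not part of the original header: wait_some() is intended as a
// backoff step inside a spin loop, e.g. while retrying to acquire one of the pool locks.
// The try_acquire() predicate below is hypothetical and only shows the usage pattern:
//
//     while (!try_acquire())
//         atomics::detail::wait_some(); // yield the CPU for ~1us, then retry
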
namespace lock_pool {

BOOST_ATOMIC_DECL void* short_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void* long_lock(atomics::detail::uintptr_t h) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void unlock(void* ls) BOOST_NOEXCEPT;

BOOST_ATOMIC_DECL void* allocate_wait_state(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void free_wait_state(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void wait(void* ls, void* ws) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_one(void* ls, const volatile void* addr) BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void notify_all(void* ls, const volatile void* addr) BOOST_NOEXCEPT;

BOOST_ATOMIC_DECL void thread_fence() BOOST_NOEXCEPT;
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;

template< std::size_t Alignment >
BOOST_FORCEINLINE atomics::detail::uintptr_t hash_ptr(const volatile void* addr) BOOST_NOEXCEPT
{
    atomics::detail::uintptr_t ptr = (atomics::detail::uintptr_t)addr;
    atomics::detail::uintptr_t h = ptr / Alignment;

    // Since many malloc/new implementations return pointers with a higher alignment
    // than indicated by Alignment, it makes sense to mix the higher bits
    // into the lower ones. On 64-bit platforms malloc typically aligns to 16 bytes,
    // on 32-bit platforms to 8 bytes.
    BOOST_CONSTEXPR_OR_CONST std::size_t malloc_alignment = sizeof(void*) >= 8u ? 16u : 8u;
    BOOST_IF_CONSTEXPR (Alignment != malloc_alignment)
        h ^= ptr / malloc_alignment;

    return h;
}

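// Illustrative sketch, not part of the original header: the hash determines which lock
// in the pool guards a given address, so all emulated operations on the same object map
// to the same lock; the reduction of the hash to a concrete pool slot happens inside
// short_lock()/long_lock() in the library implementation. Assuming a 4-byte-aligned int:
//
//     int x = 0;
//     atomics::detail::uintptr_t h = lock_pool::hash_ptr< 4u >(&x);
//     void* ls = lock_pool::short_lock(h); // blocks until the pool lock is acquired
//     // ... perform the non-atomic read-modify-write on x here ...
//     lock_pool::unlock(ls);
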
template< std::size_t Alignment, bool LongLock = false >
class scoped_lock
{
private:
    void* m_lock;

public:
    explicit scoped_lock(const volatile void* addr) BOOST_NOEXCEPT
    {
        atomics::detail::uintptr_t h = lock_pool::hash_ptr< Alignment >(addr);
        BOOST_IF_CONSTEXPR (!LongLock)
            m_lock = lock_pool::short_lock(h);
        else
            m_lock = lock_pool::long_lock(h);
    }
    ~scoped_lock() BOOST_NOEXCEPT
    {
        lock_pool::unlock(m_lock);
    }

    void* get_lock_state() const BOOST_NOEXCEPT
    {
        return m_lock;
    }

    BOOST_DELETED_FUNCTION(scoped_lock(scoped_lock const&))
    BOOST_DELETED_FUNCTION(scoped_lock& operator=(scoped_lock const&))
};

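// Illustrative sketch, not part of the original header: roughly how a lock-based
// (non-lock-free) backend could use scoped_lock to emulate a compare-exchange. The
// "storage" argument and the surrounding function are hypothetical:
//
//     bool emulated_compare_exchange(int& storage, int& expected, int desired) BOOST_NOEXCEPT
//     {
//         lock_pool::scoped_lock< sizeof(int) > guard(&storage); // unlocks on scope exit
//         if (storage == expected)
//         {
//             storage = desired;
//             return true;
//         }
//         expected = storage;
//         return false;
//     }
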
template< std::size_t Alignment >
class scoped_wait_state :
    public scoped_lock< Alignment, true >
{
private:
    void* m_wait_state;

public:
    explicit scoped_wait_state(const volatile void* addr) BOOST_NOEXCEPT :
        scoped_lock< Alignment, true >(addr)
    {
        m_wait_state = lock_pool::allocate_wait_state(this->get_lock_state(), addr);
    }
    ~scoped_wait_state() BOOST_NOEXCEPT
    {
        lock_pool::free_wait_state(this->get_lock_state(), m_wait_state);
    }

    void wait() BOOST_NOEXCEPT
    {
        lock_pool::wait(this->get_lock_state(), m_wait_state);
    }

    BOOST_DELETED_FUNCTION(scoped_wait_state(scoped_wait_state const&))
    BOOST_DELETED_FUNCTION(scoped_wait_state& operator=(scoped_wait_state const&))
};

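// Illustrative sketch, not part of the original header: scoped_wait_state is the building
// block for emulating waiting operations (wait/notify) on non-lock-free atomics. The waiter
// re-checks the value under the long lock and blocks on the per-address wait state; a
// notifier takes a lock for the same address and wakes it. The "storage" variable and the
// functions below are hypothetical, and the choice of short vs. long lock in the notifier
// is only an assumption of this sketch:
//
//     void emulated_wait(int const& storage, int old_val) BOOST_NOEXCEPT
//     {
//         lock_pool::scoped_wait_state< sizeof(int) > ws(&storage);
//         while (storage == old_val)
//             ws.wait(); // presumably releases the lock while blocked, condition-variable style
//     }
//
//     void emulated_notify_one(int const& storage) BOOST_NOEXCEPT
//     {
//         lock_pool::scoped_lock< sizeof(int) > guard(&storage);
//         lock_pool::notify_one(guard.get_lock_state(), &storage);
//     }
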
} // namespace lock_pool
} // namespace detail
} // namespace atomics
} // namespace boost

#include <boost/atomic/detail/footer.hpp>

#endif // BOOST_ATOMIC_DETAIL_LOCK_POOL_HPP_INCLUDED_