Back to home page

EIC code displayed by LXR

 
 

    


Warning: file /include/oneapi/tbb/queuing_rw_mutex.h was not indexed, or was modified since the last indexing; in either case cross-reference links may be missing, inaccurate, or erroneous.

0001 /*
0002     Copyright (c) 2005-2022 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_queuing_rw_mutex_H
0018 #define __TBB_queuing_rw_mutex_H
0019 
0020 #include "detail/_config.h"
0021 #include "detail/_namespace_injection.h"
0022 #include "detail/_assert.h"
0023 #include "detail/_mutex_common.h"
0024 
0025 #include "profiling.h"
0026 
0027 #include <cstring>
0028 #include <atomic>
0029 
0030 namespace tbb {
0031 namespace detail {
0032 namespace r1 {
0033 struct queuing_rw_mutex_impl;
0034 }
0035 namespace d1 {
0036 
//! Queuing reader-writer mutex with local-only spinning.
/** Adapted from Krieger, Stumm, et al. pseudocode at
    https://www.researchgate.net/publication/221083709_A_Fair_Fast_Scalable_Reader-Writer_Lock
    @ingroup synchronization */
class queuing_rw_mutex {
    // The actual lock algorithm lives in the TBB runtime (r1); it manipulates
    // the private fields of both the mutex and its scoped_lock directly.
    friend r1::queuing_rw_mutex_impl;
public:
    //! Construct unacquired mutex.
    queuing_rw_mutex() noexcept  {
        // Register this object with ITT-based profiling tools (no-op when disabled).
        create_itt_sync(this, "tbb::queuing_rw_mutex", "");
    }

    //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-null
    ~queuing_rw_mutex() {
        __TBB_ASSERT(q_tail.load(std::memory_order_relaxed) == nullptr, "destruction of an acquired mutex");
    }

    //! No Copy
    queuing_rw_mutex(const queuing_rw_mutex&) = delete;
    queuing_rw_mutex& operator=(const queuing_rw_mutex&) = delete;

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release lock.
        It also nicely provides the "node" for queuing locks. */
    class scoped_lock {
        friend r1::queuing_rw_mutex_impl;
        //! Initialize fields to mean "no lock held".
        void initialize() {
            my_mutex = nullptr;
            my_internal_lock.store(0, std::memory_order_relaxed);
            my_going.store(0, std::memory_order_relaxed);
#if TBB_USE_ASSERT
            my_state = 0xFF; // Set to invalid state
            // Poison the queue links so that debug builds can detect use of an
            // uninitialized node (all-ones is never a valid node address).
            my_next.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
            my_prev.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
#endif /* TBB_USE_ASSERT */
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() {initialize();}

        //! Acquire lock on given mutex.
        /** Blocks until the lock is obtained; write=true requests writer access. */
        scoped_lock( queuing_rw_mutex& m, bool write=true ) {
            initialize();
            acquire(m,write);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( my_mutex ) release();
        }

        //! No Copy
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        //! Acquire lock on given mutex.
        void acquire( queuing_rw_mutex& m, bool write=true );

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool try_acquire( queuing_rw_mutex& m, bool write=true );

        //! Release lock.
        void release();

        //! Upgrade reader to become a writer.
        /** Returns whether the upgrade happened without releasing and re-acquiring the lock */
        bool upgrade_to_writer();

        //! Downgrade writer to become a reader.
        bool downgrade_to_reader();

        //! Return whether the lock is held (or requested) for writing.
        bool is_writer() const;

    private:
        //! The pointer to the mutex owned, or nullptr if not holding a mutex.
        queuing_rw_mutex* my_mutex;

        //! The 'pointer' to the previous and next competitors for a mutex
        // Stored as uintptr_t: the implementation packs flag bits into the
        // low-order bits of these node addresses (hence not scoped_lock*).
        std::atomic<uintptr_t> my_prev;
        std::atomic<uintptr_t> my_next;

        using state_t = unsigned char ;

        //! State of the request: reader, writer, active reader, other service states
        std::atomic<state_t> my_state;

        //! The local spin-wait variable
        /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */
        std::atomic<unsigned char> my_going;

        //! A tiny internal lock
        std::atomic<unsigned char> my_internal_lock;
    };

    // Mutex traits (compile-time properties queried by generic mutex utilities)
    static constexpr bool is_rw_mutex = true;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    // nullptr means the mutex is unacquired; checked by the destructor above.
    std::atomic<scoped_lock*> q_tail{nullptr};
};
0143 #if TBB_USE_PROFILING_TOOLS
0144 inline void set_name(queuing_rw_mutex& obj, const char* name) {
0145     itt_set_sync_name(&obj, name);
0146 }
0147 #if (_WIN32||_WIN64)
0148 inline void set_name(queuing_rw_mutex& obj, const wchar_t* name) {
0149     itt_set_sync_name(&obj, name);
0150 }
0151 #endif //WIN
0152 #else
0153 inline void set_name(queuing_rw_mutex&, const char*) {}
0154 #if (_WIN32||_WIN64)
0155 inline void set_name(queuing_rw_mutex&, const wchar_t*) {}
0156 #endif //WIN
0157 #endif
0158 } // namespace d1
0159 
namespace r1 {
// Entry points implemented inside the TBB runtime library; the inline
// scoped_lock member functions below forward to these.
TBB_EXPORT void acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
TBB_EXPORT bool try_acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
TBB_EXPORT void release(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool is_writer(const d1::queuing_rw_mutex::scoped_lock&);
} // namespace r1
0168 
0169 namespace d1 {
0170 
0171 
0172 inline void queuing_rw_mutex::scoped_lock::acquire(queuing_rw_mutex& m,bool write) {
0173     r1::acquire(m, *this, write);
0174 }
0175 
0176 inline bool queuing_rw_mutex::scoped_lock::try_acquire(queuing_rw_mutex& m, bool write) {
0177     return r1::try_acquire(m, *this, write);
0178 }
0179 
0180 inline void queuing_rw_mutex::scoped_lock::release() {
0181     r1::release(*this);
0182 }
0183 
0184 inline bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() {
0185     return r1::upgrade_to_writer(*this);
0186 }
0187 
0188 inline bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() {
0189     return r1::downgrade_to_reader(*this);
0190 }
0191 
0192 inline bool queuing_rw_mutex::scoped_lock::is_writer() const {
0193     return r1::is_writer(*this);
0194 }
0195 } // namespace d1
0196 
0197 } // namespace detail
0198 
inline namespace v1 {
// Public name: tbb::queuing_rw_mutex
using detail::d1::queuing_rw_mutex;
} // namespace v1
namespace profiling {
    // Public name: tbb::profiling::set_name
    using detail::d1::set_name;
}
0205 } // namespace tbb
0206 
0207 #endif /* __TBB_queuing_rw_mutex_H */