Warning: file /include/oneapi/tbb/queuing_rw_mutex.h was not indexed,
or was modified since the last indexation (in which case cross-reference links may be missing, inaccurate, or erroneous).
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #ifndef __TBB_queuing_rw_mutex_H
0018 #define __TBB_queuing_rw_mutex_H
0019
0020 #include "detail/_config.h"
0021 #include "detail/_namespace_injection.h"
0022 #include "detail/_assert.h"
0023 #include "detail/_mutex_common.h"
0024
0025 #include "profiling.h"
0026
0027 #include <cstring>
0028 #include <atomic>
0029
0030 namespace tbb {
0031 namespace detail {
0032 namespace r1 {
0033 struct queuing_rw_mutex_impl;
0034 }
0035 namespace d1 {
0036
0037
0038
0039
0040
//! Fair, non-recursive reader-writer mutex (see the is_* trait constants below).
//! Waiting threads queue through their scoped_lock objects: q_tail points to the
//! most recently arrived lock in the queue and is nullptr when the mutex is
//! unlocked (see the destructor assertion). The actual locking protocol is
//! implemented out-of-line by r1::queuing_rw_mutex_impl, which is granted
//! friend access to both this class and scoped_lock.
class queuing_rw_mutex {
    friend r1::queuing_rw_mutex_impl;
public:
    //! Construct an unlocked mutex and register it with the ITT sync
    //! machinery under the name "tbb::queuing_rw_mutex".
    queuing_rw_mutex() noexcept {
        create_itt_sync(this, "tbb::queuing_rw_mutex", "");
    }

    //! Destructor asserts the waiter queue is empty, i.e. the mutex is not held.
    ~queuing_rw_mutex() {
        __TBB_ASSERT(q_tail.load(std::memory_order_relaxed) == nullptr, "destruction of an acquired mutex");
    }

    //! No copying.
    queuing_rw_mutex(const queuing_rw_mutex&) = delete;
    queuing_rw_mutex& operator=(const queuing_rw_mutex&) = delete;

    //! The scoped locking pattern. A scoped_lock is simultaneously the RAII
    //! guard and the mutex's queue node (my_prev/my_next links), so the object
    //! must stay alive for as long as it is waiting for or holding the lock.
    class scoped_lock {
        friend r1::queuing_rw_mutex_impl;

        //! Initialize all fields to the "not holding a lock on any mutex" state.
        void initialize() {
            my_mutex = nullptr;
            my_internal_lock.store(0, std::memory_order_relaxed);
            my_going.store(0, std::memory_order_relaxed);
#if TBB_USE_ASSERT
            // Poison the debug-only fields with invalid sentinel values so
            // that use of a stale/uninitialized node is detectable.
            my_state = 0xFF;
            my_next.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
            my_prev.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);
#endif
        }

    public:
        //! Construct a lock that has not acquired any mutex.
        scoped_lock() {initialize();}

        //! Construct and immediately acquire a lock on the given mutex
        //! (a write lock by default).
        scoped_lock( queuing_rw_mutex& m, bool write=true ) {
            initialize();
            acquire(m,write);
        }

        //! Release the lock, if one is currently held.
        ~scoped_lock() {
            if( my_mutex ) release();
        }

        //! No copying.
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        //! Acquire a lock on the given mutex (write lock by default); blocks
        //! until the lock is granted. Forwards to the r1 implementation.
        void acquire( queuing_rw_mutex& m, bool write=true );

        //! Try to acquire the lock without blocking; returns true on success.
        bool try_acquire( queuing_rw_mutex& m, bool write=true );

        //! Release the held lock.
        void release();

        //! Upgrade a reader lock to a writer lock. The bool result is
        //! interpreted by the r1 implementation — presumably it reports
        //! whether the upgrade happened without temporarily releasing the
        //! lock; confirm against r1::upgrade_to_writer.
        bool upgrade_to_writer();

        //! Downgrade a writer lock to a reader lock (result semantics
        //! defined by r1::downgrade_to_reader).
        bool downgrade_to_reader();

        //! Returns whether this lock currently holds write access.
        bool is_writer() const;

    private:
        //! The mutex currently held (or being waited on); nullptr when this
        //! scoped_lock holds no lock — checked by the destructor.
        queuing_rw_mutex* my_mutex;

        //! Queue links to the previous/next scoped_lock, stored as integers.
        //! Managed entirely by the r1 implementation (likely so that tag bits
        //! can be packed into the pointer values — confirm in r1 sources).
        std::atomic<uintptr_t> my_prev;
        std::atomic<uintptr_t> my_next;

        using state_t = unsigned char ;

        //! State of this lock request, interpreted by the r1 implementation.
        //! 0xFF is the debug "invalid" sentinel set by initialize().
        std::atomic<state_t> my_state;

        //! Flag cleared by initialize(); presumably the local spin-wait
        //! variable flipped by the releasing thread — managed by r1.
        std::atomic<unsigned char> my_going;

        //! A tiny per-node lock (0 = unlocked after initialize()); acquired
        //! and released by the r1 implementation.
        std::atomic<unsigned char> my_internal_lock;
    };

    // Mutex traits, inspected by generic lock-handling code.
    static constexpr bool is_rw_mutex = true;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last (most recently arrived) scoped_lock in the waiter queue;
    //! nullptr iff the mutex is unlocked.
    std::atomic<scoped_lock*> q_tail{nullptr};
};
#if TBB_USE_PROFILING_TOOLS
//! Attach a human-readable name to the mutex for profiling/tracing tools.
inline void set_name(queuing_rw_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
//! Wide-character overload (Windows only).
inline void set_name(queuing_rw_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif
#else
//! No-op stubs so callers compile unchanged when profiling-tools support is off.
inline void set_name(queuing_rw_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(queuing_rw_mutex&, const wchar_t*) {}
#endif
#endif
0158 }
0159
namespace r1 {
// Exported entry points of the out-of-line locking protocol, implemented in
// the TBB binary. The inline d1::queuing_rw_mutex::scoped_lock methods are
// thin forwarders to these functions.
TBB_EXPORT void acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
TBB_EXPORT bool try_acquire(d1::queuing_rw_mutex&, d1::queuing_rw_mutex::scoped_lock&, bool);
TBB_EXPORT void release(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock&);
TBB_EXPORT bool is_writer(const d1::queuing_rw_mutex::scoped_lock&);
}
0168
namespace d1 {

// Out-of-class definitions of the scoped_lock interface. Each is a one-line
// inline forwarder into the exported r1 implementation.

//! Acquire m on behalf of this scoped_lock (write lock when write == true).
inline void queuing_rw_mutex::scoped_lock::acquire(queuing_rw_mutex& m,bool write) {
    r1::acquire(m, *this, write);
}

//! Non-blocking acquisition attempt; returns r1's success flag.
inline bool queuing_rw_mutex::scoped_lock::try_acquire(queuing_rw_mutex& m, bool write) {
    return r1::try_acquire(m, *this, write);
}

//! Release the held lock.
inline void queuing_rw_mutex::scoped_lock::release() {
    r1::release(*this);
}

//! Reader-to-writer upgrade; result semantics defined by r1::upgrade_to_writer.
inline bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() {
    return r1::upgrade_to_writer(*this);
}

//! Writer-to-reader downgrade; result semantics defined by r1::downgrade_to_reader.
inline bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() {
    return r1::downgrade_to_reader(*this);
}

//! Whether this lock currently holds write access.
inline bool queuing_rw_mutex::scoped_lock::is_writer() const {
    return r1::is_writer(*this);
}
}
0196
0197 }
0198
inline namespace v1 {
// Public alias: tbb::queuing_rw_mutex.
using detail::d1::queuing_rw_mutex;
}
namespace profiling {
// Public alias: tbb::profiling::set_name for this mutex type.
using detail::d1::set_name;
}
0205 }
0206
0207 #endif