#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_mutex_common.h"

#include "profiling.h"

#include <atomic>
namespace tbb {
namespace detail {
namespace d1 {

//! Queuing mutex with local-only spinning.
/** Waiting threads form a FIFO queue, so acquisition is fair; each waiter
    spins on its own flag rather than on a shared location. */
class queuing_mutex {
public:
    //! Construct an unacquired mutex.
    queuing_mutex() noexcept {
        create_itt_sync(this, "tbb::queuing_mutex", "");
    }

    //! No copying.
    queuing_mutex(const queuing_mutex&) = delete;
    queuing_mutex& operator=(const queuing_mutex&) = delete;

    //! The scoped locking pattern.
    /** It helps to avoid the common problem of forgetting to release the lock,
        and it also serves as the queue node for waiting threads. */
    class scoped_lock {
        //! Reset fields to mean "no lock held".
        void reset() {
            m_mutex = nullptr;
        }

    public:
        //! Construct a lock that has not acquired a mutex.
        scoped_lock() = default;

        //! Acquire a lock on the given mutex.
        scoped_lock(queuing_mutex& m) {
            acquire(m);
        }

        //! Release the lock (if one is held).
        ~scoped_lock() {
            if (m_mutex) release();
        }

        //! No copying.
        scoped_lock(const scoped_lock&) = delete;
        scoped_lock& operator=(const scoped_lock&) = delete;

        //! Acquire a lock on the given mutex.
        void acquire(queuing_mutex& m) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // All fields must be set before the exchange below, because once
            // the exchange executes, *this becomes visible to other threads.
            m_mutex = &m;
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            // Append *this to the waiting queue. If there was a predecessor,
            // link *this behind it and spin on the local m_going flag until
            // the predecessor hands the mutex over in release().
            scoped_lock* pred = m.q_tail.exchange(this);
            if (pred) {
                call_itt_notify(prepare, &m);
                __TBB_ASSERT(pred->m_next.load(std::memory_order_relaxed) == nullptr, "the predecessor has another successor!");

                pred->m_next.store(this, std::memory_order_release);
                spin_wait_while_eq(m_going, 0U);
            }
            call_itt_notify(acquired, &m);
        }

        //! Acquire a lock on the given mutex only if it is free (non-blocking).
        bool try_acquire(queuing_mutex& m) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // All fields must be set before the compare-exchange below, because
            // on success *this becomes visible to other threads.
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            scoped_lock* expected = nullptr;
            // The attempt succeeds only if the queue is empty (q_tail == nullptr);
            // otherwise some thread already owns or awaits the mutex.
            if (!m.q_tail.compare_exchange_strong(expected, this, std::memory_order_acq_rel))
                return false;

            m_mutex = &m;

            call_itt_notify(acquired, &m);
            return true;
        }

        //! Release the lock.
        void release() {
            __TBB_ASSERT(this->m_mutex, "no lock acquired");

            call_itt_notify(releasing, this->m_mutex);

            if (m_next.load(std::memory_order_relaxed) == nullptr) {
                scoped_lock* expected = this;
                // If *this is still the tail, the queue is empty: detach and return.
                if (m_mutex->q_tail.compare_exchange_strong(expected, nullptr)) {
                    reset();
                    return;
                }
                // A successor is in the middle of enqueuing itself; wait until it links in.
                spin_wait_while_eq(m_next, nullptr);
            }
            // Hand the mutex over to the successor by raising its local flag.
            m_next.load(std::memory_order_acquire)->m_going.store(1U, std::memory_order_release);

            reset();
        }
    private:
        //! The mutex owned, or nullptr if no lock is held.
        queuing_mutex* m_mutex{nullptr};

        //! The next competitor for the mutex, once it links itself behind *this.
        std::atomic<scoped_lock*> m_next{nullptr};

        //! The local spin-wait flag: 0 while waiting, 1 once the mutex is handed over.
        /** Zero-initialization therefore leaves the node in the "waiting" state. */
        std::atomic<uintptr_t> m_going{0U};
    };

    // Mutex traits
    static constexpr bool is_rw_mutex = false;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock.
    std::atomic<scoped_lock*> q_tail{nullptr};
};
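
// A minimal usage sketch (illustrative only, not part of this header): the
// mutex is normally locked through the RAII scoped_lock; try_acquire offers a
// non-blocking alternative. The names counter_mutex, shared_counter, and the
// two functions are hypothetical.
//
//     tbb::queuing_mutex counter_mutex;
//     int shared_counter = 0;
//
//     void increment() {
//         // Blocks until the lock is granted; waiters are served in FIFO order.
//         tbb::queuing_mutex::scoped_lock lock(counter_mutex);
//         ++shared_counter;
//     }   // lock released by ~scoped_lock()
//
//     void increment_if_free() {
//         tbb::queuing_mutex::scoped_lock lock;
//         if (lock.try_acquire(counter_mutex)) {  // succeeds only if the queue is empty
//             ++shared_counter;
//         }
//     }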

#if TBB_USE_PROFILING_TOOLS
//! Assign a name to the mutex for display by profiling tools.
inline void set_name(queuing_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif // _WIN32||_WIN64
#else
inline void set_name(queuing_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex&, const wchar_t*) {}
#endif // _WIN32||_WIN64
#endif // TBB_USE_PROFILING_TOOLS
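
// A minimal sketch of naming a mutex for profiling tools (illustrative; the
// variable name m is hypothetical). With TBB_USE_PROFILING_TOOLS enabled the
// name is forwarded to the ITT API; otherwise set_name is a no-op.
//
//     tbb::queuing_mutex m;
//     tbb::profiling::set_name(m, "my_queuing_mutex");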
} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::queuing_mutex;
} // namespace v1
namespace profiling {
using detail::d1::set_name;
} // namespace profiling
} // namespace tbb

#endif // __TBB_queuing_mutex_H