/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_queuing_mutex_H
#define __TBB_queuing_mutex_H

#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_mutex_common.h"

#include "profiling.h"

#include <atomic>

namespace tbb {
namespace detail {
namespace d1 {

//! Queuing mutex with local-only spinning.
/** @ingroup synchronization */
class queuing_mutex {
public:
    //! Construct unacquired mutex.
    queuing_mutex() noexcept {
        create_itt_sync(this, "tbb::queuing_mutex", "");
    }

    queuing_mutex(const queuing_mutex&) = delete;
    queuing_mutex& operator=(const queuing_mutex&) = delete;

    //! The scoped locking pattern
    /** It helps to avoid the common problem of forgetting to release the lock.
        It also nicely provides the "node" for queuing locks. A usage sketch
        follows the queuing_mutex class definition. */
    class scoped_lock {
        //! Reset fields to mean "no lock held".
        void reset() {
            m_mutex = nullptr;
        }

    public:
        //! Construct lock that has not acquired a mutex.
        /** Equivalent to zero-initialization of *this. */
        scoped_lock() = default;

        //! Acquire lock on given mutex.
        scoped_lock(queuing_mutex& m) {
            acquire(m);
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if (m_mutex) release();
        }

        //! No Copy
        scoped_lock( const scoped_lock& ) = delete;
        scoped_lock& operator=( const scoped_lock& ) = delete;

        //! Acquire lock on given mutex.
        void acquire( queuing_mutex& m ) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // Must set all fields before the exchange, because once the
            // exchange executes, *this becomes accessible to other threads.
            m_mutex = &m;
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            // The exchange below must publish ("send") the fields initialized above
            // to other processors; on x86 the exchange instruction already implies
            // a full fence.
            scoped_lock* pred = m.q_tail.exchange(this);
            if (pred) {
                call_itt_notify(prepare, &m);
                __TBB_ASSERT(pred->m_next.load(std::memory_order_relaxed) == nullptr, "the predecessor has another successor!");

                pred->m_next.store(this, std::memory_order_release);
                spin_wait_while_eq(m_going, 0U);
            }
            call_itt_notify(acquired, &m);
        }

        //! Acquire lock on given mutex if free (i.e. non-blocking)
        bool try_acquire( queuing_mutex& m ) {
            __TBB_ASSERT(!m_mutex, "scoped_lock is already holding a mutex");

            // Must set all fields before the compare_exchange_strong, because once the
            // compare_exchange_strong executes, *this becomes accessible to other threads.
            m_next.store(nullptr, std::memory_order_relaxed);
            m_going.store(0U, std::memory_order_relaxed);

            scoped_lock* expected = nullptr;
            // The compare_exchange_strong must have release semantics, because we are
            // "sending" the fields initialized above to other processors; on x86 the
            // compare-exchange instruction already implies a full fence.
            if (!m.q_tail.compare_exchange_strong(expected, this, std::memory_order_acq_rel))
                return false;

            m_mutex = &m;

            call_itt_notify(acquired, &m);
            return true;
        }

        //! Release lock.
        void release() {
            __TBB_ASSERT(this->m_mutex, "no lock acquired");

            call_itt_notify(releasing, this->m_mutex);

            if (m_next.load(std::memory_order_relaxed) == nullptr) {
                scoped_lock* expected = this;
                if (m_mutex->q_tail.compare_exchange_strong(expected, nullptr)) {
                    // this was the only item in the queue, and the queue is now empty.
                    reset();
                    return;
                }
                // Someone is already queued behind us; wait until the successor
                // links itself into m_next.
                spin_wait_while_eq(m_next, nullptr);
            }
            m_next.load(std::memory_order_acquire)->m_going.store(1U, std::memory_order_release);

            reset();
        }

    private:
        //! The pointer to the mutex owned, or nullptr if not holding a mutex.
        queuing_mutex* m_mutex{nullptr};

        //! The pointer to the next competitor for a mutex
        std::atomic<scoped_lock*> m_next{nullptr};

        //! The local spin-wait variable
        /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
            zero-initialization.  Defining it as an entire word instead of
            a byte seems to help performance slightly. */
        std::atomic<uintptr_t> m_going{0U};
    };

    // Mutex traits
    static constexpr bool is_rw_mutex = false;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = true;

private:
    //! The last competitor requesting the lock
    std::atomic<scoped_lock*> q_tail{nullptr};

};
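
// Usage sketch (illustrative only; my_mutex, shared_counter, and the functions
// below are hypothetical names, not part of this header). scoped_lock ties
// ownership to a stack object, so the mutex is released when the lock goes out
// of scope, even if an exception is thrown:
//
//     #include "oneapi/tbb/queuing_mutex.h"
//
//     tbb::queuing_mutex my_mutex;
//     int shared_counter = 0;
//
//     void blocking_increment() {
//         tbb::queuing_mutex::scoped_lock lock(my_mutex); // enqueue and spin locally until acquired
//         ++shared_counter;
//     }                                                   // released by ~scoped_lock()
//
//     void opportunistic_increment() {
//         tbb::queuing_mutex::scoped_lock lock;           // not yet holding any mutex
//         if (lock.try_acquire(my_mutex)) {               // succeeds only if the queue is empty
//             ++shared_counter;
//         }                                               // released by the destructor if acquired
//     }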

#if TBB_USE_PROFILING_TOOLS
inline void set_name(queuing_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif //WIN
#else
inline void set_name(queuing_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(queuing_mutex&, const wchar_t*) {}
#endif //WIN
#endif // TBB_USE_PROFILING_TOOLS
} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::queuing_mutex;
} // namespace v1
namespace profiling {
    using detail::d1::set_name;
} // namespace profiling
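
// Usage sketch (my_mutex is a hypothetical name): label a mutex so it shows up
// by name in profiling tools; when TBB_USE_PROFILING_TOOLS is not defined, the
// call resolves to the empty overload above and compiles to nothing.
//
//     tbb::queuing_mutex my_mutex;
//     tbb::profiling::set_name(my_mutex, "MyQueueMutex");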
} // namespace tbb

#endif /* __TBB_queuing_mutex_H */