Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:12:59

0001 /*
0002     Copyright (c) 2005-2020 Intel Corporation
0003 
0004     Licensed under the Apache License, Version 2.0 (the "License");
0005     you may not use this file except in compliance with the License.
0006     You may obtain a copy of the License at
0007 
0008         http://www.apache.org/licenses/LICENSE-2.0
0009 
0010     Unless required by applicable law or agreed to in writing, software
0011     distributed under the License is distributed on an "AS IS" BASIS,
0012     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0013     See the License for the specific language governing permissions and
0014     limitations under the License.
0015 */
0016 
0017 #ifndef __TBB_queuing_mutex_H
0018 #define __TBB_queuing_mutex_H
0019 
0020 #define __TBB_queuing_mutex_H_include_area
0021 #include "internal/_warning_suppress_enable_notice.h"
0022 
0023 #include <cstring>
0024 #include "atomic.h"
0025 #include "tbb_profiling.h"
0026 
0027 namespace tbb {
0028 
0029 //! Queuing mutex with local-only spinning.
0030 /** FIFO-fair and non-recursive (see the trait constants below).  Each waiter
     spins on its own scoped_lock node ("local-only spinning") instead of a
     shared flag.  @ingroup synchronization */
0031 class queuing_mutex : internal::mutex_copy_deprecated_and_disabled {
0032 public:
0033     //! Construct unacquired mutex.
0034     queuing_mutex() {
0035         q_tail = NULL; // no competitors yet: the mutex starts unacquired
0036 #if TBB_USE_THREADING_TOOLS
0037         internal_construct(); // tools-only hook: announce this mutex to threading-analysis tools
0038 #endif
0039     }
0040 
0041     //! The scoped locking pattern
0042     /** It helps to avoid the common problem of forgetting to release lock.
0043         It also nicely provides the "node" for queuing locks. */
0044     class scoped_lock: internal::no_copy {
0045         //! Initialize fields to mean "no lock held".
0046         void initialize() {
0047             mutex = NULL; // not holding any mutex
0048             going = 0; // 0 == blocked / no lock held (inverted flag; see 'going' below)
0049 #if TBB_USE_ASSERT
0050             internal::poison_pointer(next); // debug-only: poison 'next' so use of an unlinked node is detectable
0051 #endif /* TBB_USE_ASSERT */
0052         }
0053 
0054     public:
0055         //! Construct lock that has not acquired a mutex.
0056         /** Equivalent to zero-initialization of *this. */
0057         scoped_lock() {initialize();}
0058 
0059         //! Acquire lock on given mutex.
0060         scoped_lock( queuing_mutex& m ) {
0061             initialize();
0062             acquire(m); // blocks until the lock is obtained
0063         }
0064 
0065         //! Release lock (if lock is held).
0066         ~scoped_lock() {
0067             if( mutex ) release(); // RAII: only release if acquire/try_acquire succeeded
0068         }
0069 
0070         //! Acquire lock on given mutex.
0071         void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m );
0072 
0073         //! Acquire lock on given mutex if free (i.e. non-blocking)
0074         bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m );
0075 
0076         //! Release lock.
0077         void __TBB_EXPORTED_METHOD release();
0078 
0079     private: // NOTE(review): the exported acquire/try_acquire/release are defined in the TBB library and touch these fields directly; presumably layout-sensitive -- confirm before reordering
0080         //! The pointer to the mutex owned, or NULL if not holding a mutex.
0081         queuing_mutex* mutex;
0082 
0083         //! The pointer to the next competitor for a mutex
0084         scoped_lock *next;
0085 
0086         //! The local spin-wait variable
0087         /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
0088             zero-initialization.  Defining it as an entire word instead of
0089             a byte seems to help performance slightly. */
0090         uintptr_t going;
0091     };
0092 
0093     void __TBB_EXPORTED_METHOD internal_construct(); // threading-tools hook invoked from the constructor when TBB_USE_THREADING_TOOLS is set
0094 
0095     // Mutex traits
0096     static const bool is_rw_mutex = false;
0097     static const bool is_recursive_mutex = false;
0098     static const bool is_fair_mutex = true;
0099 
0100 private:
0101     //! The last competitor requesting the lock
0102     atomic<scoped_lock*> q_tail; // NULL when the mutex is free (see constructor)
0103 
0104 };
0105 
0106 __TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) // macro from tbb_profiling.h: presumably registers the type name "queuing_mutex" with profiling/tracing tools -- verify in tbb_profiling.h
0107 
0108 } // namespace tbb
0109 
0110 #include "internal/_warning_suppress_disable_notice.h"
0111 #undef __TBB_queuing_mutex_H_include_area
0112 
0113 #endif /* __TBB_queuing_mutex_H */