/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_spin_mutex_H
#define __TBB_spin_mutex_H

#define __TBB_spin_mutex_H_include_area
#include "internal/_warning_suppress_enable_notice.h"

#include <cstddef>
#include <new>
#include "aligned_space.h"
#include "tbb_stddef.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include "internal/_mutex_padding.h"

namespace tbb {

//! A lock that occupies a single byte.
/** A spin_mutex is a mutex that busy-waits (spins) rather than blocking, and fits in a single byte.
    It should be used only for locking short critical sections
    (typically less than 20 instructions) when fairness is not an issue.
    If zero-initialized, the mutex is considered unheld.
    @ingroup synchronization */
class spin_mutex : internal::mutex_copy_deprecated_and_disabled {
    //! 0 if lock is released, 1 if lock is acquired.
    __TBB_atomic_flag flag;

public:
    //! Construct unacquired lock.
    /** Equivalent to zero-initialization of *this. */
    spin_mutex() : flag(0) {
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! Represents acquisition of a mutex.
    class scoped_lock : internal::no_copy {
    private:
        //! Points to currently held mutex, or NULL if no lock is held.
        spin_mutex* my_mutex;

        //! Value to store into spin_mutex::flag to unlock the mutex.
        /** This variable is no longer used. Instead, 0 and 1 are used to
            represent that the lock is free and acquired, respectively.
            We keep the member variable here to ensure backward compatibility. */
        __TBB_Flag my_unlock_value;

        //! Like acquire, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );

        //! Like try_acquire, but with ITT instrumentation.
        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );

        //! Like release, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_release();

        friend class spin_mutex;

    public:
        //! Construct without acquiring a mutex.
        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}

        //! Construct and acquire lock on a mutex.
        scoped_lock( spin_mutex& m ) : my_unlock_value(0) {
            internal::suppress_unused_warning(my_unlock_value);
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            my_mutex=NULL;
            internal_acquire(m);
#else
            my_mutex=&m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Acquire lock.
        void acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_acquire(m);
#else
            my_mutex = &m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Try acquiring lock (non-blocking)
        /** Return true if lock acquired; false otherwise. */
        bool try_acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            return internal_try_acquire(m);
#else
            bool result = __TBB_TryLockByte(m.flag);
            if( result )
                my_mutex = &m;
            return result;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Release lock
        void release() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_release();
#else
            __TBB_UnlockByte(my_mutex->flag);
            my_mutex = NULL;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
        }

        //! Destroy lock.  If holding a lock, releases the lock first.
        ~scoped_lock() {
            if( my_mutex ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
                internal_release();
#else
                __TBB_UnlockByte(my_mutex->flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
            }
        }
    };
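
    // Illustrative usage sketch (hypothetical names, not a definitive recipe):
    // typical RAII use of scoped_lock to guard a short critical section. The names
    // counters_mutex, counters, and add_to_counter are assumptions, and the snippet
    // assumes <vector>, <cstddef>, and "tbb/spin_mutex.h" are included.
    //
    //     tbb::spin_mutex counters_mutex;
    //     std::vector<int> counters(8, 0);
    //
    //     void add_to_counter( std::size_t i, int delta ) {
    //         tbb::spin_mutex::scoped_lock lock(counters_mutex); // spins until acquired
    //         counters[i] += delta;                              // short critical section
    //     }                                                      // released in ~scoped_lock
    //
    // For the non-blocking path, construct an empty scoped_lock and call
    // lock.try_acquire(counters_mutex); touch the shared data only if it returns true.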

    //! Internal constructor with ITT instrumentation.
    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = false;

    // ISO C++11 compatibility methods

    //! Acquire lock
    void lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        new(tmp.begin()) scoped_lock(*this);
#else
        __TBB_LockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
#else
        return __TBB_TryLockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Release lock
    void unlock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        scoped_lock& s = *tmp.begin();
        s.my_mutex = this;
        s.internal_release();
#else
        __TBB_UnlockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS */
    }

    friend class scoped_lock;
}; // end of spin_mutex
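
// Illustrative sketch of the C++11-style interface (hypothetical names): since
// spin_mutex provides lock(), try_lock(), and unlock(), it appears to satisfy the
// standard Lockable requirements, so generic RAII helpers such as std::lock_guard
// can wrap it. Assumes <mutex>, <map>, and "tbb/spin_mutex.h" are included.
//
//     tbb::spin_mutex cache_mutex;
//     std::map<int, int> cache;
//
//     void store( int key, int value ) {
//         std::lock_guard<tbb::spin_mutex> guard(cache_mutex); // calls lock()/unlock()
//         cache[key] = value;
//     }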

__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)

} // namespace tbb

#if ( __TBB_x86_32 || __TBB_x86_64 )
#include "internal/_x86_eliding_mutex_impl.h"
#endif

namespace tbb {
//! A cross-platform spin mutex with speculative lock acquisition.
/** On platforms with proper HW support, this lock may speculatively execute
    its critical sections, using HW mechanisms to detect real data races and
    ensure atomicity of the critical sections. In particular, it uses
    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
    Without such HW support, it behaves like a spin_mutex.
    It should be used for locking short critical sections where the lock is
    contended but the data it protects are not.  If zero-initialized, the
    mutex is considered unheld.
    @ingroup synchronization */

#if ( __TBB_x86_32 || __TBB_x86_64 )
typedef interface7::internal::padded_mutex<interface7::internal::x86_eliding_mutex,false> speculative_spin_mutex;
#else
typedef interface7::internal::padded_mutex<spin_mutex,false> speculative_spin_mutex;
#endif
__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)
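
// Illustrative sketch (hypothetical names): speculative_spin_mutex is used through
// the same scoped_lock pattern as spin_mutex. With Intel TSX support the critical
// section may execute speculatively and only fall back to real locking on a
// conflict; without such support it behaves like an ordinary spin_mutex.
//
//     tbb::speculative_spin_mutex table_mutex;
//     int table[1024] = {};
//
//     void update_slot( std::size_t i, int v ) {
//         tbb::speculative_spin_mutex::scoped_lock lock(table_mutex);
//         table[i] = v;   // lock is contended, but distinct slots rarely conflict
//     }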

} // namespace tbb

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_spin_mutex_H_include_area

#endif /* __TBB_spin_mutex_H */