#ifndef __TBB__x86_rtm_rw_mutex_impl_H
#define __TBB__x86_rtm_rw_mutex_impl_H

#ifndef __TBB_spin_rw_mutex_H
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#if __TBB_TSX_AVAILABLE

#include "../tbb_stddef.h"
#include "../tbb_machine.h"
#include "../tbb_profiling.h"
#include "../spin_rw_mutex.h"

namespace tbb {
namespace interface8 {
namespace internal {

// State of a scoped_lock: not holding the mutex, holding it speculatively (inside an
// RTM transaction) as reader or writer, or really holding the underlying spin_rw_mutex.
enum RTM_type {
    RTM_not_in_mutex,
    RTM_transacting_reader,
    RTM_transacting_writer,
    RTM_real_reader,
    RTM_real_writer
};

// Cache-line granularity: the mutex is padded to this size so that the lock word and
// the w_flag member below do not share a cache line.
static const unsigned long speculation_granularity = 64;

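// Speculation-enabled reader-writer lock.  Acquisition first tries to elide the lock
// inside an Intel TSX (RTM) hardware transaction; if speculation is unavailable or the
// transaction aborts, it falls back to really acquiring the underlying spin_rw_mutex.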
class x86_rtm_rw_mutex: private spin_rw_mutex {
#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000
    // A bug in gcc 3.x.x causes a syntax error in spite of the friend declaration below,
    // so make scoped_lock public in that case.
public:
#else
private:
#endif
    friend class interface7::internal::padded_mutex<x86_rtm_rw_mutex,true>;
    class scoped_lock;   // should be private
    friend class scoped_lock;
private:

    // Out-of-line construction of an unacquired mutex (used when threading tools are enabled).
    void __TBB_EXPORTED_METHOD internal_construct();

    // Acquire a write lock; if only_speculate is true, attempt speculative acquisition only.
    void __TBB_EXPORTED_METHOD internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false);

    // Acquire a read lock; if only_speculate is true, attempt speculative acquisition only.
    void __TBB_EXPORTED_METHOD internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false);

    // Upgrade a reader lock to a writer lock.
    bool __TBB_EXPORTED_METHOD internal_upgrade( x86_rtm_rw_mutex::scoped_lock& );

    // Downgrade a writer lock to a reader lock.
    bool __TBB_EXPORTED_METHOD internal_downgrade( x86_rtm_rw_mutex::scoped_lock& );

    // Try to acquire a write lock without blocking.
    bool __TBB_EXPORTED_METHOD internal_try_acquire_writer( x86_rtm_rw_mutex::scoped_lock& );

    // Release the lock, whether it was taken speculatively or for real.
    void __TBB_EXPORTED_METHOD internal_release( x86_rtm_rw_mutex::scoped_lock& );

    // Accessors for the mutex pointer stored inside the wrapped spin_rw_mutex::scoped_lock.
    static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock )
    {
        return static_cast<x86_rtm_rw_mutex*>( lock.mutex );
    }
    static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx )
    {
        lock.mutex = mtx;
    }

public:
    // Construct an unacquired mutex.
    x86_rtm_rw_mutex() {
        w_flag = false;
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

#if TBB_USE_ASSERT
    // Empty destructor, defined only in assertion builds.
    ~x86_rtm_rw_mutex() {}
#endif

    // Mutex traits
    static const bool is_rw_mutex = true;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = false;

#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000
#else
    // By default the scoped_lock interface is not exposed; the mutex is intended to be
    // used through its padded_mutex wrapper, which accesses scoped_lock as a friend.
private:
#endif

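    // The scoped locking pattern.  Speculative acquisition is available only through a
    // scoped_lock, because the transaction state has to live with the lock object; the
    // wrapped spin_rw_mutex::scoped_lock is reused whenever the lock is taken for real.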
    class scoped_lock : tbb::internal::no_copy {
        friend class x86_rtm_rw_mutex;
        spin_rw_mutex::scoped_lock my_scoped_lock;

        // Current state: not in the mutex, speculating, or really holding the lock.
        RTM_type transaction_state;

    public:
        // Construct a lock that has not acquired any mutex.
        scoped_lock() : my_scoped_lock(), transaction_state(RTM_not_in_mutex) {
        }

        // Construct and acquire a lock on the given mutex.
        scoped_lock( x86_rtm_rw_mutex& m, bool write = true ) : my_scoped_lock(),
            transaction_state(RTM_not_in_mutex) {
            acquire(m, write);
        }

        // Release the lock if it is still held.
        ~scoped_lock() {
            if(transaction_state != RTM_not_in_mutex) release();
        }

        // Acquire a lock on the given mutex.
        void acquire( x86_rtm_rw_mutex& m, bool write = true ) {
            if( write ) m.internal_acquire_writer(*this);
            else        m.internal_acquire_reader(*this);
        }

        // Release the lock.
        void release() {
            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);
            __TBB_ASSERT( mutex, "lock is not acquired" );
            __TBB_ASSERT( transaction_state!=RTM_not_in_mutex, "lock is not acquired" );
            return mutex->internal_release(*this);
        }

        // Upgrade a reader to a writer; returns true if the upgrade happened without
        // releasing and re-acquiring the lock.
        bool upgrade_to_writer() {
            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);
            __TBB_ASSERT( mutex, "lock is not acquired" );
            if (transaction_state == RTM_transacting_writer || transaction_state == RTM_real_writer)
                return true; // Already a writer
            return mutex->internal_upgrade(*this);
        }

        // Downgrade a writer to a reader; returns true if the downgrade happened without
        // releasing and re-acquiring the lock.
        bool downgrade_to_reader() {
            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);
            __TBB_ASSERT( mutex, "lock is not acquired" );
            if (transaction_state == RTM_transacting_reader || transaction_state == RTM_real_reader)
                return true; // Already a reader
            return mutex->internal_downgrade(*this);
        }

        // Attempt to acquire the mutex without blocking; returns true on success.
        bool try_acquire( x86_rtm_rw_mutex& m, bool write = true ) {
#if TBB_USE_ASSERT
            x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock);
            __TBB_ASSERT( !mutex, "lock is already acquired" );
#endif
            if(write) return m.internal_try_acquire_writer(*this);
            // Speculatively acquire the read lock; if that fails, fall back to
            // try_acquire on the underlying spin_rw_mutex.
            m.internal_acquire_reader(*this, /*only_speculate=*/true);
            if(transaction_state == RTM_transacting_reader) return true;
            if( my_scoped_lock.try_acquire(m, false)) {
                transaction_state = RTM_real_reader;
                return true;
            }
            return false;
        }

    };  // class x86_rtm_rw_mutex::scoped_lock
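
    // Note: C++11-style lock()/unlock() methods are not provided, because the mutex cannot
    // maintain state about whether the calling thread is inside a transaction.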

private:
    // Padding that rounds the underlying spin_rw_mutex up to speculation_granularity bytes,
    // keeping w_flag below on its own cache line.
    char pad[speculation_granularity-sizeof(spin_rw_mutex)];

    // Set while a writer really (non-speculatively) holds the underlying spin_rw_mutex.
    tbb::atomic<bool> w_flag;

};  // class x86_rtm_rw_mutex
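
// A minimal usage sketch (illustrative only, not part of this header).  This class is an
// internal implementation detail; user code normally reaches it through
// tbb::speculative_spin_rw_mutex, which wraps x86_rtm_rw_mutex in a padded_mutex when
// TSX is available:
//
//     #include "tbb/spin_rw_mutex.h"
//
//     tbb::speculative_spin_rw_mutex m;
//     {
//         tbb::speculative_spin_rw_mutex::scoped_lock lock(m, /*write=*/false); // reader
//         // ... read shared state ...
//         if( lock.upgrade_to_writer() ) {
//             // upgraded in place; otherwise the lock was released and re-acquired
//         }
//         // ... modify shared state ...
//     }   // released by ~scoped_lock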

} // namespace internal
} // namespace interface8
} // namespace tbb

#endif  /* __TBB_TSX_AVAILABLE */
#endif  /* __TBB__x86_rtm_rw_mutex_impl_H */