// Lightweight locks and other synchronization mechanisms.
//
// These implementations are based on WebKit's WTF::Lock. See
// https://webkit.org/blog/6161/locking-in-webkit/ for a description of the
// design.
#ifndef Py_INTERNAL_LOCK_H
#define Py_INTERNAL_LOCK_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// _Py_UNLOCKED is defined as 0 and _Py_LOCKED as 1 in Include/cpython/lock.h
#define _Py_HAS_PARKED 2
#define _Py_ONCE_INITIALIZED 4

static inline int
PyMutex_LockFast(uint8_t *lock_bits)
{
    uint8_t expected = _Py_UNLOCKED;
    return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED);
}

// Checks if the mutex is currently locked.
static inline int
PyMutex_IsLocked(PyMutex *m)
{
    return (_Py_atomic_load_uint8(&m->_bits) & _Py_LOCKED) != 0;
}

// Re-initializes the mutex after a fork to the unlocked state.
static inline void
_PyMutex_at_fork_reinit(PyMutex *m)
{
    memset(m, 0, sizeof(*m));
}

typedef enum _PyLockFlags {
    // Do not detach/release the GIL when waiting on the lock.
    _Py_LOCK_DONT_DETACH = 0,

    // Detach/release the GIL while waiting on the lock.
    _PY_LOCK_DETACH = 1,

    // Handle signals if interrupted while waiting on the lock.
    _PY_LOCK_HANDLE_SIGNALS = 2,
} _PyLockFlags;

// Lock a mutex with an optional timeout and additional options. See
// _PyLockFlags for details.
extern PyLockStatus
_PyMutex_LockTimed(PyMutex *m, PyTime_t timeout_ns, _PyLockFlags flags);

// Lock a mutex with additional options. See _PyLockFlags for details.
static inline void
PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
{
    uint8_t expected = _Py_UNLOCKED;
    if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
        _PyMutex_LockTimed(m, -1, flags);
    }
}

// Unlock a mutex; returns 0 if the mutex is not locked (used for improved
// error messages).
extern int _PyMutex_TryUnlock(PyMutex *m);


// PyEvent is a one-time event notification
typedef struct {
    uint8_t v;
} PyEvent;

// Check if the event is set without blocking. Returns 1 if the event is set or
// 0 otherwise.
PyAPI_FUNC(int) _PyEvent_IsSet(PyEvent *evt);

// Set the event and notify any waiting threads.
// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(void) _PyEvent_Notify(PyEvent *evt);

// Wait for the event to be set. If the event is already set, then this returns
// immediately.
PyAPI_FUNC(void) PyEvent_Wait(PyEvent *evt);

// Wait for the event to be set, or until the timeout expires. If the event is
// already set, then this returns immediately. Returns 1 if the event was set,
// and 0 if the timeout expired or the thread was interrupted. If `detach` is
// true, then the thread will detach/release the GIL while waiting.
PyAPI_FUNC(int)
PyEvent_WaitTimed(PyEvent *evt, PyTime_t timeout_ns, int detach);
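
// Usage sketch (illustrative only, not part of this header): a worker thread
// publishes a result guarded by a PyMutex and signals completion through a
// PyEvent. The names `work_lock`, `work_result`, and `work_done` are
// hypothetical; PyMutex_Lock()/PyMutex_Unlock() are assumed from the public
// Include/cpython/lock.h.
//
//     static PyMutex work_lock;        // zero-initialized == unlocked
//     static int work_result;
//     static PyEvent work_done;        // zero-initialized == not set
//
//     static void
//     publish_result(int value)
//     {
//         PyMutex_Lock(&work_lock);
//         work_result = value;
//         PyMutex_Unlock(&work_lock);
//         _PyEvent_Notify(&work_done);  // wake any thread in PyEvent_Wait()
//     }
//
//     static int
//     wait_for_result(void)
//     {
//         PyEvent_Wait(&work_done);     // returns immediately if already set
//         PyMutex_Lock(&work_lock);
//         int value = work_result;
//         PyMutex_Unlock(&work_lock);
//         return value;
//     }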

// _PyRawMutex implements a word-sized mutex that does not depend on the
// parking lot API, and therefore can be used in the parking lot
// implementation.
//
// The mutex uses a packed representation: the least significant bit is used to
// indicate whether the mutex is locked or not. The remaining bits are either
// zero or a pointer to a `struct raw_mutex_entry` (see lock.c).
typedef struct {
    uintptr_t v;
} _PyRawMutex;

// Slow paths for lock/unlock
extern void _PyRawMutex_LockSlow(_PyRawMutex *m);
extern void _PyRawMutex_UnlockSlow(_PyRawMutex *m);

static inline void
_PyRawMutex_Lock(_PyRawMutex *m)
{
    uintptr_t unlocked = _Py_UNLOCKED;
    if (_Py_atomic_compare_exchange_uintptr(&m->v, &unlocked, _Py_LOCKED)) {
        return;
    }
    _PyRawMutex_LockSlow(m);
}

static inline void
_PyRawMutex_Unlock(_PyRawMutex *m)
{
    uintptr_t locked = _Py_LOCKED;
    if (_Py_atomic_compare_exchange_uintptr(&m->v, &locked, _Py_UNLOCKED)) {
        return;
    }
    _PyRawMutex_UnlockSlow(m);
}

// Type signature for one-time initialization functions. The function should
// return 0 on success and -1 on failure.
typedef int _Py_once_fn_t(void *arg);

// (private) slow path for one-time initialization
PyAPI_FUNC(int)
_PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg);

// Calls `fn` once using `flag`. The `arg` is passed to the call to `fn`.
//
// Returns 0 on success and -1 on failure.
//
// If `fn` returns 0 (success), then subsequent calls immediately return 0.
// If `fn` returns -1 (failure), then subsequent calls will retry the call.
static inline int
_PyOnceFlag_CallOnce(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg)
{
    if (_Py_atomic_load_uint8(&flag->v) == _Py_ONCE_INITIALIZED) {
        return 0;
    }
    return _PyOnceFlag_CallOnceSlow(flag, fn, arg);
}
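
// Usage sketch (illustrative only, not part of this header): lazy, thread-safe
// initialization with _PyOnceFlag_CallOnce(). The names `table_once`,
// `init_table`, `lookup_table`, and `get_table` are hypothetical. Because the
// flag only records success, a failed `init_table` (-1) is retried on the next
// call.
//
//     static _PyOnceFlag table_once;   // zero-initialized
//     static void *lookup_table;
//
//     static int
//     init_table(void *arg)
//     {
//         lookup_table = PyMem_RawCalloc(256, sizeof(void *));
//         return lookup_table != NULL ? 0 : -1;
//     }
//
//     static void *
//     get_table(void)
//     {
//         if (_PyOnceFlag_CallOnce(&table_once, init_table, NULL) < 0) {
//             return NULL;   // initialization failed; caller handles error
//         }
//         return lookup_table;
//     }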

// A recursive mutex. The mutex should be zero-initialized.
typedef struct {
    PyMutex mutex;
    unsigned long long thread;  // i.e., PyThread_get_thread_ident_ex()
    size_t level;
} _PyRecursiveMutex;

PyAPI_FUNC(int) _PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Lock(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Unlock(_PyRecursiveMutex *m);


// A readers-writer (RW) lock. The lock supports multiple concurrent readers or
// a single writer. The lock is write-preferring: if a writer is waiting while
// the lock is read-locked, then new readers will be blocked. This avoids
// starvation of writers.
//
// In C++, the equivalent synchronization primitive is std::shared_mutex
// with shared ("read") and exclusive ("write") locking.
//
// The two least significant bits are used to indicate if the lock is
// write-locked and if there are parked threads (either readers or writers)
// waiting to acquire the lock. The remaining bits are used to indicate the
// number of readers holding the lock.
//
// 0b000..00000: unlocked
// 0bnnn..nnn00: nnn..nnn readers holding the lock
// 0bnnn..nnn10: nnn..nnn readers holding the lock and a writer is waiting
// 0b00000..010: unlocked with awoken writer about to acquire lock
// 0b00000..001: write-locked
// 0b00000..011: write-locked and readers or other writers are waiting
//
// Note that reader_count must be zero if the lock is held by a writer, and
// vice versa. The lock can only be held by readers or a writer, but not both.
//
// The design is optimized for simplicity of the implementation. The lock is
// not fair: if fairness is desired, use an additional PyMutex to serialize
// writers. The lock is also not reentrant.
typedef struct {
    uintptr_t bits;
} _PyRWMutex;

// Read lock (i.e., shared lock)
PyAPI_FUNC(void) _PyRWMutex_RLock(_PyRWMutex *rwmutex);
PyAPI_FUNC(void) _PyRWMutex_RUnlock(_PyRWMutex *rwmutex);

// Write lock (i.e., exclusive lock)
PyAPI_FUNC(void) _PyRWMutex_Lock(_PyRWMutex *rwmutex);
PyAPI_FUNC(void) _PyRWMutex_Unlock(_PyRWMutex *rwmutex);

// Similar to the Linux seqlock: https://en.wikipedia.org/wiki/Seqlock
// We use a sequence number to lock the writer: an even sequence means the lock
// is not held, an odd sequence means it is held. Readers read the sequence
// number before attempting to read the underlying data and then read the
// sequence number again after reading the data. If the sequence has not
// changed, the data is valid.
//
// Differs slightly in that we use a CAS on the sequence as the lock, instead
// of a separate spin lock. The writer can also detect that the underlying data
// has not changed, abandon the write, and restore the previous sequence.
typedef struct {
    uint32_t sequence;
} _PySeqLock;

// Lock the sequence lock for the writer
PyAPI_FUNC(void) _PySeqLock_LockWrite(_PySeqLock *seqlock);

// Unlock the sequence lock and move to the next sequence number.
PyAPI_FUNC(void) _PySeqLock_UnlockWrite(_PySeqLock *seqlock);

// Abandon the current update indicating that no mutations have occurred
// and restore the previous sequence value.
PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);

// Begin a read operation and return the current sequence number.
PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);

// End the read operation and confirm that the sequence number has not changed.
// Returns 1 if the read was successful or 0 if the read should be retried.
PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);

// Check if the lock was held during a fork and clear the lock. Returns 1
// if the lock was held and any associated data should be cleared.
PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LOCK_H */
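
// Usage sketch (illustrative only, not part of this header): the reader-side
// retry loop and writer side for _PySeqLock. The names `cache_lock`,
// `shared_value`, `read_cached_value`, and `write_cached_value` are
// hypothetical; the relaxed atomic load/store helpers are assumed from
// Include/cpython/pyatomic.h, since data read concurrently with a writer must
// be read tear-free.
//
//     static _PySeqLock cache_lock;    // zero-initialized == even == unlocked
//     static uint32_t shared_value;
//
//     static uint32_t
//     read_cached_value(void)
//     {
//         uint32_t value, seq;
//         do {
//             seq = _PySeqLock_BeginRead(&cache_lock);
//             value = _Py_atomic_load_uint32_relaxed(&shared_value);
//         } while (!_PySeqLock_EndRead(&cache_lock, seq));
//         return value;
//     }
//
//     static void
//     write_cached_value(uint32_t value)
//     {
//         _PySeqLock_LockWrite(&cache_lock);
//         _Py_atomic_store_uint32_relaxed(&shared_value, value);
//         _PySeqLock_UnlockWrite(&cache_lock);
//     }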