#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "dynamic_annotations.h" /* _Py_ANNOTATE_MEMORY_ORDER */
#include "pyconfig.h"

#include <assert.h>              /* assert(), used by the __atomic macros below */

#ifdef HAVE_STD_ATOMIC
#  include <stdatomic.h>
#endif

#if defined(_MSC_VER)
#include <intrin.h>
#if defined(_M_IX86) || defined(_M_X64)
#  include <immintrin.h>
#endif
#endif
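
/* This is modeled on the C11 atomics interface (<stdatomic.h>):
 * operations and types are named as in C11, with a _Py_ prefix, and
 * carry the same semantics.  Only loads, stores, and fences are
 * provided, which is all the interpreter core currently needs.
 *
 * Beware: the hand-rolled implementations below are deep magic.
 */
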
#if defined(HAVE_STD_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    atomic_uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)

#elif defined(HAVE_BUILTIN_ATOMIC)

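/* GCC/Clang __atomic builtins.  The asserts in the store/load macros
 * below restrict callers to the memory orders that C11 permits for a
 * plain store (relaxed/release/seq_cst) and a plain load
 * (relaxed/consume/acquire/seq_cst). */
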
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED                        \
            || (ORDER) == __ATOMIC_SEQ_CST                     \
            || (ORDER) == __ATOMIC_RELEASE),                   \
     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED                        \
            || (ORDER) == __ATOMIC_SEQ_CST                     \
            || (ORDER) == __ATOMIC_ACQUIRE                     \
            || (ORDER) == __ATOMIC_CONSUME),                   \
     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))

#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
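/* Hand-rolled implementation for GCC on x86/x86-64 when neither C11
 * atomics nor the __atomic builtins are available.  It relies on x86's
 * strong memory model: volatile loads/stores plus compiler fences give
 * acquire/release semantics, and XCHG (which carries an implicit LOCK
 * prefix) provides the full barrier needed for seq_cst stores. */
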
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}

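/* Tell thread-sanitizer-style checkers about the happens-before edges
 * implied by each memory order: releases publish (HAPPENS_BEFORE),
 * acquires observe (HAPPENS_AFTER). */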
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* unused when annotations are compiled out */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

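/* Store: for release, a compiler fence followed by a plain MOV suffices
 * on x86 (stores are not reordered with earlier loads or stores); for
 * seq_cst (and the unusual-but-accepted acquire/acq_rel store orders)
 * use XCHG, whose implicit LOCK prefix acts as a full barrier. */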
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL;\
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough: the store itself is the same as relaxed */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                             : "+r"(new_val) \
                             : "m"(atomic_val->_value) \
                             : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })

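/* Load: x86 loads already have acquire semantics, so an acquire load
 * only needs a compiler fence after the read; orders that also demand
 * release behavior (acq_rel, seq_cst) get an mfence, via
 * _Py_atomic_thread_fence, before the read. */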
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so we need a */ \
            /* thread fence before the load. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence needed. */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations, so */ \
            /* a compiler fence is all that is needed here. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence needed. */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })


#elif defined(_MSC_VER)
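/*  The _Interlocked* intrinsics imply a full memory barrier, which is
    enough for acq_rel and seq_cst.  The _HLE* variants add hardware
    lock-elision hints; on CPUs without TSX the hint prefix is ignored
    and they behave exactly like the plain intrinsics, so using them is
    always safe. */
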
#if defined(_M_IX86) || defined(_M_X64)
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    volatile uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    volatile int _value;
} _Py_atomic_int;

#if defined(_M_X64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
        _InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    case _Py_memory_order_release: \
        _InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    default: \
        _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    }
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif

#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
        _InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    case _Py_memory_order_release: \
        _InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    default: \
        _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    }

#if defined(_M_X64)

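/* There is no plain interlocked 64-bit load, so an ordered load is
 * synthesized with a compare-exchange that swaps the value with itself:
 * the CAS never changes memory, but it returns the current value with
 * the barrier semantics of the chosen intrinsic.  Relaxed loads skip
 * the CAS, since an aligned 64-bit read is already atomic on x64. */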
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
    __int64 old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_release:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_relaxed:
        old = *value;
        break;
    default:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old);
        break;
    }
    }
    return old;
}

#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))

#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif

inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
    long old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_release:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_relaxed:
        old = *value;
        break;
    default:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old);
        break;
    }
    }
    return old;
}

#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    if (sizeof((ATOMIC_VAL)->_value) == 8) { \
        _Py_atomic_store_64bit((ATOMIC_VAL), NEW_VAL, ORDER) } else { \
        _Py_atomic_store_32bit((ATOMIC_VAL), NEW_VAL, ORDER) }

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ( \
        sizeof((ATOMIC_VAL)->_value) == 8 ? \
        _Py_atomic_load_64bit((ATOMIC_VAL), ORDER) : \
        _Py_atomic_load_32bit((ATOMIC_VAL), ORDER) \
    )
#elif defined(_M_ARM) || defined(_M_ARM64)
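/* MSVC on ARM and ARM64: same structure as the x86 branch, but the
 * acquire/release orders map onto the _acq/_rel interlocked variants,
 * which issue real barriers on ARM's weaker memory model. */
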
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    volatile uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    volatile int _value;
} _Py_atomic_int;

#if defined(_M_ARM64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
        _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    case _Py_memory_order_release: \
        _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    default: \
        _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
        break; \
    }
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif

#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
        _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    case _Py_memory_order_release: \
        _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    default: \
        _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
        break; \
    }

#if defined(_M_ARM64)

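/* As on x64: synthesize an ordered 64-bit load with a self-swap
 * compare-exchange; relaxed loads use a plain read. */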
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
    __int64 old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64_acq((volatile __int64*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_release:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64_rel((volatile __int64*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_relaxed:
        old = *value;
        break;
    default:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old);
        break;
    }
    }
    return old;
}

#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
    _Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))

#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif

inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
    long old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange_acq((volatile long*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_release:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange_rel((volatile long*)value, old, old) != old);
        break;
    }
    case _Py_memory_order_relaxed:
        old = *value;
        break;
    default:
    {
        do {
            old = *value;
        } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old);
        break;
    }
    }
    return old;
}

#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
    _Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    if (sizeof((ATOMIC_VAL)->_value) == 8) { \
        _Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
        _Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ( \
        sizeof((ATOMIC_VAL)->_value) == 8 ? \
        _Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
        _Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
    )
#endif
#else
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

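/* Fallback for unknown compilers/architectures: plain loads and stores
 * with no fences.  This is only correct if the platform keeps aligned
 * word accesses atomic and nothing relies on cross-thread ordering;
 * ports to new platforms should supply a real implementation. */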
#define _Py_atomic_signal_fence(ORDER) ((void)0)
#define _Py_atomic_thread_fence(ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)
#endif
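
/* Standardized shortcuts: sequentially consistent store and load. */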
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst)
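
/* Python-local extensions: relaxed store and load, for flags and
 * counters where cross-thread ordering does not matter. */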
#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed)
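
/* Usage sketch (hypothetical flag, mirroring how the interpreter core
 * uses these types for e.g. its eval-breaker flag):
 *
 *     static _Py_atomic_int do_stop;                 // zero-initialized
 *
 *     void request_stop(void)
 *     { _Py_atomic_store(&do_stop, 1); }             // seq_cst publish
 *
 *     int stop_requested(void)
 *     { return _Py_atomic_load_relaxed(&do_stop); }  // cheap poll in a loop
 */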

#ifdef __cplusplus
}
#endif
#endif  /* Py_ATOMIC_H */