bpo-33608: Simplify ceval's DISPATCH by hoisting eval_breaker ahead o… · python/cpython@bda918b
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
+    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
+    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
 
 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
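For context, here is a minimal standalone sketch (not part of the patch) of the pattern the C11 stdatomic branch now follows: the macro receives a pointer to the atomic struct, and the member access is fully parenthesized before its address is taken. my_atomic_int, MY_STORE and MY_LOAD are hypothetical stand-ins for _Py_atomic_int and the _Py_atomic_*_explicit macros.

/* Hypothetical stand-ins; mirrors the post-patch stdatomic branch. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int _value; } my_atomic_int;

#define MY_STORE(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
#define MY_LOAD(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)

int main(void)
{
    my_atomic_int flag = { 0 };
    MY_STORE(&flag, 1, memory_order_release);              /* caller passes a pointer */
    printf("%d\n", MY_LOAD(&flag, memory_order_acquire));  /* prints 1 */
    return 0;
}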
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_RELEASE), \
-     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
+     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_ACQUIRE \
             || (ORDER) == __ATOMIC_CONSUME), \
-     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
+     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
 
 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
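As a side note, the GCC-builtin branch keeps these macros usable as expressions by pairing the assert with the load in a comma expression: the assert checks that ORDER is one of the supported __ATOMIC_* constants, then __atomic_load_n yields the value. A minimal GCC/Clang-only sketch, with demo_atomic_int and demo_load as hypothetical stand-ins for _Py_atomic_int and _Py_atomic_load_explicit:

#include <assert.h>
#include <stdio.h>

typedef struct { volatile int _value; } demo_atomic_int;

/* Comma expression: evaluate the assert first, then yield the loaded value. */
#define demo_load(ATOMIC_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED \
            || (ORDER) == __ATOMIC_SEQ_CST \
            || (ORDER) == __ATOMIC_ACQUIRE \
            || (ORDER) == __ATOMIC_CONSUME), \
     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))

int main(void)
{
    demo_atomic_int x = { 7 };
    printf("%d\n", demo_load(&x, __ATOMIC_ACQUIRE));  /* prints 7 */
    return 0;
}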
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
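The MSVC x86/x64 branch dispatches on sizeof((ATOMIC_VAL)->_value), so a single macro covers both 4-byte and 8-byte payloads; since sizeof is a compile-time constant, the untaken branch is dead code the compiler can drop. Below is a portable standalone sketch of that dispatch; every demo_* name is hypothetical and only mirrors the shape of _Py_atomic_store_explicit:

#include <stdint.h>
#include <stdio.h>

typedef struct { volatile int64_t _value; } demo_atomic_64;  /* 8-byte payload */
typedef struct { volatile int32_t _value; } demo_atomic_32;  /* 4-byte payload */

/* Stand-ins for the width-specific store primitives. */
#define demo_store_64bit(PTR, V) (*(PTR) = (V), puts("64-bit path"))
#define demo_store_32bit(PTR, V) (*(PTR) = (V), puts("32-bit path"))

/* sizeof((ATOMIC_VAL)->_value) is a constant expression, so only one branch
 * is ever live for a given struct type. */
#define demo_store_explicit(ATOMIC_VAL, NEW_VAL) \
    if (sizeof((ATOMIC_VAL)->_value) == 8) { \
        demo_store_64bit((volatile int64_t*)&((ATOMIC_VAL)->_value), NEW_VAL); } else { \
        demo_store_32bit((volatile int32_t*)&((ATOMIC_VAL)->_value), NEW_VAL); }

int main(void)
{
    demo_atomic_64 a64 = { 0 };
    demo_atomic_32 a32 = { 0 };
    demo_store_explicit(&a64, 42);   /* prints "64-bit path" */
    demo_store_explicit(&a32, 7);    /* prints "32-bit path" */
    return 0;
}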
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+      _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
       break; \
     }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
     switch (ORDER) { \
     case _Py_memory_order_acquire: \
-      _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     case _Py_memory_order_release: \
-      _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     default: \
-      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+      _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
       break; \
     }
 
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
   )
 #endif
 #else /* !gcc x86 !_msc_ver */