Skip to content

Commit bda918b

Browse files
bpo-33608: Simplify ceval's DISPATCH by hoisting eval_breaker ahead of time. (gh-12062)
This includes fixes to various _Py_atomic_* macros.
1 parent b05b711 commit bda918b

File tree

2 files changed

+27
-26
lines changed

2 files changed

+27
-26
lines changed

Include/internal/pycore_atomic.h

+24-24
Original file line numberDiff line numberDiff line change
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
5858
atomic_thread_fence(ORDER)
5959

6060
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
61-
atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
61+
atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
6262

6363
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
64-
atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
64+
atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
6565

6666
/* Use builtin atomic operations in GCC >= 4.7 */
6767
#elif defined(HAVE_BUILTIN_ATOMIC)
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
9292
(assert((ORDER) == __ATOMIC_RELAXED \
9393
|| (ORDER) == __ATOMIC_SEQ_CST \
9494
|| (ORDER) == __ATOMIC_RELEASE), \
95-
__atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
95+
__atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
9696

9797
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
9898
(assert((ORDER) == __ATOMIC_RELAXED \
9999
|| (ORDER) == __ATOMIC_SEQ_CST \
100100
|| (ORDER) == __ATOMIC_ACQUIRE \
101101
|| (ORDER) == __ATOMIC_CONSUME), \
102-
__atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
102+
__atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
103103

104104
/* Only support GCC (for expression statements) and x86 (for simple
105105
* atomic semantics) and MSVC x86/x64/ARM */
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
324324
}
325325

326326
#else
327-
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
327+
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
328328
#endif
329329

330330
inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
359359
}
360360

361361
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
362-
if (sizeof(*ATOMIC_VAL._value) == 8) { \
363-
_Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
364-
_Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
362+
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
363+
_Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
364+
_Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
365365

366366
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
367367
( \
368-
sizeof(*(ATOMIC_VAL._value)) == 8 ? \
369-
_Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
370-
_Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
368+
sizeof((ATOMIC_VAL)->_value) == 8 ? \
369+
_Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
370+
_Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
371371
)
372372
#elif defined(_M_ARM) || defined(_M_ARM64)
373373
typedef enum _Py_memory_order {
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
391391
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
392392
switch (ORDER) { \
393393
case _Py_memory_order_acquire: \
394-
_InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
394+
_InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
395395
break; \
396396
case _Py_memory_order_release: \
397-
_InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
397+
_InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
398398
break; \
399399
default: \
400-
_InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
400+
_InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
401401
break; \
402402
}
403403
#else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
407407
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
408408
switch (ORDER) { \
409409
case _Py_memory_order_acquire: \
410-
_InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
410+
_InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
411411
break; \
412412
case _Py_memory_order_release: \
413-
_InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
413+
_InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
414414
break; \
415415
default: \
416-
_InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
416+
_InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
417417
break; \
418418
}
419419

@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
454454
}
455455

456456
#else
457-
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
457+
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
458458
#endif
459459

460460
inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
489489
}
490490

491491
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
492-
if (sizeof(*ATOMIC_VAL._value) == 8) { \
493-
_Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
494-
_Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
492+
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
493+
_Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
494+
_Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
495495

496496
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
497497
( \
498-
sizeof(*(ATOMIC_VAL._value)) == 8 ? \
499-
_Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
500-
_Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
498+
sizeof((ATOMIC_VAL)->_value) == 8 ? \
499+
_Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
500+
_Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
501501
)
502502
#endif
503503
#else /* !gcc x86 !_msc_ver */

Python/ceval.c

+3-2
Original file line numberDiff line numberDiff line change
@@ -637,6 +637,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
637637
PyObject **fastlocals, **freevars;
638638
PyObject *retval = NULL; /* Return value */
639639
PyThreadState *tstate = _PyThreadState_GET();
640+
_Py_atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;
640641
PyCodeObject *co;
641642

642643
/* when tracing we set things up so that
@@ -722,7 +723,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
722723

723724
#define DISPATCH() \
724725
{ \
725-
if (!_Py_atomic_load_relaxed(&tstate->interp->ceval.eval_breaker)) { \
726+
if (!_Py_atomic_load_relaxed(eval_breaker)) { \
726727
FAST_DISPATCH(); \
727728
} \
728729
continue; \
@@ -1024,7 +1025,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
10241025
async I/O handler); see Py_AddPendingCall() and
10251026
Py_MakePendingCalls() above. */
10261027

1027-
if (_Py_atomic_load_relaxed(&(tstate->interp->ceval.eval_breaker))) {
1028+
if (_Py_atomic_load_relaxed(eval_breaker)) {
10281029
opcode = _Py_OPCODE(*next_instr);
10291030
if (opcode == SETUP_FINALLY ||
10301031
opcode == SETUP_WITH ||

0 commit comments

Comments (0)