
bpo-33608: Simplify DISPATCH by hoisting eval_breaker ahead of time. #12062

Merged
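For orientation, a summary inferred from the diff below rather than taken from the PR description: _PyEval_EvalFrameDefault now computes the address of the interpreter's eval_breaker flag once, at frame entry, and the DISPATCH macro loads the flag through that cached pointer instead of re-deriving it from tstate->interp->ceval on every instruction. A minimal sketch of the same hoisting pattern, assuming C11 <stdatomic.h> and using hypothetical stand-in names (Ceval, Interp, ThreadState, run_loop):

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the CPython structures involved. */
typedef struct { atomic_int eval_breaker; } Ceval;
typedef struct { Ceval ceval; } Interp;
typedef struct { Interp *interp; } ThreadState;

static void run_loop(ThreadState *tstate, int budget)
{
    /* Hoisted once: the pointer is loop-invariant; only the value it
       points to changes, and that value is re-read every iteration. */
    atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;

    while (budget-- > 0) {
        /* ... decode and execute one instruction here ... */
        if (!atomic_load_explicit(eval_breaker, memory_order_relaxed)) {
            continue;  /* fast path, playing the role of FAST_DISPATCH() */
        }
        /* slow path: handle signals, pending calls, etc. */
    }
}

int main(void)
{
    Interp interp = {0};
    ThreadState tstate = { &interp };
    run_loop(&tstate, 1000);
    puts("done");
    return 0;
}

The per-opcode saving is small (one pointer load plus an address computation), but DISPATCH runs once for every instruction executed, so it accumulates.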
48 changes: 24 additions & 24 deletions Include/internal/pycore_atomic.h
@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
+    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
+    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
 
 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_RELEASE), \
-     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
+     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_ACQUIRE \
             || (ORDER) == __ATOMIC_CONSUME), \
-     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
+     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
 
 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
 )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef enum _Py_memory_order {
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   default: \
-    _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
    break; \
   default: \
-    _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
     break; \
   }
 
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
 )
 #endif
 #else  /* !gcc x86  !_msc_ver */
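Two observations on the header changes above (my reading of the diff, not text from the PR): every expansion now parenthesizes ATOMIC_VAL before applying -> or unary *, the usual defence against precedence surprises when the macro argument is an expression; and on the MSVC paths the rewrite also fixes the operand itself, passing &((ATOMIC_VAL)->_value) where the old expansions cast the field ATOMIC_VAL._value directly to a pointer. A small self-contained program, with hypothetical macro names, showing the precedence bug that the added parentheses rule out:

#include <stdio.h>

/* Unary '*' binds tighter than the '+' inside the argument, so the
   unparenthesized expansion dereferences the wrong thing. */
#define DEREF_BAD(P)  *P
#define DEREF_GOOD(P) *(P)

int main(void)
{
    int arr[] = {10, 20};
    printf("%d\n", DEREF_BAD(arr + 1));   /* expands to *arr + 1:   prints 11 */
    printf("%d\n", DEREF_GOOD(arr + 1));  /* expands to *(arr + 1): prints 20 */
    return 0;
}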
5 changes: 3 additions & 2 deletions Python/ceval.c
@@ -637,6 +637,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
     PyObject **fastlocals, **freevars;
     PyObject *retval = NULL;            /* Return value */
     PyThreadState *tstate = _PyThreadState_GET();
+    _Py_atomic_int *eval_breaker = &tstate->interp->ceval.eval_breaker;
     PyCodeObject *co;
 
     /* when tracing we set things up so that
@@ -722,7 +723,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 
 #define DISPATCH() \
     { \
-        if (!_Py_atomic_load_relaxed(&tstate->interp->ceval.eval_breaker)) { \
+        if (!_Py_atomic_load_relaxed(eval_breaker)) { \
             FAST_DISPATCH(); \
         } \
         continue; \
@@ -1024,7 +1025,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
            async I/O handler); see Py_AddPendingCall() and
            Py_MakePendingCalls() above. */
 
-        if (_Py_atomic_load_relaxed(&(tstate->interp->ceval.eval_breaker))) {
+        if (_Py_atomic_load_relaxed(eval_breaker)) {
             opcode = _Py_OPCODE(*next_instr);
             if (opcode == SETUP_FINALLY ||
                 opcode == SETUP_WITH ||
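The hoisting is behavior-preserving because only the address computation moves out of the loop; the flag itself is still loaded atomically on every DISPATCH, so a store made by another thread (e.g. via Py_AddPendingCall(), per the comment in the hunk above) is still picked up. A runnable sketch of that interaction, assuming C11 atomics and POSIX threads, with hypothetical names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int eval_breaker;   /* stands in for ceval.eval_breaker */

static void *interpreter(void *arg)
{
    (void)arg;
    /* Hoisted pointer, as in the patched _PyEval_EvalFrameDefault. */
    atomic_int *breaker = &eval_breaker;
    unsigned long executed = 0;
    while (!atomic_load_explicit(breaker, memory_order_relaxed)) {
        executed++;               /* plays the role of FAST_DISPATCH() */
    }
    printf("left the fast path after %lu instructions\n", executed);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, interpreter, NULL);
    /* A second thread sets the flag, as Py_AddPendingCall() would. */
    atomic_store_explicit(&eval_breaker, 1, memory_order_relaxed);
    pthread_join(t, NULL);
    return 0;
}

Compile with -pthread; the loop exits as soon as the store becomes visible to the interpreter thread.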