diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c
index 0b45caba0d4..6f4476d055b 100644
--- a/Python/ceval_gil.c
+++ b/Python/ceval_gil.c
@@ -49,13 +49,6 @@
    (Note: this mechanism is enabled with FORCE_SWITCHING above)
 */
 
-// GH-89279: Force inlining by using a macro.
-#if defined(_MSC_VER) && SIZEOF_INT == 4
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
-#else
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
-#endif
-
 // Atomically copy the bits indicated by mask between two values.
 static inline void
 copy_eval_breaker_bits(uintptr_t *from, uintptr_t *to, uintptr_t mask)
diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h
index 2881ed2153a..8b25a5f0ea4 100644
--- a/Python/ceval_macros.h
+++ b/Python/ceval_macros.h
@@ -375,13 +375,6 @@ do { \
     } while (0);
 
 
-// GH-89279: Force inlining by using a macro.
-#if defined(_MSC_VER) && SIZEOF_INT == 4
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
-#else
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
-#endif
-
 static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
     return (tstate->py_recursion_remaining-- <= 0) &&
         _Py_CheckRecursiveCallPy(tstate);
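
For context (commentary, not part of the patch): the block deleted in both files was the GH-89279 workaround noted in its own comment, forcing MSVC to inline a relaxed 32-bit load by reading through a volatile pointer instead of calling the generic _Py_atomic_load_relaxed helper, which MSVC would not reliably inline in the hot eval loop. Below is a minimal standalone sketch of that pattern under stated assumptions: every demo_* name is hypothetical, the fallback is a plain read rather than a true relaxed atomic load, and the configure-provided SIZEOF_INT == 4 check from the original is replaced by a runtime assert so the sketch compiles on its own.

    /* Sketch of the removed force-inlining pattern. On MSVC/x86-64, an
     * aligned 32-bit read through a volatile pointer compiles to a single
     * inlined MOV, sidestepping a helper the compiler may refuse to inline.
     * All demo_* names are hypothetical, not CPython's API. */
    #include <assert.h>
    #include <stdio.h>

    typedef struct { int _value; } demo_atomic_int;

    /* Generic fallback; a real implementation would use a relaxed atomic
     * load (e.g. C11 atomic_load_explicit with memory_order_relaxed). */
    static int
    demo_load_relaxed(demo_atomic_int *v)
    {
        return v->_value;
    }

    #if defined(_MSC_VER)
    /* The original also required SIZEOF_INT == 4 at preprocessing time;
     * here the assert keeps the size check at runtime instead. */
    #define demo_load_relaxed_int32(ATOMIC_VAL) \
        (assert(sizeof((ATOMIC_VAL)->_value) == 4), \
         *((volatile int *)&((ATOMIC_VAL)->_value)))
    #else
    #define demo_load_relaxed_int32(ATOMIC_VAL) demo_load_relaxed(ATOMIC_VAL)
    #endif

    int
    main(void)
    {
        demo_atomic_int eval_breaker = { 0 };
        /* Hot-loop style poll, as the eval loop does with its eval breaker. */
        if (demo_load_relaxed_int32(&eval_breaker) == 0) {
            printf("no pending work\n");
        }
        return 0;
    }

The macro (rather than an inline function) guaranteed the load was emitted inline at every call site; once the eval breaker moved to atomics that inline properly on MSVC, the workaround became dead code, which is why this patch can delete it from both ceval_gil.c and ceval_macros.h.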