Mirror of https://github.com/python/cpython.git
gh-122860: Remove unused macro _Py_atomic_load_relaxed_int32 (#122861)
This commit is contained in:
parent bc9d92c679
commit 1069190bad
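The macro being removed was a Windows-specific workaround (GH-89279) that force-inlined a relaxed, i.e. unordered, 32-bit atomic load; on other platforms it simply forwarded to _Py_atomic_load_relaxed. Since no remaining code calls it, both copies of the definition can go. For readers unfamiliar with the semantics, below is a minimal standalone C11 sketch of a relaxed 32-bit load; every name in it (load_relaxed_int32_demo, eval_breaker_demo) is a hypothetical illustration, not CPython code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of a "relaxed" 32-bit load: the value is
 * read atomically, but with no ordering guarantees relative to other
 * memory operations. */
static _Atomic int32_t eval_breaker_demo = 0;

static inline int32_t
load_relaxed_int32_demo(_Atomic int32_t *value)
{
    return atomic_load_explicit(value, memory_order_relaxed);
}

int
main(void)
{
    atomic_store_explicit(&eval_breaker_demo, 42, memory_order_relaxed);
    printf("%d\n", (int)load_relaxed_int32_demo(&eval_breaker_demo));
    return 0;
}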
@@ -49,13 +49,6 @@
 (Note: this mechanism is enabled with FORCE_SWITCHING above)
 */
 
-// GH-89279: Force inlining by using a macro.
-#if defined(_MSC_VER) && SIZEOF_INT == 4
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
-#else
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
-#endif
-
 // Atomically copy the bits indicated by mask between two values.
 static inline void
 copy_eval_breaker_bits(uintptr_t *from, uintptr_t *to, uintptr_t mask)
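The surviving context line above introduces copy_eval_breaker_bits(), which atomically copies the bits selected by a mask between two values. The following is a rough standalone sketch of one way such a masked copy can be written with C11 atomics and a compare-exchange loop; it is an illustration under that assumption, not the CPython implementation, and copy_bits_demo is a hypothetical name.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative sketch (not the CPython implementation): atomically copy
 * the bits selected by `mask` from *from into *to, leaving the other
 * bits of *to untouched. */
static inline void
copy_bits_demo(_Atomic uintptr_t *from, _Atomic uintptr_t *to, uintptr_t mask)
{
    uintptr_t from_bits =
        atomic_load_explicit(from, memory_order_relaxed) & mask;
    uintptr_t old_value = atomic_load_explicit(to, memory_order_relaxed);
    uintptr_t new_value;
    do {
        new_value = (old_value & ~mask) | from_bits;
    } while (!atomic_compare_exchange_weak_explicit(
                 to, &old_value, new_value,
                 memory_order_relaxed, memory_order_relaxed));
}

int
main(void)
{
    _Atomic uintptr_t from = 0x0f;
    _Atomic uintptr_t to = 0xf0;
    copy_bits_demo(&from, &to, (uintptr_t)0x03);
    /* to is now 0xf3: bits 0-1 copied from `from`, the rest preserved. */
    return 0;
}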
@@ -375,13 +375,6 @@ do { \
 } while (0);
 
 
-// GH-89279: Force inlining by using a macro.
-#if defined(_MSC_VER) && SIZEOF_INT == 4
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
-#else
-#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
-#endif
-
 static inline int _Py_EnterRecursivePy(PyThreadState *tstate) {
     return (tstate->py_recursion_remaining-- <= 0) &&
        _Py_CheckRecursiveCallPy(tstate);
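The context around the second removal shows the fast-path recursion guard _Py_EnterRecursivePy(): decrement a remaining-depth counter and fall through to the slow-path check only once the counter is exhausted. A hypothetical standalone sketch of that decrement-then-check pattern follows; the demo_* names are illustrative and not CPython APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical state holding the remaining recursion budget. */
typedef struct {
    int recursion_remaining;
} demo_state;

static bool
demo_check_recursive_call(demo_state *st)
{
    /* Slow path: in CPython this would raise RecursionError; here we
     * just report exhaustion and clamp the counter. */
    fprintf(stderr, "recursion limit reached\n");
    st->recursion_remaining = 0;
    return true;  /* signal "too deep" */
}

static inline bool
demo_enter_recursive_call(demo_state *st)
{
    /* Fast path is a single decrement; the check runs only when the
     * budget is used up. */
    return (st->recursion_remaining-- <= 0) &&
        demo_check_recursive_call(st);
}

int
main(void)
{
    demo_state st = { .recursion_remaining = 2 };
    for (int i = 0; i < 4; i++) {
        if (demo_enter_recursive_call(&st)) {
            break;
        }
    }
    return 0;
}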