Commit cf6110b

gh-111924: Use PyMutex for Runtime-global Locks. (gh-112207)
This replaces some usages of PyThread_type_lock with PyMutex, which does not require memory allocation to initialize. This simplifies some of the runtime initialization and is also one step towards avoiding changing the default raw memory allocator during initialization/finalization, which can be non-thread-safe in some circumstances.
1 parent db46073 · commit cf6110b

18 files changed, +97 −241 lines
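For readers less familiar with the two lock types: a PyThread_type_lock is an opaque pointer that has to be heap-allocated with PyThread_allocate_lock() before it can be used (and freed again at finalization), while a PyMutex is a one-byte struct whose all-zero value is the unlocked state, so statically zero-initialized runtime state is usable immediately. Below is a minimal sketch of the two patterns, for illustration only; it uses the internal pycore_lock.h API and is not code from this commit.

    #include "Python.h"
    #include "pycore_lock.h"                 // PyMutex (internal, Py_BUILD_CORE only)

    /* Old pattern: the lock is a pointer that must be allocated during runtime
       initialization and freed during finalization. */
    static PyThread_type_lock old_lock;      // NULL until allocated

    static int
    old_style_init(void)
    {
        old_lock = PyThread_allocate_lock(); // heap allocation; can fail
        return (old_lock != NULL) ? 0 : -1;
    }

    static void
    old_style_critical_section(void)
    {
        PyThread_acquire_lock(old_lock, WAIT_LOCK);
        /* ... critical section ... */
        PyThread_release_lock(old_lock);
    }

    /* New pattern: zero-initialized storage is already a valid, unlocked mutex,
       so nothing has to run before the first PyMutex_Lock() call. */
    static PyMutex new_lock;                 // {0} == unlocked

    static void
    new_style_critical_section(void)
    {
        PyMutex_Lock(&new_lock);
        /* ... critical section ... */
        PyMutex_Unlock(&new_lock);
    }

That difference is what lets the diff below delete the ALLOCATORS_MUTEX == NULL fallbacks in Objects/obmalloc.c and the _PyEval_InitState()/_PyEval_FiniState() lock plumbing in Python/ceval_gil.c.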

Include/internal/pycore_atexit.h  (+4 −1)

@@ -1,5 +1,8 @@
 #ifndef Py_INTERNAL_ATEXIT_H
 #define Py_INTERNAL_ATEXIT_H
+
+#include "pycore_lock.h" // PyMutex
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -15,7 +18,7 @@ extern "C" {
 typedef void (*atexit_callbackfunc)(void);

 struct _atexit_runtime_state {
-    PyThread_type_lock mutex;
+    PyMutex mutex;
 #define NEXITFUNCS 32
     atexit_callbackfunc callbacks[NEXITFUNCS];
     int ncallbacks;

Include/internal/pycore_ceval.h  (+1 −2)

@@ -41,8 +41,7 @@ PyAPI_FUNC(int) _PyEval_MakePendingCalls(PyThreadState *);
 #endif

 extern void _Py_FinishPendingCalls(PyThreadState *tstate);
-extern void _PyEval_InitState(PyInterpreterState *, PyThread_type_lock);
-extern void _PyEval_FiniState(struct _ceval_state *ceval);
+extern void _PyEval_InitState(PyInterpreterState *);
 extern void _PyEval_SignalReceived(PyInterpreterState *interp);

 // bitwise flags:

Include/internal/pycore_ceval_state.h  (+2 −1)

@@ -8,14 +8,15 @@ extern "C" {
 # error "this header requires Py_BUILD_CORE define"
 #endif

+#include "pycore_lock.h" // PyMutex
 #include "pycore_gil.h" // struct _gil_runtime_state


 typedef int (*_Py_pending_call_func)(void *);

 struct _pending_calls {
     int busy;
-    PyThread_type_lock lock;
+    PyMutex mutex;
     /* Request for running pending calls. */
     int32_t calls_to_do;
 #define NPENDINGCALLS 32

Include/internal/pycore_crossinterp.h  (+2 −1)

@@ -8,6 +8,7 @@ extern "C" {
 # error "this header requires Py_BUILD_CORE define"
 #endif

+#include "pycore_lock.h" // PyMutex
 #include "pycore_pyerrors.h"


@@ -128,7 +129,7 @@ struct _xidregitem {
 struct _xidregistry {
     int global; /* builtin types or heap types */
     int initialized;
-    PyThread_type_lock mutex;
+    PyMutex mutex;
     struct _xidregitem *head;
 };

Include/internal/pycore_import.h  (+2 −1)

@@ -9,6 +9,7 @@ extern "C" {
 # error "this header requires Py_BUILD_CORE define"
 #endif

+#include "pycore_lock.h" // PyMutex
 #include "pycore_hashtable.h" // _Py_hashtable_t
 #include "pycore_time.h" // _PyTime_t

@@ -47,7 +48,7 @@ struct _import_runtime_state {
     Py_ssize_t last_module_index;
     struct {
         /* A lock to guard the cache. */
-        PyThread_type_lock mutex;
+        PyMutex mutex;
         /* The actual cache of (filename, name, PyModuleDef) for modules.
            Only legacy (single-phase init) extension modules are added
            and only if they support multiple initialization (m_size >- 0)

Include/internal/pycore_lock.h  (+17)

@@ -92,6 +92,13 @@ PyMutex_IsLocked(PyMutex *m)
     return (_Py_atomic_load_uint8(&m->v) & _Py_LOCKED) != 0;
 }

+// Re-initializes the mutex after a fork to the unlocked state.
+static inline void
+_PyMutex_at_fork_reinit(PyMutex *m)
+{
+    memset(m, 0, sizeof(*m));
+}
+
 typedef enum _PyLockFlags {
     // Do not detach/release the GIL when waiting on the lock.
     _Py_LOCK_DONT_DETACH = 0,
@@ -108,6 +115,16 @@ typedef enum _PyLockFlags {
 extern PyLockStatus
 _PyMutex_LockTimed(PyMutex *m, _PyTime_t timeout_ns, _PyLockFlags flags);

+// Lock a mutex with aditional options. See _PyLockFlags for details.
+static inline void
+PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
+{
+    uint8_t expected = _Py_UNLOCKED;
+    if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_LOCKED)) {
+        _PyMutex_LockTimed(m, -1, flags);
+    }
+}
+
 // Unlock a mutex, returns 0 if the mutex is not locked (used for improved
 // error messages).
 extern int _PyMutex_TryUnlock(PyMutex *m);
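Both additions above are small inline helpers: PyMutex_LockFlags() takes the same uncontended fast path as PyMutex_Lock() (a compare-and-swap from _Py_UNLOCKED to _Py_LOCKED) but forwards the given _PyLockFlags to the slow path, and _PyMutex_at_fork_reinit() simply zeroes the mutex so a forked child starts out unlocked. A usage sketch follows; the example_state struct and the functions around it are invented purely for illustration.

    #include "pycore_lock.h"   // PyMutex, PyMutex_LockFlags(), _PyMutex_at_fork_reinit()

    /* Hypothetical runtime-global state guarded by a PyMutex. */
    struct example_state {
        PyMutex mutex;         // zero-initialized == unlocked; no allocation needed
        int counter;
    };

    static struct example_state example = {0};

    static void
    example_bump(void)
    {
        /* Wait for the lock without detaching from the GIL, the same flag
           the new HEAD_LOCK() macro uses in pycore_pystate.h below. */
        PyMutex_LockFlags(&example.mutex, _Py_LOCK_DONT_DETACH);
        example.counter++;
        PyMutex_Unlock(&example.mutex);
    }

    static void
    example_at_fork_reinit(void)
    {
        /* In the child after fork(), force the mutex back to the unlocked state. */
        _PyMutex_at_fork_reinit(&example.mutex);
    }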

Include/internal/pycore_pymem.h  (+4 −1)

@@ -1,5 +1,8 @@
 #ifndef Py_INTERNAL_PYMEM_H
 #define Py_INTERNAL_PYMEM_H
+
+#include "pycore_lock.h" // PyMutex
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -30,7 +33,7 @@ typedef struct {
 } debug_alloc_api_t;

 struct _pymem_allocators {
-    PyThread_type_lock mutex;
+    PyMutex mutex;
     struct {
         PyMemAllocatorEx raw;
         PyMemAllocatorEx mem;

Include/internal/pycore_pystate.h  (+2 −2)

@@ -220,9 +220,9 @@ PyAPI_FUNC(int) _PyState_AddModule(
 extern int _PyOS_InterruptOccurred(PyThreadState *tstate);

 #define HEAD_LOCK(runtime) \
-    PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK)
+    PyMutex_LockFlags(&(runtime)->interpreters.mutex, _Py_LOCK_DONT_DETACH)
 #define HEAD_UNLOCK(runtime) \
-    PyThread_release_lock((runtime)->interpreters.mutex)
+    PyMutex_Unlock(&(runtime)->interpreters.mutex)

 // Get the configuration of the current interpreter.
 // The caller must hold the GIL.
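HEAD_LOCK()/HEAD_UNLOCK() guard the runtime's interpreter list, and _Py_LOCK_DONT_DETACH means a thread that has to wait here keeps holding the GIL rather than detaching (see the flag's comment in pycore_lock.h above). A rough sketch of a caller, purely illustrative: count_interpreters() is made up, the head field is the one shown in pycore_runtime.h below, and the next link is the internal PyInterpreterState list pointer.

    /* Walk the interpreter list while holding the runtime's interpreters mutex. */
    static Py_ssize_t
    count_interpreters(_PyRuntimeState *runtime)
    {
        Py_ssize_t n = 0;
        HEAD_LOCK(runtime);
        for (PyInterpreterState *interp = runtime->interpreters.head;
             interp != NULL;
             interp = interp->next)
        {
            n++;
        }
        HEAD_UNLOCK(runtime);
        return n;
    }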

Include/internal/pycore_runtime.h  (+2 −2)

@@ -173,7 +173,7 @@ typedef struct pyruntimestate {
     unsigned long _finalizing_id;

     struct pyinterpreters {
-        PyThread_type_lock mutex;
+        PyMutex mutex;
         /* The linked list of interpreters, newest first. */
         PyInterpreterState *head;
         /* The runtime's initial interpreter, which has a special role
@@ -234,7 +234,7 @@ typedef struct pyruntimestate {
     Py_OpenCodeHookFunction open_code_hook;
     void *open_code_userdata;
     struct {
-        PyThread_type_lock mutex;
+        PyMutex mutex;
         _Py_AuditHookEntry *head;
     } audit_hooks;

Include/internal/pycore_unicodeobject.h  (+2 −1)

@@ -8,6 +8,7 @@ extern "C" {
 # error "this header requires Py_BUILD_CORE define"
 #endif

+#include "pycore_lock.h" // PyMutex
 #include "pycore_fileutils.h" // _Py_error_handler
 #include "pycore_identifier.h" // _Py_Identifier
 #include "pycore_ucnhash.h" // _PyUnicode_Name_CAPI
@@ -277,7 +278,7 @@ extern PyTypeObject _PyUnicodeASCIIIter_Type;
 /* --- Other API ---------------------------------------------------------- */

 struct _Py_unicode_runtime_ids {
-    PyThread_type_lock lock;
+    PyMutex mutex;
     // next_index value must be preserved when Py_Initialize()/Py_Finalize()
     // is called multiple times: see _PyUnicode_FromId() implementation.
     Py_ssize_t next_index;

Objects/obmalloc.c  (+16 −45)

@@ -329,13 +329,9 @@ int
 _PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
                            PyMemAllocatorEx *old_alloc)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must be initializing. */
-        return set_default_allocator_unlocked(domain, pydebug, old_alloc);
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     int res = set_default_allocator_unlocked(domain, pydebug, old_alloc);
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
     return res;
 }

@@ -467,9 +463,9 @@ set_up_allocators_unlocked(PyMemAllocatorName allocator)
 int
 _PyMem_SetupAllocators(PyMemAllocatorName allocator)
 {
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     int res = set_up_allocators_unlocked(allocator);
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
     return res;
 }

@@ -554,9 +550,9 @@ get_current_allocator_name_unlocked(void)
 const char*
 _PyMem_GetCurrentAllocatorName(void)
 {
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     const char *name = get_current_allocator_name_unlocked();
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
     return name;
 }

@@ -653,14 +649,9 @@ set_up_debug_hooks_unlocked(void)
 void
 PyMem_SetupDebugHooks(void)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must not be completely initialized yet. */
-        set_up_debug_hooks_unlocked();
-        return;
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     set_up_debug_hooks_unlocked();
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
 }

 static void
@@ -696,53 +687,33 @@ set_allocator_unlocked(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
 void
 PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must not be completely initialized yet. */
-        get_allocator_unlocked(domain, allocator);
-        return;
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     get_allocator_unlocked(domain, allocator);
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
 }

 void
 PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must not be completely initialized yet. */
-        set_allocator_unlocked(domain, allocator);
-        return;
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     set_allocator_unlocked(domain, allocator);
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
 }

 void
 PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must not be completely initialized yet. */
-        *allocator = _PyObject_Arena;
-        return;
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     *allocator = _PyObject_Arena;
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
 }

 void
 PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
 {
-    if (ALLOCATORS_MUTEX == NULL) {
-        /* The runtime must not be completely initialized yet. */
-        _PyObject_Arena = *allocator;
-        return;
-    }
-    PyThread_acquire_lock(ALLOCATORS_MUTEX, WAIT_LOCK);
+    PyMutex_Lock(&ALLOCATORS_MUTEX);
     _PyObject_Arena = *allocator;
-    PyThread_release_lock(ALLOCATORS_MUTEX);
+    PyMutex_Unlock(&ALLOCATORS_MUTEX);
 }

Objects/unicodeobject.c  (+2 −2)

@@ -1904,7 +1904,7 @@ _PyUnicode_FromId(_Py_Identifier *id)
     if (index < 0) {
         struct _Py_unicode_runtime_ids *rt_ids = &interp->runtime->unicode_state.ids;

-        PyThread_acquire_lock(rt_ids->lock, WAIT_LOCK);
+        PyMutex_Lock(&rt_ids->mutex);
         // Check again to detect concurrent access. Another thread can have
         // initialized the index while this thread waited for the lock.
         index = _Py_atomic_load_ssize(&id->index);
@@ -1914,7 +1914,7 @@ _PyUnicode_FromId(_Py_Identifier *id)
             rt_ids->next_index++;
             _Py_atomic_store_ssize(&id->index, index);
         }
-        PyThread_release_lock(rt_ids->lock);
+        PyMutex_Unlock(&rt_ids->mutex);
     }
     assert(index >= 0);

Python/ceval_gil.c  (+9 −28)

@@ -589,9 +589,7 @@ _PyEval_ReInitThreads(PyThreadState *tstate)
     take_gil(tstate);

     struct _pending_calls *pending = &tstate->interp->ceval.pending;
-    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
-        return _PyStatus_ERR("Can't reinitialize pending calls lock");
-    }
+    _PyMutex_at_fork_reinit(&pending->mutex);

     /* Destroy all threads except the current one */
     _PyThreadState_DeleteExcept(tstate);
@@ -720,13 +718,10 @@ _PyEval_AddPendingCall(PyInterpreterState *interp,
         assert(_Py_IsMainInterpreter(interp));
         pending = &_PyRuntime.ceval.pending_mainthread;
     }
-    /* Ensure that _PyEval_InitState() was called
-       and that _PyEval_FiniState() is not called yet. */
-    assert(pending->lock != NULL);

-    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+    PyMutex_Lock(&pending->mutex);
     int result = _push_pending_call(pending, func, arg, flags);
-    PyThread_release_lock(pending->lock);
+    PyMutex_Unlock(&pending->mutex);

     /* signal main loop */
     SIGNAL_PENDING_CALLS(interp);
@@ -768,9 +763,9 @@ _make_pending_calls(struct _pending_calls *pending)
         int flags = 0;

         /* pop one item off the queue while holding the lock */
-        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+        PyMutex_Lock(&pending->mutex);
         _pop_pending_call(pending, &func, &arg, &flags);
-        PyThread_release_lock(pending->lock);
+        PyMutex_Unlock(&pending->mutex);

         /* having released the lock, perform the callback */
         if (func == NULL) {
@@ -795,7 +790,7 @@ make_pending_calls(PyInterpreterState *interp)

     /* Only one thread (per interpreter) may run the pending calls
        at once. In the same way, we don't do recursive pending calls. */
-    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
+    PyMutex_Lock(&pending->mutex);
     if (pending->busy) {
         /* A pending call was added after another thread was already
            handling the pending calls (and had already "unsignaled").
@@ -807,11 +802,11 @@ make_pending_calls(PyInterpreterState *interp)
           care of any remaining pending calls. Until then, though,
           all the interpreter's threads will be tripping the eval
           breaker every time it's checked. */
-        PyThread_release_lock(pending->lock);
+        PyMutex_Unlock(&pending->mutex);
         return 0;
     }
     pending->busy = 1;
-    PyThread_release_lock(pending->lock);
+    PyMutex_Unlock(&pending->mutex);

     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
@@ -892,23 +887,9 @@ Py_MakePendingCalls(void)
 }

 void
-_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
+_PyEval_InitState(PyInterpreterState *interp)
 {
     _gil_initialize(&interp->_gil);
-
-    struct _pending_calls *pending = &interp->ceval.pending;
-    assert(pending->lock == NULL);
-    pending->lock = pending_lock;
-}
-
-void
-_PyEval_FiniState(struct _ceval_state *ceval)
-{
-    struct _pending_calls *pending = &ceval->pending;
-    if (pending->lock != NULL) {
-        PyThread_free_lock(pending->lock);
-        pending->lock = NULL;
-    }
 }
