 #include "pycore_pylifecycle.h"
 #include "pycore_pymem.h"          // _PyMem_SetDefaultAllocator()
 #include "pycore_pystate.h"        // _PyThreadState_GET()
+#include "pycore_qsbr.h"
 #include "pycore_runtime_init.h"   // _PyRuntimeState_INIT
 #include "pycore_sysmodule.h"
 #include "pycore_refcnt.h"
@@ -164,12 +165,13 @@ _PyThreadState_Attach(PyThreadState *tstate)
             &tstate->status,
             _Py_THREAD_DETACHED,
             _Py_THREAD_ATTACHED)) {
+        // online for QSBR too
+        _Py_qsbr_online(((PyThreadStateImpl *)tstate)->qsbr);
 
         // resume previous critical section
         if (tstate->critical_section != 0) {
             _Py_critical_section_resume(tstate);
         }
-
         return 1;
     }
     return 0;
@@ -178,6 +180,8 @@ _PyThreadState_Attach(PyThreadState *tstate)
 static void
 _PyThreadState_Detach(PyThreadState *tstate)
 {
+    _Py_qsbr_offline(((PyThreadStateImpl *)tstate)->qsbr);
+
     if (tstate->critical_section != 0) {
         _Py_critical_section_end_all(tstate);
     }
@@ -737,7 +741,8 @@ free_threadstate(PyThreadState *tstate)
 static void
 init_threadstate(PyThreadState *tstate,
                  PyInterpreterState *interp, uint64_t id,
-                 PyThreadState *next)
+                 PyThreadState *next,
+                 struct qsbr *empty_qsbr)
 {
     if (tstate->_initialized) {
         Py_FatalError("thread state already initialized");
@@ -763,6 +768,18 @@ init_threadstate(PyThreadState *tstate,
     tstate->native_thread_id = PyThread_get_thread_native_id();
 #endif
 
+    // First try to recycle an existing qsbr structure
+    PyThreadStateImpl *tstate_impl = (PyThreadStateImpl *)tstate;
+    struct qsbr *recycled = _Py_qsbr_recycle(&_PyRuntime.qsbr_shared, tstate);
+    if (recycled) {
+        tstate_impl->qsbr = recycled;
+    }
+    else {
+        // If no recycled struct, use the newly allocated empty qsbr struct
+        tstate_impl->qsbr = empty_qsbr;
+        _Py_qsbr_register(&_PyRuntime.qsbr_shared, tstate, empty_qsbr);
+    }
+
     tstate->py_recursion_limit = interp->ceval.recursion_limit,
     tstate->py_recursion_remaining = interp->ceval.recursion_limit,
     tstate->c_recursion_remaining = C_RECURSION_LIMIT;
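Recycling matters because other threads may still be scanning a departed thread's qsbr slot while polling for a grace period, so slots are parked rather than freed when a thread exits, and handed out again here. A plausible sketch of the claim step, assuming a "slot is free iff tstate == NULL" convention and that the caller holds HEAD_LOCK; the actual list handling in pycore_qsbr.h may differ:

    #include <stddef.h>

    /* Hypothetical slot layout, for illustration only. */
    struct qsbr {
        void *tstate;        /* owning thread state; NULL when parked */
        struct qsbr *next;   /* intrusive list of all slots, live and parked */
    };
    struct qsbr_shared {
        struct qsbr *slots;  /* head of the slot list */
    };

    /* Claim a parked slot from an exited thread instead of allocating. */
    static struct qsbr *qsbr_recycle_sketch(struct qsbr_shared *shared, void *tstate)
    {
        for (struct qsbr *q = shared->slots; q != NULL; q = q->next) {
            if (q->tstate == NULL) {
                q->tstate = tstate;  /* slot now belongs to the new thread */
                return q;
            }
        }
        return NULL;  /* nothing parked: caller registers empty_qsbr instead */
    }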
@@ -791,6 +808,12 @@ new_threadstate(PyInterpreterState *interp)
     if (new_tstate == NULL) {
         return NULL;
     }
+    struct qsbr *qsbr = PyMem_RawCalloc(1, sizeof(struct qsbr_pad));
+    if (qsbr == NULL) {
+        PyMem_RawFree(new_tstate);
+        return NULL;
+    }
+
     /* We serialize concurrent creation to protect global state. */
     HEAD_LOCK(runtime);
 
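Note that the allocation above asks for sizeof(struct qsbr_pad), not sizeof(struct qsbr). Presumably the _pad variant rounds the per-thread state up to a cache line: each slot's sequence counter is written on every attach and detach, and padding keeps two threads' counters off the same line to avoid false sharing. A sketch of that pattern, assuming 64-byte cache lines (the real definition is in pycore_qsbr.h):

    #include <stdint.h>

    struct qsbr {
        uint64_t seq;   /* written on every attach/detach */
        void *tstate;
    };

    /* Pad each slot to a full cache line so that adjacent threads'
       counters never share one, avoiding false sharing on the hot path. */
    struct qsbr_pad {
        struct qsbr qsbr;
        char _padding[64 - sizeof(struct qsbr) % 64];
    };

PyMem_RawCalloc also zero-fills the block, which is what lets new_threadstate later test qsbr->tstate == NULL to see whether init_threadstate adopted this allocation or recycled a parked slot instead.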
@@ -818,13 +841,17 @@ new_threadstate(PyInterpreterState *interp)
     }
     interp->threads.head = tstate;
 
-    init_threadstate(tstate, interp, id, old_head);
+    init_threadstate(tstate, interp, id, old_head, qsbr);
 
     HEAD_UNLOCK(runtime);
     if (!used_newtstate) {
         // Must be called with lock unlocked to avoid re-entrancy deadlock.
         PyMem_RawFree(new_tstate);
     }
+    if (qsbr->tstate == NULL) {
+        // If the qsbr structure wasn't used, free it here after the unlock.
+        PyMem_RawFree(qsbr);
+    }
     return tstate;
 }
 
@@ -1062,6 +1089,13 @@ tstate_delete_common(PyThreadState *tstate,
         PyThread_tss_set(&gilstate->autoTSSkey, NULL);
     }
 
+    PyThreadStateImpl *tstate_impl = (PyThreadStateImpl *)tstate;
+    if (is_current) {
+        _Py_qsbr_offline(tstate_impl->qsbr);
+    }
+    _Py_qsbr_unregister(tstate_impl->qsbr);
+    tstate_impl->qsbr = NULL;
+
     _PyRuntimeState *runtime = interp->runtime;
     HEAD_LOCK(runtime);
     if (tstate->prev) {
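Teardown mirrors setup: a thread deleting its own state must first go offline (a detached thread already is), and unregistering presumably parks the slot for a later _Py_qsbr_recycle rather than freeing it, since a concurrent grace-period poll may still be reading it. Under the same hypothetical layout as the sketches above:

    #include <stdint.h>

    struct qsbr { uint64_t seq; void *tstate; };  /* as in the sketch above */

    /* Park the slot rather than freeing it: a concurrent scan of the
       slot list may still be reading it for a grace-period check. */
    static void qsbr_unregister_sketch(struct qsbr *q)
    {
        q->seq = 0;        /* no longer delays grace periods */
        q->tstate = NULL;  /* slot is now free to recycle */
    }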