diff --git a/paddle/fluid/eager/pylayer/py_layer_node.cc b/paddle/fluid/eager/pylayer/py_layer_node.cc
index 0383251c9a177..11e9d93da478f 100644
--- a/paddle/fluid/eager/pylayer/py_layer_node.cc
+++ b/paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -34,6 +34,7 @@ GradNodePyLayer::operator()(
                          kSlotSmallVectorSize>& grads,  // NOLINT
     bool create_graph,
     bool is_new_grad) {
+  pybind11::gil_scoped_acquire gil;
   VLOG(3) << "Running Eager Backward Node: " << name();
 
   paddle::small_vector<std::vector<paddle::experimental::Tensor>,
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index f256787805a0f..fd7cd9710c724 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -119,9 +119,12 @@ static PyObject* eager_api_run_backward(PyObject* self,
   EAGER_TRY
   auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
-  egr::Backward(tensors,
-                grad_tensors,
-                CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
+  {
+    eager_gil_scoped_release guard;
+    egr::Backward(tensors,
+                  grad_tensors,
+                  CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
+  }
   RETURN_PY_NONE
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -138,15 +141,18 @@ static PyObject* eager_api_run_partial_grad(PyObject* self,
   auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
   auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
   auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);
-
-  std::vector<paddle::experimental::Tensor> result = egr::Grad(tensors,
-                                                               inputs,
-                                                               grad_tensors,
-                                                               retain_graph,
-                                                               create_graph,
-                                                               only_inputs,
-                                                               allow_unused,
-                                                               no_grad_vars);
+  std::vector<paddle::experimental::Tensor> result;
+  {
+    eager_gil_scoped_release guard;
+    result = egr::Grad(tensors,
+                       inputs,
+                       grad_tensors,
+                       retain_graph,
+                       create_graph,
+                       only_inputs,
+                       allow_unused,
+                       no_grad_vars);
+  }
   VLOG(1) << " in eager_api_run_partial_grad, after runing egr::Grad";
   return ToPyObject(result, true /* return_py_none_if_not_initialize */);
   EAGER_CATCH_AND_THROW_RETURN_NULL
@@ -179,18 +185,21 @@ static PyObject* eager_api_read_next_tensor_list(PyObject* self,
   auto tensor_base_list =
       CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
   std::vector<paddle::experimental::Tensor> tensor_list;
-  tensor_list.reserve(tensor_base_list.size());
-  auto func = [](framework::Tensor& tensor_base) {
-    paddle::experimental::Tensor tensor(
-        egr::Controller::Instance().GenerateUniqueName());
-    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
-    autograd_meta->SetPersistable(false);
-    autograd_meta->SetStopGradient(true);
-    tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
-    return tensor;
-  };
-  for (auto& tensor_base : tensor_base_list) {
-    tensor_list.emplace_back(func(tensor_base));
+  {
+    eager_gil_scoped_release guard;
+    tensor_list.reserve(tensor_base_list.size());
+    auto func = [](framework::Tensor& tensor_base) {
+      paddle::experimental::Tensor tensor(
+          egr::Controller::Instance().GenerateUniqueName());
+      auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
+      autograd_meta->SetPersistable(false);
+      autograd_meta->SetStopGradient(true);
+      tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
+      return tensor;
+    };
+    for (auto& tensor_base : tensor_base_list) {
+      tensor_list.emplace_back(func(tensor_base));
+    }
   }
   return ToPyObject(tensor_list);
   EAGER_CATCH_AND_THROW_RETURN_NULL
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 25dcd91bed0d1..a97e2a8d2d0a7 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -250,5 +250,17 @@ std::vector<paddle::framework::Scope*> GetScopePtrListFromArgs(
     ssize_t arg_idx,
     bool dispensable);
 
+class eager_gil_scoped_release {
+ public:
+  eager_gil_scoped_release() { tstate = PyEval_SaveThread(); }
+  ~eager_gil_scoped_release() {
+    if (!tstate) return;
+    PyEval_RestoreThread(tstate);
+  }
+
+ private:
+  PyThreadState* tstate{nullptr};
+};
+
 }  // namespace pybind
 }  // namespace paddle