
[phi::DenseTensor] Replace Tensor with phi::DenseTensor #48682

Merged · 12 commits · Dec 7, 2022
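The change is mechanical across most files: per-file `using Tensor = phi::DenseTensor;` aliases are deleted, and each use of the alias is spelled out as `phi::DenseTensor`. A minimal sketch of the pattern (the stub namespace and function below are illustrative stand-ins, not code from the Paddle tree):

// Sketch only: a stand-in for phi::DenseTensor so this snippet compiles
// outside the Paddle source tree.
namespace phi {
class DenseTensor {};
}  // namespace phi

void AliasRemovalPattern() {
  // Before this PR, operator files declared a local alias and wrote:
  //   using Tensor = phi::DenseTensor;
  //   Tensor sign_x;
  // After this PR, the alias is gone and the full type name is used:
  phi::DenseTensor sign_x;
  (void)sign_x;  // silence unused-variable warnings in this sketch
}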
35 changes: 18 additions & 17 deletions paddle/fluid/imperative/gradient_accumulator.cc
@@ -155,7 +155,7 @@ TType* GetEmptyInnerTensor(paddle::experimental::Tensor* dst) {
       dst->defined(),
       false,
       platform::errors::Fatal(
-          "The underlying Tensor implementation should be nullptr"));
[Review comment · Contributor]
The imperative directory has already started using the higher-level Tensor concept, so these message strings in this file do not need to be changed for now (the Tensor vs. phi::DenseTensor relationship is sketched after this file's diff).
[Reply · Contributor, Author]
> The imperative directory has already started using the higher-level Tensor concept, so these message strings in this file do not need to be changed for now.

Done; the original wording has been restored.

"The underlying phi::DenseTensor implementation should be nullptr"));
dst->set_impl(std::make_shared<TType>());
auto* dst_tensor = static_cast<TType*>(dst->impl().get());
return dst_tensor;
@@ -634,25 +634,26 @@ void GradientAccumulator::AccumulateGrad() {
 }
 
 void GradientAccumulator::CallGradientHooks() {
-  PADDLE_ENFORCE_EQ(var_->IsLeafGrad(),
-                    true,
-                    platform::errors::Unavailable(
-                        "Only leaf gradient Tensor can deal with by gradient "
-                        "hook in gradient accumulator."));
   PADDLE_ENFORCE_EQ(
-      SumGradCompleted(),
+      var_->IsLeafGrad(),
       true,
-      platform::errors::PreconditionNotMet(
-          "Only can call gradient hooks after sum gradient completed."));
+      platform::errors::Unavailable(
+          "Only leaf gradient phi::DenseTensor can deal with by gradient "
+          "hook in gradient accumulator."));
   PADDLE_ENFORCE_EQ(
-      HasInnerVar(),
+      SumGradCompleted(),
       true,
       platform::errors::PreconditionNotMet(
-          "Leaf Tensor's inner var is nullptr when call gradient hook."));
+          "Only can call gradient hooks after sum gradient completed."));
+  PADDLE_ENFORCE_EQ(HasInnerVar(),
+                    true,
+                    platform::errors::PreconditionNotMet(
+                        "Leaf phi::DenseTensor's inner var is nullptr when "
+                        "call gradient hook."));
   PADDLE_ENFORCE_EQ(
       inner_var_->Var().IsInitialized(),
       true,
-      platform::errors::PreconditionNotMet("Leaf Tensor's inner var "
+      platform::errors::PreconditionNotMet("Leaf phi::DenseTensor's inner var "
                                            "is not initialized when "
                                            "call gradient hook."));
   if (var_->HasVariableWrapperHook()) {
@@ -671,11 +672,11 @@ void GradientAccumulator::CallReduceHooks() {
 }
 
 void GradientAccumulator::CallReduceHooks() {
-  PADDLE_ENFORCE_EQ(
-      var_->IsLeafGrad(),
-      true,
-      platform::errors::Unavailable("Only leaf gradient Tensor can deal with "
-                                    "by reduce hook in gradient accumulator."));
+  PADDLE_ENFORCE_EQ(var_->IsLeafGrad(),
+                    true,
+                    platform::errors::Unavailable(
+                        "Only leaf gradient phi::DenseTensor can deal with "
+                        "by reduce hook in gradient accumulator."));
   PADDLE_ENFORCE_EQ(SumGradCompleted(),
                     true,
                     platform::errors::PreconditionNotMet(
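The review thread above turns on the distinction between the user-facing paddle::experimental::Tensor and the phi::DenseTensor that backs it. Below is a toy model of that relationship, mirroring GetEmptyInnerTensor from the first hunk. The classes are simplified stand-ins, not Paddle's real definitions; only the calls defined(), set_impl(), and impl() are taken from the diff itself.

#include <cassert>
#include <memory>

// Stand-in for phi::DenseTensor: the concrete storage type.
struct DenseTensor {};

// Stand-in for paddle::experimental::Tensor: a high-level handle that owns
// its implementation through a shared_ptr, as the diff's calls suggest.
class Tensor {
 public:
  bool defined() const { return impl_ != nullptr; }
  void set_impl(std::shared_ptr<DenseTensor> impl) { impl_ = std::move(impl); }
  const std::shared_ptr<DenseTensor>& impl() const { return impl_; }

 private:
  std::shared_ptr<DenseTensor> impl_;
};

// Mirrors the diff's GetEmptyInnerTensor: require an empty handle, then
// allocate a fresh inner DenseTensor and hand back a raw pointer to it.
DenseTensor* GetEmptyInnerTensor(Tensor* dst) {
  assert(!dst->defined());  // the real code enforces this with PADDLE_ENFORCE_EQ
  dst->set_impl(std::make_shared<DenseTensor>());
  return static_cast<DenseTensor*>(dst->impl().get());
}

int main() {
  Tensor t;
  DenseTensor* inner = GetEmptyInnerTensor(&t);
  assert(t.defined() && inner != nullptr);
  return 0;
}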
4 changes: 1 addition & 3 deletions paddle/fluid/operators/abs_op_mlu.cc
@@ -18,8 +18,6 @@ limitations under the Licnse. */
 namespace paddle {
 namespace operators {
 
-using Tensor = phi::DenseTensor;
-
 template <typename T>
 class AbsMLUKernel : public framework::OpKernel<T> {
  public:
@@ -54,7 +52,7 @@ class AbsGradMLUKernel : public framework::OpKernel<T> {
     MLUCnnlOpTensorDesc mul_op_desc(
         CNNL_OP_TENSOR_MUL, ToCnnlDataType<T>(), CNNL_NOT_PROPAGATE_NAN);
 
-    Tensor sign_x;
+    phi::DenseTensor sign_x;
     sign_x.mutable_data<T>(x->dims(), ctx.GetPlace());
 
     MLUCnnl::Sign(ctx,
2 changes: 0 additions & 2 deletions paddle/fluid/operators/abs_op_npu.cc
@@ -18,8 +18,6 @@ limitations under the Licnse. */
 namespace paddle {
 namespace operators {
 
-using Tensor = phi::DenseTensor;
-
 template <typename DeviceContext, typename T>
 class AbsNPUKernel : public framework::OpKernel<T> {
  public:
4 changes: 1 addition & 3 deletions paddle/fluid/operators/activation_op_mlu.cc
@@ -21,8 +21,6 @@ limitations under the Licnse. */
 namespace paddle {
 namespace operators {
 
-using Tensor = phi::DenseTensor;
-
 template <cnnlActivationMode_t act_mode, typename T>
 class ActivationMLUKernel : public framework::OpKernel<T> {
  public:
@@ -442,7 +440,7 @@ class ReciprocalGradMLUKernel : public framework::OpKernel<T> {
     auto* dx = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
     auto place = ctx.GetPlace();
     dx->mutable_data<T>(place);
-    Tensor square_out;
+    phi::DenseTensor square_out;
     square_out.Resize(out->dims());
     square_out.mutable_data<T>(place);
     MLUCnnlTensorDesc out_desc(*out);
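The ReciprocalGrad hunk above uses a common scratch-tensor pattern: declare a phi::DenseTensor, Resize() it to the desired shape, then call mutable_data<T>(place) to allocate storage. A toy single-file model of that shape-then-allocate sequence follows; ToyDenseTensor is hypothetical, and the real DenseTensor allocates on a device Place and does much more.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical stand-in for phi::DenseTensor, modeling only the
// Resize-then-mutable_data allocation sequence used in the kernels above.
class ToyDenseTensor {
 public:
  // Record the shape; no memory is allocated yet.
  void Resize(std::vector<int64_t> dims) { dims_ = std::move(dims); }

  // Allocate storage large enough for the recorded shape and return a
  // typed pointer. The real API also takes a Place (device) argument.
  template <typename T>
  T* mutable_data() {
    size_t n = 1;
    for (int64_t d : dims_) n *= static_cast<size_t>(d);
    buffer_.resize(n * sizeof(T));
    return reinterpret_cast<T*>(buffer_.data());
  }

 private:
  std::vector<int64_t> dims_;
  std::vector<uint8_t> buffer_;
};

int main() {
  // Same shape-then-allocate order as `square_out` in ReciprocalGradMLUKernel.
  ToyDenseTensor square_out;
  square_out.Resize({2, 3});
  float* data = square_out.mutable_data<float>();
  data[0] = 1.0f;  // storage is live after mutable_data
  return 0;
}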