Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor dygraph to eager -- TensorWrapper, EagerUtils, GlobalUtils #37466

Merged
merged 21 commits into from
Nov 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions paddle/fluid/eager/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
# Build order: tests and api subtrees first, then the eager-mode core libs.
add_subdirectory(tests)
add_subdirectory(api)
# Core eager-mode libraries; utils additionally links autograd_meta and
# the eager_api umbrella target defined under api/.
cc_library(grad_node_info SRCS grad_node_info.cc DEPS pten pten_api)
cc_library(autograd_meta SRCS autograd_meta.cc DEPS pten pten_api)
cc_library(utils SRCS utils.cc DEPS pten pten_api autograd_meta eager_api)
3 changes: 3 additions & 0 deletions paddle/fluid/eager/api/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
add_subdirectory(utils)

# Umbrella target for the eager API surface (see all.h / all.cc).
cc_library(eager_api SRCS all.cc DEPS global_utils)
18 changes: 18 additions & 0 deletions paddle/fluid/eager/api/all.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

不重要,这几个文件好像都多了一行空的注释,建议后续移除

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

好的 感谢


#include "paddle/fluid/eager/api/all.h"

// Placeholder translation unit for the eager_api library target; the
// namespace is intentionally empty for now.
namespace egr {}  // namespace egr
17 changes: 17 additions & 0 deletions paddle/fluid/eager/api/all.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma once

#include "paddle/fluid/eager/api/utils/global_utils.h"
1 change: 1 addition & 0 deletions paddle/fluid/eager/api/utils/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# global_utils provides the egr::Controller singleton (global_utils.h/.cc);
# it only needs the enforce error-checking utilities.
cc_library(global_utils SRCS global_utils.cc DEPS enforce)
22 changes: 22 additions & 0 deletions paddle/fluid/eager/api/utils/global_utils.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "paddle/fluid/eager/api/utils/global_utils.h"

namespace egr {

// Eagerly-created singleton returned by Controller::Instance().
// Intentionally never deleted: it lives for the whole process lifetime.
Controller* Controller::controller_ = new Controller();

} // namespace egr
62 changes: 62 additions & 0 deletions paddle/fluid/eager/api/utils/global_utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#pragma once

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/platform/enforce.h"

namespace egr {

// Generates names of the form "<prefix><key>_<id>", where <id> is an
// atomically incremented counter shared across all keys of this
// generator, so concurrent Generate() calls never produce the same id.
class UniqueNameGenerator {
 public:
  // Take the prefix by value and move it into the member to avoid an
  // extra string copy (the original copied the by-value parameter).
  explicit UniqueNameGenerator(std::string prefix = "")
      : prefix_(std::move(prefix)) {}

  // Returns a fresh unique name for the given key, e.g. "eager_tmp_0".
  std::string Generate(std::string key = "eager_tmp") {
    return prefix_ + key + "_" + std::to_string(id_++);
  }

 private:
  std::atomic<int> id_{0};  // atomic: Generate() may be called concurrently
  std::string prefix_;
};

// Global process-wide singleton holding eager-mode state: the expected
// execution place, the AMP level, the grad flag, and a unique-name
// generator for temporary variables.
class Controller {
 public:
  // Returns the singleton (created eagerly in global_utils.cc, never null).
  static Controller& Instance() { return *controller_; }

  // Returns the place ops are expected to run on.
  // expected_place_ starts as nullptr, so fail with a readable error if
  // SetExpectedPlace() was never called instead of dereferencing null.
  const paddle::platform::Place& GetExpectedPlace() const {
    PADDLE_ENFORCE_NOT_NULL(
        expected_place_,
        paddle::platform::errors::PreconditionNotMet(
            "Expected place has not been set. Please call "
            "SetExpectedPlace() before GetExpectedPlace()."));
    return *expected_place_;
  }
  void SetExpectedPlace(const paddle::platform::Place& place) {
    expected_place_ = std::make_shared<paddle::platform::Place>(place);
  }
  void SetAMPLevel(int level) { amp_level_ = level; }
  int GetAMPLevel() const { return amp_level_; }
  bool HasGrad() const { return has_grad_; }
  // Delegates to the shared UniqueNameGenerator; counter is global.
  std::string GenerateUniqueName(std::string key = "eager_tmp") {
    return generator_->Generate(key);
  }

 private:
  Controller() = default;
  static Controller* controller_;
  std::shared_ptr<paddle::platform::Place> expected_place_ = nullptr;
  int amp_level_ = 0;
  bool has_grad_ = true;
  std::unique_ptr<UniqueNameGenerator> generator_{new UniqueNameGenerator()};
  DISABLE_COPY_AND_ASSIGN(Controller);
};

} // namespace egr
1 change: 0 additions & 1 deletion paddle/fluid/eager/eager_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

#pragma once
// framework deps
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
Expand Down
91 changes: 91 additions & 0 deletions paddle/fluid/eager/tensor_wrapper.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/**
 * We still need TensorWrapper: it is designed to copy a tensor in
 * autograd mode.
 *
 * In autograd usage we need to pass autograd_meta through to the
 * backward computation; however, adding too many autograd-related
 * methods to the tensor interface itself is not a good choice.
 *
 * TensorWrapper keeps the autograd info needed for backward, but only
 * for input vars; for an output var it only copies the tensor, without
 * grad info. **/

#pragma once
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/utils.h"

namespace egr {
// Saves a forward tensor so it can be recovered for backward computation.
// full_reserved == true keeps the tensor (including its autograd meta)
// as-is; false keeps only a shallow copy plus the out-rank info needed
// to rebuild the backward edge later.
class TensorWrapper {
 public:
  TensorWrapper() = default;
  explicit TensorWrapper(const egr::EagerTensor& tensor,
                         bool full_reserved = false) {
    /**
     * Normally, we should fully reserve all non-output / non-leaf fwd
     * tensors here. For a fwd output tensor we must NOT reserve its
     * autogradmeta, to avoid a recursive dependency on GradNodeBase.
     * **/
    full_reserved_ = full_reserved;
    if (full_reserved_) {
      VLOG(6) << "Fully reserved tensor: " << tensor.name();
      intermediate_tensor_ = tensor;
      return;
    }

    // Shallow copy: share the tensor impl and underlying Var, but rename
    // so the saved tensor is distinguishable from the live one.
    intermediate_tensor_.set_impl(tensor.impl());
    intermediate_tensor_.ResetVar(tensor.Var());
    intermediate_tensor_.set_name(tensor.name() + "@Saved");
    // NOTE: this check guards the non-full-reserved path; the original
    // message wrongly said "Full reserved Tensor".
    PADDLE_ENFORCE_NOT_NULL(
        EagerUtils::unsafe_autograd_meta(tensor),
        paddle::platform::errors::Fatal(
            "Non-full-reserved Tensor should not have null autograd meta, "
            "since tensor_wrapper is used to build backward info. There is "
            "no way for us to build it with null autograd_meta."));
    // Record where this tensor sits among the fwd op's outputs so the
    // backward edge can be rebuilt in recover().
    out_rank_info_ = EagerUtils::OutRankInfo(tensor);
  }

  // Rebuilds the saved tensor for backward. Returns an empty EagerTensor
  // when nothing was saved; returns the tensor unchanged when it was
  // fully reserved; otherwise attaches a fresh AutogradMeta whose edge
  // points at grad_node with the recorded out-rank info.
  egr::EagerTensor recover(const std::shared_ptr<GradNodeBase>& grad_node) {
    VLOG(6) << "Recover tensor for wrapper";
    if ((!intermediate_tensor_.defined()) &&
        (!intermediate_tensor_.Var().IsInitialized())) {
      VLOG(6) << "Return NULL tensor Here. ";
      return egr::EagerTensor();
    }

    // If it's full_reserved just return the full copy of the tensor.
    if (full_reserved_) {
      return intermediate_tensor_;
    } else {
      std::shared_ptr<GradNodeBase> new_grad_node = grad_node;
      auto p_ab_autograd_meta =
          std::make_shared<AutogradMeta>(Edge(new_grad_node, out_rank_info_));
      intermediate_tensor_.set_autograd_meta(
          std::static_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
              p_ab_autograd_meta));
      return intermediate_tensor_;
    }
  }

 private:
  bool full_reserved_ = false;
  // (first, second) = (slot, rank) of the saved tensor in fwd outputs.
  std::pair<size_t, size_t> out_rank_info_;
  egr::EagerTensor intermediate_tensor_;  // fixed typo: was intermidiate_
};
} // namespace egr
1 change: 1 addition & 0 deletions paddle/fluid/eager/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
# Common link deps shared by every eager-mode test below.
set(eager_deps pten pten_api)
add_subdirectory(data_structure_tests)
add_subdirectory(task_tests)
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# Unit tests for eager-mode data structures; each links ${eager_deps}
# (set in the parent CMakeLists) plus whatever library it exercises.
cc_test(test_egr_ds_eager_tensor SRCS eager_tensor_test.cc DEPS ${eager_deps} )
cc_test(test_egr_ds_auotgrad_meta SRCS autograd_meta_test.cc DEPS ${eager_deps} grad_node_info)
cc_test(test_egr_ds_grad_node_info SRCS grad_node_info_test.cc DEPS ${eager_deps} grad_node_info)
cc_test(test_egr_ds_tensor_wrapper SRCS tensor_wrapper_test.cc DEPS ${eager_deps} grad_node_info utils)
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/pten/api/lib/utils/allocator.h"
namespace egr {
class TensorWrapper;
}

namespace eager_test {
class GradTestNode : public egr::GradNodeBase {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/fluid/eager/tensor_wrapper.h"
#include "paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h"
#include "paddle/fluid/eager/utils.h"

// Covers the three TensorWrapper paths: full-reserved recover,
// non-full-reserved (reconstructed autograd meta) recover, and recover
// of an empty wrapper.
TEST(TensorWrapper, Basic) {
  VLOG(6) << "Test Full reserved";
  // Build a 1x2 float32 dense tensor on CPU with a grad node attached.
  egr::EagerTensor et1;
  pten::DenseTensorMeta meta = pten::DenseTensorMeta(
      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
  std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  auto* dt_ptr = dt->mutable_data<float>();
  dt_ptr[0] = 5.0f;
  dt_ptr[1] = 10.0f;
  et1.set_impl(dt);
  // Create grad node;
  auto grad_test_node0 = std::make_shared<eager_test::GradTestNode>(
      /* val */ 5.0, /* in_num */ 2, /* out_num */ 2);
  egr::Edge edge0(grad_test_node0, 1, 2);
  auto auto_grad0 = std::make_shared<egr::AutogradMeta>(edge0);
  et1.set_autograd_meta(auto_grad0);
  et1.set_name("et1");
  // Full-reserved wrapper: recover must hand back the identical tensor
  // (same name, same out-rank info), regardless of the node passed in.
  auto tw0 = egr::TensorWrapper(et1, true);
  auto recover_et1 = tw0.recover(std::make_shared<eager_test::GradTestNode>());
  CHECK_EQ(recover_et1.name(), std::string("et1"));
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et1).first,
           egr::EagerUtils::OutRankInfo(et1).first);
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et1).second,
           egr::EagerUtils::OutRankInfo(et1).second);
  VLOG(6) << "Test reconstruct";
  egr::EagerTensor et2;
  pten::DenseTensorMeta meta2 = pten::DenseTensorMeta(
      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
  std::shared_ptr<pten::DenseTensor> dt2 = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta2);
  // BUGFIX: write through dt2 (the second tensor); the original wrote
  // through dt, leaving dt2's buffer untouched.
  auto* dt_ptr2 = dt2->mutable_data<float>();
  dt_ptr2[0] = 6.0f;
  dt_ptr2[1] = 11.0f;
  et2.set_impl(dt2);
  et2.set_name("et2");
  auto grad_test_node1 =
      std::make_shared<eager_test::GradTestNode>(/* val */ 5.0, 2, 2);
  egr::Edge edge1(grad_test_node1, 1, 2);
  auto auto_grad1 = std::make_shared<egr::AutogradMeta>(edge1);
  et2.set_autograd_meta(auto_grad1);
  // Non-full-reserved wrapper: recover renames to "@Saved" and rebuilds
  // autograd meta pointing at the supplied grad node.
  auto tw1 = egr::TensorWrapper(et2, false);
  auto recover_et2 = tw1.recover(grad_test_node1);
  CHECK_EQ(recover_et2.name(), std::string("et2@Saved"));
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et2).first,
           egr::EagerUtils::OutRankInfo(et2).first);
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et2).second,
           egr::EagerUtils::OutRankInfo(et2).second);
  // Test Raw recover: an empty wrapper yields an uninitialized tensor.
  egr::EagerTensor et3;
  auto tw2 = egr::TensorWrapper(et3, true);
  CHECK(
      tw2.recover(std::make_shared<eager_test::GradTestNode>()).initialized() ==
      false);
}
1 change: 1 addition & 0 deletions paddle/fluid/eager/tests/task_tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# EagerUtils task-level test; needs the full utils stack on top of ${eager_deps}.
cc_test(test_egr_task_eager_utils SRCS eager_utils_test.cc DEPS ${eager_deps} grad_node_info autograd_meta utils)
Loading