[Cherry-pick] Optimize log (#45783) #46133

Merged: 1 commit merged on Sep 19, 2022
18 changes: 17 additions & 1 deletion paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -16,6 +16,7 @@

#include "glog/logging.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/imperative/gradient_accumulator.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
@@ -89,7 +90,7 @@ GradNodeAccumulation::operator()(
kSlotSmallVectorSize>& grads, // NOLINT
bool create_graph,
bool is_new_grad) {
VLOG(3) << "Running Eager Backward Node: GradNodeAccumulation";
VLOG(3) << "Running AD API Grad: GradNodeAccumulation";
PADDLE_ENFORCE(grads.size() == 1,
paddle::platform::errors::Fatal(
"GradNodeAccumulation should take exactly 1 grad tensor"
@@ -122,7 +123,22 @@ GradNodeAccumulation::operator()(
if (ReduceHooksRegistered()) {
ApplyReduceHooks();
}
VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";

std::string input_str = "";
std::string output_str = "";
const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
std::string output_x_grad_str = paddle::string::Sprintf(
TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
output_str += output_x_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
return {{grad_out}};
}

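The core of this hunk is the new VLOG_IS_ON(4) block: the detailed input/output dump is only formatted when verbose logging is actually enabled at level 4, so the default path pays nothing for the string construction. Below is a minimal standalone sketch of that pattern using plain glog, not part of this PR; the DescribeTensor helper is a hypothetical stand-in for egr::EagerUtils::TensorStr.

```cpp
#include <string>

#include "glog/logging.h"

namespace {

// Hypothetical stand-in for egr::EagerUtils::TensorStr: building a verbose
// tensor description is comparatively expensive, so it should only run when
// the log line would actually be emitted.
std::string DescribeTensor(const std::string& name) {
  return name + "(shape=[...], dtype=float32, place=CPU)";
}

}  // namespace

void RunAccumulationNode() {
  VLOG(3) << "Running AD API Grad: GradNodeAccumulation";
  // ... gradient accumulation work would happen here ...
  VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";

  // Guard the string construction so the common path (verbosity below 4)
  // skips the detailed input/output dump entirely.
  if (VLOG_IS_ON(4)) {
    std::string input_str = "(grads[0][0], [" + DescribeTensor("grad_in") + "]), ";
    std::string output_str = "(grad_out, [" + DescribeTensor("grad_out") + "]), ";
    VLOG(4) << "{ Input: [" << input_str << "], Output: [" << output_str << "] }";
  }
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;
  FLAGS_v = 4;  // equivalent to running with --v=4 or GLOG_v=4
  RunAccumulationNode();
  return 0;
}
```

Run with verbosity 4 or higher to see the detailed line; at the default verbosity the branch is skipped and no formatting work is done.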
6 changes: 3 additions & 3 deletions paddle/fluid/eager/accumulation/accumulation_node.h
@@ -24,7 +24,7 @@ class GradNodeAccumulation : public GradNodeBase {
public:
// Constructor: configure fwd input tensors to grad node
explicit GradNodeAccumulation(AutogradMeta* meta) : GradNodeBase(1, 1) {
VLOG(6) << "Construct GradNodeAccumulation";
VLOG(5) << "Construct GradNodeAccumulation";
if (meta) {
weak_grad_ = meta->WeakGrad();
}
@@ -33,7 +33,7 @@ class GradNodeAccumulation : public GradNodeBase {
}

~GradNodeAccumulation() override {
VLOG(6) << "Destruct GradNodeAccumulation";
VLOG(5) << "Destruct GradNodeAccumulation";
}

// Functor: perform backward computations
@@ -44,7 +44,7 @@ class GradNodeAccumulation : public GradNodeBase {
bool create_graph = false,
bool is_new_grad = false) override;

void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }
void ClearTensorWrappers() override { VLOG(5) << "Do nothing here now"; }

std::string name() { return "GradNodeAccumulation"; }

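In the header, the construct/destruct messages move from VLOG(6) to VLOG(5). Paddle's VLOG here is glog's (the .cc above includes "glog/logging.h"), and a message logged at level n is emitted only when the verbosity threshold is at least n, so the lower level makes these messages visible one step earlier. A small sketch, not part of this PR:

```cpp
#include "glog/logging.h"

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;
  FLAGS_v = 5;  // same effect as running with --v=5 or GLOG_v=5

  VLOG(5) << "Construct GradNodeAccumulation";  // emitted at verbosity 5
  VLOG(6) << "Destruct GradNodeAccumulation";   // still suppressed until verbosity 6
  return 0;
}
```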
(next changed file; file path not captured)
@@ -16,10 +16,10 @@

#include "paddle/phi/api/include/tensor.h"

paddle::experimental::Tensor add_n_dygraph_function(
paddle::experimental::Tensor add_n_ad_func(
const std::vector<paddle::experimental::Tensor>& x);

paddle::experimental::Tensor conv2d_dygraph_function(
paddle::experimental::Tensor conv2d_ad_func(
const paddle::experimental::Tensor& input,
const paddle::experimental::Tensor& filter,
std::vector<int> strides,
(next changed file; file path not captured)
@@ -23,7 +23,7 @@
#pragma GCC diagnostic ignored "-Wunused-variable"
DECLARE_bool(check_nan_inf);

paddle::experimental::Tensor add_n_dygraph_function(
paddle::experimental::Tensor add_n_ad_func(
const std::vector<paddle::experimental::Tensor>& x) {
// Dygraph Record Event
paddle::platform::RecordEvent dygraph_entrance_record_event(
@@ -46,7 +46,7 @@ paddle::experimental::Tensor add_n_dygraph_function(
paddle::imperative::AutoCastGuard guard(
egr::Controller::Instance().GetCurrentTracer(),
paddle::imperative::AmpLevel::O0);
return add_n_dygraph_function(NEW_x);
return add_n_ad_func(NEW_x);
}
}

@@ -56,7 +56,7 @@ paddle::experimental::Tensor add_n_dygraph_function(
std::vector<egr::AutogradMeta*>* x_autograd_meta = &x_autograd_meta_vec;
// Forward API Call
VLOG(3) << "Final State Running: "
<< "add_n_dygraph_function";
<< "add_n_ad_func";
auto api_result = paddle::experimental::add_n(x);
// Check NaN and Inf if needed
if (FLAGS_check_nan_inf) {
(next changed file; file path not captured)
@@ -24,7 +24,7 @@
#pragma GCC diagnostic ignored "-Wunused-variable"
DECLARE_bool(check_nan_inf);

paddle::experimental::Tensor conv2d_dygraph_function(
paddle::experimental::Tensor conv2d_ad_func(
const paddle::experimental::Tensor& input,
const paddle::experimental::Tensor& filter,
std::vector<int> strides,
@@ -60,17 +60,17 @@ paddle::experimental::Tensor conv2d_dygraph_function(
paddle::imperative::AutoCastGuard guard(
egr::Controller::Instance().GetCurrentTracer(),
paddle::imperative::AmpLevel::O0);
return conv2d_dygraph_function(NEW_input,
NEW_filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
return conv2d_ad_func(NEW_input,
NEW_filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
}
}

@@ -89,17 +89,17 @@ paddle::experimental::Tensor conv2d_dygraph_function(
bool is_enable_tune =
paddle::imperative::LayoutAutoTune::Instance().UseLayoutAutoTune();
paddle::imperative::LayoutAutoTune::Instance().DisableLayoutAutoTune();
auto out = conv2d_dygraph_function(NEW_input,
filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
auto out = conv2d_ad_func(NEW_input,
filter,
strides,
paddings,
paddding_algorithm,
groups,
dilations,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search);
transformer->SetOutTensorLayout(&out);
if (is_enable_tune) {
paddle::imperative::LayoutAutoTune::Instance().EnableLayoutAutoTune();
@@ -115,7 +115,7 @@ paddle::experimental::Tensor conv2d_dygraph_function(
egr::EagerUtils::nullable_autograd_meta(filter);
// Forward API Call
VLOG(3) << "Final State Running: "
<< "conv2d_dygraph_function";
<< "conv2d_ad_func";
auto api_result = paddle::experimental::conv2d(input,
filter,
strides,
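Besides the rename to conv2d_ad_func, the AMP and layout-auto-tune branches above share one shape: transform the inputs, temporarily disable the feature, and re-enter the same AD function so the nested call takes the plain path. The following is a simplified, self-contained sketch of the layout-auto-tune re-entry only; the Tensor and LayoutAutoTune types are hypothetical stand-ins, not Paddle's real classes.

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-ins for the types used in conv2d_ad_func above.
struct Tensor {
  std::string layout = "NCHW";
};

class LayoutAutoTune {
 public:
  static LayoutAutoTune& Instance() {
    static LayoutAutoTune inst;
    return inst;
  }
  bool UseLayoutAutoTune() const { return enabled_; }
  void DisableLayoutAutoTune() { enabled_ = false; }
  void EnableLayoutAutoTune() { enabled_ = true; }

 private:
  bool enabled_ = true;
};

Tensor conv2d_ad_func(const Tensor& input) {
  // Layout auto-tune branch: transform the input once, disable tuning, and
  // re-enter the same AD function so the nested call runs the plain path.
  if (LayoutAutoTune::Instance().UseLayoutAutoTune()) {
    Tensor transformed = input;  // stand-in for transformer->TransInTensor(...)
    transformed.layout = "NHWC";
    LayoutAutoTune::Instance().DisableLayoutAutoTune();
    Tensor out = conv2d_ad_func(transformed);
    LayoutAutoTune::Instance().EnableLayoutAutoTune();  // restore for later calls
    return out;
  }
  // Plain forward path (the "Final State Running" branch above).
  std::cout << "Final State Running: conv2d_ad_func, layout=" << input.layout << "\n";
  return input;
}

int main() {
  conv2d_ad_func(Tensor{});
  return 0;
}
```

Disabling tuning before the recursive call is what guarantees the recursion terminates after a single re-entry.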
(next changed file; file path not captured)
@@ -64,8 +64,7 @@ AddNGradNodeFinal::operator()(

// dygraph function
for (size_t i = 0; i < returns[0].size(); i++) {
returns[0][i] =
::scale_dygraph_function(out_grad, phi::Scalar(1.0), 0.0, true);
returns[0][i] = ::scale_ad_func(out_grad, phi::Scalar(1.0), 0.0, true);
}

// Check NaN and Inf id needed
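For context on this hunk: add_n computes y = x_0 + x_1 + ... + x_{n-1}, so each input's gradient is the upstream gradient unchanged (dy/dx_i = 1). That is why every returns[0][i] is scale_ad_func(out_grad, 1.0, 0.0, true), i.e. out_grad * 1.0 + 0.0, an identity copy of out_grad for each input.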
(next changed file; file path not captured)
@@ -531,7 +531,6 @@ fused_attention_dygraph_function(
egr::EagerUtils::SetHistory(p_autograd_Y, grad_node);
grad_node->SetGradInMeta(Y, 19);
egr::EagerUtils::CheckAndRetainGrad(Y);

auto QKVOut_accumulation_node =
std::make_shared<egr::GradNodeAccumulation>(p_autograd_QKVOut);
egr::EagerUtils::SetOutRankWithSlot(p_autograd_QKVOut, 0);
19 changes: 16 additions & 3 deletions paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
@@ -161,11 +161,24 @@ def str2Hump(text):
string = str2Hump(string)
if string.rfind("Grad") == (len(string) - 4):
string = string[:-4]
return f"{string}GradNodeFinal"
return f"{string}GradNode"


def GetDygraphForwardFunctionName(string):
return f"{string}_dygraph_function"
return f"{string}_ad_func"


def GetDygraphLogName(string):

def str2Hump(text):
arr = filter(None, text.split('_'))
res = ''
for i in arr:
res = res + i[0].upper() + i[1:]
return res

string = str2Hump(string)
return string


def GetIntermediateAPIFunctionName(string):
@@ -198,7 +211,7 @@ def GetInplacedFunctionName(function_name):


def GetForwardFunctionName(string):
return f"{string}_dygraph_function"
return f"{string}_ad_func"


def GetIndent(num):