[clang-tidy][task 5] enable modernize-make-shared and fix existing linter errors (PaddlePaddle#55807)

SigureMo authored and BeingGod committed Sep 9, 2023
1 parent e7b6b58 commit 6aae623
Showing 17 changed files with 51 additions and 57 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -170,7 +170,7 @@ modernize-avoid-c-arrays,
-modernize-deprecated-headers,
-modernize-deprecated-ios-base-aliases,
modernize-loop-convert,
-modernize-make-shared,
modernize-make-shared,
modernize-make-unique,
-modernize-pass-by-value,
modernize-raw-string-literal,
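For context, a minimal standalone sketch (not part of this commit) of the pattern the newly enabled modernize-make-shared check flags and the form it prefers; Widget is a made-up type used only for illustration:

```cpp
#include <memory>
#include <string>

// Hypothetical example type; any ordinary class behaves the same way here.
struct Widget {
  Widget(int id, std::string name) : id(id), name(std::move(name)) {}
  int id;
  std::string name;
};

int main() {
  // Flagged by modernize-make-shared: two allocations, one for the Widget and
  // one for the shared_ptr control block.
  auto a = std::shared_ptr<Widget>(new Widget(1, "a"));

  // Preferred form: a single allocation holding both the object and the
  // control block, and no raw `new` expression left to leak on an exception.
  auto b = std::make_shared<Widget>(2, "b");

  // Where the raw-`new` form is kept deliberately (see the generated grad-node
  // code below), this commit appends `// NOLINT` to silence the check instead.
  auto c = std::shared_ptr<Widget>(new Widget(3, "c"));  // NOLINT
  return 0;
}
```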
@@ -83,8 +83,8 @@ paddle::Tensor add_n_ad_func(const std::vector<paddle::Tensor>& x) {
egr::EagerUtils::PassStopGradient(false, out_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<AddNGradNodeFinal>(new AddNGradNodeFinal(1, 1));
auto grad_node = std::shared_ptr<AddNGradNodeFinal>( // NOLINT
new AddNGradNodeFinal(1, 1));

// Set forward's stack
if (FLAGS_check_nan_inf) {
@@ -137,8 +137,8 @@ paddle::Tensor conv2d_ad_func(const paddle::Tensor& input,
egr::EagerUtils::PassStopGradient(false, out_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<Conv2dGradNodeFinal>(new Conv2dGradNodeFinal(1, 2));
auto grad_node = std::shared_ptr<Conv2dGradNodeFinal>( // NOLINT
new Conv2dGradNodeFinal(1, 2));

// Set forward's stack
if (FLAGS_check_nan_inf) {
@@ -132,8 +132,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
egr::EagerUtils::PassStopGradient(false, out_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
auto grad_node = std::shared_ptr<MultiplyGradNode>( // NOLINT
new MultiplyGradNode(1, 2));
// Set for forward trace
if (FLAGS_check_nan_inf) {
grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -275,7 +275,8 @@ paddle::Tensor& multiply__ad_func(paddle::Tensor& x, // NOLINT
paddle::platform::TracerEventType::OperatorInner,
1);

grad_node = std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
grad_node = std::shared_ptr<MultiplyGradNode>( // NOLINT
new MultiplyGradNode(1, 2));
// Set for forward trace
if (FLAGS_check_nan_inf) {
grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -462,8 +463,8 @@ paddle::Tensor multiply_ad_func(const paddle::Tensor& x,
egr::EagerUtils::PassStopGradient(false, out_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<MultiplyGradNode>(new MultiplyGradNode(1, 2));
auto grad_node = std::shared_ptr<MultiplyGradNode>( // NOLINT
new MultiplyGradNode(1, 2));
// Set for forward trace
if (FLAGS_check_nan_inf) {
grad_node->SetForwardTrace(egr::Controller::Instance().GetPythonStack());
@@ -225,8 +225,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
reserve_space_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
auto grad_node = std::shared_ptr<SyncBatchNormGradNode>( // NOLINT
new SyncBatchNormGradNode(6, 5));

// Set forward's stack
if (FLAGS_check_nan_inf) {
@@ -567,8 +567,8 @@ sync_batch_norm__ad_func(const paddle::Tensor& x,
reserve_space_autograd_meta);

// Node Construction
auto grad_node =
std::shared_ptr<SyncBatchNormGradNode>(new SyncBatchNormGradNode(6, 5));
auto grad_node = std::shared_ptr<SyncBatchNormGradNode>( // NOLINT
new SyncBatchNormGradNode(6, 5));
egr::Controller::Instance().PushBackForceSequentialNodes(grad_node.get());
// SetAttributes if needed
grad_node->SetAttributemomentum(momentum);
@@ -123,7 +123,7 @@ Conv2dGradNodeFinal::operator()(
1);

// Node Construction
auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>(
auto grad_node = std::shared_ptr<Conv2dDoubleGradNodeFinal>( // NOLINT
new Conv2dDoubleGradNodeFinal(2, 3));
// SetAttributes if needed
grad_node->SetAttributestrides(strides);
@@ -158,7 +158,7 @@ MultiplyGradNode::operator()(
1);

// Node Construction
auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>(
auto grad_node = std::shared_ptr<MultiplyDoubleGradNode>( // NOLINT
new MultiplyDoubleGradNode(2, 3));
// SetAttributes if needed
grad_node->SetAttributeaxis(axis);
@@ -390,8 +390,9 @@ fused_attention_dygraph_function(
p_autograd_CacheKVOut,
p_autograd_Y);
// Create GradOpNode
auto grad_node = std::shared_ptr<fused_attentionGradNodeCompat>(
new fused_attentionGradNodeCompat(20, 23));
auto grad_node =
std::shared_ptr<fused_attentionGradNodeCompat>( // NOLINT
new fused_attentionGradNodeCompat(20, 23));

bool pre_layer_norm = false;
if (attrs.count("pre_layer_norm")) {
@@ -184,9 +184,9 @@ fused_bias_dropout_residual_layer_norm_dygraph_function(
p_autograd_LnVariance,
p_autograd_Y);
// Create GradOpNode
auto grad_node =
std::shared_ptr<fused_bias_dropout_residual_layer_normGradNodeCompat>(
new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));
auto grad_node = std::shared_ptr< // NOLINT
fused_bias_dropout_residual_layer_normGradNodeCompat>(
new fused_bias_dropout_residual_layer_normGradNodeCompat(5, 5));

// Set Attributes
grad_node->SetAttrMap(std::move(attrs));
@@ -310,8 +310,9 @@ fused_feedforward_dygraph_function(
p_autograd_Dropout1Out,
p_autograd_Dropout2Out);
// Create GradOpNode
auto grad_node = std::shared_ptr<fused_feedforwardGradNodeCompat>(
new fused_feedforwardGradNodeCompat(11, 11));
auto grad_node =
std::shared_ptr<fused_feedforwardGradNodeCompat>( // NOLINT
new fused_feedforwardGradNodeCompat(11, 11));

bool pre_layer_norm = false;
if (attrs.count("pre_layer_norm")) {
@@ -301,8 +301,9 @@ fused_gate_attention_dygraph_function(
p_autograd_GateOut,
p_autograd_Out);
// Create GradOpNode
auto grad_node = std::shared_ptr<fused_gate_attentionGradNodeCompat>(
new fused_gate_attentionGradNodeCompat(9, 12));
auto grad_node =
std::shared_ptr<fused_gate_attentionGradNodeCompat>( // NOLINT
new fused_gate_attentionGradNodeCompat(9, 12));

bool merge_qkv = true;
if (attrs.count("merge_qkv")) {
@@ -102,8 +102,9 @@ paddle::Tensor fused_gemm_epilogue_dygraph_function(
VLOG(6) << " Construct Grad for fused_gemm_epilogue ";
egr::EagerUtils::PassStopGradient(false, p_autograd_Out);
// Create GradOpNode
auto grad_node = std::shared_ptr<fused_gemm_epilogueGradNodeCompat>(
new fused_gemm_epilogueGradNodeCompat(1, 3));
auto grad_node =
std::shared_ptr<fused_gemm_epilogueGradNodeCompat>( // NOLINT
new fused_gemm_epilogueGradNodeCompat(1, 3));

// Set Attributes
grad_node->SetAttrMap(std::move(attrs));
15 changes: 7 additions & 8 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -827,9 +827,8 @@ static bool CollectGradInformationFromOpInfo(
const std::string& in_name = op_proto.inputs()[0].name();
ins[in_name] = {};
for (size_t i = 0; i < NUM_CREATED_DUP_INPUTS; i++) {
ins[in_name].emplace_back(std::shared_ptr<paddle::imperative::VarBase>(
new paddle::imperative::VarBase("auto_" + in_name + "_" +
std::to_string(i))));
ins[in_name].emplace_back(std::make_shared<paddle::imperative::VarBase>(
"auto_" + in_name + "_" + std::to_string(i)));
ins[in_name][i]->SetOverridedStopGradient(false);
ins[in_name][i]->MutableVar()->GetMutable<phi::DenseTensor>();
}
@@ -852,8 +851,8 @@ static bool CollectGradInformationFromOpInfo(
// but we only need to identify the slot name order,
// therefore fill in 1 single input VarBase is enough in this scenario

ins[in_name] = {std::shared_ptr<paddle::imperative::VarBase>(
new paddle::imperative::VarBase("auto_" + in_name))};
ins[in_name] = {
std::make_shared<paddle::imperative::VarBase>("auto_" + in_name)};
ins[in_name][0]->SetOverridedStopGradient(false);
ins[in_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
}
@@ -870,8 +869,8 @@ static bool CollectGradInformationFromOpInfo(
// We always create output VarBase regardless of its dispensability.
// We dont know the exact number of outputs during code generation,
// however, simply identifying the slot name order would be enough
outs[out_name] = {std::shared_ptr<paddle::imperative::VarBase>(
new paddle::imperative::VarBase("auto_" + out_name))};
outs[out_name] = {
std::make_shared<paddle::imperative::VarBase>("auto_" + out_name)};
outs[out_name][0]->SetOverridedStopGradient(false);
outs[out_name][0]->MutableVar()->GetMutable<phi::DenseTensor>();
}
@@ -1179,7 +1178,7 @@ static std::string GenerateGradNodeCreationContent(
const char* GRAD_OP_NODE_TEMPLATE =
" auto grad_node = std::shared_ptr<%sGradNodeCompat>(new "
"%sGradNodeCompat(%d, "
"%d));\n";
"%d)); // NOLINT\n";
grad_node_creation_str += " // Create GradOpNode\n";
grad_node_creation_str += paddle::string::Sprintf(GRAD_OP_NODE_TEMPLATE,
op_type,
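As a rough illustration only (std::printf stands in for paddle::string::Sprintf, and the op name and slot counts are made up), the updated GRAD_OP_NODE_TEMPLATE above now expands to generated code that already carries the trailing // NOLINT marker:

```cpp
#include <cstdio>

int main() {
  // Same format string as the updated template; the trailing "// NOLINT"
  // keeps modernize-make-shared quiet in the generated sources.
  const char* kGradOpNodeTemplate =
      "  auto grad_node = std::shared_ptr<%sGradNodeCompat>(new "
      "%sGradNodeCompat(%d, "
      "%d)); // NOLINT\n";
  // "my_op", 2 and 3 are placeholder values for a hypothetical operator.
  std::printf(kGradOpNodeTemplate, "my_op", "my_op", 2, 3);
  return 0;
}
```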
6 changes: 3 additions & 3 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -953,13 +953,13 @@ def GenerateNodeCreationCodes(self, for_backward=False, is_inplaced=False):

# Helper
indent = GetIndent(2)
# NOTE(Aurelius74): DO NOT use make_shared here. Because some Node contains experimental::Scalar
# NOTE(Aurelius84): DO NOT use make_shared here. Because some Node contains experimental::Scalar
# which contains "complex128" as data. "complex128" is memory-aligned manually. But make_shared
# request MEMALIGN for allocation (Maybe).
# See https://stackoverflow.com/questions/31228656/how-can-shared-ptr-disrupt-alignment
# and https://github.com/MRtrix3/mrtrix3/issues/957
node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs})); // NOLINT"
node_assignment_str = f"{indent}grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs})); // NOLINT"

# SetAttributes
set_attributes_list = []
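The NOTE above is why the generated nodes keep std::shared_ptr<T>(new T(...)) plus // NOLINT instead of switching to std::make_shared. A self-contained sketch of the concern, using a made-up FakeGradNode whose over-aligned member stands in for a node holding experimental::Scalar / complex128 data:

```cpp
#include <cstdint>
#include <iostream>
#include <memory>

// Hypothetical stand-in for manually aligned data such as complex128.
struct alignas(32) OverAligned {
  double re;
  double im;
};

// Hypothetical grad-node-like type with an over-aligned member.
struct FakeGradNode {
  FakeGradNode(int bwd_in, int bwd_out) : bwd_in(bwd_in), bwd_out(bwd_out) {}
  int bwd_in;
  int bwd_out;
  OverAligned payload;
};

int main() {
  // Pattern kept by the generator (hence the // NOLINT): `new` uses the
  // type's (aligned) allocation, then ownership is handed to shared_ptr.
  auto a = std::shared_ptr<FakeGradNode>(new FakeGradNode(1, 2));  // NOLINT

  // make_shared performs one combined allocation for the control block and
  // the object; without aligned-allocation support in the toolchain, that
  // buffer may not honor the 32-byte requirement (the "Maybe" in the note).
  auto b = std::make_shared<FakeGradNode>(1, 2);

  // 0 means the 32-byte requirement happened to be met for that allocation.
  std::cout << reinterpret_cast<std::uintptr_t>(&a->payload) % 32 << "\n";
  std::cout << reinterpret_cast<std::uintptr_t>(&b->payload) % 32 << "\n";
  return 0;
}
```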
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/eager_functions.cc
@@ -1187,8 +1187,8 @@ static PyObject* eager_api_to_uva_tensor(PyObject* self,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "Running in eager_api_to_uva_tensor.";
auto new_tensor = std::shared_ptr<paddle::Tensor>(
new paddle::Tensor(egr::Controller::Instance().GenerateUniqueName()));
auto new_tensor = std::make_shared<paddle::Tensor>(
egr::Controller::Instance().GenerateUniqueName());
PyObject* obj = PyTuple_GET_ITEM(args, 0);
auto array = py::cast<py::array>(py::handle(obj));

4 changes: 2 additions & 2 deletions paddle/fluid/pybind/imperative.cc
@@ -976,8 +976,8 @@ void BindImperative(py::module *m_ptr) {
"to_uva_tensor",
[](const py::object &obj, int device_id) {
const auto &tracer = imperative::GetCurrentTracer();
auto new_tensor = std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(tracer->GenerateUniqueName()));
auto new_tensor =
std::make_shared<imperative::VarBase>(tracer->GenerateUniqueName());
auto array = obj.cast<py::array>();
if (py::isinstance<py::array_t<int32_t>>(array)) {
SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
20 changes: 5 additions & 15 deletions test/cpp/eager/performance_tests/benchmark_utils.cc
@@ -241,9 +241,7 @@ void benchmark_fluid_scale(const std::shared_ptr<imperative::VarBase>& X,
for (size_t i = 0; i < max_num_runs; i++) {
imperative::NameVarBaseMap ins = {{"X", {tmp_out}}};
imperative::NameVarBaseMap outs = {
{"Out",
{std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(true, "Out"))}}};
{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};

tracer.TraceOp<VarBase>("scale", ins, outs, attrs, place, true);

@@ -277,9 +275,7 @@ void benchmark_fluid_matmul(const std::shared_ptr<imperative::VarBase>& X,
framework::AttributeMap attrs;
imperative::NameVarBaseMap ins = {{"X", {tmp_out}}, {"Y", {Y}}};
imperative::NameVarBaseMap outs = {
{"Out",
{std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(true, "Out"))}}};
{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};

tracer.TraceOp<VarBase>("matmul_v2", ins, outs, attrs, place, true);

@@ -316,27 +312,21 @@ void benchmark_fluid_mlp(
for (size_t i = 0; i < MLP_NUM_LINEAR; i++) {
// Matmul0
ins = {{"X", {input0}}, {"Y", {Ws[0]}}};
outs = {{"Out",
{std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(true, "Out"))}}};
outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};

tracer.TraceOp<VarBase>("matmul_v2", ins, outs, attrs, place, true);

// EW-Add0
ins = {{"X", outs["Out"]}, {"Y", {Bs[i]}}};
outs = {{"Out",
{std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(true, "Out"))}}};
outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};

tracer.TraceOp<VarBase>("elementwise_add", ins, outs, attrs, place, true);
input0 = outs["Out"][0];
}

// ReduceSum
ins = {{"X", {input0}}};
outs = {{"Out",
{std::shared_ptr<imperative::VarBase>(
new imperative::VarBase(true, "Out"))}}};
outs = {{"Out", {std::make_shared<imperative::VarBase>(true, "Out")}}};
attrs = {{"reduce_all", true}};

tracer.TraceOp<VarBase>("reduce_sum", ins, outs, attrs, place, true);