[clang-tidy] NO.20 clang-analyzer-core.CallAndMessage (#56954)
enkilee authored Sep 7, 2023
1 parent 25f78de commit 0e9cc55
Showing 21 changed files with 70 additions and 61 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -50,7 +50,7 @@ bugprone-use-after-move,
-clang-analyzer-apiModeling.google.GTest,
-clang-analyzer-apiModeling.llvm.CastValue,
-clang-analyzer-apiModeling.llvm.ReturnValue,
--clang-analyzer-core.CallAndMessage,
+clang-analyzer-core.CallAndMessage,
-clang-analyzer-core.DivideZero,
-clang-analyzer-core.DynamicTypePropagation,
clang-analyzer-core.NonNullParamChecker,
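
Note: clang-analyzer-core.CallAndMessage is the Clang Static Analyzer check for logic errors at call sites, such as calling a method through a null or uninitialized object pointer, or passing an uninitialized value as an argument. A minimal sketch of the kind of code it reports (illustrative only, none of the names below come from this repository):

    struct Worker {
      void run() {}
    };

    void Consume(int) {}

    void Buggy() {
      Worker *w;       // never initialized
      w->run();        // reported: object pointer is uninitialized at the call site
      int value;       // never initialized
      Consume(value);  // reported: uninitialized value passed as a call argument
    }

With the leading dash removed in .clang-tidy, the check is now enabled, which is why the hunks below either suppress individual findings with // NOLINT or fix them directly.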
2 changes: 1 addition & 1 deletion paddle/fluid/eager/grad_node_info.cc
@@ -126,7 +126,7 @@ void GradNodeBase::SetGradInMeta(const paddle::Tensor& fwd_out,
} else if (phi::distributed::DistTensor::classof(fwd_out.impl().get())) {
// TODO(chenweihang): DistTensor contains global and local meta, here
// only set the local meta now, we should set global meta later
-dense_tensor =
+dense_tensor =  // NOLINT
&(static_cast<phi::distributed::DistTensor*>(fwd_out.impl().get())
->value());
} else {
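
Most hunks in this commit silence a finding with // NOLINT rather than restructuring the code. As a reminder of the clang-tidy suppression syntax (the snippet is illustrative, not from this repository): a bare // NOLINT silences every check on its line, a named form such as // NOLINT(clang-analyzer-core.CallAndMessage) limits the suppression to that one check, and // NOLINTNEXTLINE(...) applies to the following line instead.

    int *Lookup() {
      static int slot = 0;
      return &slot;
    }

    void Demo() {
      int a = *Lookup();  // NOLINT
      int b = *Lookup();  // NOLINT(clang-analyzer-core.CallAndMessage)
      // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
      int c = *Lookup();
      (void)a;
      (void)b;
      (void)c;
    }

The bare form used throughout this commit is the broadest of the three; a named form would keep other checks active on the same lines.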
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/delete_cast_op_pass.cc
@@ -180,7 +180,7 @@ int DeleteCastOpPass::ApplyCastWriteReadPass(ir::Graph* graph) const {

std::string cast_out_name = write_to_array_0_x_name + "_fp16";
VarDesc cast_out_desc(cast_out_name);
-cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape());
+cast_out_desc.SetShape(write_to_array_0_x->Var()->GetShape());  // NOLINT
cast_out_desc.SetDataType(proto::VarType::Type::VarType_Type_FP16);
auto* cast_out = graph0->CreateVarNode(&cast_out_desc);

2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -121,7 +121,7 @@ static int BuildFusion(Graph* graph,

if (with_fc_bias) {
// Add FC-bias with LSTM-bias (into GEMM result to be)
-auto* fc_bias_var = scope->FindVar(fc_bias->Name());
+auto* fc_bias_var = scope->FindVar(fc_bias->Name());  // NOLINT
const auto& fc_bias_tensor = fc_bias_var->Get<phi::DenseTensor>();
for (int i = 0; i < fc_bias_tensor.numel(); i++) {
combined_biases[i] += fc_bias_tensor.data<float>()[i];
3 changes: 2 additions & 1 deletion paddle/fluid/framework/ir/fuse_adamw_op_pass.cc
@@ -102,7 +102,8 @@ void InsertOpToGraph(const std::vector<std::vector<Node *>> &inout_node_vectors,
i++;
}

-fuse_adamw_op_desc.SetInput("LearningRate", {config.first_lr->Name()});
+fuse_adamw_op_desc.SetInput("LearningRate",
+{config.first_lr->Name()});  // NOLINT
if (config.use_skip_update) {
fuse_adamw_op_desc.SetInput("SkipUpdate",
{config.first_skip_update->Name()});
10 changes: 6 additions & 4 deletions paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -72,10 +72,12 @@ PDNode *PDPattern::RetrieveNode(const std::string &id) const {
}

void PDPattern::AddEdge(PDNode *a, PDNode *b) {
-PADDLE_ENFORCE_NOT_NULL(
-a, platform::errors::NotFound("PDNode %s is not found.", a->name()));
-PADDLE_ENFORCE_NOT_NULL(
-b, platform::errors::NotFound("PDNode %s is not found.", b->name()));
+PADDLE_ENFORCE_NOT_NULL(a,
+platform::errors::NotFound("PDNode %s is not found.",
+a->name()));  // NOLINT
+PADDLE_ENFORCE_NOT_NULL(b,
+platform::errors::NotFound("PDNode %s is not found.",
+b->name()));  // NOLINT
PADDLE_ENFORCE_NE(a,
b,
platform::errors::PermissionDenied(
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -49,7 +49,7 @@ void TransferLayoutElimPass::PutTranferlayoutAfterOp(
std::unordered_set<const Node *> remove_nodes;
// Ensure op_node has only one output!
int op_node_useful_output = 0;
-Node *var2;
+Node *var2 = nullptr;
for (auto ele : op_node->outputs) {
if (!ele->outputs.empty()) {
op_node_useful_output++;
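
Unlike the // NOLINT suppressions elsewhere in this commit, this hunk fixes the finding itself: var2 is presumably assigned only on some paths below the lines shown here, so the analyzer can see a route on which it is read while still uninitialized, and starting it at nullptr removes that route. A generic sketch of the pattern, with illustrative names only:

    #include <vector>

    struct Node {
      std::vector<Node *> outputs;
    };

    void Use(Node *) {}

    // Before: 'picked' stays uninitialized when no output qualifies, which is
    // the situation clang-analyzer-core.CallAndMessage reports at the Use() call.
    void Before(Node *op) {
      Node *picked;
      for (Node *out : op->outputs) {
        if (!out->outputs.empty()) picked = out;
      }
      Use(picked);
    }

    // After: starting from nullptr removes the uninitialized-read path; callers
    // still need to handle picked == nullptr.
    void After(Node *op) {
      Node *picked = nullptr;
      for (Node *out : op->outputs) {
        if (!out->outputs.empty()) picked = out;
      }
      Use(picked);
    }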
5 changes: 3 additions & 2 deletions paddle/fluid/framework/new_executor/program_interpreter.cc
@@ -909,7 +909,8 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
}
}

VLOG(4) << "End run " << place << " " << op->DebugStringEx(local_scope);
VLOG(4) << "End run " << place << " "
<< op->DebugStringEx(local_scope); // NOLINT

if (!instr_node.InplaceBackMap().empty()) {
platform::RecordEvent inplaceback_event(
@@ -933,7 +934,7 @@ void ProgramInterpreter::RunOperator(const Instruction& instr_node) {
instr_node.DeviceContext().Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
VLOG(4) << "Operator(" << op->Type()
VLOG(4) << "Operator(" << op->Type() // NOLINT
<< "): context wait and get last error";
#endif
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_desc.cc
@@ -1200,7 +1200,7 @@ VarDesc *OpDesc::FindVarRecursive(const std::string &name) {
PADDLE_THROW(platform::errors::NotFound(
"Not found Var(%s) from Block(%d) back into global Block.",
name,
-block_->ID()));
+block_->ID()));  // NOLINT
}

CompileTimeInferShapeContext::CompileTimeInferShapeContext(
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/layer.cc
@@ -387,7 +387,7 @@ void VarBase::CopyFrom(const VarBase& src, const bool blocking) {
src.Name()));
place = Place();
} else {
-dst_tensor->set_lod(src_tensor.lod());
+dst_tensor->set_lod(src_tensor.lod());  // NOLINT
dst_tensor->Resize(src_tensor.dims());
}
framework::TensorCopy(src_tensor, place, dst_tensor);
15 changes: 8 additions & 7 deletions paddle/fluid/operators/gru_op.cc
@@ -513,13 +513,14 @@ class GRUCPUKernel : public framework::OpKernel<T> {
gru_value.gate_value = gate_t.data<T>();
gru_value.reset_output_value = reset_hidden_prev_t.data<T>();

-phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
-gru_value,
-frame_size,
-cur_batch_size,
-active_node,
-active_gate,
-origin_mode);
+phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(
+dev_ctx,  // NOLINT
+gru_value,
+frame_size,
+cur_batch_size,
+active_node,
+active_gate,
+origin_mode);

gru_value.prev_out_value = gru_value.output_value;
}
2 changes: 1 addition & 1 deletion paddle/fluid/operators/gru_op.cu.cc
@@ -109,7 +109,7 @@ class GRUKernel : public framework::OpKernel<T> {
gru_value.output_value = hidden_t.data<T>();
gru_value.gate_value = gate_t.data<T>();
gru_value.reset_output_value = reset_hidden_prev_t.data<T>();
-phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,
+phi::funcs::GRUUnitFunctor<DeviceContext, T>::compute(dev_ctx,  // NOLINT
gru_value,
frame_size,
cur_batch_size,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
@@ -600,7 +600,7 @@ class MatMulGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
phi::DenseTensor *out) const {
// gradient is calculated in a different way when broadcasting is used
bool need_combine = (x->dims().size() == 3 || y->dims().size() == 3) &&
-out->dims().size() == 2;
+out->dims().size() == 2;  // NOLINT

phi::DenseTensor x_combined, y_combined;
if (need_combine) {
@@ -81,7 +81,7 @@ class SequenceSoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
auto& lod = x->lod();
const size_t level = lod.size() - 1;

-x_grad->mutable_data<T>(ctx.GetPlace());
+x_grad->mutable_data<T>(ctx.GetPlace());  // NOLINT
for (int i = 0; i < static_cast<int>(lod[level].size()) - 1; ++i) {
int start_pos = static_cast<int>(lod[level][i]);
int end_pos = static_cast<int>(lod[level][i + 1]);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/rnn_functor.h
@@ -295,7 +295,7 @@ void RnnFunc(const Context& dev_ctx,
num_layers,
init_h_dims[0]));
if (is_lstm(cell_type)) {
-const auto& init_c_dims = init_c->dims();
+const auto& init_c_dims = init_c->dims();  // NOLINT
PADDLE_ENFORCE_EQ(init_c_dims[0],
num_layers * direction_num,
phi::errors::InvalidArgument(
8 changes: 4 additions & 4 deletions paddle/phi/kernels/cpu/rnn_grad_kernel.cc
@@ -32,7 +32,7 @@ template <typename T>
void BackupTensor(const CPUContext& dev_ctx,
DenseTensor* dst,
DenseTensor* src) {
-dst->Resize(src->dims());
+dst->Resize(src->dims());  // NOLINT
dev_ctx.Alloc<T>(dst);
Copy(dev_ctx, *src, dev_ctx.GetPlace(), false, dst);
}
@@ -250,7 +250,7 @@ struct GRUGradCell : GradCell<T> {
gru_value.gate_weight = weight_hh->data<T>();

gru_grad.gate_grad = grad_gate->data<T>();
-gru_grad.reset_output_grad = grad_state->data<T>();
+gru_grad.reset_output_grad = grad_state->data<T>();  // NOLINT
gru_grad.prev_out_grad = grad_pre_hidden->data<T>();
gru_grad.output_grad = grad_hidden->data<T>();
gru_grad.gate_weight_grad = grad_weight_hh->data<T>();
@@ -314,9 +314,9 @@ struct LSTMGradCell : GradCell<T> {
lstm_value.gate_value = gate_tensor->data<T>();
lstm_value.state_value = state_tensor->data<T>();
lstm_value.state_active_value = act_state_tensor->data<T>();
-lstm_value.prev_state_value = pre_state->data<T>();
+lstm_value.prev_state_value = pre_state->data<T>();  // NOLINT

-lstm_grad.state_grad = grad_state->data<T>();
+lstm_grad.state_grad = grad_state->data<T>();  // NOLINT
lstm_grad.gate_grad = grad_gate->data<T>();
lstm_grad.output_grad = grad_hidden->data<T>();
lstm_grad.prev_state_grad = grad_pre_state->data<T>();
3 changes: 2 additions & 1 deletion paddle/phi/kernels/cpu/send_u_recv_grad_kernel.cc
@@ -55,7 +55,8 @@ void GraphSendRecvCpuGradLoop(const int& index_size,
const IndexT& forward_src_idx = d_index[i];
const IndexT& forward_dst_idx = s_index[i];
auto input_slice = input.Slice(forward_src_idx, forward_src_idx + 1);
-auto output_slice = output->Slice(forward_dst_idx, forward_dst_idx + 1);
+auto output_slice =
+output->Slice(forward_dst_idx, forward_dst_idx + 1);  // NOLINT
auto eigen_input = phi::EigenVector<T>::Flatten(input_slice);
auto eigen_output = phi::EigenVector<T>::Flatten(output_slice);

55 changes: 29 additions & 26 deletions paddle/phi/kernels/funcs/activation_functor.h
@@ -528,7 +528,7 @@ struct MishFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-auto sp = (x > static_cast<T>(threshold))
+auto sp = (x > static_cast<T>(threshold))  // NOLINT
.select(x, (static_cast<T>(1) + x.exp()).log());
out.device(d) = x * sp.tanh();
}
@@ -551,7 +551,7 @@ struct MishGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto sp = (x > static_cast<T>(threshold))
+auto sp = (x > static_cast<T>(threshold))  // NOLINT
.select(x, (static_cast<T>(1) + x.exp()).log());
auto gsp = static_cast<T>(1) - (-sp).exp();
auto tsp = sp.tanh();
@@ -571,8 +571,8 @@ struct STanhFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) =
-static_cast<T>(scale_b) * (static_cast<T>(scale_a) * x).tanh();
+out.device(d) = static_cast<T>(scale_b) *
+(static_cast<T>(scale_a) * x).tanh();  // NOLINT
}
};

@@ -738,7 +738,7 @@ struct SoftplusFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x;  // NOLINT
out.device(d) = (x_beta > static_cast<T>(threshold))
.select(x,
(static_cast<T>(1) + x_beta.exp()).log() /
@@ -764,7 +764,7 @@ struct SoftplusGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x;  // NOLINT
dx.device(d) =
(x_beta > static_cast<T>(threshold))
.select(dout, dout / (static_cast<T>(1) + (-x_beta).exp()));
@@ -790,7 +790,7 @@ struct SoftplusDoubleGradFunctor : public BaseActivationFunctor<T> {
auto* d = dev.eigen_device();
auto x = EigenVector<T>::Flatten(
GET_DATA_SAFELY(X, "Input", "X", "SoftplusDoubleGrad"));
-auto x_beta = static_cast<T>(beta) * x;
+auto x_beta = static_cast<T>(beta) * x;  // NOLINT
auto ddx = EigenVector<T>::Flatten(
GET_DATA_SAFELY(ddX, "Input", "DDX", "SoftplusDoubleGrad"));

@@ -1453,8 +1453,8 @@ struct HardTanhFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) =
-x.cwiseMax(static_cast<T>(t_min)).cwiseMin(static_cast<T>(t_max));
+out.device(d) = x.cwiseMax(static_cast<T>(t_min))
+.cwiseMin(static_cast<T>(t_max));  // NOLINT
}
};

@@ -1471,9 +1471,9 @@ struct HardTanhGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-dx.device(d) =
-dout * ((x > static_cast<T>(t_min)) * (x < static_cast<T>(t_max)))
-.template cast<T>();
+dx.device(d) = dout * ((x > static_cast<T>(t_min)) *
+(x < static_cast<T>(t_max)))  // NOLINT
+.template cast<T>();
}

static constexpr ActBwdOpFwdDeps FwdDeps() { return ActBwdOpFwdDeps::kDepX; }
@@ -1508,8 +1508,8 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto temp1 =
-static_cast<T>(alpha) * (x < static_cast<T>(0)).template cast<T>();
+auto temp1 = static_cast<T>(alpha) *
+(x < static_cast<T>(0)).template cast<T>();  // NOLINT
auto temp2 = (x >= static_cast<T>(0)).template cast<T>();
dx.device(d) = dout * (temp1 + temp2).template cast<T>();
}
@@ -1593,8 +1593,8 @@ struct Relu6Functor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) =
-x.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(threshold));
+out.device(d) = x.cwiseMax(static_cast<T>(0))
+.cwiseMin(static_cast<T>(threshold));  // NOLINT
}
};

@@ -1731,7 +1731,8 @@ struct ELUFunctor : public BaseActivationFunctor<T> {
void operator()(Device d, X x, Out out) const {
out.device(d) =
(x < static_cast<T>(0))
-.select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)), x);
+.select(static_cast<T>(alpha) * (x.exp() - static_cast<T>(1)),
+x);  // NOLINT
}
};

@@ -2099,7 +2100,7 @@ struct HardSigmoidFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);
+auto temp = x * static_cast<T>(slope) + static_cast<T>(offset);  // NOLINT
out.device(d) =
temp.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(1));
}
@@ -2118,7 +2119,7 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
-dx.device(d) = dout *
+dx.device(d) = dout *  // NOLINT
((out > static_cast<T>(0)) * (out < static_cast<T>(1)))
.template cast<T>() *
static_cast<T>(slope);
@@ -2353,7 +2354,7 @@ struct HardSwishFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) = (x + static_cast<T>(offset))
+out.device(d) = (x + static_cast<T>(offset))  // NOLINT
.cwiseMax(static_cast<T>(0))
.cwiseMin(static_cast<T>(threshold)) *
x / static_cast<T>(scale);
@@ -2375,8 +2376,9 @@ struct HardSwishGradFunctor : public BaseActivationFunctor<T> {
typename dOut,
typename dX>
void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
-auto tmp = ((x + static_cast<T>(offset)) < static_cast<T>(threshold))
-.template cast<T>();
+auto tmp =
+((x + static_cast<T>(offset)) < static_cast<T>(threshold))  // NOLINT
+.template cast<T>();
dx.device(d) =
dout *
(((x + static_cast<T>(offset)) > static_cast<T>(0)).template cast<T>() *
@@ -2397,7 +2399,8 @@ struct SwishFunctor : public BaseActivationFunctor<T> {

template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) = x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
+out.device(d) =
+x / (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());  // NOLINT
}
};

@@ -2431,7 +2434,7 @@ struct PowFunctor : public BaseActivationFunctor<T> {
}
template <typename Device, typename X, typename Out>
void operator()(Device d, X x, Out out) const {
-out.device(d) = x.pow(static_cast<T>(factor));
+out.device(d) = x.pow(static_cast<T>(factor));  // NOLINT
}
};

@@ -2585,8 +2588,8 @@ struct CELUFunctor : public BaseActivationFunctor<T> {
void operator()(Device d, X x, Out out) const {
out.device(d) =
(x < static_cast<T>(0))
-.select(static_cast<T>(alpha) *
-((x / static_cast<T>(alpha)).exp() - static_cast<T>(1)),
+.select(static_cast<T>(alpha) * ((x / static_cast<T>(alpha)).exp() -
+static_cast<T>(1)),  // NOLINT
x);
}
};