4 changes: 2 additions & 2 deletions paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -161,12 +161,12 @@ GradNodeAccumulation::operator()(
VLOG(3) << "Running AD API Grad: GradNodeAccumulation";
PADDLE_ENFORCE(grads.size() == 1,
common::errors::Fatal(
"GradNodeAccumulation should take exactly 1 grad tensor"
"GradNodeAccumulation should take exactly 1 grad tensor. "
"However received: %d slot.",
grads.size()));
PADDLE_ENFORCE(grads[0].size() == 1,
common::errors::Fatal(
"GradNodeAccumulation should take exactly 1 grad tensor"
"GradNodeAccumulation should take exactly 1 grad tensor. "
"However received: %d in slot %d .",
grads[0].size(),
0));
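
Why these tiny string edits matter: in C++, adjacent string literals are concatenated at compile time with no separator inserted, so splitting a message across source lines without a trailing space glues the words together at the seam ("...grad tensorHowever received..."). A minimal standalone sketch of the pitfall (plain C++, not Paddle code; the message text is abridged for illustration):

#include <cstdio>

int main() {
  // Adjacent literals are pasted together with nothing in between,
  // so the missing trailing space fuses "tensor" and "However".
  const char* broken =
      "GradNodeAccumulation should take exactly 1 grad tensor"
      "However received: %d slot.";
  const char* fixed =
      "GradNodeAccumulation should take exactly 1 grad tensor. "
      "However received: %d slot.";
  std::printf("%s\n", broken);  // ...grad tensorHowever received...
  std::printf("%s\n", fixed);   // ...grad tensor. However received...
  return 0;
}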
@@ -484,8 +484,8 @@ MultiplyDoubleGradNode::operator()(
if (need_skip) {
if (trace_backward) {
PADDLE_THROW(common::errors::Unavailable(
"The Op multiply_double_grad doesn't have any grad"
"op. If you don't intend calculating higher order"
"The Op multiply_double_grad doesn't have any grad "
"op. If you don't intend calculating higher order "
"derivatives, please set `create_graph`to False."));
}
}
@@ -669,8 +669,8 @@ MultiplyGradNode::operator()(
// Create Grad Node
if (trace_backward) {
PADDLE_THROW(common::errors::Unavailable(
"The Op multiply_grad doesn't have any grad"
"op. If you don't intend calculating higher order"
"The Op multiply_grad doesn't have any grad "
"op. If you don't intend calculating higher order "
"derivatives, please set `create_graph`to False."));
}
VLOG(4) << "Finish AD API GRAD: multiply_grad";
@@ -201,8 +201,8 @@ SyncBatchNormGradNode::operator()(
// Create Grad Node
if (trace_backward) {
PADDLE_THROW(common::errors::Unavailable(
"The Op sync_batch_norm_grad doesn't have any grad"
"op. If you don't intend calculating higher order"
"The Op sync_batch_norm_grad doesn't have any grad "
"op. If you don't intend calculating higher order "
"derivatives, please set `create_graph`to False."));
}
VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
@@ -442,8 +442,8 @@ SyncBatchNormGradNode::operator()(
// Create Grad Node
if (trace_backward) {
PADDLE_THROW(common::errors::Unavailable(
"The Op sync_batch_norm_grad doesn't have any grad"
"op. If you don't intend calculating higher order"
"The Op sync_batch_norm_grad doesn't have any grad "
"op. If you don't intend calculating higher order "
"derivatives, please set `create_graph`to False."));
}
VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
2 changes: 1 addition & 1 deletion paddle/fluid/eager/general_grad.h
@@ -304,7 +304,7 @@ class GeneralGrad {

PADDLE_ENFORCE_NOT_NULL(
target_node,
- common::errors::Fatal("There is no grad op for inputs:[%d] or it's"
+ common::errors::Fatal("There is no grad op for inputs:[%d] or it's "
"stop_gradient=True.",
i));

2 changes: 1 addition & 1 deletion paddle/fluid/eager/to_static/run_program_impl.cc
@@ -1070,7 +1070,7 @@ void LegacyRunProgramGradImpl(
if (!cache.Has(cache_key)) {
phi::RecordEvent record_event(
"create_new_interpretercore", phi::TracerEventType::UserDefined, 1);
- VLOG(2) << "No interpretercore cache, so create a new interpretercore"
+ VLOG(2) << "No interpretercore cache, so create a new interpretercore "
"for program: "
<< program_id;
details::ShareTensorsIntoScope(out_grad, global_inner_scope);
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc
@@ -153,7 +153,7 @@ int ElementwiseAddTransposeFusePass::ApplyEleTransPattern(
<< "found that shape_attr[3](channel size) mod 8 !=0 for reshape op "
"in elementwise_add_transpose, "
"currently, the elementwiseadd transpose pass only support "
"channel size mod 8 == 0 for khwc8 trt format"
"channel size mod 8 == 0 for khwc8 trt format. "
"Therefore, the fusion will be stopped.";
return;
}
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc
@@ -113,9 +113,9 @@ void PrelnSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
graph->Has(framework::ir::kMultiheadMatmulPass) && !pos_id.empty() &&
!mask_id.empty() && with_dynamic_shape)) {
VLOG(3) << "preln_skip_layernorm_fuse_pass need: use_trt, enable_int8, "
"with_interleaved"
"with_interleaved, "
"use_varseqlen, preln_embedding_eltwise_layernorm_fuse_pass, "
"trt_multihead_matmul_fuse_pass"
"trt_multihead_matmul_fuse_pass, "
"set pos_id, set mask_id, with_dynamic_shape. Stop this pass, "
"please "
"reconfig.";
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/transfer_layout_elim_pass.cc
@@ -92,8 +92,8 @@ void TransferLayoutElimPass::PutTransferlayoutAfterOp(
PADDLE_ENFORCE_EQ(
var2_shape.size() >= 4L,
true,
- common::errors::InvalidArgument("var2_shape.size is too small"
- "expected no small than 4L"
+ common::errors::InvalidArgument("var2_shape.size is too small, "
+ "expected no small than 4L, "
"received %d",
var2_shape.size()));
auto new_var2_shape = var2_shape;
@@ -632,7 +632,7 @@ void BuildOpFuncList(const phi::Place& place,
"conditional_block",
"conditional_block_grad",
"pylayer",
"pylayer_grad"
"pylayer_grad",
"recurrent_grad",
"while",
"while_grad"};
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_registry.cc
@@ -109,7 +109,7 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap(

std::unique_ptr<OperatorBase> OpRegistry::CreateOp(
const proto::OpDesc& op_desc) {
- VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be"
+ VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be "
"used in unit tests. Use CreateOp(const OpDesc& op_desc) "
"instead.";
VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs());
14 changes: 7 additions & 7 deletions paddle/fluid/imperative/reducer.cc
@@ -652,13 +652,13 @@ void Reducer::PrepareForBackward(
// concat + allreduce + split is emitted in turn according to next_group_.
// 3, FinalizeBackward: after the end, synchronize each stream.
void Reducer::AddDistHook(size_t var_index) {
- PADDLE_ENFORCE_LT(
- var_index,
- variable_locators_.size(),
- common::errors::OutOfRange("Out of bounds variable index. it must be less"
- "than %d, but it is %d",
- variable_locators_.size(),
- var_index));
+ PADDLE_ENFORCE_LT(var_index,
+ variable_locators_.size(),
+ common::errors::OutOfRange(
+ "Out of bounds variable index. it must be less "
+ "than %d, but it is %d",
+ variable_locators_.size(),
+ var_index));

// gradient synchronization is not required when grad_need_hooks_ is false.
if (!grad_need_hooks_) {
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -372,7 +372,7 @@ void Tensor::CopyStringsFromCpu(const paddle_infer::Strings *data) {
0,
common::errors::PreconditionNotMet(
"You should call Tensor::Reshape(const "
"std::size_t &shape)function before copying"
"std::size_t &shape) function before copying "
"the string data from cpu."));
*tensor = *data;
}
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/op_teller.cc
@@ -1430,7 +1430,7 @@ struct SimpleOpTypeSetTeller : public Teller {
input_type != framework::proto::VarType::FP32 &&
input_type != framework::proto::VarType::FP64) {
VLOG(3) << "the fill_any_like only supports "
"int32/int64/float32/float64 by"
"int32/int64/float32/float64 by "
"trt8.4 below";
return false;
}
19 changes: 9 additions & 10 deletions paddle/fluid/operators/hierarchical_sigmoid_op.cc
@@ -88,18 +88,18 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
"(phi::DenseTensor, required), The parameters of hierarchical "
"sigmoid operator, each of them is a 2-D tensor, the shape is"
"[K, D]. Which K is the num of non-leaf node in Path Tree");
AddInput("Label",
"(phi::DenseTensor, required), The labels of training data. It's a"
"tensor with shape [N, 1].");
AddInput(
"PathTable",
"(phi::DenseTensor, optional), The Path Table from root to current word"
"it should have shape like [N, L], L is the length of the Path")
"Label",
"(phi::DenseTensor, required), The labels of training data. It's a "
"tensor with shape [N, 1].");
AddInput("PathTable",
"(phi::DenseTensor, optional), The Path Table from root to "
"current word, it should have shape like [N, L], L is the length "
"of the Path")
.AsDispensable();
AddInput("PathCode",
"(phi::DenseTensor, optional), The Code on each Node of the Path "
"from root "
"to current word"
"from root to current word, "
"it should have shape like [N, L], L is the length of the Path")
.AsDispensable();
AddInput("Bias",
@@ -118,8 +118,8 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
.AsIntermediate();
AddOutput("W_Out",
"(phi::DenseTensor, optional) using input 'W' as Output to make "
"it mutable"
"When we are using prefetch")
"it mutable when we are using prefetch")
.AsIntermediate();
AddAttr<AttrType>("num_classes", "(int, optional), The number of classes")
.SetDefault(2);
2 changes: 1 addition & 1 deletion paddle/fluid/operators/lookup_table_op.cc
@@ -109,7 +109,7 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<std::string>("entry_config",
"embedding sparse feature entry config, "
" probability entry / counting "
" this can only be used in distributed training"
" this can only be used in distributed training "
"entry")
.SetDefault("");
