Commit 3b7fc76: fix CI
zhwesky2010 committed Oct 10, 2022
1 parent: f748afe
Showing 19 changed files with 152 additions and 157 deletions.
13 changes: 2 additions & 11 deletions paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -16,6 +16,7 @@

#include <string>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"

namespace phi {
@@ -101,23 +102,13 @@ std::string ScaleLossGradOpHandle::LossGradName() const {
void ScaleLossGradOpHandle::RunImpl() {
platform::RecordEvent record_event(
Name(), platform::TracerEventType::UserDefined, 2);
// VLOG(0) << "LossGradName: " << LossGradName();
// VLOG(0) << "local_exec_scopes_[0]->Size(): " <<
// local_exec_scopes_[0]->Size(); for (size_t i=0; i<
// local_exec_scopes_[0]->Size(); ++i) {
// VLOG(0) << "local_exec_scopes_[0]->LocalVarNames()[i]: " <<
// local_exec_scopes_[0]->LocalVarNames()[i]; VLOG(0) <<
// "local_exec_scopes_[0]->LocalVars()[i]->GetMutable<LoDTensor>()->dims().size():
// " <<
// local_exec_scopes_[0]->LocalVars()[i]->GetMutable<LoDTensor>()->dims().size();
// }

RunOnVar(local_exec_scopes_[0]->FindVar(LossGradName()), true);
}

void ScaleLossGradOpHandle::RunOnVar(Variable *var, bool record_event) {
auto *tensor = var->GetMutable<LoDTensor>();
-tensor->Resize(phi::make_ddim({}));
+tensor->Resize(phi::make_ddim({1}));

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
ScaleLossGradFunctor func(
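Note: the change above reverts the loss-gradient buffer from a rank-0 ("0D") shape back to an explicit one-element shape [1]. A minimal sketch of the distinction, assuming make_ddim's rank follows the length of its initializer (illustration only, not part of the commit):

    #include "paddle/phi/core/ddim.h"

    void ZeroDimVsOneElement() {
      phi::DDim scalar_dims = phi::make_ddim({});   // assumed rank 0: one element, no axes
      phi::DDim vector_dims = phi::make_ddim({1});  // rank 1: one element along one axis
      // Both describe a single value, but scalar_dims.size() == 0 while
      // vector_dims.size() == 1, and infer-shape code branches on that rank.
    }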
2 changes: 2 additions & 0 deletions paddle/fluid/framework/infershape_utils.cc
@@ -205,6 +205,8 @@ DDim CompatMetaTensor::dims() const {
auto* var = PADDLE_GET_CONST(VarDesc*, var_);

return phi::make_ddim(var->GetShape());
+// return var->GetShape().empty() ? phi::make_ddim({0UL}) :
+//            phi::make_ddim(var->GetShape());
}
}

3 changes: 0 additions & 3 deletions paddle/fluid/framework/lod_tensor.cc
@@ -341,8 +341,6 @@ std::vector<LoDTensor> SplitLoDTensor(
void MergeLoDTensor(LoDTensor *target,
const std::vector<const LoDTensor *> &lod_tensors,
platform::Place dst_place) {
// VLOG(0) << "run MergeLoDTensor==========>\n";
// VLOG(0) << "lod_tensors.size()==========> " << lod_tensors.size() << "\n";
PADDLE_ENFORCE_EQ(lod_tensors.empty(),
false,
platform::errors::InvalidArgument(
@@ -362,7 +360,6 @@ void MergeLoDTensor(LoDTensor *target,

LoD new_lod = lod_tensors[0]->lod();
auto rank = lod_tensors[0]->dims().size();
// VLOG(0) << "rank: " << rank << "\n";

for (size_t i = 1; i < lod_tensors.size(); ++i) {
auto *t = lod_tensors[i];
1 change: 0 additions & 1 deletion paddle/fluid/framework/operator.cc
@@ -1687,7 +1687,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
1,
platform::EventRole::kInnerOp);
RuntimeInferShapeContext infer_shape_ctx(*this, *runtime_ctx);
// VLOG(0) << "op.name: " << this->Type() << " runtime infer shape";
this->Info().infer_shape_(&infer_shape_ctx);
record_event.End();
platform::RecordOpInfoSupplement(
31 changes: 11 additions & 20 deletions paddle/phi/core/ddim.cc
@@ -171,34 +171,25 @@ DDim stride_numel(const DDim& ddim) {
return strides;
}

-DDim DDim::reshape(const std::vector<int>& shape) const {
+DDim DDim::reshape(std::vector<int>& shape) const {
const DDim& in_dims = *this;
-  std::vector<int> new_shape(shape);

-  for (uint64_t i = 0; i < new_shape.size(); ++i) {
-    if (new_shape[i] == 0) {
-      new_shape[i] = in_dims.at(i);
+  for (uint64_t i = 0; i < shape.size(); ++i) {
+    if (shape[i] == 0) {
+      shape[i] = in_dims.at(i);
}
}

// Dim marked as "-1" must be inferred
-  auto it = std::find(new_shape.begin(), new_shape.end(), -1);
-  if (it != new_shape.end()) {
-    int index = std::distance(new_shape.begin(), it);
-    int reshape_out_product = std::accumulate(
-        new_shape.begin(), new_shape.end(), -1, std::multiplies<int>());
-    new_shape[index] = product(in_dims) / reshape_out_product;
-  } else {
-    int reshape_out_product = std::accumulate(
-        new_shape.begin(), new_shape.end(), 1, std::multiplies<int>());
-    PADDLE_ENFORCE_EQ(
-        product(in_dims),
-        reshape_out_product,
-        phi::errors::InvalidArgument(
-            "The numel after reshape must be same with the original"));
+  auto it = std::find(shape.begin(), shape.end(), -1);
+  if (it != shape.end()) {
+    int index = std::distance(shape.begin(), it);
+    int reshape_out_product =
+        std::accumulate(shape.begin(), shape.end(), -1, std::multiplies<int>());
+    shape[index] = product(in_dims) / reshape_out_product;
}

-  return phi::make_ddim(new_shape);
+  return phi::make_ddim(shape);
}

DDim DDim::transpose(const std::vector<int>& axis) const {
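Note: reshape now takes a mutable reference and rewrites the 0 and -1 entries of shape in place, so callers must pass a named lvalue; the test_ddim.cc change below switches from a braced temporary to a named vector for exactly that reason. A minimal usage sketch under the post-commit signature (illustration only, not part of the commit):

    #include <vector>
    #include "paddle/phi/core/ddim.h"

    void ReshapeSketch() {
      phi::DDim dims = phi::make_ddim({2, 3, 4});  // 24 elements

      std::vector<int> shape = {0, -1};  // 0 copies the input dim; -1 is inferred
      phi::DDim reshaped = dims.reshape(shape);
      // shape is mutated in place to {2, 12}, and reshaped is the DDim [2, 12].

      // dims.reshape({0, -1});  // would no longer compile: a temporary
      //                         // cannot bind to std::vector<int>&.
    }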
3 changes: 1 addition & 2 deletions paddle/phi/core/ddim.h
@@ -62,7 +62,6 @@ class DDim {
public:
constexpr static int kMaxRank = 9;

-  // Default is zero-sized Tensor [0]
DDim() : rank_(1) { dim_[0] = 0; }

DDim(const DDim& ddim) : dim_() { CopyFrom(ddim); }
@@ -156,7 +155,7 @@

std::string to_str() const;

-  DDim reshape(const std::vector<int>& shape) const;
+  DDim reshape(std::vector<int>& shape) const;

DDim transpose(const std::vector<int>& axis) const;

9 changes: 7 additions & 2 deletions paddle/phi/infermeta/multiary.cc
@@ -305,9 +305,14 @@ void AddNInferMeta(const std::vector<const MetaTensor*>& x,
if (x[i]->is_selected_rows() && x_dim.size() == 1) {
continue;
}
+    // for zero-sized tensor
+    if (phi::product(x_dim) == 0) {
+      continue;
+    }
+    // for 0D tensor
if (x_dim.size() == 0) {
continue;
}
if (phi::product(in_dim) == 0) {
in_dim = x_dim;
} else {
@@ -2538,8 +2543,8 @@ void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
-MetaTensor* warpctcgrad,
-MetaTensor* loss) {
+MetaTensor* loss,
+MetaTensor* warpctcgrad) {
auto logits_dims = logits.dims();
int sequence_width = 0;

4 changes: 2 additions & 2 deletions paddle/phi/infermeta/multiary.h
@@ -483,8 +483,8 @@ void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
-MetaTensor* warpctcgrad,
-MetaTensor* loss);
+MetaTensor* loss,
+MetaTensor* warpctcgrad);

void WhereInferMeta(const MetaTensor& condition,
const MetaTensor& x,
18 changes: 18 additions & 0 deletions paddle/phi/kernels/funcs/unsqueeze.h
@@ -36,6 +36,24 @@ inline DDim GetOutputSqueezeShape(const std::vector<int> squeeze_dims,
}
} else {
for (size_t i = 0; i < num_squeeze_dims; ++i) {
+if (in_dims.size() == 0) {
+  PADDLE_ENFORCE_GE(
+      squeeze_dims[i],
+      -1,
+      phi::errors::InvalidArgument(
+          "For a 0D Tensor, each axis in Attr(axes) should be in the "
+          "range of [-1, 0], but the current axis is %d and the input "
+          "tensor's shape is [%s].",
+          squeeze_dims[i],
+          in_dims));
+  PADDLE_ENFORCE_LE(
+      squeeze_dims[i],
+      0,
+      phi::errors::InvalidArgument(
+          "For a 0D Tensor, each axis in Attr(axes) should be in the "
+          "range of [-1, 0], but the current axis is %d and the input "
+          "tensor's shape is [%s].",
+          squeeze_dims[i],
+          in_dims));
+  continue;
+}
+
int current = squeeze_dims[i] < 0 ? squeeze_dims[i] + in_dims.size()
: squeeze_dims[i];

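Note: the added guard means squeezing a rank-0 tensor accepts only axes in [-1, 0] and treats them as no-ops, since there is no dimension to remove. A standalone sketch of that rule (simplified stand-in, not the Paddle helper):

    #include <stdexcept>
    #include <vector>

    // For a rank-0 input, every squeeze axis must lie in [-1, 0]; valid
    // axes are then ignored because there is nothing to remove.
    void CheckSqueezeAxesFor0D(const std::vector<int>& squeeze_dims) {
      for (int axis : squeeze_dims) {
        if (axis < -1 || axis > 0) {
          throw std::invalid_argument(
              "For a 0D tensor, each axis in axes must be in [-1, 0].");
        }
      }
    }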
3 changes: 1 addition & 2 deletions paddle/phi/kernels/onednn/reduce_kernel_impl.h
@@ -25,8 +25,7 @@ inline std::vector<int64_t> CalculateReducedDims(
bool keep_dim) {
if (keep_dim) return vectorize(output->dims());

-if (reduce_all && reduce_dims.size() > 0)
-  return std::vector<int64_t>(input->dims().size(), 1);
+if (reduce_all) return std::vector<int64_t>(input->dims().size(), 1);

std::vector<int64_t> output_dims(vectorize(input->dims()));
for (size_t i = 0; i < reduce_dims.size(); ++i) {
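Note: dropping the reduce_dims.size() > 0 condition changes the case where reduce_all is set with an empty axis list: the helper now returns an all-ones shape instead of falling through to the per-axis loop, which leaves an empty list untouched. A simplified before/after sketch (not the actual oneDNN helper):

    #include <cstdint>
    #include <vector>

    // reduce_all == true, reduce_dims empty, input shape {2, 3, 4}:
    std::vector<int64_t> ReducedDimsBefore(const std::vector<int64_t>& in) {
      return in;  // old guard never fired; shape came back unreduced: {2, 3, 4}
    }

    std::vector<int64_t> ReducedDimsAfter(const std::vector<int64_t>& in) {
      return std::vector<int64_t>(in.size(), 1);  // now: {1, 1, 1}
    }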
3 changes: 2 additions & 1 deletion paddle/phi/tests/core/test_ddim.cc
@@ -43,7 +43,8 @@ TEST(DDim, Equality) {
EXPECT_EQ(zero_ddim_vec.size(), size_t(0));

// reshape zero-DDim
-  phi::DDim reshape_ddim = zero_ddim.reshape({1});
+  std::vector<int> reshape_vec = {1};
+  phi::DDim reshape_ddim = zero_ddim.reshape(reshape_vec);
EXPECT_EQ(arity(reshape_ddim), 1);
EXPECT_EQ(reshape_ddim.size(), 1);
EXPECT_EQ(phi::product(reshape_ddim), 1);
@@ -688,8 +688,10 @@ def _insert_sendrecv_ops_in_while_block(
})
else:
var_shape = list(var.shape)
-var_shape[0] = self.micro_batch_size if var_shape[
-    0] < 0 else var_shape[0]
+print(var_name)
+if len(var.shape) > 0:
+    var_shape[0] = self.micro_batch_size if var_shape[
+        0] < 0 else var_shape[0]
block._insert_op_without_sync(
index=index,
type='recv_v2',
3 changes: 1 addition & 2 deletions python/paddle/fluid/backward.py
@@ -372,8 +372,7 @@ def _create_op_desc_(op_type, inputs, outputs, attrs):
def _create_loss_op_desc_(loss):
op_desc = _create_op_desc_(
"fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
"shape":
loss.shape,
"shape": [1],
"value":
1.0,
"dtype":
47 changes: 28 additions & 19 deletions python/paddle/fluid/layers/nn.py
@@ -209,6 +209,30 @@
}


+def _get_reduce_dim(dim, input):
+    """
+    Internal function for reduce_sum, reduce_mean, reduce_max, reduce_min, reduce_prod.
+    It computes the attribute reduce_all value based on axis.
+    """
+    if dim is not None and not isinstance(dim, list):
+        if isinstance(dim, (tuple, range)):
+            dim = list(dim)
+        elif isinstance(dim, int):
+            dim = [dim]
+        else:
+            raise TypeError(
+                "The type of dim must be int, list, tuple or range, but received {}"
+                .format(type(dim)))
+    if dim is None:
+        dim = []
+    if dim == [] or len(dim) == len(input.shape):
+        reduce_all = True
+    else:
+        reduce_all = False
+
+    return reduce_all, dim
+
+
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
@@ -4690,29 +4714,14 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
if dim is not None and not isinstance(dim, list):
    dim = [dim]

+reduce_all, dim = _get_reduce_dim(dim, input)
+
if in_dygraph_mode():
-    reduce_all = True if dim == None or dim == [] or len(dim) == len(
-        input.shape) else False
-    dim = dim if dim != None and dim != [] else [0]
-    if reduce_all:
-        return _C_ops.sum(input, [], None, keep_dim)
-    else:
-        return _C_ops.sum(input, dim, None, keep_dim)
+    return _C_ops.sum(input, dim, None, keep_dim)
elif _in_legacy_dygraph():
-    reduce_all = True if dim == None or dim == [] or len(dim) == len(
-        input.shape) else False
-    dim = dim if dim != None and dim != [] else [0]
    return _legacy_C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                    'reduce_all', reduce_all)
-attrs = {
-    'dim': dim if dim != None and dim != [] else [0],
-    'keep_dim': keep_dim,
-    'reduce_all': True
-    if dim == None or dim == [] or len(dim) == len(input.shape) else False
-}
+attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
check_variable_and_dtype(
    input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
    'reduce_sum')
@@ -89,6 +89,7 @@ def test_hybrid_parallel_inference_helper_mp1pp2(self):
value=0,
force_cpu=False,
name="cond_int")
+print(cond_int.shape)
cond = layers.less_than(x=step_idx, y=max_len)
while_op = layers.While(cond, is_test=True)
