Skip to content

Commit

Permalink
[Zero-Dim] support input 0D Tensor for some unary api (PaddlePaddle#4…
Browse files Browse the repository at this point in the history
…5992)

* [Zero-Dim] support input 0D Tensor for unary api

* fix CI
  • Loading branch information
zhwesky2010 committed Oct 27, 2022
1 parent 0369cd0 commit 92a11c1
Show file tree
Hide file tree
Showing 28 changed files with 909 additions and 173 deletions.
42 changes: 29 additions & 13 deletions paddle/fluid/framework/details/fetch_async_op_handle.cc
Original file line number Diff line number Diff line change
Expand Up @@ -164,24 +164,32 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
}
}

bool find_first_dims = false;
for (auto *t : src_lodtensors) {
if (t->numel() && t->IsInitialized()) {
if (!find_first_dims) {
new_dim = t->dims();
find_first_dims = true;
} else {
new_dim[0] += t->dims()[0];
}
}
}

// check src type,layout,dim,lod consistence
for (size_t i = 1; i < src_lodtensors.size(); ++i) {
CheckTensorAttrs(
src_lodtensors[i], new_type, new_layout, check_dim, new_lod, offset_);
}

auto rank = src_lodtensors[0]->dims().size();

// for 0D tensors, we can't concat each tensor, so stack 0D tensors and concat 1+D tensors
if (rank == 0) {
int src_lodtensor_size = src_lodtensors.size();
new_dim = phi::make_ddim(std::vector<int>({src_lodtensor_size}));
} else {
bool find_first_dims = false;
for (auto *t : src_lodtensors) {
if (t->numel() && t->IsInitialized()) {
if (!find_first_dims) {
new_dim = t->dims();
find_first_dims = true;
} else {
new_dim[0] += t->dims()[0];
}
}
}
}

// set dst tensor
dst_lodtensor->Resize(new_dim);
dst_lodtensor->set_layout(src_lodtensors[0]->layout());
Expand All @@ -195,9 +203,17 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
}

// slice and memcpy
// for 0D tensors, we can't concat each tensor, so stack them; for 1+D
// tensors, concat them
int begin = 0;
int end = 0;
for (auto *src : src_lodtensors) {
int end = begin + src->dims()[0];
if (rank == 0) {
end = begin + 1;
} else {
end = begin + src->dims()[0];
}

if (end == begin) {
continue;
}
Expand Down
2 changes: 2 additions & 0 deletions paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

#include <string>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"

namespace phi {
Expand Down Expand Up @@ -101,6 +102,7 @@ std::string ScaleLossGradOpHandle::LossGradName() const {
void ScaleLossGradOpHandle::RunImpl() {
platform::RecordEvent record_event(
Name(), platform::TracerEventType::UserDefined, 2);

RunOnVar(local_exec_scopes_[0]->FindVar(LossGradName()), true);
}

Expand Down
5 changes: 3 additions & 2 deletions paddle/fluid/framework/infershape_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -192,8 +192,9 @@ DDim CompatMetaTensor::dims() const {
} else {
auto* var = PADDLE_GET_CONST(VarDesc*, var_);

return var->GetShape().empty() ? phi::make_ddim({0UL})
: phi::make_ddim(var->GetShape());
return phi::make_ddim(var->GetShape());
// return var->GetShape().empty() ? phi::make_ddim({0UL}) :
// phi::make_ddim(var->GetShape());
}
}

Expand Down
39 changes: 29 additions & 10 deletions paddle/fluid/framework/lod_tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,16 @@ std::vector<LoDTensor> SplitLoDTensor(
platform::errors::InvalidArgument(
"Place number cannot be empty when splitting."));
src.check_memory_size();
auto rank = src.dims().size();
// if rank is 0, just return a copy of src (NOTE(review): comment originally
// claimed #places.size() copies, but only one is emplaced — verify intent)
if (rank == 0) {
LoDTensor dst;
framework::TensorCopy(src, src.place(), &dst);
std::vector<LoDTensor> ret;
ret.emplace_back(std::move(dst));
return ret;
}

size_t batch_size = src.lod().empty() ? static_cast<size_t>(src.dims()[0])
: src.lod()[0].size() - 1;

Expand Down Expand Up @@ -349,6 +359,7 @@ void MergeLoDTensor(LoDTensor *target,
}

LoD new_lod = lod_tensors[0]->lod();
auto rank = lod_tensors[0]->dims().size();

for (size_t i = 1; i < lod_tensors.size(); ++i) {
auto *t = lod_tensors[i];
Expand All @@ -369,16 +380,24 @@ void MergeLoDTensor(LoDTensor *target,
"actual layout is %s.",
DataLayoutToString(new_layout),
DataLayoutToString(t->layout())));
PADDLE_ENFORCE_EQ(
phi::product(new_dim) / new_dim[0],
phi::product(t->dims()) / t->dims()[0],
platform::errors::InvalidArgument(
"LoDTensor dimension does not match, all dimensions except the "
"first dimension need to be equal,"
"but expected dimension is %s, actual dimension is %s.",
new_dim,
t->dims()));
new_dim[0] += t->dims()[0];
auto tensor_dims = t->dims();
PADDLE_ENFORCE_EQ(tensor_dims.size(),
new_dim.size(),
platform::errors::InvalidArgument(
"dimensions of LoDTensor does not match"));
for (int j = 1; j < t->dims().size(); j++) {
PADDLE_ENFORCE_EQ(
tensor_dims[j],
new_dim[j],
platform::errors::InvalidArgument(
"LoDTensor.ddim[%d] should be equal to %d, but is %d",
j,
new_dim[j],
tensor_dims[j]));
}
if (rank > 0) {
new_dim[0] += t->dims()[0];
}
}

auto &lod = t->lod();
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/op_desc.cc
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
DDim res;
try {
auto shape = var->GetShape();
res = shape.empty() ? phi::make_ddim({0UL}) : phi::make_ddim(shape);
res = phi::make_ddim(shape);
} catch (...) {
VLOG(5) << "GetDim of variable " << name << " error";
std::rethrow_exception(std::current_exception());
Expand Down Expand Up @@ -1263,7 +1263,7 @@ std::vector<DDim> CompileTimeInferShapeContext::GetRepeatedDims(
try {
auto shapes = var->GetShapes();
for (const auto &s : shapes) {
res.push_back(s.empty() ? phi::make_ddim({0UL}) : phi::make_ddim(s));
res.push_back(phi::make_ddim(s));
}
} catch (...) {
VLOG(5) << "GetRepeatedDim of variable " << name << " error.";
Expand Down
11 changes: 8 additions & 3 deletions paddle/phi/core/utils/dim.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,15 @@ HOSTDEVICE inline Dim<sizeof...(Args)> make_dim(Args... idxes) {
// Allows us to output a Dim
//
// Prints the Dim's elements as a comma-separated list, e.g. "2, 3, 4".
// A 0-D Dim (D == 0) prints nothing, so callers embedding the result in
// brackets render it as "[]".
template <int D>
inline std::ostream& operator<<(std::ostream& os, const Dim<D>& d) {
  // Guarding on D avoids reading d[0] of a zero-rank Dim; the former
  // `else { os << ""; }` branch was a no-op and has been dropped.
  if (D > 0) {
    os << d[0];
    for (int i = 1; i < D; ++i) {
      os << ", " << d[i];
    }
  }

  return os;
}

Expand Down
13 changes: 11 additions & 2 deletions paddle/phi/infermeta/multiary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -301,9 +301,18 @@ void AddNInferMeta(const std::vector<const MetaTensor*>& x,
phi::DDim in_dim({0});
for (size_t i = 0; i < x.size(); ++i) {
auto x_dim = x[i]->dims();
// x_dim.size() == 1 means the real dim of selected rows is [0]
if (x[i]->is_selected_rows() && x_dim.size() == 1) {
continue;
}
// for zero-sized tensor
if (phi::product(x_dim) == 0) {
continue;
}
// for 0D tensor
if (x_dim.size() == 0) {
continue;
}
if (phi::product(in_dim) == 0) {
in_dim = x_dim;
} else {
Expand Down Expand Up @@ -2491,8 +2500,8 @@ void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
MetaTensor* warpctcgrad,
MetaTensor* loss) {
MetaTensor* loss,
MetaTensor* warpctcgrad) {
auto logits_dims = logits.dims();
int sequence_width = 0;

Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/multiary.h
Original file line number Diff line number Diff line change
Expand Up @@ -474,8 +474,8 @@ void WarpctcInferMeta(const MetaTensor& logits,
const MetaTensor& labels_length,
int blank,
bool norm_by_times,
MetaTensor* warpctcgrad,
MetaTensor* loss);
MetaTensor* loss,
MetaTensor* warpctcgrad);

void WhereInferMeta(const MetaTensor& condition,
const MetaTensor& x,
Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2668,7 +2668,7 @@ DDim ReduceInferDim(const MetaTensor& x,
x_rank,
errors::InvalidArgument(
"The reduce dim index %d should be in the "
"range [-dimension(X), dimension(X)] "
"range [ -dimension(X), dimension(X) ) "
"which dimension = %d. But received dim index = %d.",
i,
x_rank,
Expand All @@ -2677,7 +2677,7 @@ DDim ReduceInferDim(const MetaTensor& x,
-x_rank,
errors::InvalidArgument(
"The reduce dim index %d should be in the "
"range [-dimension(X), dimension(X)] "
"range [ -dimension(X), dimension(X) ) "
"which dimension = %d. But received dim index = %d.",
i,
x_rank,
Expand Down
18 changes: 18 additions & 0 deletions paddle/phi/kernels/funcs/unsqueeze.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,24 @@ inline DDim GetOutputSqueezeShape(const std::vector<int> squeeze_dims,
}
} else {
for (size_t i = 0; i < num_squeeze_dims; ++i) {
if (in_dims.size() == 0) {
PADDLE_ENFORCE_GE(
squeeze_dims[i],
-1,
phi::errors::InvalidArgument(
"For 0D Tensor, Each axis in Attr(axes) should be in the range "
"of [-1, 0]. "
"But current axis is:%d, input tensor's shape = [%s]."));
PADDLE_ENFORCE_LE(
squeeze_dims[i],
0,
phi::errors::InvalidArgument(
"For 0D Tensor, Each axis in Attr(axes) should be in the range "
"of [-1, 0]. "
"But current axis is:%d, input tensor's shape = [%s]."));
continue;
}

int current = squeeze_dims[i] < 0 ? squeeze_dims[i] + in_dims.size()
: squeeze_dims[i];

Expand Down
3 changes: 1 addition & 2 deletions paddle/phi/kernels/onednn/reduce_kernel_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,7 @@ inline std::vector<int64_t> CalculateReducedDims(
bool keep_dim) {
if (keep_dim) return vectorize(output->dims());

if (reduce_all && reduce_dims.size() > 0)
return std::vector<int64_t>(input->dims().size(), 1);
if (reduce_all) return std::vector<int64_t>(input->dims().size(), 1);

std::vector<int64_t> output_dims(vectorize(input->dims()));
for (size_t i = 0; i < reduce_dims.size(); ++i) {
Expand Down
Loading

0 comments on commit 92a11c1

Please sign in to comment.