Dev add contiguous view ops (#7503)
* view op

* narrow op

* squeeze unsqueeze op

* revert narrow
Flowingsun007 authored and marigoold committed Mar 15, 2022
1 parent 0b2779d commit 225c074
Showing 9 changed files with 312 additions and 107 deletions.
162 changes: 127 additions & 35 deletions oneflow/core/framework/tensor_methods.cpp
@@ -49,6 +49,15 @@ Maybe<bool> IsContiguous(const std::shared_ptr<Tensor>& tensor) {

namespace view {

bool IsViewApplicable(const std::shared_ptr<Tensor>& input) {
// NOTE: only eager local tensors support view for now
// the elem_cnt() >= 1 check excludes 0-size tensors
if (input->is_local() && !(LazyMode::is_enabled()) && input->shape()->elem_cnt() >= 1) {
return true;
}
return false;
}

Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
int64_t storage_offset) {
/**
@@ -64,7 +73,6 @@ Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& targe

Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
const Stride& target_stride, int64_t storage_offset) {
storage_offset = storage_offset + JUST(JUST(input->AsMirroredTensor())->storage_offset());
// TODO(): Check shape compatible.
auto device = JUST(input->device());
auto tensor_meta = std::make_shared<MirroredTensorMeta>(
@@ -86,38 +94,23 @@ Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& targe
return output;
}

Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& shape) {
if (!(input->is_eager() && input->is_local())) {
return Error::RuntimeError() << "view::Reshape(): input should be eager local tensor, but got "
<< (input->is_lazy() ? "lazy" : "consistent");
}
int need_infer_axis = -1;
size_t count = 1;
for (int i = 0; i < shape.NumAxes(); ++i) {
if (shape.At(i) < -1) {
return Error::RuntimeError() << "Invalid shape dimension " << shape.At(i);
} else if (shape.At(i) == -1) {
CHECK_EQ_OR_RETURN(need_infer_axis, -1)
<< "Shape " << shape.ToString() << " has more than 1 axis that needs to be infered.";
need_infer_axis = i;
} else {
count *= shape.At(i);
}
}
Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& target_shape) {
Stride target_stride(target_shape);
return Reshape(input, target_shape, target_stride);
}

std::shared_ptr<Tensor> output;
size_t x_count = input->shape()->Count(0);
if (need_infer_axis == -1) {
CHECK_EQ_OR_RETURN(shape.Count(0), x_count);
output = JUST(BasicView(input, shape, 0));
} else {
Shape infered_shape = shape;
infered_shape.Set(need_infer_axis, x_count / count);
CHECK_EQ_OR_RETURN(infered_shape.Count(0), x_count)
<< "Shape " << shape.ToString() << " is invalid for input of shape "
<< input->shape()->ToString();
output = JUST(BasicView(input, infered_shape, 0));
}
Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
const Stride& target_stride) {
// TODO(zhaoluyang): check that the input tensor is contiguous
CHECK_OR_RETURN(IsViewApplicable(input))
<< Error::RuntimeError()
<< "view::Reshape(): input should be eager local tensor with element count >=1 , but got "
<< (input->is_lazy() ? "lazy tensor" : "consistent tensor")
<< " with shape: " << input->shape()->ToString() << "; element count: " << input->nelement();

int64_t storage_offset = JUST(JUST(input->AsMirroredTensor())->storage_offset());
std::shared_ptr<Tensor> output =
JUST(BasicView(input, target_shape, target_stride, storage_offset));

if (autograd::GradMode::is_enabled() && input->requires_grad()) {
Shape input_shape(input->shape()->dim_vec());
@@ -128,7 +121,8 @@ Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& shape)
autograd::AutoGradMode mode(create_graph);
CHECK_EQ_OR_RETURN(out_grads.size(), 1);
in_grads->resize(1);
in_grads->at(0) = JUST(functional::Reshape(out_grads.at(0), input_shape));
*JUST(oneflow::VectorAt(in_grads, 0)) =
JUST(functional::Reshape(JUST(oneflow::VectorAt(out_grads, 0)), input_shape));
return Maybe<void>::Ok();
});
TensorTuple outputs{output};
@@ -140,9 +134,10 @@ Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& shape)

Maybe<Tensor> Slice(const std::shared_ptr<Tensor>& input, const std::vector<int64_t>& starts,
const std::vector<int64_t>& ends, const std::vector<int64_t>& steps) {
CHECK_OR_RETURN(input->is_eager() && input->is_local())
CHECK_OR_RETURN(IsViewApplicable(input))
<< Error::RuntimeError() << "view::Slice(): input should be eager local tensor, but is "
<< (input->is_lazy() ? "lazy" : "consistent");
<< (input->is_lazy() ? "lazy tensor" : "consistent tensor")
<< " with shape: " << input->shape()->ToString() << "; element count: " << input->nelement();
const auto& shape = input->shape();
const auto& strides = JUST(input->stride());
const int64_t ndim = starts.size();
@@ -192,6 +187,103 @@ Maybe<Tensor> Slice(const std::shared_ptr<Tensor>& input, const std::vector<int6
return output;
}

Maybe<Tensor> Unsqueeze(const std::shared_ptr<Tensor>& input, const int32_t& expand_dim) {
CHECK_OR_RETURN(IsViewApplicable(input))
<< Error::RuntimeError() << "view::Unsqueeze(): input should be eager local tensor, but got "
<< (input->is_lazy() ? "lazy tensor" : "consistent tensor")
<< " with shape: " << input->shape()->ToString() << "; element count: " << input->nelement();

const auto& shape = input->shape();
const auto& strides = JUST(input->stride());
const auto& ndim = shape->NumAxes();

DimVector target_dim_vec(ndim + 1);
StrideVector target_stride_vec(ndim + 1);

{
int cnt = 0;
for (int i = 0; i < ndim; i++) {
if (i == expand_dim) { cnt++; }
target_dim_vec[cnt] = shape->At(i);
target_stride_vec[cnt] = strides->At(i);
cnt++;
}
target_dim_vec[expand_dim] = 1;
target_stride_vec[expand_dim] = strides->At(expand_dim);
}

int64_t storage_offset = JUST(JUST(input->AsMirroredTensor())->storage_offset());
std::shared_ptr<Tensor> output =
JUST(BasicView(input, Shape(target_dim_vec), Stride(target_stride_vec), storage_offset));

if (autograd::GradMode::is_enabled() && input->requires_grad()) {
auto backward_fn =
std::make_shared<std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>(
[=](const TensorTuple& out_grads, TensorTuple* in_grads,
bool create_graph) -> Maybe<void> {
autograd::AutoGradMode mode(create_graph);
CHECK_EQ_OR_RETURN(out_grads.size(), 1);
in_grads->resize(1);
*JUST(oneflow::VectorAt(in_grads, 0)) =
JUST(functional::Reshape(JUST(oneflow::VectorAt(out_grads, 0)), *shape));
return Maybe<void>::Ok();
});
TensorTuple outputs{output};
JUST(GetThreadLocalAutogradEngine()->AddBackwardFuncPtr("view::unsqueeze_backward", backward_fn,
{input}, &outputs));
}
return output;
}

Maybe<Tensor> Squeeze(const std::shared_ptr<Tensor>& input,
const std::vector<int32_t>& squeeze_dims) {
CHECK_OR_RETURN(IsViewApplicable(input))
<< Error::RuntimeError() << "view::Squeeze(): input should be eager local tensor, but got "
<< (input->is_lazy() ? "lazy tensor" : "consistent tensor")
<< " with shape: " << input->shape()->ToString() << "; element count: " << input->nelement();

const auto& shape = input->shape();
const auto& strides = JUST(input->stride());
const int64_t ndim = shape->NumAxes();

const int target_ndim = ndim - squeeze_dims.size();
DimVector target_dim_vec(target_ndim);
StrideVector target_stride_vec(target_ndim);

{
int cnt = 0;
for (int i = 0; i < ndim; i++) {
    if (std::find(squeeze_dims.begin(), squeeze_dims.end(), i) == squeeze_dims.end()) {
target_dim_vec[cnt] = shape->At(i);
target_stride_vec[cnt] = strides->At(i);
cnt++;
}
}
}

int64_t storage_offset = JUST(JUST(input->AsMirroredTensor())->storage_offset());
std::shared_ptr<Tensor> output =
JUST(BasicView(input, Shape(target_dim_vec), Stride(target_stride_vec), storage_offset));

if (autograd::GradMode::is_enabled() && input->requires_grad()) {
auto backward_fn =
std::make_shared<std::function<Maybe<void>(const TensorTuple&, TensorTuple*, bool)>>(
[=](const TensorTuple& out_grads, TensorTuple* in_grads,
bool create_graph) -> Maybe<void> {
autograd::AutoGradMode mode(create_graph);
CHECK_EQ_OR_RETURN(out_grads.size(), 1);
in_grads->resize(1);
*JUST(oneflow::VectorAt(in_grads, 0)) = JUST(functional::Reshape(
JUST(oneflow::VectorAt(out_grads, 0)), Shape(input->shape()->dim_vec())));
return Maybe<void>::Ok();
});
TensorTuple outputs{output};
JUST(GetThreadLocalAutogradEngine()->AddBackwardFuncPtr("view::squeeze_backward", backward_fn,
{input}, &outputs));
}
return output;
}

} // namespace view
} // namespace one
} // namespace oneflow
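The three ops above return tensors that alias the input's storage instead of copying it. Below is a minimal sketch of the user-visible behavior, assuming the usual PyTorch-style `oneflow` Python bindings these C++ view ops back (the method names are the standard public tensor API, not taken from this diff):

```python
import oneflow as flow

x = flow.arange(6, dtype=flow.float32).reshape(2, 3)  # eager local, contiguous

v = x.view(3, 2)    # reshape as a view: same storage, new shape/stride
u = x.unsqueeze(1)  # shape (2, 1, 3), no copy
s = u.squeeze(1)    # drops the size-1 dim, back to (2, 3)

v[0, 0] = 100.0                  # a write through the view...
assert x[0, 0].numpy() == 100.0  # ...is visible in the base tensor
```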
17 changes: 15 additions & 2 deletions oneflow/core/framework/tensor_methods.h
@@ -28,14 +28,27 @@ Maybe<bool> IsContiguous(const std::shared_ptr<Tensor>& tensor);

namespace view {

bool IsViewApplicable(const std::shared_ptr<Tensor>& input);

Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
int64_t storage_offset);

Maybe<Tensor> BasicView(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
const Stride& target_strides, int64_t storage_offset);
const Stride& target_stride, int64_t storage_offset);

Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& shape);
Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& target_shape);

Maybe<Tensor> Reshape(const std::shared_ptr<Tensor>& input, const Shape& target_shape,
const Stride& target_stride);

Maybe<Tensor> Slice(const std::shared_ptr<Tensor>& input, const std::vector<int64_t>& starts,
const std::vector<int64_t>& ends, const std::vector<int64_t>& steps);

Maybe<Tensor> Unsqueeze(const std::shared_ptr<Tensor>& input, const int32_t& expand_dim);

Maybe<Tensor> Squeeze(const std::shared_ptr<Tensor>& input,
const std::vector<int32_t>& squeeze_dims);

} // namespace view
} // namespace one
} // namespace oneflow
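To make the stride bookkeeping behind the `Unsqueeze` and `Squeeze` declarations concrete, here is the same index arithmetic restated in plain Python (hypothetical helper names, not part of the OneFlow API):

```python
def unsqueeze_meta(shape, stride, expand_dim):
    # Mirror of view::Unsqueeze: dims at/after expand_dim shift right by
    # one, then a size-1 dim is inserted. Any stride is valid for a
    # size-1 dim; the C++ reuses strides->At(expand_dim), and this sketch
    # falls back to 1 when inserting at the end.
    new_shape, new_stride = list(shape), list(stride)
    new_shape.insert(expand_dim, 1)
    new_stride.insert(expand_dim,
                      stride[expand_dim] if expand_dim < len(stride) else 1)
    return new_shape, new_stride

def squeeze_meta(shape, stride, squeeze_dims):
    # Mirror of view::Squeeze: keep every dim not listed, with its stride.
    keep = [i for i in range(len(shape)) if i not in squeeze_dims]
    return [shape[i] for i in keep], [stride[i] for i in keep]

# A contiguous (2, 3) tensor has row-major strides (3, 1):
assert unsqueeze_meta((2, 3), (3, 1), 1) == ([2, 1, 3], [3, 1, 1])
assert squeeze_meta((2, 1, 3), (3, 1, 1), [1]) == ([2, 3], [3, 1])
```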
16 changes: 10 additions & 6 deletions oneflow/core/functional/functional_api.yaml
@@ -788,6 +788,12 @@
signature: "Tensor (Tensor input, Int32 dim) => Unsqueeze"
bind_python: True

- name: "squeeze"
signature: [
"Tensor (Tensor x, Int32List[1] dim=None) => Squeeze",
]
bind_python: True

- name: "exp"
signature: "Tensor (Tensor x) => Exp"
bind_python: True
@@ -1111,6 +1117,10 @@
signature: "Tensor (Tensor x, Shape shape) => Reshape"
bind_python: True

- name: "view"
signature: "Tensor (Tensor x, Shape shape) => View"
bind_python: True

- name: "slice_view_1d_contiguous"
signature: "Tensor (Tensor x, Int64 start, Int64 end) => SliceView1dContiguous"
bind_python: True
@@ -1143,12 +1153,6 @@
signature: "Void (Tensor ref, Tensor value, Int64List start, Int64List stop, Int64List step) => LogicalSliceAssign"
bind_python: True

- name: "squeeze"
signature: [
"Tensor (Tensor x, Int32List[1] dim=None) => Squeeze",
]
bind_python: True

- name: "copy"
signature: "Tensor (Tensor x, String device_type, Int64 device_id) => Copy"
bind_python: True
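For reference, the `Int32List[1] dim=None` signature on the relocated `squeeze` entry above lets callers pass nothing (squeeze every size-1 dim), a single int (promoted to a one-element list), or an explicit list. A sketch of the resulting call patterns, assuming the usual PyTorch-style semantics behind the binding:

```python
import oneflow as flow

x = flow.ones(2, 1, 3, 1)

print(flow.squeeze(x).shape)              # dim=None: drop all size-1 dims -> (2, 3)
print(flow.squeeze(x, dim=1).shape)       # scalar promoted to [1]         -> (2, 3, 1)
print(flow.squeeze(x, dim=[1, 3]).shape)  # explicit list                  -> (2, 3)
```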