Add im2sequence op. #4866
Changes from 18 commits
@@ -0,0 +1,160 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/block_expand_op.h"

namespace paddle {
namespace operators {

class BlockExpandOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input of BlockExpandOp should not be null.");

Reviewer: Input --> Input(X)
Author: Done.

    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output of BlockExpandOp op should not be null.");

Reviewer: Output --> Output(Out)
Author: Done.

    auto in_dim = ctx->GetInputDim("X");
    PADDLE_ENFORCE_EQ(in_dim.size(), 4,
                      "Input(X) format must be 4D tensor, e.g., NCHW.");

    int block_height = ctx->Attrs().Get<int>("block_height");
    int block_width = ctx->Attrs().Get<int>("block_width");
    int stride_height = ctx->Attrs().Get<int>("stride_height");
    int stride_width = ctx->Attrs().Get<int>("stride_width");
    int padding_height = ctx->Attrs().Get<int>("padding_height");
    int padding_width = ctx->Attrs().Get<int>("padding_width");

    int batch_size = in_dim[0];

Reviewer: Need to confirm the layout is NCHW.
Author: It seems that the layout of the input tensor is not available in the InferShape context.

    int img_channels = in_dim[1];
    int img_height = in_dim[2];
    int img_width = in_dim[3];

    int output_height = get_output_size(img_height, block_height, stride_height,
                                        padding_height);
    int output_width =
        get_output_size(img_width, block_width, stride_width, padding_width);

    ctx->SetOutputDim("Out", {batch_size * output_height * output_width,
                              img_channels * block_height * block_width});
    // TODO(wanghaoshuang): calculate lod at compile time
  }
};

class BlockExpandOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  BlockExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(Tensor) The input tensor has NCHW format. "
             "N: batch size, C: channels, H: height, W: width.");
    AddOutput("Out", "(LoDTensor) The output data of block_expand op.");
    AddAttr<int>("block_height", "(int) height of block.");
    AddAttr<int>("block_width", "(int) width of block.");
    AddAttr<int>("stride_height", "(int) height of stride.");
    AddAttr<int>("stride_width", "(int) width of stride.");
    AddAttr<int>("padding_height", "(int) height of padding.");
    AddAttr<int>("padding_width", "(int) width of padding.");
    AddComment(R"DOC(
Expand feature map to minibatch matrix.
- matrix height is: output_height * output_width
- matrix width is: block_height * block_width * channels

output_height =
    1 + (2 * padding_height + img_height - block_height + stride_height - 1) /
    stride_height;
output_width =
    1 + (2 * padding_width + img_width - block_width + stride_width - 1) /
    stride_width;

After expanding, the number of time steps is output_height * output_width
and the dimension of each time step is block_height * block_width * channels.
This op can be used after a convolutional neural network and before a
recurrent neural network.

Given:

x = [[[[ 6.  2.  1.]
       [ 8.  3.  5.]
       [ 0.  2.  6.]]

      [[ 2.  4.  4.]
       [ 6.  3.  0.]
       [ 6.  4.  7.]]]

     [[[ 6.  7.  1.]
       [ 5.  7.  9.]
       [ 2.  4.  8.]]

      [[ 1.  2.  1.]
       [ 1.  3.  5.]
       [ 9.  0.  8.]]]]

x.dims = {2, 2, 3, 3}

And:

block_height = 2
block_width = 2
stride_height = 1
stride_width = 1
padding_height = 0
padding_width = 0

Then:

output.data = [[ 6.  2.  8.  3.  2.  4.  6.  3.]
               [ 2.  1.  3.  5.  4.  4.  3.  0.]
               [ 8.  3.  0.  2.  6.  3.  6.  4.]
               [ 3.  5.  2.  6.  3.  0.  4.  7.]
               [ 6.  7.  5.  7.  1.  2.  1.  3.]
               [ 7.  1.  7.  9.  2.  1.  3.  5.]
               [ 5.  7.  2.  4.  1.  3.  9.  0.]
               [ 7.  9.  4.  8.  3.  5.  0.  8.]]
output.dims = {8, 8}
output.lod = [[0, 4, 8]]

)DOC");
  }
};

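Note that the divisions in the two formulas above are C-style integer divisions; adding (stride - 1) to the numerator turns the floor of integer division into a ceiling. In conventional notation, the same arithmetic reads:

output_height = 1 + ceil((2 * padding_height + img_height - block_height) / stride_height)
output_width  = 1 + ceil((2 * padding_width + img_width - block_width) / stride_width)
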
class BlockExpandGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) shouldn't be null.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(block_expand, ops::BlockExpandOp, ops::BlockExpandOpMaker,
            block_expand_grad, ops::BlockExpandGradOp);
REGISTER_OP_CPU_KERNEL(
    block_expand,
    ops::BlockExpandKernel<paddle::platform::CPUDeviceContext, float>);
REGISTER_OP_CPU_KERNEL(
    block_expand_grad,
    ops::BlockExpandGradKernel<paddle::platform::CPUDeviceContext, float>);
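
To check the shape arithmetic concretely, here is a minimal standalone sketch (plain C++, independent of Paddle; get_output_size simply mirrors the inline helper defined in the op's header) that reproduces the output dimensions and LoD of the worked example in the DOC string:

#include <cassert>
#include <cstdio>
#include <vector>

// Mirrors get_output_size() from the op's header (ceil-mode output size).
inline int get_output_size(int img_size, int block_size, int stride,
                           int padding) {
  return 1 + (img_size + 2 * padding - block_size + stride - 1) / stride;
}

int main() {
  // Values from the DOC example: x.dims = {2, 2, 3, 3}, 2x2 blocks,
  // stride 1, no padding.
  int batch_size = 2, img_channels = 2, img_height = 3, img_width = 3;
  int block_height = 2, block_width = 2;
  int stride_height = 1, stride_width = 1;
  int padding_height = 0, padding_width = 0;

  int output_height =
      get_output_size(img_height, block_height, stride_height, padding_height);
  int output_width =
      get_output_size(img_width, block_width, stride_width, padding_width);

  // One output row per block position per image; one column per element
  // of a (channels x block_height x block_width) block.
  int rows = batch_size * output_height * output_width;  // 2 * 2 * 2 = 8
  int cols = img_channels * block_height * block_width;  // 2 * 2 * 2 = 8
  assert(rows == 8 && cols == 8);

  // One LoD sequence per image, output_height * output_width steps each.
  std::vector<int> lod = {0};
  for (int i = 0; i < batch_size; ++i) {
    lod.push_back(lod.back() + output_height * output_width);
  }
  // lod == {0, 4, 8}, matching output.lod = [[0, 4, 8]] in the docs.
  std::printf("Out dims: {%d, %d}\n", rows, cols);
  return 0;
}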
@@ -0,0 +1,25 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/block_expand_op.h"

namespace ops = paddle::operators;

REGISTER_OP_CUDA_KERNEL(
    block_expand,
    ops::BlockExpandKernel<paddle::platform::CUDADeviceContext, float>);
REGISTER_OP_CUDA_KERNEL(
    block_expand_grad,
    ops::BlockExpandGradKernel<paddle::platform::CUDADeviceContext, float>);
@@ -0,0 +1,145 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/operators/math/math_function.h"

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/im2col.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;

// Output size along one dimension in ceil mode:
// 1 + ceil((img_size + 2 * padding - block_size) / stride).
inline int get_output_size(int img_size, int block_size, int stride,
                           int padding) {
  return (1 + (img_size + 2 * padding - block_size + stride - 1) / stride);
}

template <typename DeviceContext, typename T>
class BlockExpandKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const Tensor* in = ctx.Input<Tensor>("X");
    LoDTensor* out = ctx.Output<LoDTensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());

    auto in_dim = in->dims();
    int batch_size = in_dim[0];
    int img_channels = in_dim[1];
    int img_height = in_dim[2];
    int img_width = in_dim[3];
    int block_height = ctx.Attr<int>("block_height");
    int block_width = ctx.Attr<int>("block_width");
    int stride_height = ctx.Attr<int>("stride_height");
    int stride_width = ctx.Attr<int>("stride_width");
    int padding_height = ctx.Attr<int>("padding_height");
    int padding_width = ctx.Attr<int>("padding_width");

    int output_height = get_output_size(img_height, block_height, stride_height,
                                        padding_height);
    int output_width =
        get_output_size(img_width, block_width, stride_width, padding_width);

    const std::vector<int> dilations({1, 1});
    const std::vector<int> strides(
        {stride_height, stride_width, stride_height, stride_width});
    const std::vector<int> paddings(
        {padding_height, padding_width, padding_height, padding_width});

    auto out_dims = out->dims();
    out->Resize({batch_size, out->numel() / batch_size});
    for (int i = 0; i < batch_size; i++) {
      const Tensor src =
          in->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
      Tensor dst = out->Slice(i, i + 1).Resize({output_height, output_width,
                                                img_channels, block_height,
                                                block_width});

      math::Im2ColFunctor<math::ColFormat::kOCF, DeviceContext, T> f;
      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      f(dev_ctx, src, dilations, strides, paddings, &dst);
    }
    out->Resize(out_dims);

    // set lod information
    // TODO(wanghaoshuang): Move this to InferShape
    framework::LoD lod(1);
    for (int i = 0, offset = 0; i < batch_size + 1; ++i) {
      lod[0].push_back(offset);

Reviewer: Please reserve memory for …
Author: Thx. Fixed.

      offset += output_height * output_width;
    }
    out->set_lod(lod);
  }
};

template <typename DeviceContext, typename T>
class BlockExpandGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<Tensor>("X");
    Tensor* d_out =
        const_cast<Tensor*>(ctx.Input<Tensor>(framework::GradVarName("Out")));
    auto* d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    d_x->mutable_data<T>(ctx.GetPlace());

    auto x_v = framework::EigenVector<T>::Flatten(*d_x);
    auto& place = *ctx.template device_context<DeviceContext>().eigen_device();
    x_v.device(place) = x_v.constant(0.0);

    auto in_dim = in->dims();
    int batch_size = in_dim[0];
    int img_channels = in_dim[1];
    int img_height = in_dim[2];
    int img_width = in_dim[3];

    int block_height = ctx.Attr<int>("block_height");
    int block_width = ctx.Attr<int>("block_width");
    int stride_height = ctx.Attr<int>("stride_height");
    int stride_width = ctx.Attr<int>("stride_width");
    int padding_height = ctx.Attr<int>("padding_height");
    int padding_width = ctx.Attr<int>("padding_width");
    int output_height = get_output_size(img_height, block_height, stride_height,
                                        padding_height);
    int output_width =
        get_output_size(img_width, block_width, stride_width, padding_width);

    const std::vector<int> dilations({1, 1});
    const std::vector<int> strides(
        {stride_height, stride_width, stride_height, stride_width});
    const std::vector<int> paddings(
        {padding_height, padding_width, padding_height, padding_width});

    auto d_out_dims = d_out->dims();
    d_out->Resize({batch_size, d_out->numel() / batch_size});
    for (int i = 0; i < batch_size; i++) {
      Tensor dst =
          d_x->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
      const Tensor src = d_out->Slice(i, i + 1).Resize(
          {output_height, output_width, img_channels, block_height,
           block_width});
      math::Col2ImFunctor<math::ColFormat::kOCF, DeviceContext, T> f;
      auto& dev_ctx = ctx.template device_context<DeviceContext>();
      f(dev_ctx, src, dilations, strides, paddings, &dst);
    }
    d_out->Resize(d_out_dims);
  }
};

}  // namespace operators
}  // namespace paddle
Reviewer: We can discuss the name of this operator. It seems that BlockExpandOp doesn't explain itself well. I think we need a better name here, such as flatten_block_conv_way_op.
Author: Rename 'block_expand' to im2sequence, because this op is actually a wrapper of the im2col functor.
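
Since, as the thread above notes, the op is essentially a wrapper around the im2col functor, a rough mental model may help. The following is an illustrative sketch only, not Paddle's actual Im2ColFunctor from paddle/operators/math/im2col.h (whose real signature differs): a naive im2col over one CHW image writing the OCF layout {output_height, output_width, channels, block_height, block_width} that the forward kernel's dst Resize sets up, with padded positions reading as zero.

#include <vector>

// Naive im2col in OCF layout ({output_h, output_w, channels, block_h,
// block_w}) over a single CHW image with zero padding. Illustrative only.
void im2col_ocf_sketch(const float* img, int channels, int height, int width,
                       int block_h, int block_w, int stride_h, int stride_w,
                       int pad_h, int pad_w, int out_h, int out_w,
                       std::vector<float>* col) {
  col->assign(
      static_cast<size_t>(out_h) * out_w * channels * block_h * block_w, 0.0f);
  float* dst = col->data();
  for (int oh = 0; oh < out_h; ++oh) {
    for (int ow = 0; ow < out_w; ++ow) {
      // One time step: a flattened (channels x block_h x block_w) block.
      for (int c = 0; c < channels; ++c) {
        for (int bh = 0; bh < block_h; ++bh) {
          for (int bw = 0; bw < block_w; ++bw) {
            int h = oh * stride_h - pad_h + bh;  // source row in the image
            int w = ow * stride_w - pad_w + bw;  // source column
            if (h >= 0 && h < height && w >= 0 && w < width) {
              *dst = img[(c * height + h) * width + w];
            }
            ++dst;  // advance through the OCF layout in row-major order
          }
        }
      }
    }
  }
}

The backward pass is the mirror image: Col2ImFunctor scatter-adds each column entry back to its source pixel, which is why BlockExpandGradKernel zero-fills the gradient of Input(X) before the per-image loop; overlapping blocks contribute additively to the same pixel.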