Skip to content
This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

【PaddlePaddle Hackathon 77】Add squeeze op #863

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions cinn/frontend/base_builder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,14 @@ Variable BaseBuilder::Reshape(const Variable& operand, const std::vector<int>& s
return instr.GetOutput(0);
}

// Appends a "squeeze" instruction that removes size-1 dimensions of `operand`.
// `axis` lists the dimensions to consider; when empty, all size-1 dimensions
// are removed (see InferShapeForSqueeze).
Variable BaseBuilder::Squeeze(const Variable& operand, const std::vector<int>& axis) {
  Instruction instr("squeeze", {operand});
  // NOTE: the squeeze op reads this attribute under the key "axes"
  // (StrategyForSqueeze / InferShapeForSqueeze, and Program::squeeze sets
  // "axes" too). Storing it as "axis" would trip the strategy's
  // CHECK(attrs.attr_store.count("axes")) at lowering time.
  instr.SetAttr("axes", axis);
  InferShape(instr);
  AppendInstruction(instr);
  return instr.GetOutput(0);
}

Variable BaseBuilder::Transpose(const Variable& operand, const std::vector<int>& axis) {
Instruction instr("transpose", {operand});
instr.SetAttr("axis", axis);
Expand Down
2 changes: 2 additions & 0 deletions cinn/frontend/base_builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,8 @@ class BaseBuilder {

Variable Reshape(const Variable& operand, const std::vector<int>& shape);

Variable Squeeze(const Variable& operand, const std::vector<int>& axis);

Variable Transpose(const Variable& operand, const std::vector<int>& axis);

Variable Slice(const Variable& operand,
Expand Down
1 change: 1 addition & 0 deletions cinn/frontend/op_mappers/paddle/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,5 @@ gather_srcs(cinnapi_src SRCS
slice.cc
dropout.cc
transpose.cc
squeeze.cc
reshape.cc)
74 changes: 74 additions & 0 deletions cinn/frontend/op_mappers/paddle/squeeze.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
// Copyright (c) 2021 CINN Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "cinn/backends/cuda_util.h"
#include "cinn/frontend/op_mapper_registry.h"
#include "cinn/frontend/op_mappers/common_utils.h"

namespace cinn {
namespace frontend {
namespace paddle_mappers {

// Maps Paddle's "squeeze" op onto the frontend builder: reads input X and the
// "axes" attribute, emits a squeeze instruction, and binds the result to Out.
void SqueezeOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
  CHECK_EQ(op_desc.Input("X").size(), 1UL);
  auto input_name = op_desc.Input("X").front();
  CHECK_EQ(op_desc.Output("Out").size(), 1UL);
  auto output_name = op_desc.Output("Out").front();

  auto input = ctx.GetVar(input_name);
  VLOG(4) << "x shape: " << cinn::utils::Join(input->shape, ",");

  // "axes" defaults to empty, meaning squeeze every size-1 dimension.
  auto squeeze_axes = utils::GetAttrOrDefault<std::vector<int>>(op_desc, "axes");
  auto output       = ctx.Builder()->Squeeze(input, squeeze_axes);

  ctx.AddVar(output_name, output);
  ctx.AddVarModelToProgram(output_name, output->id);
}

// Maps Paddle's "squeeze_grad" op. Squeeze only drops size-1 dimensions, so
// its gradient is just dOut reshaped back to X's original shape.
void SqueezeGradOpMapper(const paddle::cpp::OpDesc& op_desc, const OpMapperContext& ctx) {
  // Fetch the single variable bound to an input slot.
  auto single_input = [&op_desc, &ctx](const std::string& slot) {
    CHECK_EQ(op_desc.Input(slot).size(), 1UL);
    return ctx.GetVar(op_desc.Input(slot).front());
  };

  // Fetch the single variable name bound to an output slot.
  auto single_output_name = [&op_desc](const std::string& slot) {
    CHECK_EQ(op_desc.Output(slot).size(), 1UL);
    return op_desc.Output(slot).front();
  };

  auto dout = single_input(paddle::GradVarName("Out"));
  VLOG(4) << "dout shape: " << cinn::utils::Join(dout->shape, ",");

  auto x = single_input("X");
  VLOG(4) << "x shape: " << cinn::utils::Join(x->shape, ",");

  // dX = reshape(dOut, shape(X)).
  auto dx      = ctx.Builder()->Reshape(dout, x->shape);
  auto dx_name = single_output_name(paddle::GradVarName("X"));
  ctx.AddVar(dx_name, dx);
  ctx.AddVarModelToProgram(dx_name, dx->id);
}

} // namespace paddle_mappers
} // namespace frontend
} // namespace cinn

// Registers the paddle->CINN op mappers for squeeze; pulled in from
// use_op_mappers.h via CINN_USE_REGISTER(paddle_squeeze).
CINN_REGISTER_HELPER(paddle_squeeze) {
// Forward op: paddle "squeeze" -> SqueezeOpMapper.
CINN_REGISTER_OP_MAPPER(squeeze, cinn::frontend::paddle_mappers::SqueezeOpMapper)

// Backward op: paddle "squeeze_grad" -> SqueezeGradOpMapper (a reshape).
CINN_REGISTER_OP_MAPPER(squeeze_grad, cinn::frontend::paddle_mappers::SqueezeGradOpMapper)
return true;
}
1 change: 1 addition & 0 deletions cinn/frontend/op_mappers/use_op_mappers.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ CINN_USE_REGISTER(paddle_pool2d)
CINN_USE_REGISTER(paddle_conv2d)
CINN_USE_REGISTER(paddle_transpose)
CINN_USE_REGISTER(paddle_reshape)
CINN_USE_REGISTER(paddle_squeeze)

CINN_USE_REGISTER(science_broadcast)
CINN_USE_REGISTER(science_transform)
Expand Down
15 changes: 15 additions & 0 deletions cinn/frontend/paddle_model_to_program.cc
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,21 @@ void PaddleModelToProgram::AddOpMapper_reshape2() {
};
}

// Registers a mapper for the paddle "squeeze2" op (same X/Out/axes contract
// as "squeeze"); translates it into a Program::squeeze call.
void PaddleModelToProgram::AddOpMapper_squeeze2() {
  op_mappers_["squeeze2"] = [&](const paddle::cpp::OpDesc& op_desc) {
    CHECK_EQ(op_desc.Input("X").size(), 1UL);
    CHECK_EQ(op_desc.Output("Out").size(), 1UL);
    auto input_name  = op_desc.Input("X").front();
    auto output_name = op_desc.Output("Out").front();

    auto input = GetVar(utils::TransValidVarName(input_name));
    VLOG(4) << "x shape: " << utils::Join(input->shape, ",");

    auto axes   = op_desc.GetAttr<std::vector<int>>("axes");
    auto output = program_->squeeze(input, axes);

    AddVar(utils::TransValidVarName(output_name), output);
    var_model_to_program_map_[output_name] = output->id;
  };
}

void PaddleModelToProgram::AddOpMapper_concat() {
op_mappers_["concat"] = [&](const paddle::cpp::OpDesc& op_desc) {
int input_size = op_desc.Input("X").size();
Expand Down
2 changes: 2 additions & 0 deletions cinn/frontend/paddle_model_to_program.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ class PaddleModelToProgram {
AddOpMapper_dropout_infer();
AddOpMapper_matmul();
AddOpMapper_reshape2();
AddOpMapper_squeeze2();
AddOpMapper_concat();
AddOpMapper_assign();
AddOpMapper_fill_constant();
Expand Down Expand Up @@ -96,6 +97,7 @@ class PaddleModelToProgram {
void AddOpMapper_dropout_infer();
void AddOpMapper_matmul();
void AddOpMapper_reshape2();
void AddOpMapper_squeeze2();
void AddOpMapper_concat();
void AddOpMapper_assign();
void AddOpMapper_fill_constant();
Expand Down
7 changes: 7 additions & 0 deletions cinn/frontend/syntax.cc
Original file line number Diff line number Diff line change
Expand Up @@ -457,6 +457,13 @@ Variable Program::reshape(const Variable& a, const std::vector<int>& shape) {
return instr.GetOutput(0);
}

// Appends a "squeeze" instruction to the program: removes size-1 dimensions
// of `a`, restricted to the dimensions listed in `axes` when non-empty.
Variable Program::squeeze(const Variable& a, const std::vector<int>& axes) {
  Instruction squeeze_instr("squeeze", {a});
  squeeze_instr.SetAttr("axes", axes);
  AppendInstruction(squeeze_instr);
  return squeeze_instr.GetOutput(0);
}

Variable Program::concat(const std::vector<Variable>& input_vars, int axis) {
Instruction instr("concat", input_vars);
instr.SetAttr("axis", axis);
Expand Down
8 changes: 8 additions & 0 deletions cinn/frontend/syntax.h
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,14 @@ struct Program {
*/
Variable reshape(const Variable& a, const std::vector<int>& shape);

/**
* Squeeze a tensor.
* @param a The input tensor.
* @param axes The dimensions to squeeze; when empty, all size-1 dimensions are removed.
* @return The squeezed output tensor.
*/
Variable squeeze(const Variable& a, const std::vector<int>& axes);

/**
* Concat tensors.
* @param input_vars The input tensors.
Expand Down
144 changes: 144 additions & 0 deletions cinn/hlir/op/transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -414,6 +414,137 @@ std::vector<std::vector<std::string>> InferLayoutForReshape(const std::vector<fr
}
}

// Builds the compute/schedule strategy for the "squeeze" op.
// Requires the "axes" attribute; the output shape has already been decided by
// InferShapeForSqueeze and arrives via `output_shapes`.
std::shared_ptr<OpStrategy> StrategyForSqueeze(const framework::NodeAttr &attrs,
                                               const std::vector<ir::Tensor> &inputs,
                                               const std::vector<Type> &out_type,
                                               const std::vector<std::vector<int>> &output_shapes,
                                               const Target &target) {
  CHECK(attrs.attr_store.count("axes")) << "find no attr of axes";
  std::vector<int> axes = absl::get<std::vector<int>>(attrs.attr_store.at("axes"));

  framework::CINNCompute squeeze_compute([=](lang::Args args, lang::RetValue *ret) {
    CHECK(!args.empty()) << "The input arguments of Squeeze compute is empty! Please check.\n";
    CINNValuePack a = args[0];
    CHECK_GE(a.size(), 1U) << "at least 1 input tensors for Squeeze compute\n";
    Expr A = a[0];
    CHECK(A.as_tensor());
    CHECK(!output_shapes.empty());
    // Validate the output type before doing any work.
    CHECK(!out_type.empty()) << "Output type of Squeeze is empty! Please check.\n";
    auto tensor_A = A.as_tensor_ref();
    auto stages   = CreateStages({tensor_A});
    VLOG(3) << "A shape: " << utils::Join(tensor_A->shape, ", ")
            << ", output_shapes: " << utils::Join(output_shapes[0], ", ");
    ir::Tensor out = pe::Squeeze(tensor_A, axes, stages, UniqName("Squeeze_out"));
    stages->InsertLazily(out);
    std::vector<CINNValue> res;
    res.push_back(CINNValue(out));
    res.push_back(CINNValue(stages));
    *ret = CINNValuePack{res};
  });

  framework::CINNSchedule squeeze_schedule([=](lang::Args args, lang::RetValue *ret) {
    // NOTE: message previously said "reshape schedule" — copy-paste slip.
    CHECK(!args.empty()) << "The input argument of Squeeze schedule is empty! Please check.\n";
    CINNValuePack arg_pack = args[0];
    poly::StageMap stages  = arg_pack.back();
    Expr out               = arg_pack[0];
    CHECK(out.as_tensor());
    // Squeeze is injective: reuse the generic injective schedules per target.
    if (target.arch == Target::Arch::NVGPU) {
      pe::CudaScheduleInjective(stages[out.as_tensor_ref()], output_shapes[0], target);
    } else if (target.arch == Target::Arch::X86) {
      pe::ScheduleInjectiveCPU(stages[out.as_tensor_ref()], output_shapes[0], target);
    }
    *ret = arg_pack;
  });

  auto strategy = std::make_shared<framework::OpStrategy>();
  strategy->AddImpl(squeeze_compute, squeeze_schedule, "strategy.squeeze.x86", 1);
  return strategy;
}

// Infers the output shape of "squeeze".
// When "axes" is non-empty, exactly those dimensions are removed (negative
// axes count from the back, as in Paddle); otherwise every size-1 dimension
// of the input is removed.
std::vector<std::vector<int>> InferShapeForSqueeze(const std::vector<std::vector<int>> &inputs_shape,
                                                   const framework::AttrMapType &attrs) {
  CHECK_EQ(inputs_shape.size(), 1U) << "The input's shape size should be 1! Please check again.";
  const std::vector<int> &input_shape = inputs_shape[0];
  const int rank                      = static_cast<int>(input_shape.size());

  std::vector<int> axes;
  for (auto &iter : attrs) {
    if (iter.first == "axes") {
      axes = absl::get<std::vector<int>>(iter.second);
      break;
    }
  }

  std::vector<int> output_shape;
  if (!axes.empty()) {
    // Mark the dimensions chosen for removal; normalize negative axes first
    // (the previous implementation indexed the shape with a raw negative int).
    std::vector<bool> to_remove(rank, false);
    for (int axis : axes) {
      if (axis < 0) axis += rank;
      CHECK(axis >= 0 && axis < rank) << "The axis to squeeze is out of range: " << axis << " vs rank " << rank;
      to_remove[axis] = true;
    }
    for (int i = 0; i < rank; ++i) {
      if (!to_remove[i]) output_shape.push_back(input_shape[i]);
    }
  } else {
    // No axes given: drop every dimension of extent 1.
    for (int dim : input_shape) {
      if (dim != 1) output_shape.push_back(dim);
    }
  }

  CHECK(!output_shape.empty()) << "infer_shape for squeeze turns out to be empty. Please check\n";
  return {output_shape};
}

// Infers the output dtype of "squeeze": squeezing never changes the element
// type, so the output simply inherits the input's type.
std::vector<Type> InferDtypeForSqueeze(const std::vector<Type> &inputs_type, const framework::AttrMapType &attrs) {
  CHECK(!inputs_type.empty()) << "The input's type size is 0! Please check again.";
  return {inputs_type[0]};
}

// Infers the data layout for "squeeze". Inputs with rank > 4 cannot carry a
// transformed (e.g. blocked 5-D) layout through this op, so the layout is
// reset to plain NCHW in that case; otherwise it is passed through unchanged.
std::vector<std::vector<std::string>> InferLayoutForSqueeze(const std::vector<framework::shape_t> &input_shapes,
                                                            const std::vector<std::string> &input_layouts,
                                                            const framework::NodeAttr &attrs,
                                                            const Target &target) {
  CHECK_EQ(input_shapes.size(), 1U) << "The input's shape size is not 1! Please check again.";
  CHECK_EQ(input_layouts.size(), 1U) << "The input's layout size is not 1! Please check again.";
  std::vector<std::string> layouts = input_layouts;
  if (input_shapes[0].size() > 4) {
    // alter input layout back
    VLOG(3) << "alter input layout from " << input_layouts[0] << " to "
            << "NCHW";
    layouts[0] = "NCHW";
  }
  return {layouts, layouts};
}

std::shared_ptr<OpStrategy> StrategyForSplit(const framework::NodeAttr &attrs,
const std::vector<ir::Tensor> &inputs,
const std::vector<Type> &out_type,
Expand Down Expand Up @@ -1892,6 +2023,19 @@ CINN_REGISTER_HELPER(transform_ops) {
.set_attr<cinn::hlir::framework::OpPatternKind>("OpPattern", cinn::hlir::framework::OpPatternKind::kOpaque)
.set_support_level(4);

// Registers the "squeeze" op with its strategy and shape/dtype/layout
// inference functions. kOpaque keeps it out of fusion passes.
CINN_REGISTER_OP(squeeze)
.describe("This operator is used to squeeze input tensor X.")
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<cinn::hlir::framework::StrategyFunction>("CINNStrategy", cinn::hlir::op::StrategyForSqueeze)
.set_attr("infershape", MakeOpFunction(cinn::hlir::op::InferShapeForSqueeze))
.set_attr("inferdtype", MakeOpFunction(cinn::hlir::op::InferDtypeForSqueeze))
// Layout inference is only wired up for non-CUDA builds (matches reshape).
#ifndef CINN_WITH_CUDA
.set_attr("inferlayout", MakeOpFunction(cinn::hlir::op::InferLayoutForSqueeze))
#endif
.set_attr<cinn::hlir::framework::OpPatternKind>("OpPattern", cinn::hlir::framework::OpPatternKind::kOpaque)
.set_support_level(4);

CINN_REGISTER_OP(split)
.describe("This operator is used to split tensors X to 'sections' sub-tensor on specified axis.")
.set_num_inputs(1)
Expand Down
Loading