match inplace slice copy pattern, rewrite copy uses #4338

Merged · 8 commits · Nov 10, 2022
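For context, the pattern this PR teaches pnnx to export is the common PyTorch idiom of assigning into a sliced view: the traced graph ends with an in-place aten::copy_ whose output is never read, so its side effect is easy to lose. A minimal sketch of such a model (module, tensor names, and shapes are illustrative, not taken from this PR):

import torch
import torch.nn as nn

class SliceCopyModel(nn.Module):
    def forward(self, x, y):
        # traces to aten::slice (one per sliced dim) followed by aten::copy_;
        # the copy_ result itself has no consumers in the graph
        x[:, 1:3, :] = y
        # later reads of x must still observe the assignment
        return x * 2

The changes below make that hidden side effect explicit: level 2 rewrites the in-place copy into a regular aten::copy and redirects later uses of the source tensor to the copy output, and level 5 fuses the slice chain plus the copy into a single Tensor.slice_copy op that the Python writer can print back out.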
2 changes: 2 additions & 0 deletions tools/pnnx/src/CMakeLists.txt
@@ -174,6 +174,7 @@ set(pnnx_pass_level2_SRCS
pass_level2/F_upsample_nearest.cpp
pass_level2/F_upsample.cpp
pass_level2/Tensor_contiguous.cpp
pass_level2/Tensor_copy.cpp
pass_level2/Tensor_expand.cpp
pass_level2/Tensor_expand_as.cpp
pass_level2/Tensor_index.cpp
@@ -314,6 +315,7 @@ set(pnnx_pass_level5_SRCS
pass_level5/fuse_contiguous_view.cpp
pass_level5/fuse_linear_batchnorm1d.cpp
pass_level5/fuse_select_to_unbind.cpp
pass_level5/fuse_slice_copy.cpp
pass_level5/fuse_slice_indices.cpp
pass_level5/fuse_slice_to_tensor_split.cpp
pass_level5/fuse_static_conv.cpp
7 changes: 7 additions & 0 deletions tools/pnnx/src/ir.cpp
@@ -1658,6 +1658,13 @@ int Graph::python(const std::string& pypath, const std::string& pnnxbinpath)
std::string slice_expr = make_slice_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), slice_expr.c_str());
}
else if (op->type == "Tensor.slice_copy")
{
// slice copy expr
std::string slice_expr = make_slice_expression(op);
fprintf(pyfp, "v_%s = v_%s\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str());
fprintf(pyfp, "v_%s[%s] = v_%s\n", sanitize_identifier(op->outputs[0]->name).c_str(), slice_expr.c_str(), sanitize_identifier(op->inputs[1]->name).c_str());
}
else if (op->type == "Tensor.index")
{
// index expr
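For the new Tensor.slice_copy branch above, the generated *_pnnx.py would contain an aliasing assignment followed by a sliced store, roughly as in this sketch (operand names and the slice expression are hypothetical):

# hypothetical lines emitted by Graph::python() for one Tensor.slice_copy op
v_out = v_self              # alias the first input operand
v_out[:, 1:3, :] = v_src    # write the second input into the slice expression

Note that v_out aliases v_self rather than cloning it, which matches the in-place semantics of the original model.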
4 changes: 0 additions & 4 deletions tools/pnnx/src/pass_level1.cpp
@@ -376,10 +376,6 @@ void pass_level1(const torch::jit::Module& mod, const std::shared_ptr<torch::jit

Operator* op = pg.new_operator(n->kind().toDisplayString(), name);

// always treat inplace op type as non-inplace version
if (op->type.size() > 2 && op->type[op->type.size() - 2] != '_' && op->type[op->type.size() - 1] == '_')
op->type = op->type.substr(0, op->type.size() - 1);

for (int i = 0; i < (int)n->inputs().size(); i++)
{
const auto& in = n->input(i);
104 changes: 104 additions & 0 deletions tools/pnnx/src/pass_level2.cpp
@@ -502,8 +502,112 @@ void pnnx_graph_rewrite(Graph& graph, const GraphRewriterPass* pass, int& opinde
}
}

static void fix_inplace_copy_output(Graph& graph)
{
while (1)
{
bool matched = false;
for (size_t i = 0; i < graph.ops.size(); i++)
{
Operator* op = graph.ops[i];

bool is_inplace_op = op->type.size() > 2 && op->type[op->type.size() - 2] != '_' && op->type[op->type.size() - 1] == '_';
if (!is_inplace_op)
continue;

// replace inplace op with non-inplace version
op->type = op->type.substr(0, op->type.size() - 1);

if (op->type == "aten::copy")
continue;

if (op->outputs[0]->consumers.size() != 0)
continue;

matched = true;

// find in0 from slice / select chain
Operand* in0 = op->inputs[0];
while (in0->producer->type == "aten::slice" || in0->producer->type == "aten::select")
{
in0 = in0->producer->inputs[0];
}

// append copy for inplace op
Operator* op_copy = graph.new_operator_after("aten::copy", op->name + "_copy", op);
Operand* copy_out = graph.new_operand(op->name + "_copy_out");

copy_out->shape = in0->shape;

op_copy->inputs.push_back(op->inputs[0]);
op_copy->inputs.push_back(op->outputs[0]);
op->inputs[0]->consumers.push_back(op_copy);
op->outputs[0]->consumers.push_back(op_copy);

op_copy->outputs.push_back(copy_out);
copy_out->producer = op_copy;

break;
}

if (!matched)
break;
}

for (size_t i = 0; i < graph.ops.size(); i++)
{
Operator* op = graph.ops[i];

if (op->type != "aten::copy")
continue;

if (op->outputs[0]->consumers.size() != 0)
continue;

// aten::slice 5 1 in0 .... a
// aten::slice 5 1 a .... b
// aten::copy 2 1 b in1 out

// aten::select 3 1 in0 .... a
// aten::copy 2 1 a in1 out

// find in0 from slice / select chain
Operand* in0 = op->inputs[0];
while (in0->producer->type == "aten::slice" || in0->producer->type == "aten::select")
{
in0 = in0->producer->inputs[0];
}

// replace all the following uses of in0 with out
Operand* out0 = op->outputs[0];
out0->shape = in0->shape;
for (size_t j = i; j < graph.ops.size(); j++)
{
Operator* op2 = graph.ops[j];

bool use_in0 = false;
for (size_t k = 0; k < op2->inputs.size(); k++)
{
if (op2->inputs[k] == in0)
{
op2->inputs[k] = out0;
use_in0 = true;
}
}

if (use_in0)
{
in0->remove_consumer(op2);
out0->consumers.push_back(op2);
}
}
}
}

void pass_level2(Graph& g)
{
fix_inplace_copy_output(g);

int opindex = 0;
for (auto x : g_global_pnnx_graph_rewriter_passes)
{
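To illustrate what fix_inplace_copy_output normalizes, consider an in-place op that writes through a slice/select view and whose result is never read directly. A hedged PyTorch-side sketch (module and tensor names are illustrative):

import torch
import torch.nn as nn

class InplaceOnView(nn.Module):
    def forward(self, x, y):
        # aten::select followed by aten::add_; the add_ output is unused
        x[0].add_(y)
        # this read of x must see the updated values: the pass appends an
        # explicit aten::copy after the in-place op and redirects all later
        # uses of x (the root of the slice/select chain) to the copy output
        return torch.relu(x)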
64 changes: 64 additions & 0 deletions tools/pnnx/src/pass_level2/Tensor_copy.cpp
@@ -0,0 +1,64 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "pass_level2.h"

namespace pnnx {

class Tensor_copy : public GraphRewriterPass
{
public:
const char* match_pattern_graph() const
{
return R"PNNXIR(7767517
5 4
pnnx.Input input_0 0 1 self
pnnx.Input input_1 0 1 src
prim::Constant op_0 0 1 non_blocking value=*
aten::copy op_1 3 1 self src non_blocking out
pnnx.Output output 1 0 out
)PNNXIR";
}

const char* type_str() const
{
return "Tensor.copy";
}
};

REGISTER_GLOBAL_PNNX_GRAPH_REWRITER_PASS(Tensor_copy, 20)

class Tensor_copy_1 : public GraphRewriterPass
{
public:
const char* match_pattern_graph() const
{
return R"PNNXIR(7767517
4 3
pnnx.Input input_0 0 1 self
pnnx.Input input_1 0 1 src
aten::copy op_1 2 1 self src out
pnnx.Output output 1 0 out
)PNNXIR";
}

const char* type_str() const
{
return "Tensor.copy";
}
};

REGISTER_GLOBAL_PNNX_GRAPH_REWRITER_PASS(Tensor_copy_1, 20)

} // namespace pnnx
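On the PyTorch side, the two match patterns above simply correspond to Tensor.copy_ with and without an explicit non_blocking argument; the trailing underscore is stripped by fix_inplace_copy_output before the level 2 rewriters run. A short sketch of code that would produce such aten::copy nodes (whether the non_blocking constant appears as a graph input depends on how the model is captured, hence the two pattern variants):

import torch

x = torch.zeros(2, 3)
y = torch.rand(2, 3)

x.copy_(y)                     # in-place tensor copy
x.copy_(y, non_blocking=True)  # same op with an explicit non_blocking flag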
3 changes: 3 additions & 0 deletions tools/pnnx/src/pass_level5.cpp
@@ -34,6 +34,7 @@
#include "pass_level5/fuse_contiguous_view.h"
#include "pass_level5/fuse_linear_batchnorm1d.h"
#include "pass_level5/fuse_select_to_unbind.h"
#include "pass_level5/fuse_slice_copy.h"
#include "pass_level5/fuse_slice_indices.h"
#include "pass_level5/fuse_slice_to_tensor_split.h"
#include "pass_level5/fuse_static_conv.h"
@@ -66,6 +67,8 @@ void pass_level5(Graph& g, const std::set<std::string>& foldable_constants, cons

fuse_slice_to_tensor_split(g);

fuse_slice_copy(g);

fuse_static_conv(g);

fuse_conv1d_batchnorm1d(g);
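fuse_slice_copy is scheduled after fuse_slice_to_tensor_split, so a remaining slice chain that feeds a Tensor.copy collapses into one Tensor.slice_copy. A rough end-to-end way to exercise the path (model, shapes, and the CLI invocation in the comment are illustrative, not from this PR):

import torch
import torch.nn as nn

class Model(nn.Module):
    def forward(self, x, y):
        x[:, 2:5] = y   # slice + copy, expected to fuse into Tensor.slice_copy
        return x

net = Model().eval()
x = torch.rand(1, 16, 64)
y = torch.rand(1, 3, 64)

mod = torch.jit.trace(net, (x, y))
mod.save("slice_copy.pt")
# then, for example:  pnnx slice_copy.pt inputshape=[1,16,64],[1,3,64]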
3 changes: 3 additions & 0 deletions tools/pnnx/src/pass_level5/fold_constants.cpp
@@ -22,6 +22,9 @@ namespace pnnx {

void fold_constants(Graph& graph, const std::set<std::string>& foldable_constants, const std::string& foldable_constants_zippath)
{
if (foldable_constants.empty())
return;

StoreZipReader zip;
zip.open(foldable_constants_zippath);
