[REFACTOR][RELAY] Move invoke_tvm_op and shape_func to vm dialect (apache#5958)

* [REFACTOR][RELAY] Move invoke_tvm_op and shape_func to vm dialect

* address comments
zhiics authored and trevor-m committed Jul 14, 2020
1 parent 43a3560 commit 3d77cd9
Showing 11 changed files with 230 additions and 185 deletions.
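
For downstream code, the user-visible change is the Python namespace: both helpers move from the memory dialect to the vm dialect. A minimal before/after sketch, using placeholder variables that are not taken from the patch:

```python
from tvm import relay
from tvm.relay import op

# Placeholder primitive function and tupled inputs/outputs, for illustration only.
x = relay.var("x", shape=(4,), dtype="float32")
prim = relay.Function([x], x)
ins = relay.Tuple([x])
outs = relay.Tuple([relay.var("o", shape=(4,), dtype="float32")])

# Before this commit: call = op.memory.invoke_tvm_op(prim, ins, outs)
# After this commit, the op lives in the vm dialect:
call = op.vm.invoke_tvm_op(prim, ins, outs)
```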
13 changes: 0 additions & 13 deletions include/tvm/relay/attrs/memory.h
@@ -74,19 +74,6 @@ struct AllocTensorAttrs : public tvm::AttrsNode<AllocTensorAttrs> {
}
};

/*!
* \brief Options for the shape function operator.
*/
struct ShapeFuncAttrs : public tvm::AttrsNode<ShapeFuncAttrs> {
Array<Integer> is_input;

TVM_DECLARE_ATTRS(ShapeFuncAttrs, "relay.attrs.ShapeFuncAttrs") {
TVM_ATTR_FIELD(is_input).describe(
"A bool indicating whether the shape function should"
"expect shape or input in each position.");
}
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_MEMORY_H_
47 changes: 47 additions & 0 deletions include/tvm/relay/attrs/vm.h
@@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* \file tvm/relay/attrs/vm.h
* \brief Attributes for Relay vm operators.
*/
#ifndef TVM_RELAY_ATTRS_VM_H_
#define TVM_RELAY_ATTRS_VM_H_

#include <tvm/ir/attrs.h>

namespace tvm {
namespace relay {

/*!
* \brief Options for the shape function operator.
*/
struct ShapeFuncAttrs : public tvm::AttrsNode<ShapeFuncAttrs> {
Array<Integer> is_input;

TVM_DECLARE_ATTRS(ShapeFuncAttrs, "relay.attrs.ShapeFuncAttrs") {
TVM_ATTR_FIELD(is_input).describe(
"A bool indicating whether the shape function should"
"expect shape or input in each position.");
}
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_VM_H_
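
The struct moves verbatim from memory.h. Its is_input list holds one flag per function parameter, and ShapeFuncRel (visible in the memory.cc diff below) expands that to one flag per flattened tensor field. A small illustrative Python restatement of that expansion:

```python
def expand_is_input(arg_field_counts, is_input):
    """Mirror of the loop in ShapeFuncRel: each parameter's flag is repeated
    once per tensor produced by flattening that parameter (1 for a plain
    tensor, len(fields) for a tuple)."""
    expanded = []
    for count, flag in zip(arg_field_counts, is_input):
        expanded.extend([flag] * count)
    return expanded

# A function taking (tensor, 2-tuple) flagged [True, False]:
assert expand_is_input([1, 2], [True, False]) == [True, False, False]
```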
2 changes: 1 addition & 1 deletion python/tvm/relay/op/__init__.py
@@ -27,7 +27,7 @@
from .tensor import *
from .transform import *
from .algorithm import *
from .vm import *
from . import vm
from . import nn
from . import annotation
from . import memory
40 changes: 0 additions & 40 deletions python/tvm/relay/op/memory/memory.py
@@ -19,27 +19,6 @@
from __future__ import absolute_import as _abs
from . import _make

def invoke_tvm_op(func, inputs, outputs):
"""Call a primitive function with the TVM operator calling convention.
Parameters
----------
func : tvm.relay.Expr
The input expr.
inputs : tvm.relay.Expr
A tuple of the inputs to pass to the TVM function.
outputs : tvm.relay.Expr
A tuple of the outputs to pass to the TVM function.
Returns
-------
result : tvm.relay.Expr
The invoke_tvm_op call node.
"""
return _make.invoke_tvm_op(func, inputs, outputs)

def alloc_tensor(storage, offset, shape, dtype='float32', assert_shape=None):
"""Allocate a tensor with the provided shape, and dtype.
@@ -85,25 +64,6 @@ def alloc_storage(size, alignment, ctx, dtype_hint='float32'):
"""
return _make.alloc_storage(size, alignment, ctx, dtype_hint)

def shape_func(func, inputs, outputs, dependent=False):
"""Invoke the shape function of the passed function.
Parameters
----------
func : tvm.relay.Expr
The primitive function from which to compute the shape function.
inputs : tvm.relay.Tuple
The tupled inputs.
outputs : tvm.relay.Tuple
The tupled outputs.
Returns
-------
result : tvm.relay.Expr
The shape function expression.
"""
return _make.shape_func(func, inputs, outputs, dependent)

def flatten_tuple_type(ty):
"""Return a sequence of the types contained in the tuple type in order.
2 changes: 1 addition & 1 deletion python/tvm/relay/op/vm/__init__.py
@@ -17,4 +17,4 @@
# pylint: disable=wildcard-import
"""Dialect operators for Relay VM."""
from __future__ import absolute_import as _abs
from . import vm
from .vm import *
48 changes: 48 additions & 0 deletions python/tvm/relay/op/vm/vm.py
@@ -33,3 +33,51 @@ def shape_of(expr):
The expression with the evaluated tensor shape.
"""
return _ffi_api.shape_of(expr)


def invoke_tvm_op(func, inputs, outputs):
"""Call a primitive function with the TVM operator calling convention.
Parameters
----------
func : tvm.relay.Expr
The input expr.
inputs : tvm.relay.Expr
A tuple of the inputs to pass to the TVM function.
outputs : tvm.relay.Expr
A tuple of the outputs to pass to the TVM function.
Returns
-------
result : tvm.relay.Expr
The invoke_tvm_op call node.
"""
return _ffi_api.invoke_tvm_op(func, inputs, outputs)


def shape_func(func, inputs, outputs, is_inputs):
"""Invoke the shape function of the passed function.
Parameters
----------
func : tvm.relay.Expr
The primitive function from which to compute the shape function.
inputs : tvm.relay.Tuple
The tupled inputs.
outputs : tvm.relay.Tuple
The tupled outputs.
is_inputs : List[bool]
A boolean list indicating whether the shape function should expect
shape or input at each position.
Returns
-------
result : tvm.relay.Expr
The shape function expression.
"""
return _ffi_api.shape_func(func, inputs, outputs, is_inputs)
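
The new is_inputs parameter replaces the old dependent flag and feeds ShapeFuncAttrs::is_input directly: True at a position means the shape function consumes that tensor's values, False means it consumes only the tensor's shape (as an int64 tensor, per ShapeFuncRel). A hedged usage sketch with invented names:

```python
from tvm import relay
from tvm.relay import op

# Hypothetical one-argument primitive whose output shape depends only on
# the input's shape, hence is_inputs=[False].
x = relay.var("x", shape=(relay.Any(),), dtype="float32")
prim = relay.Function([x], x)
ins = relay.Tuple([x])
# Per ShapeFuncRel, each output of the shape function is a rank-length
# int64 tensor describing the corresponding tensor's shape.
outs = relay.Tuple([relay.var("out_shape", shape=(1,), dtype="int64")])
sf = op.vm.shape_func(prim, ins, outs, [False])
```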
4 changes: 2 additions & 2 deletions python/tvm/relay/transform/memory_alloc.py
@@ -42,8 +42,8 @@ class ManifestAllocPass(ExprMutator):
"""A pass for explicitly manifesting all memory allocations in Relay."""

def __init__(self, target_host):
self.invoke_tvm = op.memory.invoke_tvm_op
self.shape_func = op.memory.shape_func
self.invoke_tvm = op.vm.invoke_tvm_op
self.shape_func = op.vm.shape_func
self.shape_of = op.vm.shape_of
self.scopes = [ScopeBuilder()]
self.target_host = target_host
4 changes: 2 additions & 2 deletions src/relay/backend/vm/compiler.cc
@@ -519,7 +519,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
if (op.as<OpNode>()) {
OpMatch<void> matcher;
matcher
.Match("memory.invoke_tvm_op",
.Match("vm.invoke_tvm_op",
[this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
CHECK_EQ(args.size(), 3);
EmitInvokeTVMOp(Downcast<Function>(args[0]), args[1], args[2]);
@@ -581,7 +581,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {

Emit(Instruction::AllocStorage(size_register, alignment, dtype, NewRegister()));
})
.Match("memory.shape_func",
.Match("vm.shape_func",
[this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
CHECK_EQ(args.size(), 3);
auto shape_func = Downcast<Function>(args[0]);
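
On the compiler side only the matched op names change; the emitted VM instructions are identical. Compiling any Relay module through the VM exercises the renamed vm.invoke_tvm_op after ManifestAlloc runs. A hedged smoke-test sketch (API names as of roughly this era of TVM):

```python
import tvm
from tvm import relay

# A trivial module; memory planning rewrites the primitive call into
# vm.invoke_tvm_op before VMFunctionCompiler matches on it.
x = relay.var("x", shape=(2, 2), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], x + x))
exe = relay.vm.compile(mod, target="llvm")
```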
124 changes: 0 additions & 124 deletions src/relay/op/memory/memory.cc
@@ -38,7 +38,6 @@ namespace relay {

TVM_REGISTER_NODE_TYPE(AllocStorageAttrs);
TVM_REGISTER_NODE_TYPE(AllocTensorAttrs);
TVM_REGISTER_NODE_TYPE(ShapeFuncAttrs);

// The passing value in attrs and args doesn't seem super great.
// We should consider a better solution, i.e the type relation
@@ -197,54 +196,6 @@ RELAY_REGISTER_OP("memory.alloc_tensor")
return {topi::identity(inputs[0])};
});

bool InvokeTVMOPRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 4u);
auto func_type = types[0].as<FuncTypeNode>();
CHECK(func_type != nullptr) << "input must be operator with known type";
auto input_type = types[1].as<TupleTypeNode>();
auto output_type = types[2].as<TupleTypeNode>();
CHECK(input_type != nullptr)
<< "internal invariant violated: invoke_tvm_op inputs must be a tuple";
CHECK(output_type != nullptr)
<< "internal invariant violated: invoke_tvm_op outputs must be a tuple";
Type ex_output;
if (func_type->ret_type.as<TensorTypeNode>()) {
ex_output = TupleType({func_type->ret_type});
} else {
CHECK(func_type->ret_type.as<TupleTypeNode>()) << "should be tuple type";
ex_output = func_type->ret_type;
}
auto ex_input = TupleType(func_type->arg_types);
reporter->Assign(ex_input, GetRef<Type>(input_type));
reporter->Assign(ex_output, GetRef<Type>(output_type));
reporter->Assign(types[3], TupleType::Empty());
return true;
}

TVM_REGISTER_GLOBAL("relay.op.memory._make.invoke_tvm_op")
.set_body_typed([](Expr func, Expr inputs, Expr outputs) {
return Call(Op::Get("memory.invoke_tvm_op"), {func, inputs, outputs}, Attrs());
});

RELAY_REGISTER_OP("memory.invoke_tvm_op")
.describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("op", "Function", "The operation to call")
.add_argument("ins", "Tuple", "The input tensors.")
.add_argument("outs", "Tuple", "The output tensors.")
.add_type_rel("InvokeTVMOP", InvokeTVMOPRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});

bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 2u);
@@ -269,14 +220,6 @@ RELAY_REGISTER_OP("memory.kill")
return {topi::identity(inputs[0])};
});

TVM_REGISTER_GLOBAL("relay.op.memory._make.shape_func")
.set_body_typed([](Expr func, Expr inputs, Expr outputs, Array<tvm::Integer> is_input) {
static const Op& op = Op::Get("memory.shape_func");
auto attrs = make_object<ShapeFuncAttrs>();
attrs->is_input = is_input;
return Call(op, {func, inputs, outputs}, Attrs(attrs), {});
});

static void FlattenTupleTypeAux(const Type& type, std::vector<TensorType>* out) {
if (auto tt = type.as<TensorTypeNode>()) {
out->push_back(GetRef<TensorType>(tt));
@@ -356,72 +299,5 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.ToTupleType")
return ToTupleType(t, std::vector<Expr>(array.begin(), array.end()));
});

bool ShapeFuncRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 4u);
auto shape_func_attrs = attrs.as<ShapeFuncAttrs>();
CHECK(shape_func_attrs != nullptr) << "Internal compiler error";

auto func_type = types[0].as<FuncTypeNode>();
CHECK(func_type != nullptr);

auto tuple = TupleType(func_type->arg_types);
auto in_types = FlattenTupleType(tuple);
auto out_types = FlattenTupleType(func_type->ret_type);
Array<Integer> is_input;
for (size_t i = 0; i < func_type->arg_types.size(); ++i) {
auto const& aty = func_type->arg_types[i];
size_t num_types = 1;
if (aty.as<TupleTypeNode>()) {
num_types = FlattenTupleType(aty).size();
}
for (size_t j = 0; j < num_types; ++j) {
is_input.push_back(shape_func_attrs->is_input[i]);
}
}

Array<Type> shape_func_ins, shape_func_outs;
for (size_t i = 0; i < in_types.size(); i++) {
auto in_type = in_types[i];

if (is_input[i]) {
shape_func_ins.push_back(in_type);
} else {
auto shape = RankShape(in_type->shape);
shape_func_ins.push_back(TensorType(shape, DataType::Int(64)));
}
}

for (auto out_type : out_types) {
auto rank_shape = RankShape(out_type->shape);
shape_func_outs.push_back(TensorType(rank_shape, DataType::Int(64)));
}

auto input_type = TupleType(shape_func_ins);
auto output_type = TupleType(shape_func_outs);

reporter->Assign(types[1], input_type);
reporter->Assign(types[2], output_type);
reporter->Assign(types[3], TupleType::Empty());

return true;
}

RELAY_REGISTER_OP("memory.shape_func")
.describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
.add_type_rel("ShapeFuncRel", ShapeFuncRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_dtype) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});

} // namespace relay
} // namespace tvm