
Commit 6a43034: Fix
jroesch committed Oct 21, 2019
1 parent 4354e06, commit 6a43034
Showing 2 changed files with 143 additions and 148 deletions.
275 changes: 127 additions & 148 deletions src/relay/op/memory/memory.cc
@@ -24,14 +24,14 @@
* \brief Operators for manifest shape-aware memory allocation in Relay.
*/

#include <topi/elemwise.h>
#include <tvm/relay/attrs/memory.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>

#include "../../pass/alter_op_layout.h"
#include "../type_relations.h"

namespace tvm {
namespace relay {
@@ -43,17 +43,15 @@ TVM_REGISTER_NODE_TYPE(ShapeFuncAttrs);
// We should consider a better solution, i.e the type relation
// being able to see the arguments as well?
TVM_REGISTER_API("relay.op.memory._make.alloc_storage")
    .set_body_typed<Expr(Expr, Expr, DataType)>([](Expr size, Expr alignment, DataType dtype) {
      auto attrs = make_node<AllocTensorAttrs>();
      attrs->dtype = dtype;
      static const Op& op = Op::Get("memory.alloc_storage");
      return CallNode::make(op, {size, alignment}, Attrs(attrs), {});
    });

bool AllocStorageRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 3u);
  auto size_type = types[0];
  auto tensor_type = size_type.as<TensorTypeNode>();
@@ -74,63 +72,57 @@ bool AllocStorageRel(const Array<Type>& types,
}

RELAY_REGISTER_OP("memory.alloc_storage")
.describe(R"code(Explicitly allocate storage to be used by tensors.)code"
TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("size", "Tensor", "The size of the storage to allocate.")
.add_argument("alignment", "Tensor", "The alignment of the storage.")
.add_type_rel("AllocStorage", AllocStorageRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});
.describe(R"code(Explicitly allocate storage to be used by tensors.)code" TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("size", "Tensor", "The size of the storage to allocate.")
.add_argument("alignment", "Tensor", "The alignment of the storage.")
.add_type_rel("AllocStorage", AllocStorageRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});
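
// Sketch, not part of this commit: the TVM_REGISTER_API entry above exposes
// alloc_storage construction through the global packed-function registry, so
// C++ code can drive it the same way the Python bindings do. Hypothetical
// helper name; assumes <tvm/runtime/registry.h> and the 0.6-era APIs already
// used in this file.
Expr MakeAllocStorage(Expr size, Expr alignment, DataType dtype) {
  const runtime::PackedFunc* f =
      runtime::Registry::Get("relay.op.memory._make.alloc_storage");
  CHECK(f != nullptr) << "relay.op.memory._make.alloc_storage not registered";
  // `size` and `alignment` are scalar Relay expressions, per AllocStorageRel.
  return (*f)(size, alignment, dtype);
}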

TVM_REGISTER_API("relay.op.memory._make.alloc_tensor")
    .set_body_typed<Expr(Expr, Expr, DataType, Array<IndexExpr> assert_shape)>(
        [](Expr storage, tvm::relay::Expr shape, DataType dtype, Array<IndexExpr> assert_shape) {
          auto attrs = make_node<AllocTensorAttrs>();
          attrs->dtype = dtype;
          if (assert_shape.defined()) {
            attrs->assert_shape = assert_shape;
          } else {
            attrs->const_shape = Downcast<Constant>(shape);
          }
          static const Op& op = Op::Get("memory.alloc_tensor");
          return CallNode::make(op, {storage, shape}, Attrs(attrs), {});
        });
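
// Sketch, not part of this commit: the assert_shape.defined() branch above
// gives _make.alloc_tensor two modes. Hypothetical driver for the static
// mode; assumes (as the Python caller's None does) that a null FFI argument
// arrives as an undefined Array. Int(64) is just an example dtype.
Expr MakeStaticAllocTensor(Expr storage, Constant shape_const) {
  const runtime::PackedFunc* f =
      runtime::Registry::Get("relay.op.memory._make.alloc_tensor");
  CHECK(f != nullptr) << "relay.op.memory._make.alloc_tensor not registered";
  // `shape_const` must be a rank-1 int64 Constant; the null assert_shape
  // routes into the const_shape branch above.
  return (*f)(storage, shape_const, Int(64), nullptr);
}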

std::vector<int64_t> FromConstShape(Constant konst) {
  // TODO: convert from NDArray.
  runtime::NDArray shape = konst->data;
  std::vector<int64_t> raw_shape;
  DLTensor tensor = shape.ToDLPack()->dl_tensor;
  CHECK_EQ(tensor.ndim, 1u);
  CHECK_EQ(tensor.dtype.code, 0U) << "found " << tensor.dtype.code;

  CHECK_LE(tensor.dtype.bits, 64) << "found " << (int)tensor.dtype.bits;
  int64_t* int_ptr = (int64_t*)tensor.data;
  for (auto i = 0; i < tensor.shape[0]; i++) {
    raw_shape.push_back(int_ptr[i]);
  }
  return raw_shape;
}
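
// Sketch, not part of this commit: FromConstShape accepts exactly rank-1
// integer tensors of at most 64 bits. Illustrative values on a hand-rolled
// DLTensor (kDLInt is DLPack dtype code 0, which the CHECK above compares
// against).
std::vector<int64_t> DemoFromConstShapeContract() {
  int64_t values[2] = {4, 8};  // the shape entries themselves
  int64_t extent[1] = {2};     // one dimension, of length 2
  DLTensor t{};
  t.data = values;
  t.ndim = 1;                  // passes CHECK_EQ(tensor.ndim, 1u)
  t.shape = extent;
  t.dtype = {kDLInt, 64, 1};   // code 0, 64 bits: passes both dtype CHECKs
  std::vector<int64_t> raw_shape;
  for (int64_t i = 0; i < t.shape[0]; i++) {
    raw_shape.push_back(static_cast<int64_t*>(t.data)[i]);
  }
  return raw_shape;  // {4, 8}, read the same way as the loop above
}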

bool AllocTensorRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 3u);
  auto alloc_attrs = attrs.as<AllocTensorAttrs>();
  CHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes";
  // First argument should be storage.
  auto mod = reporter->GetModule();
  CHECK(mod.defined());
@@ -139,8 +131,7 @@ bool AllocTensorRel(const Array<Type>& types,
  reporter->Assign(types[0], storage);
  // Second argument should be shape tensor.
  auto tt = types[1].as<TensorTypeNode>();
  CHECK(tt != nullptr) << "must be tensor type";
  auto rank = tt->shape[0].as<tvm::IntImm>();
  CHECK(rank != nullptr);
  auto dims = rank->value;
@@ -158,7 +149,7 @@ bool AllocTensorRel(const Array<Type>& types,
    return true;
  } else {
    CHECK(alloc_attrs->assert_shape.defined())
        << "the assert_shape must be set when const_shape is not";
    auto alloc_type = TensorTypeNode::make(alloc_attrs->assert_shape, alloc_attrs->dtype);
    reporter->Assign(types[2], alloc_type);
    return true;
@@ -168,43 +159,38 @@ bool AllocTensorRel(const Array<Type>& types,
}
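
// Sketch, not part of this commit: when const_shape is absent the output type
// comes straight from assert_shape. Hypothetical values; Float(32) stands in
// for whatever dtype the attrs carry.
//   assert_shape = (1, 4), dtype = float32
//   ==> types[2] is assigned TensorType((1, 4), float32)
Array<IndexExpr> demo_assert_shape = {tvm::Integer(1), tvm::Integer(4)};
TensorType demo_alloc_type = TensorTypeNode::make(demo_assert_shape, Float(32));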

RELAY_REGISTER_OP("memory.alloc_tensor")
.describe(R"code(Explicitly allocate storage to be used by tensors.)code"
TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("storage", "Storage", "The storage to allocate from.")
.add_argument("shape", "Tensor", "The shape of the tensor to allocate.")
.add_type_rel("AllocTensor", AllocTensorRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

bool InvokeTVMOPRel(const Array<Type>& types,
int num_inputs,
const Attrs& attrs,
const TypeReporter& reporter) {
.describe(R"code(Explicitly allocate storage to be used by tensors.)code" TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("storage", "Storage", "The storage to allocate from.")
.add_argument("shape", "Tensor", "The shape of the tensor to allocate.")
.add_type_rel("AllocTensor", AllocTensorRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

bool InvokeTVMOPRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 4u);
  auto func_type = types[0].as<FuncTypeNode>();
  CHECK(func_type != nullptr) << "input must be operator with known type";
  auto input_type = types[1].as<TupleTypeNode>();
  auto output_type = types[2].as<TupleTypeNode>();
  CHECK(input_type != nullptr)
      << "internal invariant violated: invoke_tvm_op inputs must be a tuple";
  CHECK(output_type != nullptr)
      << "internal invariant violated: invoke_tvm_op outputs must be a tuple";
  Type ex_output;
  if (func_type->ret_type.as<TensorTypeNode>()) {
    ex_output = TupleTypeNode::make({func_type->ret_type});
  } else {
    CHECK(func_type->ret_type.as<TupleTypeNode>()) << "should be tuple type";
    ex_output = func_type->ret_type;
  }
  auto ex_input = TupleTypeNode::make(func_type->arg_types);
@@ -215,58 +201,55 @@ bool InvokeTVMOPRel(const Array<Type>& types,
}
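
// Sketch, not part of this commit: the branch above normalizes a kernel's
// non-tuple return type into a one-element tuple before checking. The same
// rule as a stand-alone helper, using only types this file already includes.
Type NormalizeRet(const Type& ret) {
  if (ret.as<TensorTypeNode>()) {
    return TupleTypeNode::make({ret});  // fn(...) -> Tensor checks as (Tensor,)
  }
  CHECK(ret.as<TupleTypeNode>()) << "should be tuple type";
  return ret;  // already a tuple of outputs
}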

RELAY_REGISTER_OP("memory.invoke_tvm_op")
.describe(R"code(Invoke an operation compiled by TVM.)code"
TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("op", "Function", "The operation to call")
.add_argument("ins", "Tuple", "The input tensors.")
.add_argument("outs", "Tuple", "The output tensors.")
.add_type_rel("InvokeTVMOP", InvokeTVMOPRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

bool KillRel(const Array<Type>& types,
int num_inputs,
const Attrs& attrs,
const TypeReporter& reporter) {
.describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("op", "Function", "The operation to call")
.add_argument("ins", "Tuple", "The input tensors.")
.add_argument("outs", "Tuple", "The output tensors.")
.add_type_rel("InvokeTVMOP", InvokeTVMOPRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 2u);
  // TODO: should only support tensors.
  reporter->Assign(types[1], TupleTypeNode::make({}));
  return true;
}

RELAY_REGISTER_OP("memory.kill")
.describe(R"code(Mark a tensor for release to the allocator.)code"
TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("to_free", "Tensor", "The tensor to free.")
.add_type_rel("Kill", KillRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});
.describe(R"code(Mark a tensor for release to the allocator.)code" TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("to_free", "Tensor", "The tensor to free.")
.add_type_rel("Kill", KillRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

TVM_REGISTER_API("relay.op.memory._make.shape_func")
    .set_body_typed<Expr(Expr, Expr, Expr, bool)>([](Expr func, Expr inputs, Expr outputs,
                                                     bool dependent) {
      static const Op& op = Op::Get("memory.shape_func");
      auto attrs = make_node<ShapeFuncAttrs>();
      attrs->dependent = dependent;
      return CallNode::make(op, {func, inputs, outputs}, Attrs(attrs), {});
    });
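
// Sketch, not part of this commit: the same registry pattern as the other
// _make helpers. Hypothetical wrapper; `dependent` presumably marks shape
// functions that must see the input data itself rather than just the input
// shapes.
Expr MakeShapeFunc(Expr func, Expr inputs, Expr outputs, bool dependent) {
  const runtime::PackedFunc* f =
      runtime::Registry::Get("relay.op.memory._make.shape_func");
  CHECK(f != nullptr) << "relay.op.memory._make.shape_func not registered";
  return (*f)(func, inputs, outputs, dependent);
}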

static void FlattenTypeAux(const Type& type, std::vector<TensorType>& out) {
  if (auto tt = type.as<TensorTypeNode>()) {
@@ -291,14 +274,11 @@ Expr PackByType(const Type& t, const Array<Expr>& exprs) {
  return Expr();
}

bool ShapeFuncRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                  const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 4u);
  auto shape_func_attrs = attrs.as<ShapeFuncAttrs>();
  CHECK(shape_func_attrs != nullptr) << "Internal compiler error";

  auto func_type = types[0].as<FuncTypeNode>();
  // TODO: CHECK FUNC TYPE
@@ -325,7 +305,7 @@ bool ShapeFuncRel(const Array<Type>& types,
    if (in_rank == 0) {
      shape = {};
    } else {
      shape = {in_rank};
    }
    shape_func_ins.push_back(TensorTypeNode::make(shape, Int(64)));
  }
@@ -335,9 +315,9 @@ bool ShapeFuncRel(const Array<Type>& types,
    auto out_rank = out_type->shape.size();
    if (out_rank == 1) {
      // out_shapes.push_back({});
      out_shapes.push_back({tvm::Integer(out_rank)});
    } else {
      out_shapes.push_back({tvm::Integer(out_rank)});
    }
  }

@@ -356,21 +336,20 @@ bool ShapeFuncRel(const Array<Type>& types,
}

RELAY_REGISTER_OP("memory.shape_func")
.describe(R"code(Get the shape of a tensor.)code"
TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
.add_type_rel("ShapeFuncRel", ShapeFuncRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});
.describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE)
.set_num_inputs(3)
.add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
.add_type_rel("ShapeFuncRel", ShapeFuncRel)
.set_support_level(10)
.set_attr<TOpPattern>("TOpPattern", kOpaque)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
[](const Attrs& attrs, const Array<Tensor>& inputs,
const Type& out_dtype, const Target& target) -> Array<Tensor> {
return {topi::identity(inputs[0])};
});

} // namespace relay
} // namespace tvm