From 6a43034dce9ed075e892621665ecc8cb9869c7ea Mon Sep 17 00:00:00 2001
From: Jared Roesch
Date: Mon, 21 Oct 2019 16:57:07 -0700
Subject: [PATCH] Fix

---
 src/relay/op/memory/memory.cc           | 275 +++++++++++-------------
 tests/python/relay/test_memory_alloc.py |  16 ++
 2 files changed, 143 insertions(+), 148 deletions(-)

diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index 7bebd4dc4395c..e3eb5d1744aea 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -24,14 +24,14 @@
  * \brief Operators for manifest shape-aware memory allocation in Relay.
  */
 
+#include <topi/elemwise.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/op.h>
 #include <tvm/relay/op_attr_types.h>
-#include <topi/elemwise.h>
 
-#include "../type_relations.h"
 #include "../../pass/alter_op_layout.h"
+#include "../type_relations.h"
 
 namespace tvm {
 namespace relay {
@@ -43,17 +43,15 @@ TVM_REGISTER_NODE_TYPE(ShapeFuncAttrs);
 // We should consider a better solution, i.e the type relation
 // being able to see the arguments as well?
 TVM_REGISTER_API("relay.op.memory._make.alloc_storage")
-.set_body_typed<Expr(Expr, Expr, DataType)>([](Expr size, Expr alignment, DataType dtype) {
-  auto attrs = make_node<AllocTensorAttrs>();
-  attrs->dtype = dtype;
-  static const Op& op = Op::Get("memory.alloc_storage");
-  return CallNode::make(op, {size, alignment}, Attrs(attrs), {});
-});
-
-bool AllocStorageRel(const Array<Type>& types,
-                     int num_inputs,
-                     const Attrs& attrs,
-                     const TypeReporter& reporter) {
+    .set_body_typed<Expr(Expr, Expr, DataType)>([](Expr size, Expr alignment, DataType dtype) {
+      auto attrs = make_node<AllocTensorAttrs>();
+      attrs->dtype = dtype;
+      static const Op& op = Op::Get("memory.alloc_storage");
+      return CallNode::make(op, {size, alignment}, Attrs(attrs), {});
+    });
+
+bool AllocStorageRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                     const TypeReporter& reporter) {
   CHECK_EQ(types.size(), 3u);
   auto size_type = types[0];
   auto tensor_type = size_type.as<TensorTypeNode>();
@@ -74,36 +72,35 @@ bool AllocStorageRel(const Array<Type>& types,
 }
 
 RELAY_REGISTER_OP("memory.alloc_storage")
-.describe(R"code(Explicitly allocate storage to be used by tensors.)code"
-TVM_ADD_FILELINE)
-.set_num_inputs(2)
-.add_argument("size", "Tensor", "The size of the storage to allocate.")
-.add_argument("alignment", "Tensor", "The alignment of the storage.")
-.add_type_rel("AllocStorage", AllocStorageRel)
-.set_support_level(10)
-.set_attr<TOpPattern>("TOpPattern", kOpaque)
-.set_attr<TOpIsStateful>("TOpIsStateful", false)
-.set_attr<TNonComputational>("TNonComputational", true)
-.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-.set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
-                         return {topi::identity(inputs[0])};
-                       });
+    .describe(R"code(Explicitly allocate storage to be used by tensors.)code" TVM_ADD_FILELINE)
+    .set_num_inputs(2)
+    .add_argument("size", "Tensor", "The size of the storage to allocate.")
+    .add_argument("alignment", "Tensor", "The alignment of the storage.")
+    .add_type_rel("AllocStorage", AllocStorageRel)
+    .set_support_level(10)
+    .set_attr<TOpPattern>("TOpPattern", kOpaque)
+    .set_attr<TOpIsStateful>("TOpIsStateful", false)
+    .set_attr<TNonComputational>("TNonComputational", true)
+    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
+    .set_attr<FTVMCompute>("FTVMCompute",
+                           [](const Attrs& attrs, const Array<Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                             return {topi::identity(inputs[0])};
+                           });
 
 TVM_REGISTER_API("relay.op.memory._make.alloc_tensor")
-.set_body_typed<Expr(Expr, tvm::relay::Expr, DataType, Array<IndexExpr> assert_shape)>(
-    [](Expr storage, tvm::relay::Expr shape, DataType dtype, Array<IndexExpr> assert_shape) {
-    auto attrs = make_node<AllocTensorAttrs>();
-    attrs->dtype = dtype;
-    if (assert_shape.defined()) {
-      attrs->assert_shape = assert_shape;
-    } else {
-      attrs->const_shape = Downcast<Constant>(shape);
-    }
-    static const Op& op = Op::Get("memory.alloc_tensor");
-    return CallNode::make(op, {storage, shape}, Attrs(attrs), {});
-});
+    .set_body_typed<Expr(Expr, tvm::relay::Expr, DataType, Array<IndexExpr> assert_shape)>(
+        [](Expr storage, tvm::relay::Expr shape, DataType dtype, Array<IndexExpr> assert_shape) {
+          auto attrs = make_node<AllocTensorAttrs>();
+          attrs->dtype = dtype;
+          if (assert_shape.defined()) {
+            attrs->assert_shape = assert_shape;
+          } else {
+            attrs->const_shape = Downcast<Constant>(shape);
+          }
+          static const Op& op = Op::Get("memory.alloc_tensor");
+          return CallNode::make(op, {storage, shape}, Attrs(attrs), {});
+        });
 
 std::vector<int64_t> FromConstShape(Constant konst) {
   // TODO: convert from NDArray.
@@ -111,26 +108,21 @@ std::vector<int64_t> FromConstShape(Constant konst) {
   std::vector<int64_t> raw_shape;
   DLTensor tensor = shape.ToDLPack()->dl_tensor;
   CHECK_EQ(tensor.ndim, 1u);
-  CHECK_EQ(tensor.dtype.code, 0U)
-    << "found " << tensor.dtype.code;
+  CHECK_EQ(tensor.dtype.code, 0U) << "found " << tensor.dtype.code;
 
-  CHECK_LE(tensor.dtype.bits, 64)
-    << "found " << (int)tensor.dtype.bits;
-  int64_t* int_ptr = (int64_t*)tensor.data;
+  CHECK_LE(tensor.dtype.bits, 64) << "found " << (int)tensor.dtype.bits;
+  int64_t* int_ptr = (int64_t*)tensor.data;
   for (auto i = 0; i < tensor.shape[0]; i++) {
     raw_shape.push_back(int_ptr[i]);
   }
   return raw_shape;
 }
 
-bool AllocTensorRel(const Array<Type>& types,
-                    int num_inputs,
-                    const Attrs& attrs,
+bool AllocTensorRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
   CHECK_EQ(types.size(), 3u);
   auto alloc_attrs = attrs.as<AllocTensorAttrs>();
-  CHECK(alloc_attrs != nullptr)
-    << "must be alloc_tensor attributes";
+  CHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes";
   // First argument should be storage.
   auto mod = reporter->GetModule();
   CHECK(mod.defined());
@@ -139,8 +131,7 @@ bool AllocTensorRel(const Array<Type>& types,
   reporter->Assign(types[0], storage);
   // Second argument should be shape tensor.
   auto tt = types[1].as<TensorTypeNode>();
-  CHECK(tt != nullptr)
-    << "must be tensor type";
+  CHECK(tt != nullptr) << "must be tensor type";
   auto rank = tt->shape[0].as<tvm::IntImm>();
   CHECK(rank != nullptr);
   auto dims = rank->value;
@@ -158,7 +149,7 @@ bool AllocTensorRel(const Array<Type>& types,
     return true;
   } else {
     CHECK(alloc_attrs->assert_shape.defined())
-      << "the assert_shape must be set when const_shape is not";
+        << "the assert_shape must be set when const_shape is not";
     auto alloc_type = TensorTypeNode::make(alloc_attrs->assert_shape, alloc_attrs->dtype);
     reporter->Assign(types[2], alloc_type);
     return true;
@@ -168,43 +159,38 @@
 }
 
 RELAY_REGISTER_OP("memory.alloc_tensor")
-.describe(R"code(Explicitly allocate storage to be used by tensors.)code"
-TVM_ADD_FILELINE)
-.set_num_inputs(2)
-.add_argument("storage", "Storage", "The storage to allocate from.")
-.add_argument("shape", "Tensor", "The shape of the tensor to allocate.")
-.add_type_rel("AllocTensor", AllocTensorRel)
-.set_support_level(10)
-.set_attr<TOpPattern>("TOpPattern", kOpaque)
-.set_attr<TOpIsStateful>("TOpIsStateful", false)
-.set_attr<TNonComputational>("TNonComputational", true)
-.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-.set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
-                         return {topi::identity(inputs[0])};
-                       });
-
-bool InvokeTVMOPRel(const Array<Type>& types,
-                    int num_inputs,
-                    const Attrs& attrs,
-                    const TypeReporter& reporter) {
+    .describe(R"code(Explicitly allocate storage to be used by tensors.)code" TVM_ADD_FILELINE)
+    .set_num_inputs(2)
+    .add_argument("storage", "Storage", "The storage to allocate from.")
+    .add_argument("shape", "Tensor", "The shape of the tensor to allocate.")
+    .add_type_rel("AllocTensor", AllocTensorRel)
+    .set_support_level(10)
+    .set_attr<TOpPattern>("TOpPattern", kOpaque)
+    .set_attr<TOpIsStateful>("TOpIsStateful", false)
+    .set_attr<TNonComputational>("TNonComputational", true)
+    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
+    .set_attr<FTVMCompute>("FTVMCompute",
+                           [](const Attrs& attrs, const Array<Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                             return {topi::identity(inputs[0])};
+                           });
+
+bool InvokeTVMOPRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                    const TypeReporter& reporter) {
   CHECK_EQ(types.size(), 4u);
   auto func_type = types[0].as<FuncTypeNode>();
-  CHECK(func_type != nullptr) <<
-    "inupt must be opeartor with known type";
+  CHECK(func_type != nullptr) << "input must be an operator with known type";
   auto input_type = types[1].as<TupleTypeNode>();
   auto output_type = types[2].as<TupleTypeNode>();
   CHECK(input_type != nullptr)
-    << "internal invariant violated: invoke_tvm_op inputs must be a tuple";
+      << "internal invariant violated: invoke_tvm_op inputs must be a tuple";
   CHECK(output_type != nullptr)
-    << "internal invariant violated: invoke_tvm_op outputs must be a tuple";
+      << "internal invariant violated: invoke_tvm_op outputs must be a tuple";
   Type ex_output;
   if (func_type->ret_type.as<TensorTypeNode>()) {
     ex_output = TupleTypeNode::make({func_type->ret_type});
   } else {
-    CHECK(func_type->ret_type.as<TupleTypeNode>())
-      << "should be tuple type";
+    CHECK(func_type->ret_type.as<TupleTypeNode>()) << "should be tuple type";
     ex_output = func_type->ret_type;
   }
   auto ex_input = TupleTypeNode::make(func_type->arg_types);
@@ -215,28 +201,25 @@ bool InvokeTVMOPRel(const Array<Type>& types,
 }
 
 RELAY_REGISTER_OP("memory.invoke_tvm_op")
-.describe(R"code(Invoke an operation compiled by TVM.)code"
-TVM_ADD_FILELINE)
-.set_num_inputs(3)
-.add_argument("op", "Function", "The operation to call")
-.add_argument("ins", "Tuple", "The input tensors.")
tensors.") -.add_argument("outs", "Tuple", "The output tensors.") -.add_type_rel("InvokeTVMOP", InvokeTVMOPRel) -.set_support_level(10) -.set_attr("TOpPattern", kOpaque) -.set_attr("TOpIsStateful", false) -.set_attr("TNonComputational", true) -.set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) -.set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { - return {topi::identity(inputs[0])}; - }); - -bool KillRel(const Array& types, - int num_inputs, - const Attrs& attrs, - const TypeReporter& reporter) { + .describe(R"code(Invoke an operation compiled by TVM.)code" TVM_ADD_FILELINE) + .set_num_inputs(3) + .add_argument("op", "Function", "The operation to call") + .add_argument("ins", "Tuple", "The input tensors.") + .add_argument("outs", "Tuple", "The output tensors.") + .add_type_rel("InvokeTVMOP", InvokeTVMOPRel) + .set_support_level(10) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("TNonComputational", true) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { + return {topi::identity(inputs[0])}; + }); + +bool KillRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { CHECK_EQ(types.size(), 2u); // TODO: should only support tensors. reporter->Assign(types[1], TupleTypeNode::make({})); @@ -244,29 +227,29 @@ bool KillRel(const Array& types, } RELAY_REGISTER_OP("memory.kill") -.describe(R"code(Mark a tensor for release to the allocator.)code" -TVM_ADD_FILELINE) -.set_num_inputs(3) -.add_argument("to_free", "Tensor", "The tensor to free.") -.add_type_rel("Kill", KillRel) -.set_support_level(10) -.set_attr("TOpPattern", kOpaque) -.set_attr("TOpIsStateful", false) -.set_attr("TNonComputational", true) -.set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) -.set_attr("FTVMCompute", - [](const Attrs& attrs, const Array& inputs, - const Type& out_dtype, const Target& target) -> Array { - return {topi::identity(inputs[0])}; - }); + .describe(R"code(Mark a tensor for release to the allocator.)code" TVM_ADD_FILELINE) + .set_num_inputs(3) + .add_argument("to_free", "Tensor", "The tensor to free.") + .add_type_rel("Kill", KillRel) + .set_support_level(10) + .set_attr("TOpPattern", kOpaque) + .set_attr("TOpIsStateful", false) + .set_attr("TNonComputational", true) + .set_attr("FInferCorrectLayout", ElemwiseArbitraryLayout) + .set_attr("FTVMCompute", + [](const Attrs& attrs, const Array& inputs, + const Type& out_dtype, const Target& target) -> Array { + return {topi::identity(inputs[0])}; + }); TVM_REGISTER_API("relay.op.memory._make.shape_func") -.set_body_typed([](Expr func, Expr inputs, Expr outputs, bool dependent) { - static const Op& op = Op::Get("memory.shape_func"); - auto attrs = make_node(); - attrs->dependent = dependent; - return CallNode::make(op, {func, inputs, outputs}, Attrs(attrs), {}); -}); + .set_body_typed([](Expr func, Expr inputs, Expr outputs, + bool dependent) { + static const Op& op = Op::Get("memory.shape_func"); + auto attrs = make_node(); + attrs->dependent = dependent; + return CallNode::make(op, {func, inputs, outputs}, Attrs(attrs), {}); + }); static void FlattenTypeAux(const Type& type, std::vector& out) { if (auto tt = type.as()) { @@ -291,14 +274,11 @@ Expr PackByType(const Type& t, const Array& exprs) { return Expr(); } -bool ShapeFuncRel(const Array& types, - int num_inputs, - 
-                  const Attrs& attrs,
-                  const TypeReporter& reporter) {
+bool ShapeFuncRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
+                  const TypeReporter& reporter) {
   CHECK_EQ(types.size(), 4u);
   auto shape_func_attrs = attrs.as<ShapeFuncAttrs>();
-  CHECK(shape_func_attrs != nullptr)
-    << "Internal compiler error";
+  CHECK(shape_func_attrs != nullptr) << "Internal compiler error";
 
   auto func_type = types[0].as<FuncTypeNode>();
   // TODO: CHECK FUNC TYPE
@@ -325,7 +305,7 @@ bool ShapeFuncRel(const Array<Type>& types,
     if (in_rank == 0) {
       shape = {};
     } else {
-      shape = { in_rank };
+      shape = {in_rank};
     }
     shape_func_ins.push_back(TensorTypeNode::make(shape, Int(64)));
   }
@@ -335,9 +315,9 @@
     auto out_rank = out_type->shape.size();
     if (out_rank == 1) {
       // out_shapes.push_back({});
-      out_shapes.push_back({ tvm::Integer(out_rank) });
+      out_shapes.push_back({tvm::Integer(out_rank)});
     } else {
-      out_shapes.push_back({ tvm::Integer(out_rank) });
+      out_shapes.push_back({tvm::Integer(out_rank)});
    }
   }
 
@@ -356,21 +336,20 @@ bool ShapeFuncRel(const Array<Type>& types,
 }
 
 RELAY_REGISTER_OP("memory.shape_func")
-.describe(R"code(Get the shape of a tensor.)code"
-TVM_ADD_FILELINE)
-.set_num_inputs(3)
-.add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
-.add_type_rel("ShapeFuncRel", ShapeFuncRel)
-.set_support_level(10)
-.set_attr<TOpPattern>("TOpPattern", kOpaque)
-.set_attr<TOpIsStateful>("TOpIsStateful", false)
-.set_attr<TNonComputational>("TNonComputational", true)
-.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
-.set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
-                         return {topi::identity(inputs[0])};
-                       });
+    .describe(R"code(Get the shape of a tensor.)code" TVM_ADD_FILELINE)
+    .set_num_inputs(3)
+    .add_argument("tensor", "Tensor", "The tensor to retrieve the shape for.")
+    .add_type_rel("ShapeFuncRel", ShapeFuncRel)
+    .set_support_level(10)
+    .set_attr<TOpPattern>("TOpPattern", kOpaque)
+    .set_attr<TOpIsStateful>("TOpIsStateful", false)
+    .set_attr<TNonComputational>("TNonComputational", true)
+    .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
+    .set_attr<FTVMCompute>("FTVMCompute",
+                           [](const Attrs& attrs, const Array<Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                             return {topi::identity(inputs[0])};
+                           });
 
 }  // namespace relay
 }  // namespace tvm
diff --git a/tests/python/relay/test_memory_alloc.py b/tests/python/relay/test_memory_alloc.py
index 65d9d4b0b79dd..1f1a7f5f57c48 100644
--- a/tests/python/relay/test_memory_alloc.py
+++ b/tests/python/relay/test_memory_alloc.py
@@ -1,3 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
 import tvm
 import numpy as np
 from tvm import relay
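
Note for reviewers (not part of the patch): the easiest way to exercise these ops end to end is through the Relay VM, which is the executor path that manifests allocation via memory.alloc_storage / memory.alloc_tensor / memory.invoke_tvm_op. Below is a minimal sketch of such a smoke check; the test name and shapes are illustrative, and it assumes the TVM 0.6-era Python API (relay.Module, relay.create_executor).

import numpy as np
import tvm
from tvm import relay


def test_add_via_vm():
    # Hypothetical smoke test: build a trivial add, evaluate it on the VM so
    # allocation goes through the memory.* ops, and compare against NumPy.
    x = relay.var("x", shape=(2, 2), dtype="float32")
    y = relay.var("y", shape=(2, 2), dtype="float32")
    mod = relay.Module()
    mod["main"] = relay.Function([x, y], relay.add(x, y))

    a = np.random.rand(2, 2).astype("float32")
    b = np.random.rand(2, 2).astype("float32")

    ex = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(), target="llvm")
    result = ex.evaluate(mod["main"])(a, b)
    np.testing.assert_allclose(result.asnumpy(), a + b)


if __name__ == "__main__":
    test_add_via_vm()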