[Relay][VM] Add AllocTensor instruction and better instruction printer #3306

Merged · 9 commits · Jun 14, 2019
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -209,6 +209,7 @@ add_library(tvm_runtime SHARED ${RUNTIME_SRCS})
if(USE_RELAY_DEBUG)
  message(STATUS "Building Relay in debug mode...")
  set_target_properties(tvm PROPERTIES COMPILE_DEFINITIONS "USE_RELAY_DEBUG")
else()
  set_target_properties(tvm PROPERTIES COMPILE_DEFINITIONS "NDEBUG")
endif(USE_RELAY_DEBUG)
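Defining NDEBUG when USE_RELAY_DEBUG is off matters because NDEBUG is the conventional switch that compiles debug-only logging out of release builds, which is typically what gates DLOG-style macros such as the DLOG(INFO) calls in compiler.cc further down. The snippet below is a minimal, generic illustration of that pattern, not the dmlc/TVM logging macros; MY_DLOG is an invented name.

// Minimal sketch of an NDEBUG-gated debug-log macro (illustrative only).
#include <iostream>

#ifdef NDEBUG
#define MY_DLOG(msg) do { } while (false)          // compiled out in release builds
#else
#define MY_DLOG(msg) (std::cerr << "[debug] " << (msg) << std::endl)
#endif

int main() {
  MY_DLOG("visible only when NDEBUG is not defined");
  return 0;
}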

36 changes: 26 additions & 10 deletions include/tvm/runtime/vm.h
@@ -56,13 +56,14 @@ enum class Opcode {
   InvokeClosure = 3U,
   InvokePacked = 4U,
   AllocTensor = 5U,
-  AllocDatatype = 6U,
-  AllocClosure = 7U,
-  GetField = 8U,
-  If = 9U,
-  Select = 10U,
-  LoadConst = 11U,
-  Goto = 12U
+  AllocTensorReg = 6U,
+  AllocDatatype = 7U,
+  AllocClosure = 8U,
+  GetField = 9U,
+  If = 10U,
+  Select = 11U,
+  LoadConst = 12U,
+  Goto = 13U
 };

 /*! \brief A single virtual machine instruction.
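The new opcode splits tensor allocation into two forms: AllocTensor carries the shape in the instruction itself, while AllocTensorReg reads it from a register at run time. The self-contained sketch below shows how a dispatch loop might branch on the two forms; SketchOpcode, SketchInstr, and Dispatch are simplified stand-ins, not the tvm::runtime implementation.

// Simplified sketch: dispatching the two allocation forms (illustrative types).
#include <cstdint>
#include <iostream>
#include <vector>

enum class SketchOpcode { AllocTensor, AllocTensorReg };

struct SketchInstr {
  SketchOpcode op;
  std::vector<int64_t> shape;  // AllocTensor: shape known at compile time
  int shape_register;          // AllocTensorReg: shape produced at run time
};

void Dispatch(const SketchInstr& instr,
              const std::vector<std::vector<int64_t>>& registers) {
  switch (instr.op) {
    case SketchOpcode::AllocTensor:
      std::cout << "alloc_tensor: " << instr.shape.size() << " static dims\n";
      break;
    case SketchOpcode::AllocTensorReg:
      std::cout << "alloc_tensor_reg: shape read from register "
                << instr.shape_register << ", "
                << registers[instr.shape_register].size() << " dims\n";
      break;
  }
}

int main() {
  std::vector<std::vector<int64_t>> registers = {{4, 4}};
  Dispatch({SketchOpcode::AllocTensor, {2, 3}, -1}, registers);
  Dispatch({SketchOpcode::AllocTensorReg, {}, 0}, registers);
  return 0;
}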
@@ -83,11 +84,19 @@ struct Instruction {
   union {
     struct /* AllocTensor Operands */ {
+      /*! \brief The number of dimensions. */
+      uint32_t ndim;
+      /*! \brief The shape of tensor. */
+      int64_t* shape;
+      /*! \brief The datatype of tensor to be allocated. */
+      DLDataType dtype;
+    } alloc_tensor;
+    struct /* AllocTensorReg Operands */ {
       /*! \brief The register to read the shape out of. */
       RegName shape_register;
       /*! \brief The datatype of tensor to be allocated. */
       DLDataType dtype;
-    } alloc_tensor;
+    } alloc_tensor_reg;
     struct /* InvokeClosure Operands */ {
       /*! \brief The register containing the closure. */
       RegName closure;
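Because the operands live in a union, only the member matching the opcode may be read, so the "better instruction printer" in the PR title has to switch on the opcode before touching alloc_tensor or alloc_tensor_reg. The sketch below shows that pattern with simplified stand-in types (PrintOp, PrintInstr, ToString); it is not the TVM printer itself.

// Sketch of printing union-style operands (stand-in types, not tvm::runtime).
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

enum class PrintOp { AllocTensor, AllocTensorReg };

struct PrintInstr {
  PrintOp op;
  union {
    struct {
      uint32_t ndim;
      const int64_t* shape;
    } alloc_tensor;
    struct {
      int shape_register;
    } alloc_tensor_reg;
  };
};

std::string ToString(const PrintInstr& instr) {
  std::ostringstream os;
  switch (instr.op) {
    case PrintOp::AllocTensor: {
      os << "alloc_tensor [";
      for (uint32_t i = 0; i < instr.alloc_tensor.ndim; ++i) {
        os << (i ? ", " : "") << instr.alloc_tensor.shape[i];
      }
      os << "]";
      break;
    }
    case PrintOp::AllocTensorReg:
      os << "alloc_tensor_reg $" << instr.alloc_tensor_reg.shape_register;
      break;
  }
  return os.str();
}

int main() {
  static const int64_t dims[2] = {2, 3};
  PrintInstr a;
  a.op = PrintOp::AllocTensor;
  a.alloc_tensor = {2, dims};
  PrintInstr b;
  b.op = PrintOp::AllocTensorReg;
  b.alloc_tensor_reg = {7};
  std::cout << ToString(a) << "\n" << ToString(b) << "\n";
  return 0;
}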
@@ -192,13 +201,20 @@
    */
   static Instruction InvokePacked(Index packed_index, Index arity, Index output_size,
                                   const std::vector<RegName>& args);
-  /*! \brief Construct an allocate tensor instruction.
+  /*! \brief Construct an allocate tensor instruction with constant shape.
    * \param shape The shape of the tensor.
    * \param dtype The dtype of the tensor.
    * \param dst The destination register.
    * \return The allocate tensor instruction.
    */
+  static Instruction AllocTensor(std::vector<int64_t> shape, DLDataType dtype, RegName dst);
+  /*! \brief Construct an allocate tensor instruction with register.
+   * \param shape_register The register containing the shape.
+   * \param dtype The dtype of the tensor.
+   * \param dst The destination register.
+   * \return The allocate tensor instruction.
+   */
-  static Instruction AllocTensor(RegName shape_register, DLDataType dtype, RegName dst);
+  static Instruction AllocTensorReg(RegName shape_register, DLDataType dtype, RegName dst);
   /*! \brief Construct an allocate datatype instruction.
    * \param tag The datatype tag.
    * \param num_fields The number of fields for the datatype.
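At the call site, the constant-shape factory takes the literal dimensions while the register form, renamed AllocTensorReg presumably so it does not read like an overload of the constant-shape one, takes the register that will hold the shape at run time. The snippet below only mimics the call shapes: FakeInstruction, the register numbers, and the dimensions are invented for illustration, and the dtype parameter is omitted to keep the sketch short.

// Stand-in mirror of the two factory signatures (not the tvm::runtime class).
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

using RegName = int64_t;  // assumption: registers are identified by integers

struct FakeInstruction {
  std::string text;

  static FakeInstruction AllocTensor(const std::vector<int64_t>& shape, RegName dst) {
    return {"$" + std::to_string(dst) + " = alloc_tensor, " +
            std::to_string(shape.size()) + " static dims"};
  }

  static FakeInstruction AllocTensorReg(RegName shape_register, RegName dst) {
    return {"$" + std::to_string(dst) + " = alloc_tensor_reg, shape in $" +
            std::to_string(shape_register)};
  }
};

int main() {
  // Shape known when compiling: emit the constant-shape form.
  std::cout << FakeInstruction::AllocTensor({1, 28, 28}, /*dst=*/3).text << "\n";
  // Shape only known at run time: emit the register form.
  std::cout << FakeInstruction::AllocTensorReg(/*shape_register=*/2, /*dst=*/3).text << "\n";
  return 0;
}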
75 changes: 11 additions & 64 deletions src/relay/backend/vm/compiler.cc
@@ -103,62 +103,13 @@ struct ConstantPool : ExprVisitor {
     }
   }

-  void AddConstantTensorShape(TensorType expr, NDArray value) {
-    auto it = this->const_tensor_shape_map.find(expr);
-    if (it == this->const_tensor_shape_map.end()) {
-      this->const_tensor_shape_map.insert({expr, std::make_pair(index++, value)});
-    }
-  }
-
   void VisitExpr_(const ConstantNode* const_node) {
     auto konst = GetRef<Constant>(const_node);
     auto it = this->const_map.find(konst);
     if (it == this->const_map.end()) {
       this->const_map.insert({konst, index++});
     }
   }
-
-  NDArray GetTensorConstant(const TensorTypeNode* ttype) {
-    std::vector<int64_t> shapes;
-    for (auto sh : ttype->shape) {
-      shapes.push_back(Downcast<tvm::Integer>(sh)->value);
-    }
-    int64_t s = shapes.size();
-    DLContext cpu_ctx;
-    cpu_ctx.device_type = kDLCPU;
-    cpu_ctx.device_id = 0;
-    auto shape_tensor = NDArray::Empty({s}, Type2TVMType(Int(64)), cpu_ctx);
-    int64_t* dims = static_cast<int64_t*>(shape_tensor->data);
-    for (size_t i = 0; i < shapes.size(); ++i) {
-      dims[i] = shapes[i];
-    }
-    return shape_tensor;
-  }
-
-  void VisitExpr_(const CallNode* call_node) {
-    for (auto arg : call_node->args) {
-      this->VisitExpr(arg);
-    }
-
-    Expr op = call_node->op;
-    auto func_node = op.as<FunctionNode>();
-    if (func_node) {
-      auto ret_type = call_node->checked_type();
-      if (const TensorTypeNode* ttype = ret_type.as<TensorTypeNode>()) {
-        auto shape = GetTensorConstant(ttype);
-        auto tensor_type = GetRef<TensorType>(ttype);
-        AddConstantTensorShape(tensor_type, shape);
-      } else if (const TupleTypeNode* ttype = ret_type.as<TupleTypeNode>()) {
-        for (size_t i = 0; i < ttype->fields.size(); ++i) {
-          auto f = ttype->fields[i];
-          auto f_type = f.as<TensorTypeNode>();
-          auto shape = GetTensorConstant(f_type);
-          auto tensor_type = GetRef<TensorType>(f_type);
-          AddConstantTensorShape(tensor_type, shape);
-        }
-      }
-    }
-  }
 };

 std::tuple<ConstMap, ConstTensorShapeMap> LayoutConstantPool(const Module& module) {
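With constant shapes now carried directly by AllocTensor, the ConstantPool no longer needs to materialize shape tensors, which is why AddConstantTensorShape, GetTensorConstant, and the CallNode visitor are deleted; only the memoization of ordinary constants survives. The fragment below restates that surviving logic with plain standard-library types (ints stand in for ConstantNode objects), just to make the slot-assignment pattern explicit.

// Plain-C++ restatement of the surviving constant-pool memoization.
#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

int main() {
  std::vector<int> constants = {7, 3, 7, 9};  // stand-ins for visited ConstantNodes
  std::map<int, std::size_t> const_map;       // constant -> slot index in the pool
  std::size_t index = 0;
  for (int c : constants) {
    if (const_map.find(c) == const_map.end()) {
      const_map.insert({c, index++});  // first sighting gets the next slot
    }
  }
  for (const auto& kv : const_map) {
    std::cout << "constant " << kv.first << " -> slot " << kv.second << "\n";
  }
  return 0;
}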
@@ -206,6 +157,7 @@ struct VMCompiler : ExprFunctor<void(const Expr& expr)> {
     switch (instr.op) {
       case Opcode::AllocDatatype:
       case Opcode::AllocTensor:
+      case Opcode::AllocTensorReg:
       case Opcode::GetField:
       case Opcode::LoadConst:
       case Opcode::Select:
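Given how last_register is consumed in the Let visitor below, this switch appears to enumerate the opcodes that define a destination register, so the new AllocTensorReg case keeps that bookkeeping consistent. The helper below is a hypothetical restatement of the same idea with a stand-in enum, not code from compiler.cc.

// Hypothetical predicate form of the switch above (stand-in enum, not TVM code).
enum class TrackedOp {
  AllocDatatype, AllocTensor, AllocTensorReg, GetField, LoadConst, Select, If, Goto
};

bool WritesDestinationRegister(TrackedOp op) {
  switch (op) {
    case TrackedOp::AllocDatatype:
    case TrackedOp::AllocTensor:
    case TrackedOp::AllocTensorReg:  // newly added: it also produces a value
    case TrackedOp::GetField:
    case TrackedOp::LoadConst:
    case TrackedOp::Select:
      return true;
    default:                         // e.g. If and Goto only affect control flow
      return false;
  }
}

int main() {
  return WritesDestinationRegister(TrackedOp::AllocTensorReg) ? 0 : 1;
}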
@@ -259,14 +211,14 @@ struct VMCompiler : ExprFunctor<void(const Expr& expr)> {

   void VisitExpr_(const MatchNode* match_node) {
     auto match = GetRef<Match>(match_node);
-    LOG(FATAL) << "translation of match nodes to the VM is "
-               << "currently unsupported" << std::endl;
+    LOG(FATAL) << "translation of match nodes to the VM is"
+               << "currently unsupported";
   }

   void VisitExpr_(const LetNode* let_node) {
-    DLOG(INFO) << let_node->value << std::endl;
+    DLOG(INFO) << let_node->value;
     this->VisitExpr(let_node->value);
-    DLOG(INFO) << this->last_register << std::endl;
+    DLOG(INFO) << this->last_register;
     var_register_map.insert({let_node->var, this->last_register});
     this->VisitExpr(let_node->body);
   }
@@ -327,18 +279,13 @@ struct VMCompiler : ExprFunctor<void(const Expr& expr)> {
   }

   Instruction AllocTensorFromType(const TensorTypeNode* ttype) {
-    DataType dtype = ttype->dtype;
-    TVMType dltype = Type2TVMType(dtype);
-
+    TVMType dltype = Type2TVMType(ttype->dtype);
     auto tensor_type = GetRef<TensorType>(ttype);
-    auto it = this->context->const_tensor_shape_map.find(tensor_type);
-    if (it == this->context->const_tensor_shape_map.end()) {
-      DLOG(INFO) << "Can not find constant shape for " << tensor_type;
-    } else {
-      Emit(Instruction::LoadConst(it->second.first, NewRegister()));
+    std::vector<int64_t> shape;
+    for (auto dim : tensor_type->shape) {
+      shape.push_back(Downcast<tvm::Integer>(dim)->value);
     }
-
-    return Instruction::AllocTensor(last_register, dltype, NewRegister());
+    return Instruction::AllocTensor(shape, dltype, NewRegister());
   }

   void EmitInvokePrimitive(const Function& func,
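The rewritten AllocTensorFromType is the payoff of the new instruction: instead of looking up a pre-materialized shape constant, emitting LoadConst, and allocating from a register, the compiler now reads the static dimensions straight out of the tensor type and emits a single constant-shape AllocTensor. The sketch below imitates only the shape-extraction step, with a plain vector standing in for the Relay type's dimension expressions; the commented instruction sequences are a conceptual summary, not actual printer output.

// Shape extraction sketch (plain types stand in for TensorType/IntImm).
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> ExtractStaticShape(const std::vector<int64_t>& type_dims) {
  std::vector<int64_t> shape;
  for (int64_t dim : type_dims) {
    // In the real pass each dim is Downcast<tvm::Integer>(dim)->value.
    shape.push_back(dim);
  }
  return shape;
}

int main() {
  // Old lowering (conceptually): load a stored shape tensor, then allocate from it.
  // New lowering (conceptually): alloc_tensor with the literal shape [2, 3].
  for (int64_t d : ExtractStaticShape({2, 3})) {
    std::cout << d << " ";
  }
  std::cout << "\n";
  return 0;
}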
@@ -532,7 +479,7 @@ void PopulatePackedFuncMap(const std::vector<LoweredFunc>& lowered_funcs,
 }

 VMFunction CompileFunc(VMCompilerContext* context, const GlobalVar& var, const Function& func) {
-  DLOG(INFO) << "CompileFunc: " << var << std::endl << AsText(func, false) << std::endl;
+  DLOG(INFO) << "CompileFunc: " << var << std::endl << AsText(func, false);
   size_t params = func->params.size();
   VMCompiler compiler(context);
   compiler.Compile(func);