Skip to content

Commit

Permalink
VM compiler refactor (apache#25)
Browse files Browse the repository at this point in the history
* Return Instruction::Arg for each CodeGenLLVM::VisitExpr_.

* Change VMCompiler to be an Object from ModuleNode.

* Introduce intrinsics and attrs.

* Generic handling of attribute codegen.

* Perform the to-non-dataflow transform in call_dps_rewrite.

* Back to special attr handling.

* Address comments.

* Standalone to_non_dataflow pass; more tests.

* Rename decode/make shape to store/load shape.

* Update.

* Fix namespace, add comments.

* rebase

* Rename files.

* nit
  • Loading branch information
YuchenJin authored and yongwww committed Aug 14, 2022
1 parent 7f117c4 commit af69a1e
Show file tree
Hide file tree
Showing 14 changed files with 596 additions and 446 deletions.
14 changes: 8 additions & 6 deletions include/tvm/relax/attrs/memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,29 +29,31 @@
namespace tvm {
namespace relax {
/*!
* \brief Options for allocating storage.
* \brief Attributes for allocating storage.
*/
struct AllocStorageAttrs : public tvm::AttrsNode<AllocStorageAttrs> {
DataType dtype;
int device_id;
int device_type;
DataType dtype;

TVM_DECLARE_ATTRS(AllocStorageAttrs, "relax.attrs.AllocStorageAttrs") {
TVM_ATTR_FIELD(device_type).describe("The device type on which to allocate memory.");
TVM_ATTR_FIELD(dtype)
.describe("The dtype of the tensor to allocate.")
.set_default(DataType::Float(32, 1));
TVM_ATTR_FIELD(device_id).describe("The device id on which to allocate memory.");
TVM_ATTR_FIELD(device_type).describe("The device type on which to allocate memory.");
}
};

/*!
* \brief Options for allocating tensors.
* \brief Attributes for allocating tensors.
*/
struct AllocTensorAttrs : public tvm::AttrsNode<AllocTensorAttrs> {
int offset;
DataType dtype;

TVM_DECLARE_ATTRS(AllocTensorAttrs, "relax.attrs.AllocTensorAttrs") {
TVM_ATTR_FIELD(offset)
.describe("Storage offset to allocate the tensor.")
.set_default(0);
TVM_ATTR_FIELD(dtype)
.describe("The dtype of the tensor to allocate.")
.set_default(DataType::Float(32, 1));
Expand Down
44 changes: 44 additions & 0 deletions include/tvm/relax/attrs/shape.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* \file tvm/relax/attrs/shape.h
* \brief Attributes for shape operators.
*/
#ifndef TVM_RELAX_ATTRS_SHAPE_H_
#define TVM_RELAX_ATTRS_SHAPE_H_

#include <tvm/ir/attrs.h>

namespace tvm {
namespace relax {
/*!
* \brief Attributes for decoding/making shape to/from VM heap.
*/
struct ShapeHeapAttrs : public tvm::AttrsNode<ShapeHeapAttrs> {
// Heap slot indices used by the VM shape store/load intrinsics.
// NOTE(review): presumably one slot per shape dimension, i.e. dimension i is
// stored to / loaded from heap[indices[i]] — confirm against the vm_shape_lower pass.
Array<Integer> indices;

TVM_DECLARE_ATTRS(ShapeHeapAttrs, "relax.attrs.ShapeHeapAttrs") {
TVM_ATTR_FIELD(indices).describe("The indices of the heap to store/load the shape to/from.");
}
};

} // namespace relax
} // namespace tvm
#endif // TVM_RELAX_ATTRS_SHAPE_H_
15 changes: 8 additions & 7 deletions python/tvm/relax/transform/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ def fma_rewrite(expr):
"""
return _ffi_api.fma_rewrite(expr)


def to_non_dataflow(mod: IRModule) -> IRModule:
"""Transform all dataflow structure to non-dataflow version.
Expand All @@ -42,7 +43,7 @@ def to_non_dataflow(mod: IRModule) -> IRModule:


def call_dps_rewrite(mod: IRModule) -> IRModule:
"""Perform explicit memory allocation for call_dps.
"""Perform explicit tensor allocation for call_dps.
Parameters
----------
Expand All @@ -52,23 +53,23 @@ def call_dps_rewrite(mod: IRModule) -> IRModule:
return _ffi_api.call_dps_rewrite(mod)


def memory_lower(mod: IRModule) -> IRModule:
"""Perform memory lowering. Lower the relax.builtin.alloc_tensor op to VM builtin functions.
def vm_memory_lower(mod: IRModule) -> IRModule:
"""Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.
Parameters
----------
mod : tvm.IRModule
The input module.
"""
return _ffi_api.memory_lower(mod)
return _ffi_api.vm_memory_lower(mod)


def shape_lower(mod: IRModule) -> IRModule:
"""Lower the shape expression in relax to shape heap and TIR functions.
def vm_shape_lower(mod: IRModule) -> IRModule:
"""Lower the shape expression in relax to VM shape heap and TIR functions.
Parameters
----------
mod : tvm.IRModule
The input module.
"""
return _ffi_api.shape_lower(mod)
return _ffi_api.vm_shape_lower(mod)
4 changes: 2 additions & 2 deletions python/tvm/relax/vm.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ def build(mod: tvm.IRModule,
"""
new_mod = transform.to_non_dataflow(mod)
new_mod = transform.call_dps_rewrite(new_mod)
new_mod = transform.memory_lower(new_mod)
new_mod = transform.shape_lower(new_mod)
new_mod = transform.vm_memory_lower(new_mod)
new_mod = transform.vm_shape_lower(new_mod)
ex, lib = _ffi_api.VMBuild(new_mod, target, target_host)
return ex, lib
Loading

0 comments on commit af69a1e

Please sign in to comment.