Skip to content

Commit

Permalink
Merge branch 'main' of https://github.com/apache/tvm into vm_te_migration
Browse files Browse the repository at this point in the history
  • Loading branch information
mikepapadim committed Jul 19, 2021
2 parents ea022e1 + 6d88bdd commit a33e069
Show file tree
Hide file tree
Showing 7 changed files with 346 additions and 58 deletions.
8 changes: 4 additions & 4 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,12 @@

// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
ci_lint = "tlcpack/ci-lint:v0.66"
ci_gpu = "tlcpack/ci-gpu:v0.75"
ci_cpu = "tlcpack/ci-cpu:v0.74"
ci_gpu = "tlcpack/ci-gpu:v0.76"
ci_cpu = "tlcpack/ci-cpu:v0.75"
ci_wasm = "tlcpack/ci-wasm:v0.71"
ci_i386 = "tlcpack/ci-i386:v0.73"
ci_qemu = "tlcpack/ci-qemu:v0.05"
ci_arm = "tlcpack/ci-arm:v0.05"
ci_qemu = "tlcpack/ci-qemu:v0.06"
ci_arm = "tlcpack/ci-arm:v0.06"
// <--- End of regex-scanned config.

// tvm libraries
Expand Down
Empty file.
64 changes: 64 additions & 0 deletions python/tvm/relay/frontend/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -585,6 +585,70 @@ def _impl_v1(cls, inputs, attr, params):
out = _op.nn.bias_add(out, inputs[2])
return out

@classmethod
def _impl_v11(cls, inputs, attr, params):
    """Convert an opset-11 ONNX ConvTranspose node to Relay.

    Parameters
    ----------
    inputs : list of relay.Expr
        [data, weight] or [data, weight, bias].
    attr : dict
        ONNX node attributes (byte-string values as produced by the
        ONNX protobuf parser, e.g. ``auto_pad``).
    params : dict
        Parameter tensors of the graph (unused directly here; consumed
        by the shape-inference helpers).

    Returns
    -------
    relay.Expr
        The converted transposed-convolution expression, with bias added
        when a third input is present.
    """
    # Number of output channels comes from the weight tensor's shape
    # (dimension 1 of the kernel for a transposed convolution).
    out_type = infer_type(inputs[1])
    out_shapes = [get_const_tuple(out_type.checked_type.shape)]
    channels = out_shapes[0][1]
    attr["channels"] = channels
    groups = attr.get("group", 1)

    # kernel_shape is optional in ONNX; fall back to the weight's
    # trailing spatial dimensions.
    if "kernel_shape" not in attr:
        attr["kernel_shape"] = out_shapes[0][2:]

    attr["groups"] = groups
    # Infer explicit pads when auto_pad is set, since Relay only takes
    # explicit padding.
    data = inputs[0]
    input_shape = infer_shape(data)
    ndim = len(input_shape)
    if "auto_pad" in attr:
        attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
        if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
            # Warning: Convolution does not yet support dynamic shapes,
            # one will need to run dynamic_to_static on this model after import
            kernel_shape = attr["kernel_shape"]
            kndim = len(kernel_shape)
            dilations = attr.get("dilations", [1] * kndim)
            output_padding = attr.get("output_padding", [0] * kndim)
            # Fix: strides is optional in ONNX (defaults to 1 per axis);
            # indexing attr["strides"] directly raised KeyError when absent.
            strides = attr.get("strides", [1] * kndim)
            # Total padding needed so the output satisfies the SAME
            # relation for a transposed convolution.
            total_pad = [0] * kndim
            for i in range(kndim):
                total_pad[i] = (
                    output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - strides[i]
                )
            left = [p // 2 for p in total_pad]
            right = [total_pad[i] - left[i] for i in range(kndim)]
            # SAME_LOWER puts the extra odd padding at the beginning,
            # SAME_UPPER at the end.
            if "LOWER" in attr["auto_pad"]:
                pad = left + right
            else:
                pad = right + left
            attr["pads"] = pad
        elif attr["auto_pad"] == "VALID":
            attr["pads"] = tuple([0 for i in range(ndim - 2)])
        elif attr["auto_pad"] == "NOTSET":
            pass
        else:
            # Fix: this converter handles ConvTranspose, not Conv —
            # report the correct operator name in the error.
            msg = 'Value {} in attribute "auto_pad" of operator ConvTranspose is invalid.'
            raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
        attr.pop("auto_pad")

    out = AttrCvt(
        op_name=dimension_picker("conv", "_transpose"),
        transforms={
            "kernel_shape": "kernel_size",
            "dilations": ("dilation", 1),
            "pads": ("padding", 0),
            "group": ("groups", 1),
        },
        disables=["output_shape"],
        custom_check=dimension_constraint(),
    )([data, inputs[1]], attr, params)
    # Optional third input is the bias tensor.
    use_bias = len(inputs) == 3
    if use_bias:
        out = _op.nn.bias_add(out, inputs[2])
    return out


class GlobalAveragePool(OnnxOpConverter):
"""Operator converter for GlobalAveragePool"""
Expand Down
7 changes: 2 additions & 5 deletions src/relay/backend/vm/compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,6 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
ExprDeviceMap expr_device_map)
: last_register_(0),
registers_num_(0),
// engine_(CompileEngine::Global()), //TODO: replace with TE
context_(context),
target_host_(target_host),
expr_device_map_(std::move(expr_device_map)) {
Expand Down Expand Up @@ -467,7 +466,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
void EmitShapeFunc(Function func, Array<Expr> inputs, Array<Expr> outputs) {
// Lower shape function
CCacheKey key(func, target_host_);
auto cfunc = compiler_->LowerShapeFunc(key); //TODO: replace with TE
auto cfunc = compiler_->LowerShapeFunc(key);
int op_index = -1;
// pick the only function inside the context
ICHECK_EQ(cfunc->funcs->functions.size(), 1);
Expand Down Expand Up @@ -553,7 +552,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {

CCacheKey key(func, target);
auto mangle_fn = [](String name) { return name; };
auto cfunc = compiler_->Lower(key, mangle_fn); //TODO: replace with TE
auto cfunc = compiler_->Lower(key, mangle_fn);

auto op_index = -1;
if (func->GetAttr<String>(attr::kCompiler).defined()) {
Expand Down Expand Up @@ -1189,8 +1188,6 @@ void VMCompiler::Codegen() {
TECompiler compiler;
auto ext_mods = compiler->LowerExternalFunctions();

//targets = target

runtime::Module lib;
if (funcs.size() > 0) {
lib = tvm::build(funcs, target_host_);
Expand Down
5 changes: 4 additions & 1 deletion src/runtime/module.cc
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,10 @@ const PackedFunc* ModuleNode::GetFuncFromEnv(const std::string& name) {
if (pf == nullptr) {
const PackedFunc* f = Registry::Get(name);
ICHECK(f != nullptr) << "Cannot find function " << name
<< " in the imported modules or global registry";
<< " in the imported modules or global registry."
<< " If this involves ops from a contrib library like"
<< " cuDNN, ensure TVM was built with the relevant"
<< " library.";
return f;
} else {
import_cache_.insert(std::make_pair(name, std::make_shared<PackedFunc>(pf)));
Expand Down
Loading

0 comments on commit a33e069

Please sign in to comment.