diff --git a/apps/benchmark/util.py b/apps/benchmark/util.py index c7de3a1dda319..86d139f1c8516 100644 --- a/apps/benchmark/util.py +++ b/apps/benchmark/util.py @@ -34,7 +34,7 @@ def get_network(name, batch_size, dtype='float32'): Returns ------- - net: relay.Module + net: tvm.IRModule The relay function of network definition params: dict The random parameters for benchmark @@ -70,7 +70,7 @@ def get_network(name, batch_size, dtype='float32'): net, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype) net = net["main"] net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs) - net = relay.Module.from_expr(net) + net = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py index 567cc65a823f2..bd4311a66f442 100644 --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -34,6 +34,7 @@ from .runtime import ndarray as nd # tvm.ir +from .ir import IRModule from .ir import transform from . import ir diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py index bdff057c5a7ee..b02c289cb10f5 100644 --- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py +++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py @@ -141,7 +141,7 @@ def __init__(self, graph, input_shapes, records, target_ops, self._logger.propagate = False # Generate workload and schedule dictionaries. - if isinstance(graph, relay.Module): + if isinstance(graph, tvm.IRModule): graph = graph["main"] if isinstance(graph, relay.expr.Function): diff --git a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py index d3a27cbc1ecd6..06db115faffcf 100644 --- a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py +++ b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py @@ -83,7 +83,7 @@ def expr2graph(expr, target_ops, node_dict, node_list): def _infer_type(node): """A method to infer the type of a relay expression.""" - mod = relay.Module.from_expr(node) + mod = tvm.IRModule.from_expr(node) mod = transform.InferType()(mod) entry = mod["main"] return entry if isinstance(node, relay.Function) else entry.body @@ -136,7 +136,7 @@ def _traverse_expr(node): free_var = relay.Var("var_%d" % i, input_type) params.append(free_var) call = relay.Call(node.op, params, node.attrs) - mod = relay.Module.from_expr(relay.Function(params, call)) + mod = tvm.IRModule.from_expr(relay.Function(params, call)) relay.backend.compile_engine.get().clear() build_thread = threading.Thread(target=relay.build, args=(mod, diff --git a/python/tvm/autotvm/graph_tuner/utils/utils.py b/python/tvm/autotvm/graph_tuner/utils/utils.py index d73f2c35f50e4..d17669ae5f625 100644 --- a/python/tvm/autotvm/graph_tuner/utils/utils.py +++ b/python/tvm/autotvm/graph_tuner/utils/utils.py @@ -136,7 +136,7 @@ def bind_inputs(expr, input_shapes=None, input_dtypes="float32"): rebind_dict[var] = updated_input_dict[var.name_hint] updated_expr = relay.expr.bind(expr, rebind_dict) - mod = relay.Module.from_expr(updated_expr) + mod = tvm.IRModule.from_expr(updated_expr) mod = transform.InferType()(mod) entry = mod["main"] return entry if isinstance(updated_expr, relay.Function) else entry.body diff --git a/python/tvm/autotvm/task/relay_integration.py b/python/tvm/autotvm/task/relay_integration.py index 7471ca3d6c8f7..87d28b7a810ae 100644 --- a/python/tvm/autotvm/task/relay_integration.py +++ 
b/python/tvm/autotvm/task/relay_integration.py @@ -63,7 +63,7 @@ def extract_from_program(mod, params, ops, target, target_host=None, Parameters ---------- - mod: relay.module.Module or relay.expr.Function + mod: tvm.IRModule or relay.expr.Function The module or function to tune params: dict of str to numpy array The associated parameters of the program @@ -95,7 +95,7 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None, Parameters ---------- - mods: List[relay.module.Module] or List[relay.expr.Function] + mods: List[tvm.IRModule] or List[relay.expr.Function] The list of modules or functions to tune params: List of dict of str to numpy array The associated parameters of the programs @@ -151,8 +151,8 @@ def extract_from_multiple_program(mods, params, ops, target, target_host=None, for mod, param in zip(mods, params): if isinstance(mod, relay.expr.Function): - mod = relay.Module.from_expr(mod) - assert isinstance(mod, relay.module.Module), \ + mod = tvm.IRModule.from_expr(mod) + assert isinstance(mod, tvm.IRModule), \ "only support relay Module or Function to be tuned" relay.backend.compile_engine.get().clear() # wrap build call in thread to avoid multiprocessing problems diff --git a/python/tvm/contrib/sparse.py b/python/tvm/contrib/sparse.py index 2a51637fe6ced..966e180ec2b87 100644 --- a/python/tvm/contrib/sparse.py +++ b/python/tvm/contrib/sparse.py @@ -38,7 +38,7 @@ def __init__(self, arg1, ctx=None, shape=None): The corresponding a dense numpy array, or a tuple for constructing a sparse matrix directly. - ctx: tvm.TVMContext + ctx: TVMContext The corresponding context. shape : tuple of int diff --git a/python/tvm/ir/__init__.py b/python/tvm/ir/__init__.py index 4bc8a461fb934..d843e39ab5f7a 100644 --- a/python/tvm/ir/__init__.py +++ b/python/tvm/ir/__init__.py @@ -23,4 +23,5 @@ from .type_relation import TypeCall, TypeRelation from .tensor_type import TensorType from .adt import Constructor, TypeData +from .module import IRModule from . import transform diff --git a/python/tvm/relay/module.py b/python/tvm/ir/module.py similarity index 69% rename from python/tvm/relay/module.py rename to python/tvm/ir/module.py index 68704ed7072b8..ae1564b27105f 100644 --- a/python/tvm/relay/module.py +++ b/python/tvm/ir/module.py @@ -14,36 +14,26 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, wildcard-import -"""A global module storing everything needed to interpret or compile a Relay program.""" -import os -from .base import register_relay_node, RelayNode -from .. import register_func -from .._ffi import base as _base -from . import _make -from . import _module -from . import expr as _expr -from . import ty as _ty +"""IRModule that holds the functions and type definitions.""" +from tvm._ffi.base import string_types +import tvm._ffi -__STD_PATH__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), "std") +from .base import Node +from . import expr as _expr +from . import type as _ty +from . import _ffi_api -@register_func("tvm.relay.std_path") -def _std_path(): - global __STD_PATH__ - return __STD_PATH__ -@register_relay_node -class Module(RelayNode): - """The global Relay module containing collection of functions. +@tvm._ffi.register_object("relay.Module") +class IRModule(Node): + """IRModule that holds functions and type definitions.
- Each global function is identified by an unique tvm.relay.GlobalVar. - tvm.relay.GlobalVar and Module is necessary in order to enable - recursions in function to avoid cyclic reference in the function.x + IRModule is the basic unit for all IR transformations across the stack. Parameters ---------- functions: Optional[dict]. - Map of global var to Function + Map of global var to BaseFunc """ def __init__(self, functions=None, type_definitions=None): if functions is None: @@ -51,7 +41,7 @@ def __init__(self, functions=None, type_definitions=None): elif isinstance(functions, dict): mapped_funcs = {} for k, v in functions.items(): - if isinstance(k, _base.string_types): + if isinstance(k, string_types): k = _expr.GlobalVar(k) if not isinstance(k, _expr.GlobalVar): raise TypeError("Expect functions to be Dict[GlobalVar, Function]") @@ -62,13 +52,13 @@ def __init__(self, functions=None, type_definitions=None): elif isinstance(type_definitions, dict): mapped_type_defs = {} for k, v in type_definitions.items(): - if isinstance(k, _base.string_types): + if isinstance(k, string_types): k = _ty.GlobalTypeVar(k) if not isinstance(k, _ty.GlobalTypeVar): raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]") mapped_type_defs[k] = v type_definitions = mapped_type_defs - self.__init_handle_by_constructor__(_make.Module, functions, type_definitions) + self.__init_handle_by_constructor__(_ffi_api.IRModule, functions, type_definitions) def __setitem__(self, var, val): @@ -86,17 +76,17 @@ def __setitem__(self, var, val): def _add(self, var, val, update=False): if isinstance(val, _expr.RelayExpr): - if isinstance(var, _base.string_types): - if _module.Module_ContainGlobalVar(self, var): - var = _module.Module_GetGlobalVar(self, var) + if isinstance(var, string_types): + if _ffi_api.Module_ContainGlobalVar(self, var): + var = _ffi_api.Module_GetGlobalVar(self, var) else: var = _expr.GlobalVar(var) - _module.Module_Add(self, var, val, update) + _ffi_api.Module_Add(self, var, val, update) else: assert isinstance(val, _ty.Type) - if isinstance(var, _base.string_types): + if isinstance(var, string_types): var = _ty.GlobalTypeVar(var) - _module.Module_AddDef(self, var, val, update) + _ffi_api.Module_AddDef(self, var, val, update) def __getitem__(self, var): """Lookup a global definition by name or by variable. @@ -111,12 +101,11 @@ def __getitem__(self, var): val: Union[Function, Type] The definition referenced by :code:`var` (either a function or type). """ - if isinstance(var, _base.string_types): - return _module.Module_Lookup_str(self, var) - elif isinstance(var, _expr.GlobalVar): - return _module.Module_Lookup(self, var) - else: - return _module.Module_LookupDef(self, var) + if isinstance(var, string_types): + return _ffi_api.Module_Lookup_str(self, var) + if isinstance(var, _expr.GlobalVar): + return _ffi_api.Module_Lookup(self, var) + return _ffi_api.Module_LookupDef(self, var) def update(self, other): """Insert functions in another Module to current one. @@ -128,7 +117,7 @@ def update(self, other): """ if isinstance(other, dict): other = Module(other) - return _module.Module_Update(self, other) + return _ffi_api.Module_Update(self, other) def get_global_var(self, name): """Get a global variable in the function by name. @@ -145,9 +134,9 @@ def get_global_var(self, name): Raises ------ - tvm.TVMError if we cannot find corresponding global var. + tvm.error.TVMError if we cannot find corresponding global var. 
""" - return _module.Module_GetGlobalVar(self, name) + return _ffi_api.Module_GetGlobalVar(self, name) def get_global_vars(self): """Collect all global vars defined in this module. @@ -157,7 +146,7 @@ def get_global_vars(self): global_vars: tvm.Array[GlobalVar] An array of global vars. """ - return _module.Module_GetGlobalVars(self) + return _ffi_api.Module_GetGlobalVars(self) def get_global_type_vars(self): """Collect all global type vars defined in this module. @@ -167,7 +156,7 @@ def get_global_type_vars(self): global_type_vars: tvm.Array[GlobalTypeVar] An array of global type vars. """ - return _module.Module_GetGlobalTypeVars(self) + return _ffi_api.Module_GetGlobalTypeVars(self) def get_global_type_var(self, name): """Get a global type variable in the function by name. @@ -184,9 +173,9 @@ def get_global_type_var(self, name): Raises ------ - tvm.TVMError if we cannot find corresponding global type var. + tvm.error.TVMError if we cannot find corresponding global type var. """ - return _module.Module_GetGlobalTypeVar(self, name) + return _ffi_api.Module_GetGlobalTypeVar(self, name) def get_constructor(self, tag): """Look up an ADT constructor by tag. @@ -203,9 +192,9 @@ def get_constructor(self, tag): Raises ------ - tvm.TVMError if the corresponding constructor cannot be found. + tvm.error.TVMError if the corresponding constructor cannot be found. """ - return _module.Module_LookupTag(self, tag) + return _ffi_api.Module_LookupTag(self, tag) @staticmethod def from_expr(expr, functions=None, type_defs=None): @@ -213,14 +202,15 @@ def from_expr(expr, functions=None, type_defs=None): Parameters ---------- - expr: Expr + expr: RelayExpr The starting expression + global_funcs: Optional[dict] Map of global vars to function definitions + type_defs: Optional[dict] Map of global type vars to type definitions - Returns ------- mod: Module @@ -230,10 +220,10 @@ def from_expr(expr, functions=None, type_defs=None): """ funcs = functions if functions is not None else {} defs = type_defs if type_defs is not None else {} - return _module.Module_FromExpr(expr, funcs, defs) + return _ffi_api.Module_FromExpr(expr, funcs, defs) def _import(self, file_to_import): - return _module.Module_Import(self, file_to_import) + return _ffi_api.Module_Import(self, file_to_import) def import_from_std(self, file_to_import): - return _module.Module_ImportFromStd(self, file_to_import) + return _ffi_api.Module_ImportFromStd(self, file_to_import) diff --git a/python/tvm/ir/transform.py b/python/tvm/ir/transform.py index e8ba3458a75c0..619250459b5cb 100644 --- a/python/tvm/ir/transform.py +++ b/python/tvm/ir/transform.py @@ -130,12 +130,12 @@ def __call__(self, mod): Parameters ---------- - mod : tvm.relay.Module + mod : tvm.IRModule The module that a certain optimization is performed on. Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The updated module after applying this pass. """ return _ffi_transform_api.RunPass(self, mod) @@ -143,7 +143,7 @@ def __call__(self, mod): @tvm._ffi.register_object("relay.ModulePass") class ModulePass(Pass): - """A pass that works on tvm.relay.Module. Users don't need to interact with + """A pass that works on tvm.IRModule. Users don't need to interact with this class directly. Instead, a module pass should be created through `module_pass`, because the design of the `module_pass` API is flexible enough to handle the creation of a module pass in different manners. 
In @@ -293,7 +293,7 @@ def transform(mod, ctx): x = relay.var("x", tp) gv = relay.GlobalVar("var") func = relay.Function([x], relay.abs(x)) - new_mod = relay.Module({gv: func}) + new_mod = tvm.IRModule({gv: func}) new_mod.update(mod) return new_mod diff --git a/python/tvm/relay/__init__.py b/python/tvm/relay/__init__.py index 7125833b29706..417967b64fd87 100644 --- a/python/tvm/relay/__init__.py +++ b/python/tvm/relay/__init__.py @@ -16,7 +16,6 @@ # under the License. # pylint: disable=wildcard-import, redefined-builtin, invalid-name """The Relay IR namespace containing the IR definition and compiler.""" -from __future__ import absolute_import import os from sys import setrecursionlimit from ..api import register_func @@ -25,7 +24,6 @@ from . import expr from . import type_functor from . import expr_functor -from . import module from . import adt from . import analysis from . import transform @@ -67,9 +65,6 @@ # Span Span = base.Span -# Env -Module = module.Module - # Type Type = ty.Type TupleType = ty.TupleType diff --git a/python/tvm/relay/_module.py b/python/tvm/relay/_module.py deleted file mode 100644 index aedb74a054863..0000000000000 --- a/python/tvm/relay/_module.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable -"""The interface to the Module exposed from C++.""" -import tvm._ffi - -tvm._ffi._init_api("relay._module", __name__) diff --git a/python/tvm/relay/_parser.py b/python/tvm/relay/_parser.py index 3ae89af01f6e9..c53d4ffe7624c 100644 --- a/python/tvm/relay/_parser.py +++ b/python/tvm/relay/_parser.py @@ -38,8 +38,8 @@ def __new__(cls, *args, **kwds): import tvm import tvm.ir._ffi_api +from tvm.ir import IRModule -from . import module from .base import Span, SourceName from . import adt from . import expr @@ -202,7 +202,7 @@ class ParseTreeToRelayIR(RelayVisitor): def __init__(self, source_name: str) -> None: self.source_name = source_name - self.module = module.Module({}) # type: module.Module + self.module = IRModule({}) # type: IRModule # Adding an empty scope allows naked lets without pain. 
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var] @@ -353,7 +353,7 @@ def getTypeExpr(self, ctx: Optional[RelayParser.TypeExprContext]) -> Optional[ty return self.visit(ctx) - def visitProg(self, ctx: RelayParser.ProgContext) -> Union[expr.Expr, module.Module]: + def visitProg(self, ctx: RelayParser.ProgContext) -> Union[expr.Expr, IRModule]: self.meta = None if ctx.METADATA(): header, data = str(ctx.METADATA()).split("\n", 1) @@ -747,7 +747,7 @@ def reportAttemptingFullContext(self, def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs): raise Exception("Context Sensitivity in:\n" + self.text) -def fromtext(data: str, source_name: str = None) -> Union[expr.Expr, module.Module]: +def fromtext(data: str, source_name: str = None) -> Union[expr.Expr, IRModule]: """Parse a Relay program.""" if data == "": raise ParseError("cannot parse the empty string.") diff --git a/python/tvm/relay/analysis.py b/python/tvm/relay/analysis.py index c4158781756cf..372be749029cc 100644 --- a/python/tvm/relay/analysis.py +++ b/python/tvm/relay/analysis.py @@ -20,12 +20,11 @@ This file contains the set of passes for Relay, which exposes an interface for configuring the passes and scripting them in Python. """ -from tvm.ir import RelayExpr +from tvm.ir import RelayExpr, IRModule from . import _analysis from . import _make from .ty import Type -from .module import Module from .feature import Feature @@ -71,7 +70,7 @@ def check_kind(t, mod=None): t : tvm.relay.Type The type to check - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] The global module. Returns @@ -170,7 +169,7 @@ def free_type_vars(expr, mod=None): expr : Union[tvm.relay.Expr,tvm.relay.Type] The input expression/type - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] The global module Returns @@ -178,7 +177,7 @@ def free_type_vars(expr, mod=None): free : List[tvm.relay.TypeVar] The list of free type variables in post-DFS order """ - use_mod = mod if mod is not None else Module() + use_mod = mod if mod is not None else IRModule() return _analysis.free_type_vars(expr, use_mod) @@ -190,7 +189,7 @@ def bound_type_vars(expr, mod=None): expr : Union[tvm.relay.Expr,tvm.relay.Type] The input expression/type - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] The global module Returns @@ -198,7 +197,7 @@ def bound_type_vars(expr, mod=None): free : List[tvm.relay.TypeVar] The list of bound type variables in post-DFS order """ - use_mod = mod if mod is not None else Module() + use_mod = mod if mod is not None else IRModule() return _analysis.bound_type_vars(expr, use_mod) @@ -210,7 +209,7 @@ def all_type_vars(expr, mod=None): expr : Union[tvm.relay.Expr,tvm.relay.Type] The input expression/type - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] The global module Returns @@ -218,7 +217,7 @@ def all_type_vars(expr, mod=None): free : List[tvm.relay.TypeVar] The list of all type variables in post-DFS order """ - use_mod = mod if mod is not None else Module() + use_mod = mod if mod is not None else IRModule() return _analysis.all_type_vars(expr, use_mod) @@ -354,7 +353,7 @@ def unmatched_cases(match, mod=None): match : tvm.relay.Match The match expression - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] The module (defaults to an empty module) Returns @@ -371,10 +370,10 @@ def detect_feature(a, b=None): Parameters ---------- - a : Union[tvm.relay.Expr, tvm.relay.Module] + a : Union[tvm.relay.Expr, tvm.IRModule] The input expression or 
module. - b : Optional[Union[tvm.relay.Expr, tvm.relay.Module]] + b : Optional[Union[tvm.relay.Expr, tvm.IRModule]] The input expression or module. The two arguments cannot both be expression or module. diff --git a/python/tvm/relay/backend/interpreter.py b/python/tvm/relay/backend/interpreter.py index 3759bc9950afd..de2f57e925ebc 100644 --- a/python/tvm/relay/backend/interpreter.py +++ b/python/tvm/relay/backend/interpreter.py @@ -20,10 +20,11 @@ import numpy as np +from tvm.ir import IRModule + from tvm import container from . import _backend from .. import _make, analysis, transform -from .. import module from ... import nd from ..base import Object, register_relay_node from ..expr import Tuple, RefCreate, Call, Constant, GlobalVar, Function, const @@ -186,10 +187,10 @@ class Interpreter(Executor): Parameters ---------- - mod : tvm.relay.Module + mod : tvm.IRModule The module to support the execution. - ctx : tvm.TVMContext + ctx : TVMContext The runtime context to run the code on. target : tvm.Target @@ -205,7 +206,7 @@ def optimize(self): Returns ------- - opt_mod : tvm.relay.Module + opt_mod : tvm.IRModule The optimized module. """ seq = transform.Sequential([transform.SimplifyInference(), @@ -239,7 +240,7 @@ def _interp_wrapper(*args, **kwargs): if self.mod: self.mod["main"] = func else: - self.mod = module.Module.from_expr(func) + self.mod = IRModule.from_expr(func) mod = self.optimize() opt_expr = Call(mod["main"], relay_args) diff --git a/python/tvm/relay/backend/vm.py b/python/tvm/relay/backend/vm.py index 67afe0945ccca..499128e846317 100644 --- a/python/tvm/relay/backend/vm.py +++ b/python/tvm/relay/backend/vm.py @@ -113,7 +113,7 @@ def save(self): # define a simple network. x = relay.var('x', shape=(10, 10)) f = relay.Function([x], x + x) - mod = relay.Module({"main": f}) + mod = tvm.IRModule({"main": f}) # create a Relay VM. ctx = tvm.cpu() target = "llvm" @@ -368,7 +368,7 @@ def compile(mod, target=None, target_host=None, params=None): Parameters ---------- - mod : relay.Module + mod : tvm.IRModule The Relay module to build. target : str, :any:`tvm.target.Target`, or dict of str(i.e. @@ -442,7 +442,7 @@ def lower(self, mod, target=None, target_host=None): Parameters ---------- - mod : relay.Module + mod : tvm.IRModule The Relay module to build. target : str, :any:`tvm.target.Target`, or dict of str(i.e. @@ -474,7 +474,7 @@ def optimize(self, mod, target=None, params=None): Parameters ---------- - mod : relay.Module + mod : tvm.IRModule target : str, :any:`tvm.target.Target`, or dict of str (i.e. device/context name) to str/tvm.target.Target, optional @@ -485,7 +485,7 @@ def optimize(self, mod, target=None, params=None): Returns ------- - mod : relay.Module + mod : tvm.IRModule The optimized relay module. params : dict @@ -560,10 +560,10 @@ class VMExecutor(Executor): Parameters ---------- - mod : :py:class:`~tvm.relay.module.Module` + mod : :py:class:`~tvm.IRModule` The module to support the execution. - ctx : :py:class:`~tvm.TVMContext` + ctx : :py:class:`TVMContext` The runtime context to run the code on. target : :py:class:`Target` diff --git a/python/tvm/relay/base.py b/python/tvm/relay/base.py index 5f113f5c33941..0d6f22f446cd7 100644 --- a/python/tvm/relay/base.py +++ b/python/tvm/relay/base.py @@ -16,6 +16,7 @@ # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, unused-import """The base node types for the Relay language.""" +import os import tvm._ffi from tvm.runtime import Object @@ -25,6 +26,13 @@ from .
import _base +__STD_PATH__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), "std") + +@tvm._ffi.register_func("tvm.relay.std_path") +def _std_path(): + return __STD_PATH__ + + def register_relay_node(type_key=None): """Register a Relay node type. diff --git a/python/tvm/relay/build_module.py b/python/tvm/relay/build_module.py index ea7a4cacfc60d..fa812cb357033 100644 --- a/python/tvm/relay/build_module.py +++ b/python/tvm/relay/build_module.py @@ -21,13 +21,14 @@ import warnings import numpy as np +from tvm.ir import IRModule + from tvm import expr as tvm_expr from .. import nd as _nd, target as _target, autotvm from ..contrib import graph_runtime as _graph_rt from . import _build_module from . import ty as _ty from . import expr as _expr -from .module import Module as _Module from .backend import interpreter as _interpreter from .backend.vm import VMExecutor @@ -141,7 +142,7 @@ def optimize(self, func, target=None, params=None): Returns ------- - mod : relay.Module + mod : tvm.IRModule The optimized relay module. params : dict @@ -185,7 +186,7 @@ def build(mod, target=None, target_host=None, params=None): Parameters ---------- - mod : relay.Module + mod : tvm.IRModule The module to build. Using relay.Function is deprecated. target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context @@ -217,16 +218,16 @@ def build(mod, target=None, target_host=None, params=None): params : dict The parameters of the final graph. """ - if isinstance(mod, _Module): + if isinstance(mod, IRModule): func = mod["main"] elif isinstance(mod, _expr.Function): func = mod warnings.warn( - "Please use input parameter mod (tvm.relay.module.Module) " + "Please use input parameter mod (tvm.IRModule) " "instead of deprecated parameter func (tvm.relay.expr.Function)", DeprecationWarning) else: - raise ValueError("Type of input parameter mod must be tvm.relay.module.Module") + raise ValueError("Type of input parameter mod must be tvm.IRModule") target = _update_target(target) @@ -254,7 +255,7 @@ def optimize(mod, target=None, params=None): Parameters ---------- - mod : relay.Module + mod : tvm.IRModule The module to build. Using relay.Function is deprecated. target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context @@ -268,7 +269,7 @@ def optimize(mod, target=None, params=None): Returns ------- - mod : relay.Module + mod : tvm.IRModule The optimized relay module. params : dict @@ -279,11 +280,11 @@ def optimize(mod, target=None, params=None): elif isinstance(mod, _expr.Function): func = mod warnings.warn( - "Please use input parameter mod (tvm.relay.module.Module) " + "Please use input parameter mod (tvm.IRModule) " "instead of deprecated parameter func (tvm.relay.expr.Function)", DeprecationWarning) else: - raise ValueError("Type of input parameter mod must be tvm.relay.module.Module") + raise ValueError("Type of input parameter mod must be tvm.IRModule") target = _update_target(target) @@ -330,7 +331,7 @@ class GraphExecutor(_interpreter.Executor): Parameters ---------- - mod : :py:class:`~tvm.relay.module.Module` + mod : :py:class:`~tvm.IRModule` The module to support the execution. ctx : :py:class:`TVMContext` @@ -385,17 +386,17 @@ def create_executor(kind="debug", kind : str The type of executor - mod : :py:class:`~tvm.relay.module.Module` + mod : :py:class:`~tvm.IRModule` The Relay module containing collection of functions - ctx : :py:class:`tvm.TVMContext` + ctx : :py:class:`TVMContext` The context to execute the code.
target : :py:class:`tvm.Target` The corresponding context """ if mod is None: - mod = _Module() + mod = IRModule() if ctx is not None: assert ctx.device_type == _nd.context(str(target), 0).device_type else: diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index 566851d7f7ed9..da0cc6479818e 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -16,11 +16,11 @@ # under the License. # pylint: disable=import-self, invalid-name, line-too-long, unused-argument """Caffe2 frontend""" -from __future__ import absolute_import as _abs import tvm +from tvm.ir import IRModule + from .. import analysis from .. import expr as _expr -from .. import module as _module from .. import op as _op from ... import nd as _nd from .common import AttrCvt, Renamer @@ -383,7 +383,7 @@ def __init__(self, shape, dtype): self._ops = {} self._shape = shape self._dtype = dtype - self._mod = _module.Module({}) + self._mod = IRModule({}) def from_caffe2(self, init_net, predict_net): """Construct Relay expression from caffe2 graph. @@ -395,7 +395,7 @@ def from_caffe2(self, init_net, predict_net): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The module that optimizations will be performed on. params : dict @@ -565,7 +565,7 @@ def from_caffe2(init_net, predict_net, shape=None, dtype="float32"): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The module that optimizations will be performed on. params : dict of str to tvm.nd.NDArray diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index a0af826de32be..d427fe953085d 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -20,9 +20,10 @@ import numpy as np import tvm +from tvm.ir import IRModule from topi.util import get_const_tuple + from .. import expr as _expr -from .. import module as _module from .. import transform as _transform from .. import op as _op from .. import analysis @@ -453,7 +454,7 @@ def get_name(node): def infer_type(node, mod=None): """A method to infer the type of an intermediate node in the relay graph.""" - new_mod = _module.Module.from_expr(node) + new_mod = IRModule.from_expr(node) if mod is not None: new_mod.update(mod) new_mod = _transform.InferType()(new_mod) diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index 719a2783fd3b7..99a3930a4ea14 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -21,9 +21,10 @@ import math import numpy as np import tvm +from tvm.ir import IRModule + from .. import analysis from .. import expr as _expr -from .. import module as _module from .. import op as _op from ... import nd as _nd from ..._ffi import base as _base @@ -449,7 +450,7 @@ def from_coreml(model, shape=None): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation. 
params : dict of str to tvm.nd.NDArray @@ -505,4 +506,4 @@ def from_coreml(model, shape=None): outexpr = outexpr[0] func = _expr.Function(analysis.free_vars(outexpr), outexpr) params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()} - return _module.Module.from_expr(func), params + return IRModule.from_expr(func), params diff --git a/python/tvm/relay/frontend/darknet.py b/python/tvm/relay/frontend/darknet.py index 0ed7b21123832..7623df293cb9b 100644 --- a/python/tvm/relay/frontend/darknet.py +++ b/python/tvm/relay/frontend/darknet.py @@ -23,9 +23,10 @@ from enum import Enum import numpy as np import tvm +from tvm.ir import IRModule + from .. import analysis from .. import expr as _expr -from .. import module as _module from .common import get_relay_op, new_var __all__ = ['from_darknet'] @@ -822,7 +823,7 @@ def from_darknet(self): outputs = _as_list(sym) + self._outs outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) sym = _expr.Function(analysis.free_vars(outputs), outputs) - return _module.Module.from_expr(sym), self._tvmparams + return IRModule.from_expr(sym), self._tvmparams def from_darknet(net, shape=None, @@ -840,7 +841,7 @@ def from_darknet(net, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation. params : dict of str to tvm.nd.NDArray diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py index 740d600739068..d21f1af124cac 100644 --- a/python/tvm/relay/frontend/keras.py +++ b/python/tvm/relay/frontend/keras.py @@ -19,9 +19,10 @@ import sys import numpy as np import tvm +from tvm.ir import IRModule + from .. import analysis from .. import expr as _expr -from .. import module as _module from .. import op as _op from ... import nd as _nd from .common import ExprTable, new_var @@ -752,7 +753,7 @@ def from_keras(model, shape=None): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation. params : dict of str to tvm.nd.NDArray @@ -837,4 +838,4 @@ def _convert_input_layer(keras_layer): outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr) func = _expr.Function(analysis.free_vars(outexpr), outexpr) params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()} - return _module.Module.from_expr(func), params + return IRModule.from_expr(func), params diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 97e28a933c898..d74277bbe402f 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -21,12 +21,13 @@ import json import numpy as np import tvm +from tvm.ir import IRModule + from tvm import relay from topi.util import get_const_tuple from .. import analysis from .. import expr as _expr from .. import op as _op -from .. import module as _module from .. import scope_builder as _scope_builder from ... import nd as _nd @@ -1902,7 +1903,7 @@ def _from_mxnet_impl(symbol, shape_dict, dtype_info, params=None, mod=None): dtype_info : dict or str. Known parameter dtypes - mod : tvm.relay.Module + mod : tvm.IRModule The module that contains global information. It will be used for converting ops that need global information, e.g. control-flow ops. @@ -2009,7 +2010,7 @@ def from_mxnet(symbol, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation params : dict of str to tvm.nd.NDArray @@ -2020,7 +2021,7 @@ def from_mxnet(symbol, except ImportError as e: raise ImportError("{}. 
MXNet is required to parse symbols.".format(e)) - mod = _module.Module() + mod = IRModule() if isinstance(symbol, mx.sym.Symbol): params = {} arg_params = arg_params if arg_params else {} diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index e83ab1f729568..841d64da8321b 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -17,14 +17,13 @@ # pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines # pylint: disable=import-outside-toplevel """ONNX: Open Neural Network Exchange frontend for Relay.""" -from __future__ import absolute_import as _abs - import numpy as np import tvm +from tvm.ir import IRModule + from ... import nd as _nd from .. import analysis from .. import expr as _expr -from .. import module as _module from .. import op as _op from .common import AttrCvt, Renamer from .common import get_relay_op, new_var, infer_shape, infer_channels @@ -1568,7 +1567,7 @@ def from_onnx(self, graph, opset): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The returned relay module params : dict @@ -1661,7 +1660,7 @@ def from_onnx(self, graph, opset): outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) func = _expr.Function(analysis.free_vars(outputs), outputs) - return _module.Module.from_expr(func), self._params + return IRModule.from_expr(func), self._params def _parse_value_proto(self, value_proto): """Parse ValueProto or raw str.""" @@ -1789,7 +1788,7 @@ def from_onnx(model, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation params : dict of str to tvm.nd.NDArray diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 3aeb1d4f3d6d7..ac2ea9d0b1bb0 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -29,13 +29,13 @@ import tvm +from tvm.ir import IRModule from tvm.relay.prelude import Prelude from .. import analysis from .. import expr as _expr from .. import op as _op from ..expr_functor import ExprMutator -from .. import module as _module from .common import AttrCvt, get_relay_op from .common import infer_type as _infer_type from .common import infer_shape as _infer_shape @@ -2136,7 +2136,7 @@ def __init__(self): self._input_shapes = {} self._loops = {} self._branches = {} - self._mod = _module.Module({}) + self._mod = IRModule({}) self._prelude = Prelude(self._mod) def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): @@ -2171,7 +2171,7 @@ def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The module that optimizations will be performed on. params : dict @@ -2653,7 +2653,7 @@ def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The module that optimizations will be performed on. 
params : dict of str to tvm.nd.NDArray diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index ab630472c3728..a0b0c0fce5268 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -17,14 +17,14 @@ # pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel """Tensorflow lite frontend.""" -from __future__ import absolute_import as _abs import math import numpy as np import tvm +from tvm.ir import IRModule + from tvm import relay from .. import analysis from .. import expr as _expr -from .. import module as _module from .. import op as _op from .. import qnn as _qnn from ..util import get_scalar_from_constant @@ -1901,7 +1901,7 @@ def from_tflite(model, shape_dict, dtype_dict): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module for compilation. params : dict of str to tvm.nd.NDArray @@ -1940,5 +1940,5 @@ def from_tflite(model, shape_dict, dtype_dict): outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) func = _expr.Function(analysis.free_vars(outputs), outputs) - mod = _module.Module.from_expr(func) + mod = IRModule.from_expr(func) return mod, params diff --git a/python/tvm/relay/prelude.py b/python/tvm/relay/prelude.py index 94a75749ce5cc..5288a2e080113 100644 --- a/python/tvm/relay/prelude.py +++ b/python/tvm/relay/prelude.py @@ -16,13 +16,15 @@ # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name """A prelude containing useful global functions and ADT definitions.""" +from tvm.ir import IRModule + from .ty import GlobalTypeVar, TensorType, Any, scalar_type from .expr import Var, Function, GlobalVar, If, const from .op.tensor import add, subtract, equal from .adt import Constructor, TypeData, Clause, Match from .adt import PatternConstructor, PatternVar, PatternWildcard from . import op -from .module import Module + class TensorArrayOps(object): """Contains tensor array related ops""" @@ -648,7 +650,7 @@ class Prelude: def __init__(self, mod=None): if mod is None: - mod = Module() + mod = IRModule() self.mod = mod self.load_prelude() diff --git a/python/tvm/relay/qnn/transform.py b/python/tvm/relay/qnn/transform.py index a76bdaf6310f8..6d38490b2d194 100644 --- a/python/tvm/relay/qnn/transform.py +++ b/python/tvm/relay/qnn/transform.py @@ -42,7 +42,7 @@ def CanonicalizeOps(): # We want to utilize all the existing Relay infrastructure. So, instead of supporting this # QNN requantize op, we convert it into a sequence of existing Relay operators. - mod = relay.Module.from_expr(qnn_expr) + mod = tvm.IRModule.from_expr(qnn_expr) mod = relay.qnn.transform.CanonicalizeOps()(mod) relay_expr = mod['main'] print(relay_expr) diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py index d904fed489bc3..482a6f292f544 100644 --- a/python/tvm/relay/quantize/_calibrate.py +++ b/python/tvm/relay/quantize/_calibrate.py @@ -20,12 +20,12 @@ import multiprocessing as mp import numpy as np import tvm +from tvm.ir import IRModule from . import _quantize from . import quantize from .. import op as _op from .. import expr as _expr -from .. import module as _module from .. import analysis as _analysis from .. import transform as _transform from .. 
import build_module as _build_module @@ -141,7 +141,7 @@ def _make_const(val): func = mod['main'] _analysis.post_order_visit(func, visit_func) func = _expr.bind(func, const_params) - return _module.Module.from_expr(func) + return IRModule.from_expr(func) # weight scale functions diff --git a/python/tvm/relay/testing/__init__.py b/python/tvm/relay/testing/__init__.py index bcf8985657dae..bff01e859a50d 100644 --- a/python/tvm/relay/testing/__init__.py +++ b/python/tvm/relay/testing/__init__.py @@ -47,7 +47,7 @@ def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/python/tvm/relay/testing/dcgan.py b/python/tvm/relay/testing/dcgan.py index 6907eb01c88ce..9d7bdaaf8c06c 100644 --- a/python/tvm/relay/testing/dcgan.py +++ b/python/tvm/relay/testing/dcgan.py @@ -103,7 +103,7 @@ def get_workload(batch_size, oshape=(3, 64, 64), ngf=128, random_len=100, dtype= Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a DCGAN network. params : dict of str to NDArray The parameters. diff --git a/python/tvm/relay/testing/densenet.py b/python/tvm/relay/testing/densenet.py index 9818f446cf751..de140fbc15ab0 100644 --- a/python/tvm/relay/testing/densenet.py +++ b/python/tvm/relay/testing/densenet.py @@ -105,7 +105,7 @@ def get_workload(densenet_size=121, classes=1000, batch_size=4, Returns ------- - mod: tvm.relay.Module + mod: tvm.IRModule The relay module that contains a DenseNet network. params : dict of str to NDArray diff --git a/python/tvm/relay/testing/dqn.py b/python/tvm/relay/testing/dqn.py index cdf9d24af996a..10da37001f129 100644 --- a/python/tvm/relay/testing/dqn.py +++ b/python/tvm/relay/testing/dqn.py @@ -72,7 +72,7 @@ def get_workload(batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="flo The data type Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a DQN network. params : dict of str to NDArray The parameters. diff --git a/python/tvm/relay/testing/inception_v3.py b/python/tvm/relay/testing/inception_v3.py index fa4233d67b315..8a540e598b77b 100644 --- a/python/tvm/relay/testing/inception_v3.py +++ b/python/tvm/relay/testing/inception_v3.py @@ -290,7 +290,7 @@ def get_workload(batch_size=1, num_classes=1000, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains an Inception V3 network. params : dict of str to NDArray diff --git a/python/tvm/relay/testing/init.py b/python/tvm/relay/testing/init.py index 0b8ab2b42029b..352230a6150f3 100644 --- a/python/tvm/relay/testing/init.py +++ b/python/tvm/relay/testing/init.py @@ -144,13 +144,13 @@ def create_workload(net, initializer=None, seed=0): Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The created relay module. params : dict of str to NDArray The parameters. 
""" - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) shape_dict = { v.name_hint : v.checked_type for v in mod["main"].params} diff --git a/python/tvm/relay/testing/lstm.py b/python/tvm/relay/testing/lstm.py index d0134c1a864d6..2480d15f79bbe 100644 --- a/python/tvm/relay/testing/lstm.py +++ b/python/tvm/relay/testing/lstm.py @@ -173,7 +173,7 @@ def get_workload(iterations, num_hidden, batch_size=1, dtype="float32"): The data type Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a LSTM network. params : dict of str to NDArray The parameters. diff --git a/python/tvm/relay/testing/mlp.py b/python/tvm/relay/testing/mlp.py index 337bde5d5889e..d11873165097d 100644 --- a/python/tvm/relay/testing/mlp.py +++ b/python/tvm/relay/testing/mlp.py @@ -84,7 +84,7 @@ def get_workload(batch_size, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a mlp network. params : dict of str to NDArray diff --git a/python/tvm/relay/testing/mobilenet.py b/python/tvm/relay/testing/mobilenet.py index 1b3ce03d19d9e..9aaefdfdb02d1 100644 --- a/python/tvm/relay/testing/mobilenet.py +++ b/python/tvm/relay/testing/mobilenet.py @@ -151,7 +151,7 @@ def get_workload(batch_size=1, num_classes=1000, image_shape=(3, 224, 224), Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a MobileNet network. params : dict of str to NDArray diff --git a/python/tvm/relay/testing/py_converter.py b/python/tvm/relay/testing/py_converter.py index 72b835dddee76..35269b867404d 100644 --- a/python/tvm/relay/testing/py_converter.py +++ b/python/tvm/relay/testing/py_converter.py @@ -584,7 +584,7 @@ def visit_op(self, _): def to_python(expr: Expr, mod=None, target=tvm.target.create('llvm')): """Converts the given Relay expression into a Python script (as a Python AST object). For easiest debugging, import the astor package and use to_source().""" - mod = mod if mod is not None else relay.Module() + mod = mod if mod is not None else tvm.IRModule() converter = PythonConverter(mod, target) return converter.convert(expr) @@ -592,7 +592,7 @@ def to_python(expr: Expr, mod=None, target=tvm.target.create('llvm')): def run_as_python(expr: Expr, mod=None, target=tvm.target.create('llvm')): """Converts the given Relay expression into a Python script and executes it.""" - mod = mod if mod is not None else relay.Module() + mod = mod if mod is not None else tvm.IRModule() py_ast = to_python(expr, mod, target) code = compile(py_ast, '', 'exec') var_map = { diff --git a/python/tvm/relay/testing/resnet.py b/python/tvm/relay/testing/resnet.py index bde788e1f9b9d..97b6bdc7e6177 100644 --- a/python/tvm/relay/testing/resnet.py +++ b/python/tvm/relay/testing/resnet.py @@ -262,7 +262,7 @@ def get_workload(batch_size=1, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a ResNet network. params : dict of str to NDArray diff --git a/python/tvm/relay/testing/squeezenet.py b/python/tvm/relay/testing/squeezenet.py index 1e9ea73e9360e..1a946b6eaa9a2 100644 --- a/python/tvm/relay/testing/squeezenet.py +++ b/python/tvm/relay/testing/squeezenet.py @@ -149,7 +149,7 @@ def get_workload(batch_size=1, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a SqueezeNet network. 
params : dict of str to NDArray diff --git a/python/tvm/relay/testing/vgg.py b/python/tvm/relay/testing/vgg.py index 205c5b1fa8e39..686230b9fbaf8 100644 --- a/python/tvm/relay/testing/vgg.py +++ b/python/tvm/relay/testing/vgg.py @@ -124,7 +124,7 @@ def get_workload(batch_size, Returns ------- - mod : tvm.relay.Module + mod : tvm.IRModule The relay module that contains a VGG network. params : dict of str to NDArray diff --git a/python/tvm/relay/transform.py b/python/tvm/relay/transform.py index f330e0ccfef65..4c2bf873778ab 100644 --- a/python/tvm/relay/transform.py +++ b/python/tvm/relay/transform.py @@ -59,7 +59,7 @@ def build_config(opt_level=2, "CombineParallelDense": 4 } - fallback_device : int, str, or tvm.TVMContext, optional + fallback_device : int, str, or TVMContext, optional The fallback device. It is also used as the default device for operators without specified device during heterogeneous execution. @@ -547,7 +547,7 @@ def gradient(expr, mod=None, mode='higher_order'): expr : tvm.relay.Expr The input expression, which is a Function or a GlobalVar. - mod : Optional[tvm.relay.Module] + mod : Optional[tvm.IRModule] mode : Optional[String] The mode of the automatic differentiation algorithm. @@ -578,7 +578,7 @@ def to_cps(func, mod=None): func: tvm.relay.Function The input function. - mod: Optional[tvm.relay.Module] + mod: Optional[tvm.IRModule] The global module. Returns diff --git a/src/ir/module.cc b/src/ir/module.cc index 7f3796ed07f5d..04fe5d55bceb5 100644 --- a/src/ir/module.cc +++ b/src/ir/module.cc @@ -338,13 +338,13 @@ IRModule IRModule::FromText(const std::string& text, const std::string& source_p TVM_REGISTER_NODE_TYPE(IRModuleNode); -TVM_REGISTER_GLOBAL("relay._make.Module") +TVM_REGISTER_GLOBAL("ir.IRModule") .set_body_typed([](tvm::Map funcs, tvm::Map types) { return IRModule(funcs, types, {}); }); -TVM_REGISTER_GLOBAL("relay._module.Module_Add") +TVM_REGISTER_GLOBAL("ir.Module_Add") .set_body([](TVMArgs args, TVMRetValue* ret) { IRModule mod = args[0]; GlobalVar var = args[1]; @@ -369,67 +369,67 @@ TVM_REGISTER_GLOBAL("relay._module.Module_Add") *ret = mod; }); -TVM_REGISTER_GLOBAL("relay._module.Module_AddDef") +TVM_REGISTER_GLOBAL("ir.Module_AddDef") .set_body_method(&IRModuleNode::AddTypeDef); -TVM_REGISTER_GLOBAL("relay._module.Module_GetGlobalVar") +TVM_REGISTER_GLOBAL("ir.Module_GetGlobalVar") .set_body_method(&IRModuleNode::GetGlobalVar); -TVM_REGISTER_GLOBAL("relay._module.Module_GetGlobalVars") +TVM_REGISTER_GLOBAL("ir.Module_GetGlobalVars") .set_body_method(&IRModuleNode::GetGlobalVars); -TVM_REGISTER_GLOBAL("relay._module.Module_GetGlobalTypeVars") +TVM_REGISTER_GLOBAL("ir.Module_GetGlobalTypeVars") .set_body_method(&IRModuleNode::GetGlobalTypeVars); -TVM_REGISTER_GLOBAL("relay._module.Module_ContainGlobalVar") +TVM_REGISTER_GLOBAL("ir.Module_ContainGlobalVar") .set_body_method(&IRModuleNode::ContainGlobalVar); -TVM_REGISTER_GLOBAL("relay._module.Module_GetGlobalTypeVar") +TVM_REGISTER_GLOBAL("ir.Module_GetGlobalTypeVar") .set_body_method(&IRModuleNode::GetGlobalTypeVar); -TVM_REGISTER_GLOBAL("relay._module.Module_Lookup") +TVM_REGISTER_GLOBAL("ir.Module_Lookup") .set_body_typed([](IRModule mod, GlobalVar var) { return mod->Lookup(var); }); -TVM_REGISTER_GLOBAL("relay._module.Module_Lookup_str") +TVM_REGISTER_GLOBAL("ir.Module_Lookup_str") .set_body_typed([](IRModule mod, std::string var) { return mod->Lookup(var); }); -TVM_REGISTER_GLOBAL("relay._module.Module_LookupDef") +TVM_REGISTER_GLOBAL("ir.Module_LookupDef") .set_body_typed([](IRModule mod,
GlobalTypeVar var) { return mod->LookupTypeDef(var); }); -TVM_REGISTER_GLOBAL("relay._module.Module_LookupDef_str") +TVM_REGISTER_GLOBAL("ir.Module_LookupDef_str") .set_body_typed([](IRModule mod, std::string var) { return mod->LookupTypeDef(var); }); -TVM_REGISTER_GLOBAL("relay._module.Module_LookupTag") +TVM_REGISTER_GLOBAL("ir.Module_LookupTag") .set_body_typed([](IRModule mod, int32_t tag) { return mod->LookupTag(tag); }); -TVM_REGISTER_GLOBAL("relay._module.Module_FromExpr") +TVM_REGISTER_GLOBAL("ir.Module_FromExpr") .set_body_typed([](RelayExpr e, tvm::Map funcs, tvm::Map type_defs) { return IRModule::FromExpr(e, funcs, type_defs); }); -TVM_REGISTER_GLOBAL("relay._module.Module_Update") +TVM_REGISTER_GLOBAL("ir.Module_Update") .set_body_typed([](IRModule mod, IRModule from) { mod->Update(from); }); -TVM_REGISTER_GLOBAL("relay._module.Module_Import") +TVM_REGISTER_GLOBAL("ir.Module_Import") .set_body_typed([](IRModule mod, std::string path) { mod->Import(path); }); -TVM_REGISTER_GLOBAL("relay._module.Module_ImportFromStd") +TVM_REGISTER_GLOBAL("ir.Module_ImportFromStd") .set_body_typed([](IRModule mod, std::string path) { mod->ImportFromStd(path); });; diff --git a/tests/python/contrib/test_rpc_tracker.py b/tests/python/contrib/test_rpc_tracker.py index 6abfc90c352b6..11e7766f374b1 100644 --- a/tests/python/contrib/test_rpc_tracker.py +++ b/tests/python/contrib/test_rpc_tracker.py @@ -84,7 +84,7 @@ def myfunc(remote): f1 = remote2.get_function("rpc.test2.addone") assert f1(10) == 11 - except tvm.TVMError as e: + except tvm.error.TVMError as e: pass remote3 = tclient.request("abc") f1 = remote3.get_function("rpc.test2.addone") diff --git a/tests/python/frontend/mxnet/test_graph.py b/tests/python/frontend/mxnet/test_graph.py index 467d5529d0c14..1ef1f96bbb2a9 100644 --- a/tests/python/frontend/mxnet/test_graph.py +++ b/tests/python/frontend/mxnet/test_graph.py @@ -99,7 +99,7 @@ def relay_compose(F, **kwargs): z = F.split(x, **kwargs) z = F.subtract(F.add(z[0], z[2]), y) func = relay.Function(relay.analysis.free_vars(z), z) - return relay.Module.from_expr(func) + return tvm.IRModule.from_expr(func) mx_sym = mx_compose(mx, num_outputs=3, axis=1) mod, _ = relay.frontend.from_mxnet( diff --git a/tests/python/frontend/mxnet/test_qnn_ops_utils.py b/tests/python/frontend/mxnet/test_qnn_ops_utils.py index 0c7374d4d8a76..4ee5f2e3c3c32 100644 --- a/tests/python/frontend/mxnet/test_qnn_ops_utils.py +++ b/tests/python/frontend/mxnet/test_qnn_ops_utils.py @@ -34,7 +34,7 @@ def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): max_range=max_range, in_dtype=in_dtype) mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -90,7 +90,7 @@ def quantize_test_driver(out_dtype, quant_args, in_data, verify_output_data): max_range=max_range, out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) diff --git a/tests/python/relay/test_adt.py b/tests/python/relay/test_adt.py index 00c07b928b1ab..eb32aa2e58135 100644 --- a/tests/python/relay/test_adt.py +++ 
b/tests/python/relay/test_adt.py @@ -23,7 +23,7 @@ import numpy as np -mod = relay.Module() +mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) @@ -730,7 +730,7 @@ def check_tensor_array(ta_mod, ref_res, *args, dtype="float32", def test_tensor_expand_dims(): def run(dtype): x = relay.var('x') - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) expand_dims_func = p.get_var('tensor_expand_dims', dtype) tensor1 = p.get_var('tensor1', dtype) @@ -745,7 +745,7 @@ def run(dtype): def test_tensor_array_constructor(): def run(dtype): x = relay.var('x') - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) tensor_array = p.get_var('tensor_array', dtype) mod["main"] = relay.Function([x], tensor_array(x)) @@ -757,7 +757,7 @@ def run(dtype): def test_tensor_array_read(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) l = relay.var('l') i = relay.var('i') @@ -773,7 +773,7 @@ def run(dtype): def test_tensor_array_write(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v1 = relay.var('v1') v2 = relay.var('v2') @@ -793,7 +793,7 @@ def run(dtype): def test_tensor_array_stack(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) tensor_array = p.get_var('tensor_array', dtype) tensor1 = p.get_var('tensor1', dtype) @@ -815,7 +815,7 @@ def run(dtype): def test_tensor_array_unstack(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) unstack_tensor1 = p.get_var('tensor_array_unstack_tensor1', dtype) v = relay.var('v') @@ -828,7 +828,7 @@ def run(dtype): def test_tensor_take(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) take = p.get_var('tensor_take', dtype) tensor2 = p.get_var('tensor2', dtype) @@ -847,7 +847,7 @@ def run(dtype): def test_tensor_concatenate(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) concat = p.get_var('tensor_concatenate', dtype) tensor1 = p.get_var('tensor1', dtype) @@ -865,7 +865,7 @@ def run(dtype): def test_tensor_array_concat(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v1 = relay.var('v1') v2 = relay.var('v2') @@ -888,9 +888,9 @@ def run(dtype): def test_tensor_array_scatter(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) - + # tensor array v1 = relay.var('v1') v2 = relay.var('v2') @@ -938,9 +938,9 @@ def run(dtype): def test_tensor_array_split(): def run(dtype): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) - + # tensor array v1 = relay.var('v1') v2 = relay.var('v2') diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py index fe2e9e9bb82e1..3e392a8e630fc 100644 --- a/tests/python/relay/test_any.py +++ b/tests/python/relay/test_any.py @@ -37,7 +37,7 @@ def verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op): dtype = 'float32' x = relay.var('x', shape=x_shape, dtype=dtype) y = relay.var('y', shape=y_shape, dtype=dtype) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x, y], op(x, y)) x_np = np.random.uniform(size=x_np_shape).astype(dtype) y_np = np.random.uniform(size=y_np_shape).astype(dtype) @@ -62,7 +62,7 @@ def test_any_broadcast(): def verify_any_elemwise(x_shape, x_np_shape, op, np_op): dtype = 'float32' x = relay.var('x', shape=x_shape, dtype=dtype) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], op(x)) x_np = np.random.uniform(size=x_np_shape).astype(dtype) 
res_np = np_op(x_np) @@ -96,7 +96,7 @@ def check_fail(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op): def verify_any_full(x_shape, x_np_shape, relay_op, np_op, dtype='float32'): x = relay.var('x', shape=x_shape, dtype=dtype) - mod = relay.module.Module() + mod = tvm.IRModule() mod['main'] = relay.Function([x], relay.zeros_like(x)) x_np = np.random.uniform(size=x_np_shape).astype(dtype) res_np = np.zeros_like(x_np) @@ -126,7 +126,7 @@ def test_any_concat(): xx = x - relay.expr.const(3.0) yy = y * relay.expr.const(5.0) z = relay.op.concatenate([xx, yy], axis=0) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x, y], z) x_np = np.random.uniform(size=(3, 2)).astype('float32') y_np = np.random.uniform(size=(1, 2)).astype('float32') @@ -139,7 +139,7 @@ def test_any_concat(): def verify_any_reshape(x_shape, newshape, x_np_shape, out_shape): x = relay.var('x', shape=x_shape, dtype="float32") y = relay.reshape(x, newshape=newshape) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y) data = np.random.uniform(size=x_np_shape).astype('float32') for kind in ["debug", "vm"]: @@ -158,7 +158,7 @@ def test_any_reshape(): def verify_any_argwhere(x_shape, x_np_shape, dtype="bool"): x = relay.var('x', shape=x_shape, dtype=dtype) y = relay.argwhere(x) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y) data = np.random.choice([0, 1, 2, 3], size=x_np_shape).astype(dtype) expected = np.argwhere(data) @@ -186,7 +186,7 @@ def test_any_argwhere(): verify_any_argwhere(any_dims(5), (5, 5, 5, 5, 5), "int8") def verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape): - mod = relay.Module() + mod = tvm.IRModule() data = relay.var('data', shape=data_shape, dtype='float32') indices = relay.var('indices', shape=indices_shape, dtype='int32') y = relay.take(data, indices, axis=axis) @@ -212,7 +212,7 @@ def test_any_take(): verify_any_take(any_dims(2), any_dims(4), -1, (4, 5), (2, 3, 4, 5)) def verify_any_tile(dshape, reps, np_dshape, np_reps): - mod = relay.Module() + mod = tvm.IRModule() x = relay.var("x", shape=dshape, dtype="float32") y = relay.tile(x, reps=reps) mod["main"] = relay.Function([x], y) @@ -233,7 +233,7 @@ def test_any_tile(): def test_any_shape_of(): x = relay.var('x', shape=any_dims(2), dtype='float32') y = relay.shape_of(x) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y) data = np.random.uniform(size=(3, 4)).astype('float32') for kind in ["debug", "vm"]: @@ -244,7 +244,7 @@ def test_any_shape_of(): x = relay.var('x', shape=any_dims(3), dtype='float32') y0 = relay.shape_of(x) y1 = relay.take(y0, relay.const(1, 'int32')) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y1) data = np.random.uniform(size=(2, 3, 4)).astype('float32') for kind in ["debug", "vm"]: @@ -254,7 +254,7 @@ def test_any_shape_of(): def verify_any_reduce(reduce_op, data_shape, axis, exclude, keepdims, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "bool" if reduce_op == relay.all else "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = reduce_op(data, axis, keepdims, exclude) @@ -277,7 +277,7 @@ def test_any_reduce(): verify_any_reduce(relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6)) def verify_any_layout_transform(data_shape, src_layout, dst_layout, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() 
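# The other mechanical rewrite in this patch is relay.Module.from_expr(f)
# becoming tvm.IRModule.from_expr(f). For a top-level relay.Function, the two
# forms below are interchangeable ways to obtain a module whose "main" is f;
# the variable names are illustrative.
import tvm
from tvm import relay

x = relay.var("x", shape=(10,))
f = relay.Function([x], x + x)
mod_a = tvm.IRModule.from_expr(f)   # wraps f as the module's "main" function
mod_b = tvm.IRModule()
mod_b["main"] = f                   # equivalent for a top-level Function
assert isinstance(mod_a, tvm.IRModule) and isinstance(mod_b, tvm.IRModule)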
dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.layout_transform(data, src_layout, dst_layout) @@ -297,7 +297,7 @@ def test_any_layout_transform(): verify_any_layout_transform((16, 1), "CH", "C4cH", (16, 1), (4, 4, 1)) def verify_any_expand_dims(data_shape, axis, num_newaxis, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.expand_dims(data, axis=axis, num_newaxis=num_newaxis) @@ -314,7 +314,7 @@ def test_any_expand_dims(): verify_any_expand_dims(any_dims(3), -1, 2, (1, 2, 3), (1, 2, 3, 1, 1)) def verify_any_transpose(data_shape, axes, static_data_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.transpose(data, axes=axes) @@ -332,7 +332,7 @@ def test_any_transpose(): verify_any_transpose(any_dims(6), (0, 1, 3, 2, 5, 4), (11, 12, 2, 1, 9, 17)) def verify_any_squeeze(data_shape, axis, static_data_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.squeeze(data, axis=axis) @@ -349,7 +349,7 @@ def test_any_squeeze(): verify_any_squeeze((1, relay.Any(), relay.Any(), 1, relay.Any(), relay.Any()), (0, 3), (1, 12, 2, 1, 9, 17)) def test_any_reshape_like(): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=(relay.Any(), 3, 10), dtype=dtype) shape_like = relay.var('data', shape=(relay.Any(), 5, 6), dtype=dtype) @@ -366,7 +366,7 @@ def test_any_reshape_like(): def verify_any_conv2d_NCHWc(data_shape, kernel_shape, strides, padding, dilation, data_layout, kernel_layout, out_layout, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) kernel = relay.var('kernel', shape=kernel_shape, dtype=dtype) @@ -392,7 +392,7 @@ def test_any_conv2d_NCHWc(): def verify_any_pool2d(pool_type, data_shape, pool_size, strides, padding, layout, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" pool_func = relay.nn.max_pool2d if pool_type == "max" else relay.nn.avg_pool2d data = relay.var('data', shape=data_shape, dtype=dtype) @@ -414,7 +414,7 @@ def test_any_pool2d(): (3, 3), (2, 2), (1, 1), "NCHW4c", (2, 3, 220, 220, 4), (2, 3, 110, 110, 4)) def verify_any_global_pool2d(pool_type, data_shape, layout, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" pool_func = relay.nn.global_max_pool2d if pool_type == "max" else relay.nn.global_avg_pool2d data = relay.var('data', shape=data_shape, dtype=dtype) @@ -436,7 +436,7 @@ def test_any_global_pool2d(): "NCHW4c", (2, 3, 220, 220, 4), (2, 3, 1, 1, 4)) def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.split(data, indices_or_sections, axis) @@ -454,7 +454,7 @@ def test_any_split(): verify_any_split((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)]) def test_any_batch_flatten(): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=any_dims(3), dtype=dtype) y = relay.nn.batch_flatten(data) @@ -469,7 +469,7 @@ def test_any_batch_flatten(): def verify_any_dense(data_shape, weight_shape, units, 
static_data_shape, static_weight_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) weight = relay.var('weight', shape=weight_shape, dtype=dtype) @@ -488,7 +488,7 @@ def test_any_dense(): verify_any_dense(any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)) def verify_any_pad(data_shape, pad_width, static_data_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.nn.pad(data, pad_width) @@ -505,7 +505,7 @@ def test_any_pad(): verify_any_pad(any_dims(4), ((1, 0), (1, 3), (0, 2), (9, 0)), (13, 11, 3, 1)) def verify_any_softmax(data_shape, axis, static_data_shape, ref_out_shape): - mod = relay.Module() + mod = tvm.IRModule() dtype = "float32" data = relay.var('data', shape=data_shape, dtype=dtype) y = relay.nn.softmax(data, axis) @@ -525,7 +525,7 @@ def test_fused_ops(): x = relay.var('x', shape=(relay.Any(), relay.Any()), dtype='float32') y0 = x + relay.const(1.0, 'float32') y1 = y0 * relay.const(2.0, 'float32') - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y1) data = np.random.uniform(size=(5, 4)).astype('float32') for kind in ["vm"]: @@ -542,7 +542,7 @@ def test_arange_with_dynamic_shape(): y2 = relay.op.arange(y1, dtype="int32") y3 = y2 + relay.const(1, dtype="int32") data = np.random.rand(10, 5, 3).astype('float32') - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y3) for kind in ["debug", "vm"]: ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm") @@ -577,7 +577,7 @@ def _body(i, st): start = relay.var('start', shape=(), dtype='int32') body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1))) func = relay.Function([start], relay.TupleGetItem(body, 1)) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = func data = np.array(0.0, dtype='int32') ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32") diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py index 43090eea15f07..640da0bd2ebef 100644 --- a/tests/python/relay/test_backend_compile_engine.py +++ b/tests/python/relay/test_backend_compile_engine.py @@ -27,7 +27,7 @@ def get_func(shape): y = relay.add(x, x) z = relay.add(y, x) f = relay.Function([x], z) - mod = relay.Module.from_expr(f) + mod = tvm.IRModule.from_expr(f) mod = relay.transform.InferType()(mod) return mod["main"] z1 = engine.lower(get_func((10,)), "llvm") @@ -59,7 +59,7 @@ def test_compile_placeholder_bypass(): result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)]) func = relay.Function(relay.analysis.free_vars(result), result) with relay.build_config(opt_level=0): - graph, lib, params = relay.build(relay.Module.from_expr(func), 'llvm') + graph, lib, params = relay.build(tvm.IRModule.from_expr(func), 'llvm') def test_compile_injective_with_tuple(): @@ -68,7 +68,7 @@ def test_compile_injective_with_tuple(): x_transpose = relay.transpose(x) output = relay.Tuple([x_transpose, y]) func = relay.Function([x, y], output) - relay.build(relay.Module.from_expr(func), 'llvm') + relay.build(tvm.IRModule.from_expr(func), 'llvm') def test_compile_tuple_dup(): @@ -76,7 +76,7 @@ def test_compile_tuple_dup(): log = relay.log(x) output = relay.Tuple([log, log]) f = relay.Function([x], output) - relay.build(relay.Module.from_expr(f), 'llvm') + relay.build(tvm.IRModule.from_expr(f), 'llvm') def 
test_compile_full(): @@ -88,7 +88,7 @@ def test_compile_full(): tvm.expr.IntImm('int32', 64)) output = relay.full(relay.const(0, 'int32'), shape=shape, dtype='int32') f = relay.Function([], output) - mod = relay.Module.from_expr(f) + mod = tvm.IRModule.from_expr(f) mod = relay.qnn.transform.CanonicalizeOps()(mod) relay.build(mod, 'llvm') diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py index fbccb94bc6708..d5d29b645cfa9 100644 --- a/tests/python/relay/test_backend_graph_runtime.py +++ b/tests/python/relay/test_backend_graph_runtime.py @@ -21,7 +21,6 @@ from tvm.contrib import graph_runtime from tvm.relay.scope_builder import ScopeBuilder from tvm.relay.op import add -from tvm.relay.module import Module from tvm.relay.testing.config import ctx_list # @tq, @jr should we put this in testing ns? @@ -100,7 +99,7 @@ def test_with_params(): x_data = np.random.rand(10, 5).astype('float32') y_data = np.random.rand(1, 5).astype('float32') params = {"y": y_data} - graph, lib, params = relay.build(relay.Module.from_expr(func), "llvm", params=params) + graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) mod.set_input(**params) mod.set_input(x=x_data) @@ -123,7 +122,7 @@ def test_plan_memory(): z = relay.exp(z) z = relay.exp(z) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.transform.FuseOps(0)(mod) func = mod["main"] smap = relay.backend._backend.GraphPlanMemory(func) @@ -169,7 +168,7 @@ def unit_numpy(X, W): for target, ctx in ctx_list(): with relay.build_config(opt_level=2): - graph, lib, params = relay.build(relay.Module.from_expr(z), target) + graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target) m = graph_runtime.create(graph, lib, ctx) m.set_input("X", tvm.nd.array(x.astype(dtype))) m.set_input("y", tvm.nd.array(y.astype(dtype))) diff --git a/tests/python/relay/test_backend_interpreter.py b/tests/python/relay/test_backend_interpreter.py index 11a9e05b3e7f1..faca6c0ea615f 100644 --- a/tests/python/relay/test_backend_interpreter.py +++ b/tests/python/relay/test_backend_interpreter.py @@ -92,7 +92,7 @@ def test_subtract(): def test_simple_loop(): - mod = relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') sb = ScopeBuilder() @@ -109,7 +109,7 @@ def test_simple_loop(): def test_loop(): - mod = relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') accum = relay.var('accum', shape=[], dtype='int32') @@ -128,7 +128,7 @@ def test_loop(): def test_ref(): - mod = relay.Module() + mod = tvm.IRModule() three_with_ref = relay.GlobalVar('three_with_ref') i = relay.Var('i') iv = relay.Var('iv') @@ -167,7 +167,7 @@ def test_kwargs_params(): def test_function_taking_adt_ref_tuple(): - mod = relay.Module() + mod = tvm.IRModule() prelude = relay.prelude.Prelude(mod) intrp = create_executor("debug", mod) @@ -211,7 +211,7 @@ def test_tuple_passing(): relay.ty.TensorType((), 'int64')])) fn = relay.Function([x], relay.expr.TupleGetItem(x, 0)) - mod = relay.Module({}) + mod = tvm.IRModule({}) gv = relay.GlobalVar('main') mod[gv] = fn mod = relay.transform.InferType()(mod) diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py index 165e00d9c7028..2af4a2030f4fa 100644 --- 
a/tests/python/relay/test_cpp_build_module.py +++ b/tests/python/relay/test_cpp_build_module.py @@ -43,7 +43,7 @@ def test_basic_build(): targets = { tvm.expr.IntImm("int32", ctx.device_type): tgt } - g_json, mmod, params = relay.build(relay.Module.from_expr(func), targets, "llvm", params=params) + g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), targets, "llvm", params=params) # test rt = tvm.contrib.graph_runtime.create(g_json, mmod, ctx) @@ -115,7 +115,7 @@ def check_conversion(tgt, ctx): # build with relay.build_config(opt_level=1): - g_json, mmod, params = relay.build(relay.Module.from_expr(func), tgt) + g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), tgt) # test rt = tvm.contrib.graph_runtime.create(g_json, mmod, ctx) diff --git a/tests/python/relay/test_error_reporting.py b/tests/python/relay/test_error_reporting.py index 74e651884803f..aef93ad9f4dc9 100644 --- a/tests/python/relay/test_error_reporting.py +++ b/tests/python/relay/test_error_reporting.py @@ -19,12 +19,12 @@ def check_type_err(expr, msg): try: - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = relay.transform.InferType()(mod) entry = mod["main"] expr = entry if isinstance(expr, relay.Function) else entry.body assert False - except tvm.TVMError as err: + except tvm.error.TVMError as err: assert msg in str(err) def test_wellformed(): diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py index 13193fc87e071..b70054349247e 100644 --- a/tests/python/relay/test_external_codegen.py +++ b/tests/python/relay/test_external_codegen.py @@ -125,7 +125,7 @@ def test_multi_node_subgraph(): r = relay.concatenate((call0, call1, q2), axis=0) f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r) - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f mod = relay.transform.InferType()(mod) @@ -154,7 +154,7 @@ def test_extern_gcc_single_op(): f = relay.Function([x0, y0], z) f = set_external_func_attr(f, "ccompiler", "ccompiler_0") call = relay.Call(f, [x, y]) - mod = relay.Module.from_expr(call) + mod = tvm.IRModule.from_expr(call) x_data = np.random.rand(8, 8).astype('float32') y_data = np.random.rand(8, 8).astype('float32') @@ -188,7 +188,7 @@ def test_extern_gcc(): sub = relay.Function([x2, y2], sub) sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0") call_sub = relay.Call(sub, [call_mul, call_add]) - mod = relay.Module.from_expr(call_sub) + mod = tvm.IRModule.from_expr(call_sub) x_data = np.random.rand(2, 2).astype('float32') y_data = np.random.rand(2, 2).astype('float32') @@ -223,12 +223,12 @@ def test_extern_dnnl(): out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2) f = relay.Function([data1, weight1, weight2], out) - ref_mod = relay.Module() + ref_mod = tvm.IRModule() ref_mod['main'] = f f = set_external_func_attr(f, "dnnl", "dnnl_0") call = relay.Call(f, [data0, weight0, weight0]) - mod = relay.Module.from_expr(call) + mod = tvm.IRModule.from_expr(call) i_data = np.random.uniform(0, 1, ishape).astype(dtype) w_data = np.random.uniform(0, 1, w1shape).astype(dtype) diff --git a/tests/python/relay/test_external_runtime.py b/tests/python/relay/test_external_runtime.py index 5fc03df8a9a08..7e8c832174513 100644 --- a/tests/python/relay/test_external_runtime.py +++ b/tests/python/relay/test_external_runtime.py @@ -330,7 +330,7 @@ def get_synthetic_lib(): sub2 = relay.subtract(add2, w7) ret = relay.concatenate((subgraph0_ret, subgraph1_ret, sub2), 0) func = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, 
w7], ret) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) _, lib, _ = relay.build(mod, "llvm") return lib diff --git a/tests/python/relay/test_feature.py b/tests/python/relay/test_feature.py index 64eda9d04e7c4..9066e85cf6da1 100644 --- a/tests/python/relay/test_feature.py +++ b/tests/python/relay/test_feature.py @@ -50,7 +50,7 @@ def test_ad(): x = relay.var("x", t) func = relay.Function([x], x + x) func = run_infer_type(func) - mod = relay.Module.from_expr(gradient(func)) + mod = tvm.IRModule.from_expr(gradient(func)) mod = relay.transform.InferType()(mod) back_func = mod["main"] feats = detect_feature(back_func) diff --git a/tests/python/relay/test_ir_module.py b/tests/python/relay/test_ir_module.py index 72a92c8697fc4..90381edd48d82 100644 --- a/tests/python/relay/test_ir_module.py +++ b/tests/python/relay/test_ir_module.py @@ -17,7 +17,6 @@ """Tests for module functionality.""" import tvm from tvm import relay -from tvm.relay import Module from tvm.relay.prelude import Prelude from tvm.relay.testing import add_nat_definitions @@ -30,10 +29,10 @@ def adt_list(p): def test_constructor_tag_round_trip(): - mod1 = Module() + mod1 = tvm.IRModule() p1 = Prelude(mod1) add_nat_definitions(p1) - mod2 = Module() + mod2 = tvm.IRModule() p2 = Prelude(mod2) add_nat_definitions(p2) diff --git a/tests/python/relay/test_ir_nodes.py b/tests/python/relay/test_ir_nodes.py index 2c6eddd462ed9..1a49d063d13f5 100644 --- a/tests/python/relay/test_ir_nodes.py +++ b/tests/python/relay/test_ir_nodes.py @@ -31,7 +31,7 @@ def check_json_roundtrip(node): def test_bad_constructor(): try: x = relay.ty.TensorType("xx", "xx") - except tvm.TVMError: + except tvm.error.TVMError: pass diff --git a/tests/python/relay/test_ir_parser.py b/tests/python/relay/test_ir_parser.py index a871ae1443874..261cbb97c4af6 100644 --- a/tests/python/relay/test_ir_parser.py +++ b/tests/python/relay/test_ir_parser.py @@ -355,7 +355,7 @@ def @id(%x: int32) -> int32 { %x } """) - assert isinstance(id_defn, relay.Module) + assert isinstance(id_defn, tvm.IRModule) def test_recursive_call(): @@ -365,7 +365,7 @@ def @id(%x: int32) -> int32 { @id(%x) } """) - assert isinstance(id_defn, relay.Module) + assert isinstance(id_defn, tvm.IRModule) def test_ifelse(): @@ -639,7 +639,7 @@ def test_tuple_type(): def test_adt_defn(): - mod = relay.Module() + mod = tvm.IRModule() glob_typ_var = relay.GlobalTypeVar("Ayy") prog = relay.TypeData( @@ -656,7 +656,7 @@ def test_adt_defn(): def test_empty_adt_defn(): - mod = relay.Module() + mod = tvm.IRModule() glob_typ_var = relay.GlobalTypeVar("Ayy") prog = relay.TypeData(glob_typ_var, [], []) @@ -670,7 +670,7 @@ def test_empty_adt_defn(): def test_multiple_cons_defn(): - mod = relay.Module() + mod = tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") @@ -696,7 +696,7 @@ def test_multiple_type_param_defn(): relay.Constructor("Left", [typ_var_a], glob_typ_var), relay.Constructor("Right", [typ_var_b], glob_typ_var), ]) - mod = relay.Module() + mod = tvm.IRModule() mod[glob_typ_var] = prog assert parses_as( """ @@ -713,7 +713,7 @@ def test_match(): # pair each match keyword with whether it specifies a complete match or not match_keywords = [("match", True), ("match?", False)] for (match_keyword, is_complete) in match_keywords: - mod = relay.Module() + mod = tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") @@ -773,7 +773,7 @@ def @length[A](%%xs: List[A]) -> int32 { def test_adt_cons_expr(): - mod = relay.Module() + mod = 
tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") @@ -853,7 +853,7 @@ def @id[A](%x: A) -> A { x } def test_extern_adt_defn(): # TODO(weberlo): update this test once extern is implemented - mod = relay.Module() + mod = tvm.IRModule() extern_var = relay.GlobalTypeVar("T") typ_var = relay.TypeVar("A") diff --git a/tests/python/relay/test_ir_text_printer.py b/tests/python/relay/test_ir_text_printer.py index e84de67651778..e2a0bdc205d66 100644 --- a/tests/python/relay/test_ir_text_printer.py +++ b/tests/python/relay/test_ir_text_printer.py @@ -58,7 +58,7 @@ def test_env(): z = relay.add(x, y) z = relay.add(z, z) f = relay.Function([x, y], z) - env = relay.Module() + env = tvm.IRModule() env["myf"] = f text = astext(env) assert "def @myf" in text diff --git a/tests/python/relay/test_ir_well_formed.py b/tests/python/relay/test_ir_well_formed.py index bee0a021ac5b6..fbbfbd23a6c2d 100644 --- a/tests/python/relay/test_ir_well_formed.py +++ b/tests/python/relay/test_ir_well_formed.py @@ -50,7 +50,7 @@ def test_tuple_get_item(): def test_adt(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) x = relay.Var("x") some_case = relay.Clause(relay.PatternConstructor(p.some, diff --git a/tests/python/relay/test_memory_alloc.py b/tests/python/relay/test_memory_alloc.py index 5c1bbc72bf226..18b1500dfc3cc 100644 --- a/tests/python/relay/test_memory_alloc.py +++ b/tests/python/relay/test_memory_alloc.py @@ -20,7 +20,7 @@ from tvm.relay import memory_alloc def check_vm_alloc(func, check_fn): - mod = relay.Module() + mod = tvm.IRModule() mod['main'] = func ex = relay.create_executor('vm', mod) args = [] @@ -37,11 +37,11 @@ def storage_type(mod): return relay.TypeCall(mod.get_global_type_var("Storage"), []) def test_tyck_alloc_storage(): - mod = relay.Module() + mod = tvm.IRModule() mod.import_from_std("core.rly") def test_tyck_alloc_tensor(): - mod = relay.Module() + mod = tvm.IRModule() mod.import_from_std("core.rly") sto = relay.Var("x", storage_type(mod)) sh = relay.const(np.array([1, 2]), dtype="int64") diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index 6a6f21d9241fd..c3033e9181cb2 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -55,7 +55,7 @@ def test_checkpoint_alpha_equal(): with transform.PassContext(opt_level=3): passes = [transform.PartialEvaluate(), transform.DeadCodeElimination(inline_once=True)] - mod = transform.Sequential(passes)(relay.Module.from_expr(df)) + mod = transform.Sequential(passes)(tvm.IRModule.from_expr(df)) df = mod["main"] df_parsed = relay.parser.fromtext( @@ -111,7 +111,7 @@ def test_checkpoint_alpha_equal_tuple(): with transform.PassContext(opt_level=3): passes = [transform.PartialEvaluate(), transform.DeadCodeElimination(inline_once=True)] - mod = transform.Sequential(passes)(relay.Module.from_expr(df)) + mod = transform.Sequential(passes)(tvm.IRModule.from_expr(df)) df = mod["main"] df_parsed = relay.parser.fromtext( @@ -424,7 +424,7 @@ def _get_oshape(indices_shape, depth, axis): else: oshape.append(indices_shape[indices_index]) indices_index += 1 - + return oshape def _verify(indices_shape, depth, on_value, off_value, axis, dtype): @@ -443,7 +443,7 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): intrp = relay.create_executor(kind, ctx=ctx, target=target) out_relay = intrp.evaluate(func)(indices_np) tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) - + _verify((3,), 3, 1, 0, -1, "int32") _verify((3,), 3, 
1.0, 0.0, -1, "float32") _verify((2, 2), 5, 2, -2, 0, "int32") diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 0acd83639363e..ea729618097e7 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -237,7 +237,7 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, groups=groups, **attrs) func = relay.Function([x, w], y) - mod = tvm.relay.Module() + mod = tvm.IRModule() mod["main"] = func test_schedule='{"i": ["llvm -device=arm_cpu", "topi_nn_depthwise_conv2d_nchw", \ @@ -276,7 +276,7 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, dshape = (1, 512, 32, 32) kshape = (512, 1, 3, 3) compile_test_conv2d_arm_cpu("float32", "float32", 1, dshape, kshape, - padding=(1, 1), channels=512, + padding=(1, 1), channels=512, groups=512, kernel_size=(3 ,3)) # CUDA is disabled for 'direct' schedule: @@ -344,7 +344,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, groups=groups, **attrs) func = relay.Function([x, w], y) - mod = relay.Module() + mod = tvm.IRModule() mod['main'] = func mod = relay.transform.InferType()(mod) diff --git a/tests/python/relay/test_op_qnn_add.py b/tests/python/relay/test_op_qnn_add.py index 033a1041b5797..e1f54ed4b78c9 100644 --- a/tests/python/relay/test_op_qnn_add.py +++ b/tests/python/relay/test_op_qnn_add.py @@ -35,7 +35,7 @@ def test_tflite_same_io_qnn_params(): output_zero_point=relay.const(127, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -73,7 +73,7 @@ def test_tflite_different_io_qnn_params(): output_zero_point=relay.const(128, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -111,7 +111,7 @@ def test_saturation(): output_zero_point=relay.const(0, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -133,7 +133,7 @@ def test_saturation(): output_zero_point=relay.const(0, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -155,7 +155,7 @@ def test_saturation(): output_zero_point=relay.const(0, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -177,7 +177,7 @@ def test_saturation(): output_zero_point=relay.const(0, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] diff --git a/tests/python/relay/test_op_qnn_concatenate.py b/tests/python/relay/test_op_qnn_concatenate.py index ed496941cf8e4..35c2f971a7911 100644 --- a/tests/python/relay/test_op_qnn_concatenate.py +++ b/tests/python/relay/test_op_qnn_concatenate.py @@ -40,7 +40,7 @@ def test_same_io_qnn_params(): axis=axis) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -71,7 +71,7 @@ def test_different_io_qnn_params(): axis=axis) 
func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -102,7 +102,7 @@ def test_few_same_io_qnn_params(): axis=axis) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -133,7 +133,7 @@ def test_same_i_qnn_params(): axis=axis) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py index ced12c843563f..264475ca34324 100644 --- a/tests/python/relay/test_op_qnn_conv2d.py +++ b/tests/python/relay/test_op_qnn_conv2d.py @@ -98,7 +98,7 @@ def get_qnn_func(data, kernel_layout=kernel_layout) mod = relay.Function(relay.analysis.free_vars(func), func) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) return mod def get_funcs(data_shape, @@ -138,7 +138,7 @@ def get_funcs(data_shape, groups, channels) ref_func = run_infer_type(ref_func) - ref_func = relay.Module.from_expr(ref_func) + ref_func = tvm.IRModule.from_expr(ref_func) qnn_func = get_qnn_func(data, kernel, input_zero_point, @@ -759,7 +759,7 @@ def test_broadcast_layout(): func = relay.add(bias, func) func = relay.add(func, bias) func = relay.Function(relay.analysis.free_vars(func), func) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512") @@ -896,7 +896,7 @@ def test_per_channel_kernel_scale(): out_dtype="int32") mod = relay.Function(relay.analysis.free_vars(func), func) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) if __name__ == "__main__": test_no_zero_point() diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py index 11987a55b855d..0e7c284653f4e 100644 --- a/tests/python/relay/test_op_qnn_dense.py +++ b/tests/python/relay/test_op_qnn_dense.py @@ -201,7 +201,7 @@ def qnn_dense_driver(test_configuration): expected_out_dtype = requantize_config['out_dtype'] mod = relay.Function(relay.analysis.free_vars(mod), mod) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) with relay.build_config(opt_level=2): graph, lib, params = relay.build(mod, "llvm", params=None) diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py index 4510c570c9ffe..b1965c97ad0d0 100644 --- a/tests/python/relay/test_op_qnn_dequantize.py +++ b/tests/python/relay/test_op_qnn_dequantize.py @@ -28,7 +28,7 @@ def quantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): quantized_output = relay.qnn.op.dequantize(input_data, input_scale=input_scale, input_zero_point=input_zero_point) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) diff --git a/tests/python/relay/test_op_qnn_mul.py b/tests/python/relay/test_op_qnn_mul.py index 16f0be78ff0fa..959a02a976adc 100644 --- a/tests/python/relay/test_op_qnn_mul.py +++ 
b/tests/python/relay/test_op_qnn_mul.py @@ -52,7 +52,7 @@ def test_tflite_same_io_qnn_params(): output_zero_point=relay.const(output_zero_point, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -103,7 +103,7 @@ def test_tflite_different_io_qnn_params(): output_zero_point=relay.const(output_zero_point, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -149,7 +149,7 @@ def test_saturation(): output_zero_point=relay.const(output_zero_point, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -180,7 +180,7 @@ def test_saturation(): output_zero_point=relay.const(output_zero_point, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] @@ -212,7 +212,7 @@ def test_saturation(): output_zero_point=relay.const(output_zero_point, 'int32')) func = relay.Function([x, y], z) - mod = relay.Module.from_expr(func) + mod = tvm.IRModule.from_expr(func) mod = relay.qnn.transform.CanonicalizeOps()(mod) func = mod["main"] diff --git a/tests/python/relay/test_op_qnn_quantize.py b/tests/python/relay/test_op_qnn_quantize.py index 45caedaf4a443..bdc7bc04d6daf 100644 --- a/tests/python/relay/test_op_qnn_quantize.py +++ b/tests/python/relay/test_op_qnn_quantize.py @@ -30,7 +30,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_ axis=axis, out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py index b682498cb10b2..8af778160ccb7 100644 --- a/tests/python/relay/test_op_qnn_requantize.py +++ b/tests/python/relay/test_op_qnn_requantize.py @@ -59,7 +59,7 @@ def get_mod(data_shape, data_dtype, out_dtype, input_scale, output_scale, out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(mod), mod) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) return mod def test_same_scale(): diff --git a/tests/python/relay/test_pass_alpha_equal.py b/tests/python/relay/test_pass_alpha_equal.py index 6daffb5864356..63e3ab86d7eba 100644 --- a/tests/python/relay/test_pass_alpha_equal.py +++ b/tests/python/relay/test_pass_alpha_equal.py @@ -515,7 +515,7 @@ def test_if_alpha_equal(): def test_constructor_alpha_equal(): # smoke test: it should be pointer equality - mod = relay.Module() + mod = tvm.IRModule() p = relay.prelude.Prelude(mod) assert alpha_equal(p.nil, p.nil) @@ -524,7 +524,7 @@ def test_constructor_alpha_equal(): def test_match_alpha_equal(): - mod = relay.Module() + mod = tvm.IRModule() p = relay.prelude.Prelude(mod) x = relay.Var('x') diff --git a/tests/python/relay/test_pass_alter_op_layout.py b/tests/python/relay/test_pass_alter_op_layout.py index b01e1bbe05044..2ec3f282a6c44 100644 --- a/tests/python/relay/test_pass_alter_op_layout.py +++ 
b/tests/python/relay/test_pass_alter_op_layout.py @@ -23,7 +23,7 @@ def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) @@ -1005,7 +1005,7 @@ def before(): kernel_size=(3, 3), padding=(1, 1)) y = relay.nn.relu(y) - mod = relay.Module() + mod = tvm.IRModule() foo = relay.GlobalVar('foo') mod[foo] = relay.Function([x, weight], y) mod["main"] = relay.Function([x, weight], foo(x, weight)) @@ -1024,7 +1024,7 @@ def expected(): kernel_size=(3, 3), padding=(1, 1)) y = relay.nn.relu(y) - mod = relay.Module() + mod = tvm.IRModule() foo = relay.GlobalVar('foo') mod[foo] = relay.Function([x, weight], y) mod["main"] = relay.Function([x, weight], foo(x, weight)) diff --git a/tests/python/relay/test_pass_annotation.py b/tests/python/relay/test_pass_annotation.py index 69ce4c5211bed..3e7d916c96fa4 100644 --- a/tests/python/relay/test_pass_annotation.py +++ b/tests/python/relay/test_pass_annotation.py @@ -27,7 +27,7 @@ def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) diff --git a/tests/python/relay/test_pass_canonicalize_cast.py b/tests/python/relay/test_pass_canonicalize_cast.py index b72ded21ef521..672b4b192995c 100644 --- a/tests/python/relay/test_pass_canonicalize_cast.py +++ b/tests/python/relay/test_pass_canonicalize_cast.py @@ -17,7 +17,6 @@ import tvm import tvm.relay as relay -import tvm.relay.module as _module import tvm.relay.transform as _transform @@ -53,7 +52,7 @@ def check(shape): bias1 = relay.var("bias1", shape=(16, 1, 1), dtype="int32") bias2 = relay.var("bias2", shape=(16, 1, 1), dtype="int32") y = before(data, conv_weight, bias1, bias2) - mod = _module.Module.from_expr(y) + mod = tvm.IRModule.from_expr(y) seq = _transform.Sequential([_transform.InferType(), _transform.CanonicalizeCast(), _transform.InferType()]) with _transform.PassContext(opt_level=3): diff --git a/tests/python/relay/test_pass_check_kind.py b/tests/python/relay/test_pass_check_kind.py index 511ba6c8bfa96..a6655e66afb54 100644 --- a/tests/python/relay/test_pass_check_kind.py +++ b/tests/python/relay/test_pass_check_kind.py @@ -96,13 +96,13 @@ def test_global_typevar_kind(): def test_typecall_kind(): gtv = relay.GlobalTypeVar('gtv') - mod = relay.Module() + mod = tvm.IRModule() data = relay.TypeData(gtv, [], []) mod[gtv] = data empty_call = relay.TypeCall(gtv, []) assert check_kind(empty_call, mod) == relay.TypeKind.Type - new_mod = relay.Module() + new_mod = tvm.IRModule() tv = relay.TypeVar('tv') new_data = relay.TypeData(gtv, [tv], []) new_mod[gtv] = new_data @@ -165,7 +165,7 @@ def test_typecall_invalid_callee(): @pytest.mark.xfail(raises=tvm.error.TVMError) def test_typecall_invalid_args(): # args must all be type kind - mod = relay.Module() + mod = tvm.IRModule() gtv = relay.GlobalTypeVar('v1') data = relay.TypeData(gtv, [], []) mod[gtv] = data @@ -175,7 +175,7 @@ def test_typecall_invalid_args(): @pytest.mark.xfail(raises=tvm.error.TVMError) def test_typecall_invalid_num_args(): - mod = relay.Module() + mod = tvm.IRModule() gtv = relay.GlobalTypeVar('v1') tv = relay.TypeVar('tv') data = relay.TypeData(gtv, [tv], []) diff --git a/tests/python/relay/test_pass_combine_parallel_conv2d.py 
b/tests/python/relay/test_pass_combine_parallel_conv2d.py index 599b308b21367..acfa55f66e111 100644 --- a/tests/python/relay/test_pass_combine_parallel_conv2d.py +++ b/tests/python/relay/test_pass_combine_parallel_conv2d.py @@ -19,13 +19,13 @@ def run_combine_parallel(expr, min_num_branches=3): - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = transform.CombineParallelConv2D(min_num_branches)(mod) return mod["main"] def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) return mod["main"] diff --git a/tests/python/relay/test_pass_combine_parallel_dense.py b/tests/python/relay/test_pass_combine_parallel_dense.py index 070ab8658b88d..5e1a130b4d0f3 100644 --- a/tests/python/relay/test_pass_combine_parallel_dense.py +++ b/tests/python/relay/test_pass_combine_parallel_dense.py @@ -19,13 +19,13 @@ def run_combine_parallel(expr, min_num_branches=3): - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = transform.CombineParallelDense(min_num_branches)(mod) return mod["main"] def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) return mod["main"] diff --git a/tests/python/relay/test_pass_convert_op_layout.py b/tests/python/relay/test_pass_convert_op_layout.py index dfd745164069e..4b80d6ca120d3 100644 --- a/tests/python/relay/test_pass_convert_op_layout.py +++ b/tests/python/relay/test_pass_convert_op_layout.py @@ -24,7 +24,7 @@ def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) diff --git a/tests/python/relay/test_pass_dead_code_elimination.py b/tests/python/relay/test_pass_dead_code_elimination.py index 89bae1f71b47c..3f1ec9efd5e1a 100644 --- a/tests/python/relay/test_pass_dead_code_elimination.py +++ b/tests/python/relay/test_pass_dead_code_elimination.py @@ -47,7 +47,7 @@ def __init__(self): def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/tests/python/relay/test_pass_eliminate_common_subexpr.py b/tests/python/relay/test_pass_eliminate_common_subexpr.py index 09ea7044daf5f..ba4eec5c9cfac 100644 --- a/tests/python/relay/test_pass_eliminate_common_subexpr.py +++ b/tests/python/relay/test_pass_eliminate_common_subexpr.py @@ -22,7 +22,7 @@ def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/tests/python/relay/test_pass_fold_constant.py b/tests/python/relay/test_pass_fold_constant.py index ca901b16b8423..08834f14e8512 100644 --- a/tests/python/relay/test_pass_fold_constant.py +++ b/tests/python/relay/test_pass_fold_constant.py @@ -25,7 +25,7 @@ def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) 
else entry.body diff --git a/tests/python/relay/test_pass_fold_scale_axis.py b/tests/python/relay/test_pass_fold_scale_axis.py index d6f471bef04a2..78f9c5ad310e3 100644 --- a/tests/python/relay/test_pass_fold_scale_axis.py +++ b/tests/python/relay/test_pass_fold_scale_axis.py @@ -25,7 +25,7 @@ def _get_positive_scale(size): def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/tests/python/relay/test_pass_fuse_ops.py b/tests/python/relay/test_pass_fuse_ops.py index 7ec21eab12df1..18916f758a6c7 100644 --- a/tests/python/relay/test_pass_fuse_ops.py +++ b/tests/python/relay/test_pass_fuse_ops.py @@ -353,8 +353,8 @@ def expected(p0): dshape = (1, 16, 64, 64) x = relay.var("x", shape=dshape) orig = before(x) - fuse0(relay.Module.from_expr(orig)) - m = fuse2(relay.Module.from_expr(orig)) + fuse0(tvm.IRModule.from_expr(orig)) + m = fuse2(tvm.IRModule.from_expr(orig)) relay.build(m, 'llvm') after = run_opt_pass(expected(x), transform.InferType()) assert relay.analysis.alpha_equal(m["main"], after) @@ -408,8 +408,8 @@ def expected(dshape): dshape = (1, 16, 64, 64) x = relay.var("x", shape=dshape) orig = before(x) - fuse0(relay.Module.from_expr(orig)) - m = fuse2(relay.Module.from_expr(orig)) + fuse0(tvm.IRModule.from_expr(orig)) + m = fuse2(tvm.IRModule.from_expr(orig)) relay.build(m, 'llvm') after = run_opt_pass(expected(dshape), transform.InferType()) assert relay.analysis.alpha_equal(m["main"], after) @@ -475,8 +475,8 @@ def expected(dshape): dshape = (1, 16, 64, 64) orig = before(dshape) - fuse0(relay.Module.from_expr(orig)) - m = fuse2(relay.Module.from_expr(orig)) + fuse0(tvm.IRModule.from_expr(orig)) + m = fuse2(tvm.IRModule.from_expr(orig)) relay.build(m, 'llvm') after = run_opt_pass(expected(dshape), transform.InferType()) assert relay.analysis.alpha_equal(m["main"], after) @@ -519,7 +519,7 @@ def before(): y = relay.add(x, relay.const(1, "float32")) z = relay.exp(y) w = relay.squeeze(z) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], w) return mod @@ -531,7 +531,7 @@ def expected(): f1 = relay.Function([x], w) x = relay.var("x", shape=(10, 20)) y = relay.Call(f1, [x]) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], y) return mod @@ -548,7 +548,7 @@ def test_split(): a = relay.TupleGetItem(y, 0) b = relay.TupleGetItem(y, 1) c = relay.TupleGetItem(y, 2) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = relay.Function([x], a + relay.RefRead(relay.RefCreate(b)) + c) mod = transform.FuseOps()(mod) diff --git a/tests/python/relay/test_pass_gradient.py b/tests/python/relay/test_pass_gradient.py index 8e4b7010de307..6c2ea8ffa3b36 100644 --- a/tests/python/relay/test_pass_gradient.py +++ b/tests/python/relay/test_pass_gradient.py @@ -188,7 +188,7 @@ def test_tuple(): def test_pow(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) shape = (10, 10) diff --git a/tests/python/relay/test_pass_lambda_lift.py b/tests/python/relay/test_pass_lambda_lift.py index 550c85d4476b5..a66c4c7d745ae 100644 --- a/tests/python/relay/test_pass_lambda_lift.py +++ b/tests/python/relay/test_pass_lambda_lift.py @@ -22,7 +22,7 @@ from tvm.relay import transform def test_basic(): - mod = relay.Module() + mod = tvm.IRModule() x2 = relay.var('x2', shape=(10, 5)) y2 = relay.var('y2', 
shape=(1, 5)) level2_func = relay.Function([x2, y2], relay.op.add(x2, y2)) @@ -36,7 +36,7 @@ def test_basic(): assert len(new_mod.functions) == 2 def test_closure(): - mod = relay.Module() + mod = tvm.IRModule() x = relay.var('x', shape=(2,)) y = relay.var('y', shape=(2,)) @@ -47,9 +47,9 @@ def test_closure(): new_mod = transform.LambdaLift()(mod) assert len(new_mod.functions) == 3 - + def test_recursive(): - mod = relay.Module() + mod = tvm.IRModule() x = relay.var('x', shape=(2,)) i = relay.var('i', shape=(), dtype='int32') diff --git a/tests/python/relay/test_pass_legalize.py b/tests/python/relay/test_pass_legalize.py index e38c1aaa7a0eb..e4e16c002abf8 100644 --- a/tests/python/relay/test_pass_legalize.py +++ b/tests/python/relay/test_pass_legalize.py @@ -26,7 +26,7 @@ def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) diff --git a/tests/python/relay/test_pass_mac_count.py b/tests/python/relay/test_pass_mac_count.py index 0ad1e3abe7595..5ce0e41cfbac0 100644 --- a/tests/python/relay/test_pass_mac_count.py +++ b/tests/python/relay/test_pass_mac_count.py @@ -23,7 +23,7 @@ def run_opt_pass(expr, opt_pass): assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py index 9a759c24bfefc..a13e5e93ea9c2 100644 --- a/tests/python/relay/test_pass_manager.py +++ b/tests/python/relay/test_pass_manager.py @@ -79,9 +79,9 @@ class OptTester(): """A helper class for testing the pass manager.""" def __init__(self, mod): - if not isinstance(mod, relay.Module): + if not isinstance(mod, tvm.IRModule): raise TypeError("mod is expected to be the type of " - "relay.Module") + "tvm.IRModule") self.mod = mod def analysis(self): @@ -91,10 +91,10 @@ def analysis(self): @staticmethod def transform(node, ctx=None): """Perform optimization on node.""" - if isinstance(node, relay.Module): + if isinstance(node, tvm.IRModule): # Add a function to the module and return an updated module. gv, func = get_var_func() - mod = relay.Module({gv: func}) + mod = tvm.IRModule({gv: func}) mod.update(node) return mod if isinstance(node, relay.Function): @@ -121,7 +121,7 @@ def test_module_pass(): y = relay.var("y", tp) v_add = relay.GlobalVar("myAdd") func = relay.Function([x, y], x + y) - mod = relay.Module({v_add: func}) + mod = tvm.IRModule({v_add: func}) pass_name = "module_pass_test" opt_level = 0 @@ -153,7 +153,7 @@ def test_pass_run(): assert pass_name in str(module_pass) updated_mod = module_pass(mod) - assert isinstance(updated_mod, relay.Module) + assert isinstance(updated_mod, tvm.IRModule) # Check the abs function in the updated module. 
v_abs, myabs = get_var_func() @@ -206,10 +206,10 @@ def transform_function(self, func, mod, ctx): fpass = TestReplaceFunc(f1) assert fpass.info.opt_level == 1 assert fpass.info.name == "TestReplaceFunc" - mod = relay.Module.from_expr(f2) + mod = tvm.IRModule.from_expr(f2) mod = fpass(mod) # wrap in expr - mod2 = relay.Module.from_expr(f1) + mod2 = tvm.IRModule.from_expr(f1) assert relay.alpha_equal(mod["main"], mod2["main"]) @@ -220,7 +220,7 @@ def test_function_pass(): x = relay.var("x", tp) v_log = relay.GlobalVar("myLog") log = relay.Function([x], relay.log(x)) - mod = relay.Module({v_log: log}) + mod = tvm.IRModule({v_log: log}) pass_name = "function_pass_test" opt_level = 1 @@ -256,7 +256,7 @@ def test_pass_run(): assert pass_name in str(function_pass) updated_mod = function_pass(mod) - assert isinstance(updated_mod, relay.Module) + assert isinstance(updated_mod, tvm.IRModule) # Check the log function in the updated module. new_v_log = updated_mod.get_global_var(v_log.name_hint) @@ -297,8 +297,8 @@ def transform_module(self, mod, ctx): return mod x = relay.var("x", shape=(10, 20)) - m1 = relay.Module.from_expr(relay.Function([x], x)) - m2 = relay.Module.from_expr(relay.Function([x], relay.log(x))) + m1 = tvm.IRModule.from_expr(relay.Function([x], x)) + m2 = tvm.IRModule.from_expr(relay.Function([x], relay.log(x))) fpass = TestPipeline(m2, replace=True) assert fpass.info.name == "TestPipeline" mod3 = fpass(m1) @@ -326,7 +326,7 @@ def test_sequential_pass(): v_log = relay.GlobalVar("myLog") log = relay.Function([z], relay.log(z)) - mod = relay.Module({v_sub: sub, v_log: log}) + mod = tvm.IRModule({v_sub: sub, v_log: log}) def get_ref_log(): ref_log = relay.Function([x], relay.log(relay.add(x, x))) @@ -408,7 +408,7 @@ def test_only_function_pass(): def test_multiple_passes(): # Reset the current module since mod has been polluted by the previous # function pass. 
- mod = relay.Module({v_sub: sub, v_log: log}) + mod = tvm.IRModule({v_sub: sub, v_log: log}) passes = [module_pass, function_pass] sequential = _transform.Sequential(opt_level=1, passes=passes) required = ["mod_transform", "func_transform"] @@ -488,7 +488,7 @@ def expected(): relay.transform.AlterOpLayout() ]) - mod = relay.Module({"main": before()}) + mod = tvm.IRModule({"main": before()}) with relay.build_config(opt_level=3): with tvm.target.create("llvm"): mod = seq(mod) @@ -513,7 +513,7 @@ def test_print_ir(capfd): relay.transform.DeadCodeElimination() ]) - mod = relay.Module({"main": func}) + mod = tvm.IRModule({"main": func}) with relay.build_config(opt_level=3): mod = seq(mod) @@ -545,7 +545,7 @@ def test_print_debug_callback(): ]) assert __TRACE_COUNTER__ == 0 - mod = relay.Module({"main": func}) + mod = tvm.IRModule({"main": func}) with relay.build_config(opt_level=3, trace=_tracer): mod = seq(mod) diff --git a/tests/python/relay/test_pass_partial_eval.py b/tests/python/relay/test_pass_partial_eval.py index cf4f8f6cee74d..2bec98c173d94 100644 --- a/tests/python/relay/test_pass_partial_eval.py +++ b/tests/python/relay/test_pass_partial_eval.py @@ -22,7 +22,7 @@ from tvm.relay.prelude import Prelude from tvm.relay import op, create_executor, transform from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate -from tvm.relay import TensorType, Tuple, If, Module, Clause, PatternConstructor, PatternVar, Match +from tvm.relay import TensorType, Tuple, If, Clause, PatternConstructor, PatternVar, Match from tvm.relay import GlobalVar, Call from tvm.relay.transform import gradient from tvm.relay.testing import add_nat_definitions, make_nat_expr, run_infer_type @@ -37,7 +37,7 @@ def check_eval(expr, expected_result, mod=None, rtol=1e-07): def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) @@ -171,7 +171,7 @@ def test_function_invalidate(): def test_head_cons(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) hd = p.hd t = TypeVar("t") @@ -183,7 +183,7 @@ def test_head_cons(): def test_map(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) f = GlobalVar("f") t = TypeVar("t") @@ -200,7 +200,7 @@ def test_map(): def test_loop(): - mod = Module() + mod = tvm.IRModule() t = TypeVar("t") x = Var("x", t) loop = GlobalVar("loop") @@ -214,7 +214,7 @@ def test_loop(): def test_swap_loop(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat() @@ -230,7 +230,7 @@ def test_swap_loop(): def test_abs_diff(): # TODO(@M.K.): refactor using tuple pattern (not yet implemented) - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat() @@ -251,7 +251,7 @@ def test_abs_diff(): def test_match_nat_id(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat() @@ -268,7 +268,7 @@ def test_match_nat_id(): def test_nat_id(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat() @@ -283,7 +283,7 @@ def test_nat_id(): def test_global_match_nat_id(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat() @@ -297,7 +297,7 @@ def test_global_match_nat_id(): def test_double(): - mod = Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) orig = 
p.double(make_nat_expr(p, 3)) @@ -324,7 +324,7 @@ def test_triangle_number(): def test_nat_update(): - m = Module() + m = tvm.IRModule() p = Prelude(m) add_nat_definitions(p) m = transform.ToANormalForm()(m) diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py index 75d3c932f05ac..5bcf213050386 100644 --- a/tests/python/relay/test_pass_partition_graph.py +++ b/tests/python/relay/test_pass_partition_graph.py @@ -244,7 +244,7 @@ def test_multi_node_compiler(): r = relay.concatenate((q0, q1, q2), axis=0) f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r) - mod = relay.Module() + mod = tvm.IRModule() ann = CcompilerAnnotator() mod["main"] = ann.visit(f) mod = transform.PartitionGraph()(mod) @@ -285,7 +285,7 @@ def visit_call(self, call): f = relay.Function([x, y], z) x_data = np.random.rand(8, 8).astype('float32') y_data = np.random.rand(8, 8).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f mod = MyAnnotator()(mod) mod = transform.PartitionGraph()(mod) @@ -318,7 +318,7 @@ def expected(): tvm.expr.IntImm("int32", 1)) fused_call = relay.Call(fused_func, [add_call]) main = relay.Function([x, y], fused_call) - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = main return mod @@ -329,7 +329,7 @@ def expected(): exp = relay.exp(add) concat = relay.concatenate([log, exp], axis=0) f = relay.Function([x, y], concat) - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f mod = WhiteListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod) mod = transform.PartitionGraph()(mod) @@ -353,7 +353,7 @@ def test_extern_ccompiler(): f = relay.Function([x, y], p - z) x_data = np.random.rand(2, 2).astype('float32') y_data = np.random.rand(2, 2).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f mod = WhiteListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod) mod = transform.PartitionGraph()(mod) @@ -385,11 +385,11 @@ def test_extern_dnnl(): f = relay.Function([data, weight1], out) - mod = relay.Module() + mod = tvm.IRModule() mod['main'] = WholeGraphAnnotator('dnnl').visit(f) mod = transform.PartitionGraph()(mod) - ref_mod = relay.Module() + ref_mod = tvm.IRModule() ref_mod['main'] = f i_data = np.random.uniform(0, 1, ishape).astype(dtype) diff --git a/tests/python/relay/test_pass_qnn_legalize.py b/tests/python/relay/test_pass_qnn_legalize.py index 6992f288c4542..38fdb7dd07b19 100644 --- a/tests/python/relay/test_pass_qnn_legalize.py +++ b/tests/python/relay/test_pass_qnn_legalize.py @@ -34,7 +34,7 @@ def alpha_equal(x, y): def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) @@ -114,7 +114,7 @@ def _get_mod(data_dtype, kernel_dtype): kernel_layout='OIHW') mod = relay.Function(relay.analysis.free_vars(func), func) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) return mod # Check uint8 x uint8 and int8 x int8 transformation @@ -193,7 +193,7 @@ def _get_mod(data_dtype, kernel_dtype): out_dtype='int32') mod = relay.Function(relay.analysis.free_vars(func), func) - mod = relay.Module.from_expr(mod) + mod = tvm.IRModule.from_expr(mod) return mod # Check uint8 x uint8 and int8 x int8 transformation diff --git a/tests/python/relay/test_pass_remove_unused_functions.py b/tests/python/relay/test_pass_remove_unused_functions.py index 
2a4cbd2579e7f..bacc3126c7c4e 100644 --- a/tests/python/relay/test_pass_remove_unused_functions.py +++ b/tests/python/relay/test_pass_remove_unused_functions.py @@ -22,7 +22,7 @@ def test_remove_all_prelude_functions(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) x = relay.var("x", shape=(1, 16)) mod["main"] = relay.Function([x], x) @@ -32,7 +32,7 @@ def test_remove_all_prelude_functions(): def test_remove_all_prelude_functions_but_referenced_functions(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) x = relay.var("x", shape=(1, 16)) id_func = relay.Function([x], x) @@ -46,7 +46,7 @@ def test_remove_all_prelude_functions_but_referenced_functions(): def test_keep_only_referenced_prelude_functions(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) l = p.nil() for i in [4, 3, 2, 1, 0]: @@ -59,7 +59,7 @@ def test_keep_only_referenced_prelude_functions(): def test_multiple_entry_functions(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) l = p.nil() for i in [4, 3, 2, 1, 0]: @@ -78,7 +78,7 @@ def test_multiple_entry_functions(): def test_globalvar_as_call_arg(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) tensor_array = p.get_var('tensor_array', 'int32') tensor1 = p.get_var('tensor1', 'int32') @@ -96,7 +96,7 @@ def test_globalvar_as_call_arg(): def test_call_globalvar_without_args(): def get_mod(): - mod = relay.Module({}) + mod = tvm.IRModule({}) fn1 = relay.Function([], relay.const(1)) fn2 = relay.Function([], relay.const(2)) g1 = relay.GlobalVar('g1') diff --git a/tests/python/relay/test_pass_to_a_normal_form.py b/tests/python/relay/test_pass_to_a_normal_form.py index 865729002745f..46bde4f490b86 100644 --- a/tests/python/relay/test_pass_to_a_normal_form.py +++ b/tests/python/relay/test_pass_to_a_normal_form.py @@ -26,7 +26,7 @@ def run_opt_pass(expr, passes): passes = passes if isinstance(passes, list) else [passes] - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) seq = transform.Sequential(passes) with transform.PassContext(opt_level=3): mod = seq(mod) @@ -110,7 +110,7 @@ def test_recursion(): } f(5); """ - mod = relay.Module() + mod = tvm.IRModule() i64 = relay.TensorType((), 'int64') f = relay.GlobalVar("f") n = relay.Var("n", i64) @@ -143,7 +143,7 @@ def test_ref(): def test_nat_add(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) nat = p.nat @@ -192,7 +192,7 @@ def test_gradient_if(): net = relay.If(cond, x, x) net = relay.add(x, net) net = relay.Function([cond,x,y], net) - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) mod = relay.transform.ToANormalForm()(mod) mod["main"] = relay.transform.gradient(mod["main"], mode='higher_order') mod = relay.transform.ToANormalForm()(mod) diff --git a/tests/python/relay/test_pass_to_cps.py b/tests/python/relay/test_pass_to_cps.py index 1d09c0d67f5b5..4645e20c74686 100644 --- a/tests/python/relay/test_pass_to_cps.py +++ b/tests/python/relay/test_pass_to_cps.py @@ -42,7 +42,7 @@ def test_double(): # make sure cps work for recursion. 
def test_recursion(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) add_nat_definitions(p) shape = (10, 10) diff --git a/tests/python/relay/test_pass_to_graph_normal_form.py b/tests/python/relay/test_pass_to_graph_normal_form.py index a29172471d484..5c5221f65a46d 100644 --- a/tests/python/relay/test_pass_to_graph_normal_form.py +++ b/tests/python/relay/test_pass_to_graph_normal_form.py @@ -22,7 +22,7 @@ def run_opt_pass(expr, opt_pass): - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body @@ -30,7 +30,7 @@ def run_opt_pass(expr, opt_pass): def check_eval(expr, args, expected_result, mod=None, rtol=1e-07): if mod is None: - mod = relay.Module() + mod = tvm.IRModule() ctx = tvm.context("llvm", 0) intrp = create_executor(mod=mod, ctx=ctx, target="llvm") diff --git a/tests/python/relay/test_pass_unmatched_cases.py b/tests/python/relay/test_pass_unmatched_cases.py index b06de4c8e3845..615d4e092291c 100644 --- a/tests/python/relay/test_pass_unmatched_cases.py +++ b/tests/python/relay/test_pass_unmatched_cases.py @@ -47,7 +47,7 @@ def test_trivial_matches(): def test_single_constructor_adt(): - mod = relay.Module() + mod = tvm.IRModule() box = relay.GlobalTypeVar('box') a = relay.TypeVar('a') box_ctor = relay.Constructor('box', [a], box) @@ -76,7 +76,7 @@ def test_single_constructor_adt(): def test_too_specific_match(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v = relay.Var('v') @@ -117,7 +117,7 @@ def test_too_specific_match(): def test_multiple_constructor_clauses(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v = relay.Var('v') @@ -147,7 +147,7 @@ def test_multiple_constructor_clauses(): def test_missing_in_the_middle(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v = relay.Var('v') @@ -185,7 +185,7 @@ def test_missing_in_the_middle(): def test_mixed_adt_constructors(): - mod = relay.Module() + mod = tvm.IRModule() box = relay.GlobalTypeVar('box') a = relay.TypeVar('a') box_ctor = relay.Constructor('box', [a], box) diff --git a/tests/python/relay/test_pass_vars.py b/tests/python/relay/test_pass_vars.py index 70eb047ad03ea..d8b77ba356121 100644 --- a/tests/python/relay/test_pass_vars.py +++ b/tests/python/relay/test_pass_vars.py @@ -82,7 +82,7 @@ def test_bound_vars(): def test_match_vars(): - mod = relay.Module() + mod = tvm.IRModule() p = relay.prelude.Prelude(mod) x = relay.Var('x') diff --git a/tests/python/relay/test_py_converter.py b/tests/python/relay/test_py_converter.py index 76aa697a2aaba..17486148b30cf 100644 --- a/tests/python/relay/test_py_converter.py +++ b/tests/python/relay/test_py_converter.py @@ -199,7 +199,7 @@ def test_local_function(): def test_global_function(): - mod = relay.Module() + mod = tvm.IRModule() ident = relay.GlobalVar('ident') a = relay.TypeVar('a') v = relay.Var('v', a) @@ -218,7 +218,7 @@ def test_global_function(): def test_constructor(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) init_box_int = box_ctor(relay.const(1)) @@ -235,7 +235,7 @@ def test_constructor(): def test_match_wildcard(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) v = relay.Var('v') match = relay.Let( @@ -249,7 +249,7 @@ def test_match_wildcard(): def test_match_var(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) v = relay.Var('v') w = relay.Var('w') @@ -265,7 +265,7 @@ def 
test_match_var(): def test_match_pattern(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) v = relay.Var('v') w = relay.Var('w') @@ -279,7 +279,7 @@ def test_match_pattern(): def test_nested_match_pattern(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) v = relay.Var('v') w = relay.Var('w') @@ -296,7 +296,7 @@ def test_nested_match_pattern(): assert_tensor_value(match_val, 2) def test_match_order(): - mod = relay.Module() + mod = tvm.IRModule() box, box_ctor = init_box_adt(mod) v = relay.Var('v') w = relay.Var('w') @@ -316,7 +316,7 @@ def test_match_order(): def test_local_recursion(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) v = relay.Var('v') @@ -342,11 +342,11 @@ def test_local_recursion(): assert_tensor_value(val.fields[1].fields[0], 2) assert_constructor_value(val.fields[1].fields[1], p.cons, 2) assert_tensor_value(val.fields[1].fields[1].fields[0], 3) - assert_constructor_value(val.fields[1].fields[1].fields[1], p.nil, 0) + assert_constructor_value(val.fields[1].fields[1].fields[1], p.nil, 0) def test_global_recursion(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) copy = relay.GlobalVar('copy') # same as above: it copies the given list @@ -398,7 +398,7 @@ def test_higher_order_call(): def test_match_effect_exactly_once(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) # the list should be of length 1! @@ -423,7 +423,7 @@ def test_match_effect_exactly_once(): def test_arbitrary_let_nesting(): # something that is tricky to do in Python but comes naturally in Relay - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) x = relay.Var('x') r = relay.Var('r') diff --git a/tests/python/relay/test_type_infer.py b/tests/python/relay/test_type_infer.py index 3f6b0d2eb895b..892c91d9c43a3 100644 --- a/tests/python/relay/test_type_infer.py +++ b/tests/python/relay/test_type_infer.py @@ -17,6 +17,7 @@ """Test that type checker correctly computes types for expressions.
""" +import tvm from tvm import relay from tvm.relay import op, transform, analysis from tvm.relay.analysis import assert_alpha_equal @@ -24,7 +25,7 @@ def run_infer_type(expr, mod=None): if not mod: - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = transform.InferType()(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body @@ -44,7 +45,7 @@ def run_infer_type(expr, mod=None): return mod[gv].body -def assert_has_type(expr, typ, mod=relay.module.Module({})): +def assert_has_type(expr, typ, mod=tvm.IRModule({})): checked_expr = run_infer_type(expr, mod) checked_type = checked_expr.checked_type if checked_type != typ: @@ -152,7 +153,7 @@ def @f(%n: int32, %data: float32) -> float32 { sb.ret(data) with sb.else_scope(): sb.ret(f(relay.subtract(n, relay.const(1, ti32)), relay.log(data))) - mod = relay.Module() + mod = tvm.IRModule() mod[f] = relay.Function([n, data], sb.get()) assert "@f(%1, %2) /* ty=float32 */" in mod.astext() assert mod[f].checked_type == relay.FuncType([ti32, tf32], tf32) @@ -267,7 +268,7 @@ def test_type_args(): def test_global_var_recursion(): - mod = relay.Module({}) + mod = tvm.IRModule({}) gv = relay.GlobalVar("main") x = relay.var('x', shape=[]) tt = relay.scalar_type('float32') @@ -289,7 +290,7 @@ def test_equal(): def test_constructor_type(): - mod = relay.Module() + mod = tvm.IRModule() box, constructor = initialize_box_adt(mod) a = relay.TypeVar('a') @@ -300,7 +301,7 @@ def test_constructor_type(): def test_constructor_call(): - mod = relay.Module() + mod = tvm.IRModule() box, constructor = initialize_box_adt(mod) box_unit = constructor(relay.Tuple([])) @@ -313,7 +314,7 @@ def test_constructor_call(): def test_adt_match(): - mod = relay.Module() + mod = tvm.IRModule() box, constructor = initialize_box_adt(mod) v = relay.Var('v', relay.TensorType((), 'float32')) @@ -331,7 +332,7 @@ def test_adt_match(): def test_adt_match_type_annotations(): - mod = relay.Module() + mod = tvm.IRModule() box, constructor = initialize_box_adt(mod) # the only type annotation is inside the match pattern var diff --git a/tests/python/relay/test_typecall.py b/tests/python/relay/test_typecall.py index 1c663d2301e92..dfa40ab26dfe6 100644 --- a/tests/python/relay/test_typecall.py +++ b/tests/python/relay/test_typecall.py @@ -23,14 +23,14 @@ def test_dup_type(): make_id = relay.Function([av], relay.Tuple([av, av]), None, [a]) t = relay.scalar_type("float32") b = relay.Var("b", t) - mod = relay.Module.from_expr(make_id(b)) + mod = tvm.IRModule.from_expr(make_id(b)) mod = transform.InferType()(mod) inferred = mod["main"].body assert inferred.checked_type == relay.TupleType([t, t]) def test_id_type(): - mod = relay.Module() + mod = tvm.IRModule() id_type = relay.GlobalTypeVar("id") a = relay.TypeVar("a") mod[id_type] = relay.TypeData(id_type, [a], []) diff --git a/tests/python/relay/test_vm.py b/tests/python/relay/test_vm.py index 9ea939ce9c832..3b75b1e95c01f 100644 --- a/tests/python/relay/test_vm.py +++ b/tests/python/relay/test_vm.py @@ -46,10 +46,10 @@ def check_result(args, expected_result, mod=None): def veval(f, *args, ctx=tvm.cpu(), target="llvm"): if isinstance(f, relay.Expr): - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f else: - assert isinstance(f, relay.Module), "expected expression or module" + assert isinstance(f, tvm.IRModule), "expected expression or module" mod = f exe = relay.vm.compile(mod, target) vm = relay.vm.VirtualMachine(exe) @@ -92,7 +92,7 @@ def test_id(): x = relay.var('x', shape=(10, 
10), dtype='float64') f = relay.Function([x], x) x_data = np.random.rand(10, 10).astype('float64') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([x_data], x_data, mod=mod) @@ -100,7 +100,7 @@ def test_op(): x = relay.var('x', shape=(10, 10)) f = relay.Function([x], x + x) x_data = np.random.rand(10, 10).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([x_data], 2 * x_data, mod=mod) @@ -116,7 +116,7 @@ def test_cond(): x_data = np.random.rand(10, 10).astype('float32') y_data = np.random.rand(10, 10).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f # same check_result([x_data, x_data], True, mod=mod) @@ -132,7 +132,7 @@ def test_simple_if(): x_data = np.random.rand(10, 10).astype('float32') y_data = np.random.rand(10, 10).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f # same check_result([x_data, x_data], x_data, mod=mod) @@ -141,7 +141,7 @@ def test_simple_if(): check_result([x_data, y_data], y_data, mod=mod) def test_simple_call(): - mod = relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') sb = ScopeBuilder() @@ -154,7 +154,7 @@ def test_simple_call(): check_result([i_data], i_data, mod=mod) def test_count_loop(): - mod = relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') sb = ScopeBuilder() @@ -174,7 +174,7 @@ def test_count_loop(): check_result([i_data], i_data, mod=mod) def test_sum_loop(): - mod = relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') accum = relay.var('accum', shape=[], dtype='int32') @@ -201,7 +201,7 @@ def test_tuple_fst(): f = relay.Function([tup], relay.TupleGetItem(tup, 0)) i_data = np.random.rand(41).astype('float32') j_data = np.random.rand(10).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([(i_data, j_data)], i_data, mod=mod) @@ -211,12 +211,12 @@ def test_tuple_second(): f = relay.Function([tup], relay.TupleGetItem(tup, 1)) i_data = np.random.rand(41).astype('float32') j_data = np.random.rand(10).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([(i_data, j_data)], j_data, mod=mod) def test_list_constructor(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -251,7 +251,7 @@ def test_let_tensor(): f = relay.Function([x], body) x_data = np.random.rand(*shape).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([x_data], x_data + 42.0, mod=mod) @@ -267,12 +267,12 @@ def test_let_scalar(): f = relay.Function([x], body) x_data = np.array(np.random.rand()).astype('float32') - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f check_result([x_data], x_data + 42.0, mod=mod) def test_compose(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) compose = p.compose @@ -305,7 +305,7 @@ def test_compose(): tvm.testing.assert_allclose(result.asnumpy(), x_data + 2.0) def test_list_hd(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -326,7 +326,7 @@ def test_list_hd(): @pytest.mark.xfail def test_list_tl_empty_list(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -341,7 +341,7 @@ def test_list_tl_empty_list(): print(result) def test_list_tl(): - mod = relay.Module() + mod = 
tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -364,7 +364,7 @@ def test_list_nth(): expected = list(range(10)) for i in range(len(expected)): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -382,7 +382,7 @@ def test_list_nth(): def test_list_update(): expected = list(range(10)) - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -406,7 +406,7 @@ def test_list_update(): def test_list_length(): expected = list(range(10)) - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -426,7 +426,7 @@ def test_list_length(): tvm.testing.assert_allclose(result.asnumpy(), 10) def test_list_map(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) x = relay.var('x', 'int32') @@ -444,7 +444,7 @@ def test_list_map(): tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2])) def test_list_foldl(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -462,7 +462,7 @@ def test_list_foldl(): tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1])) def test_list_foldr(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -480,7 +480,7 @@ def test_list_foldr(): tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3])) def test_list_sum(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -494,7 +494,7 @@ def test_list_sum(): tvm.testing.assert_allclose(result.asnumpy(), 6) def test_list_filter(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) nil = p.nil @@ -530,7 +530,7 @@ def test_add_op_scalar(): return x + y; } """ - mod = relay.Module() + mod = tvm.IRModule() x = relay.var('x', shape=()) y = relay.var('y', shape=()) func = relay.Function([x, y], relay.op.add(x, y)) @@ -546,7 +546,7 @@ def test_add_op_tensor(): return x + y; } """ - mod = relay.Module() + mod = tvm.IRModule() x = relay.var('x', shape=(10, 5)) y = relay.var('y', shape=(10, 5)) func = relay.Function([x, y], relay.op.add(x, y)) @@ -562,7 +562,7 @@ def test_add_op_broadcast(): return x + y; } """ - mod = relay.Module() + mod = tvm.IRModule() x = relay.var('x', shape=(10, 5)) y = relay.var('y', shape=(1, 5)) func = relay.Function([x, y], relay.op.add(x, y)) diff --git a/tests/python/relay/test_vm_serialization.py b/tests/python/relay/test_vm_serialization.py index 6f4e09a393ff1..15ea2e8cda5cc 100644 --- a/tests/python/relay/test_vm_serialization.py +++ b/tests/python/relay/test_vm_serialization.py @@ -20,7 +20,6 @@ import tvm from tvm import relay -from tvm.relay.module import Module as rly_module from tvm.relay import vm as _vm from tvm.relay.scope_builder import ScopeBuilder from tvm.relay.prelude import Prelude @@ -29,12 +28,12 @@ def create_exec(f, target="llvm", params=None): if isinstance(f, relay.Expr): - mod = relay.Module() + mod = tvm.IRModule() mod["main"] = f executable = _vm.compile(mod, target=target, params=params) return executable else: - assert isinstance(f, relay.Module), "expected mod as relay.Module" + assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule" executable = _vm.compile(f, target=target, params=params) return executable @@ -75,7 +74,7 @@ def get_serialized_output(mod, data, params, target, ctx, dtype='float32'): def test_serializer(): - mod = rly_module({}) + mod = tvm.IRModule({}) a = relay.const(1.0, "float32") x = relay.var('x', shape=(10, 10), dtype='float32') f1 = relay.Function([x], x + a) @@ -186,7 +185,7 @@ def test_if(): def test_loop(): - mod = 
relay.module.Module({}) + mod = tvm.IRModule({}) sum_up = relay.GlobalVar('sum_up') i = relay.var('i', shape=[], dtype='int32') accum = relay.var('accum', shape=[], dtype='int32') @@ -234,7 +233,7 @@ def test_tuple(): def test_adt_list(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) l1 = p.cons(relay.const(1), p.nil()) @@ -262,7 +261,7 @@ def test_adt_list(): def test_adt_compose(): - mod = relay.Module() + mod = tvm.IRModule() p = Prelude(mod) compose = p.compose diff --git a/tests/python/unittest/test_codegen_cross_llvm.py b/tests/python/unittest/test_codegen_cross_llvm.py index 6604038ab2ef7..1827ccf63d79d 100644 --- a/tests/python/unittest/test_codegen_cross_llvm.py +++ b/tests/python/unittest/test_codegen_cross_llvm.py @@ -71,7 +71,7 @@ def build_arm(): port = int(os.environ['TVM_RPC_ARM_PORT']) try: remote = rpc.connect(host, port) - except tvm.TVMError as e: + except tvm.error.TVMError as e: pass if remote: diff --git a/tests/python/unittest/test_container.py b/tests/python/unittest/test_container.py index 7bdab82d7a659..ea988422345c1 100644 --- a/tests/python/unittest/test_container.py +++ b/tests/python/unittest/test_container.py @@ -42,7 +42,7 @@ def test_tuple_object(): ])) fn = relay.Function([x], relay.expr.TupleGetItem(x, 0)) - mod = relay.Module.from_expr(fn) + mod = tvm.IRModule.from_expr(fn) exe = relay.create_executor( kind="vm", mod=mod, ctx=nd.cpu(), target="llvm") diff --git a/tests/python/unittest/test_graph_tuner_core.py b/tests/python/unittest/test_graph_tuner_core.py index 32c16e239461b..a8b22fd787ee9 100644 --- a/tests/python/unittest/test_graph_tuner_core.py +++ b/tests/python/unittest/test_graph_tuner_core.py @@ -159,7 +159,7 @@ def test_DPTuner_run(): target_ops = [relay.nn.conv2d] g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout) - mod = relay.module.Module() + mod = tvm.IRModule() mod["main"] = g costs = [0.02, 0.02, 0.045] config_list = [] diff --git a/tests/python/unittest/test_lang_group.py b/tests/python/unittest/test_lang_group.py index 3efc9bc5096b0..e78ffb3541d31 100644 --- a/tests/python/unittest/test_lang_group.py +++ b/tests/python/unittest/test_lang_group.py @@ -46,7 +46,7 @@ def test_scan_group(): # compute outside group error. 
s[s_update2].compute_at(s[s_init], s_init.op.axis[0]) assert False - except tvm.TVMError: + except tvm.error.TVMError: pass def test_compute_group(): diff --git a/tests/python/unittest/test_lang_operator.py b/tests/python/unittest/test_lang_operator.py index ac2ee6d88cc55..26783e62db133 100644 --- a/tests/python/unittest/test_lang_operator.py +++ b/tests/python/unittest/test_lang_operator.py @@ -19,7 +19,7 @@ def check_throws(f): try: f() - except tvm.TVMError: + except tvm.error.TVMError: pass else: raise AssertionError("Should have raised an exception but didn't.") diff --git a/tests/python/unittest/test_lang_reflection.py b/tests/python/unittest/test_lang_reflection.py index 18230bf5e1fa2..5df77083f443b 100644 --- a/tests/python/unittest/test_lang_reflection.py +++ b/tests/python/unittest/test_lang_reflection.py @@ -57,13 +57,13 @@ def test_make_attrs(): try: x = tvm.make.node("attrs.TestAttrs", unknown_key=1, name="xx") assert False - except tvm.TVMError as e: + except tvm.error.TVMError as e: assert str(e).find("unknown_key") != -1 try: x = tvm.make.node("attrs.TestAttrs", axis=100, name="xx") assert False - except tvm.TVMError as e: + except tvm.error.TVMError as e: assert str(e).find("upper bound") != -1 x = tvm.make.node("attrs.TestAttrs", name="xx", padding=(3,4)) diff --git a/tests/python/unittest/test_lang_schedule.py b/tests/python/unittest/test_lang_schedule.py index eeab81b965b4e..9bfd1a427e9a9 100644 --- a/tests/python/unittest/test_lang_schedule.py +++ b/tests/python/unittest/test_lang_schedule.py @@ -65,7 +65,7 @@ def test_reorder(): # must raise an error s[T].reorder(xi2, xi1, xi2) assert False - except tvm.TVMError: + except tvm.error.TVMError: pass def test_split(): diff --git a/tests/python/unittest/test_pass_inline.py b/tests/python/unittest/test_pass_inline.py index 511a1438f4bec..e87353ed98a1d 100644 --- a/tests/python/unittest/test_pass_inline.py +++ b/tests/python/unittest/test_pass_inline.py @@ -32,7 +32,7 @@ def test_inline(): stmt = tvm.ir_pass.Inline( T.op, [1,2,3], T.op.body, stmt) assert False - except tvm.TVMError: + except tvm.error.TVMError: pass def test_inline2(): diff --git a/tests/python/unittest/test_runtime_rpc.py b/tests/python/unittest/test_runtime_rpc.py index 43bb79cf03637..ff5f46536d831 100644 --- a/tests/python/unittest/test_runtime_rpc.py +++ b/tests/python/unittest/test_runtime_rpc.py @@ -78,7 +78,7 @@ def remotethrow(name): try: f3("abc") assert False - except tvm.TVMError as e: + except tvm.error.TVMError as e: assert "abc" in str(e) f2 = client.get_function("rpc.test.strcat") diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py index 67b7d96f38946..4cbdf52163d68 100644 --- a/tutorials/autotvm/tune_relay_arm.py +++ b/tutorials/autotvm/tune_relay_arm.py @@ -99,7 +99,7 @@ def get_network(name, batch_size): mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype) net = mod["main"] net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs) - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py index 2cd99497259d9..72fc2bed3d0ed 100644 --- a/tutorials/autotvm/tune_relay_cuda.py +++ b/tutorials/autotvm/tune_relay_cuda.py @@ -99,7 +99,7 @@ def get_network(name, batch_size): mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype) net = mod["main"] net = 
relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs) - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py index eb7b96e6972b5..3c56524078c2c 100644 --- a/tutorials/autotvm/tune_relay_mobile_gpu.py +++ b/tutorials/autotvm/tune_relay_mobile_gpu.py @@ -100,7 +100,7 @@ def get_network(name, batch_size): mod, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype) net = mod["main"] net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs) - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py index 93a073170388a..5e26f5858bbc2 100644 --- a/tutorials/autotvm/tune_relay_x86.py +++ b/tutorials/autotvm/tune_relay_x86.py @@ -69,7 +69,7 @@ def get_network(name, batch_size): mod, params = relay.frontend.from_mxnet(block, shape={input_name: input_shape}, dtype=dtype) net = mod["main"] net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs) - mod = relay.Module.from_expr(net) + mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) diff --git a/tutorials/dev/relay_pass_infra.py b/tutorials/dev/relay_pass_infra.py index 87a3bf1c3ca7f..d27e236a2572f 100644 --- a/tutorials/dev/relay_pass_infra.py +++ b/tutorials/dev/relay_pass_infra.py @@ -99,7 +99,7 @@ def alter_conv2d(attrs, inputs, tinfos): # Let's first create a relay Module which contains one or multiple Relay # functions for optimization. f = example() -mod = relay.Module.from_expr(f) +mod = tvm.IRModule.from_expr(f) # Now we can apply constant folding on the module. # fold_const here is a callback that doesn't take any parameters. @@ -151,7 +151,7 @@ def alter_conv2d(attrs, inputs, tinfos): # Now let's execute some passes through `Sequential`_ f = example() -mod = relay.Module.from_expr(f) +mod = tvm.IRModule.from_expr(f) # Gather the passes of interest. seq = relay.transform.Sequential([relay.transform.FoldConstant(), relay.transform.EliminateCommonSubexpr(), @@ -228,7 +228,7 @@ def visit_const(self, c): return ReplaceConstant().visit(func) f = example() -mod = relay.Module.from_expr(f) +mod = tvm.IRModule.from_expr(f) custom_pass = CustomPipeline(multiplier=relay.const(3, "float")) assert custom_pass.info.name == "CustomPipeline" mod3 = custom_pass(mod) @@ -243,7 +243,7 @@ def visit_const(self, c): # them.
f = example() -mod = relay.Module.from_expr(f) +mod = tvm.IRModule.from_expr(f) seq = relay.transform.Sequential([relay.transform.FoldConstant(), relay.transform.PrintIR(), relay.transform.EliminateCommonSubexpr(), diff --git a/vta/python/vta/build_module.py b/vta/python/vta/build_module.py index cec217cbd393c..df67faaac2bf9 100644 --- a/vta/python/vta/build_module.py +++ b/vta/python/vta/build_module.py @@ -33,7 +33,7 @@ def early_rewrite(stmt): """Try to do storage rewrite in early pass.""" try: return tvm.ir_pass.StorageRewrite(stmt) - except tvm.TVMError: + except tvm.error.TVMError: return stmt diff --git a/vta/python/vta/top/graphpack.py b/vta/python/vta/top/graphpack.py index b14f937b35df0..69f2546c9241d 100644 --- a/vta/python/vta/top/graphpack.py +++ b/vta/python/vta/top/graphpack.py @@ -24,7 +24,7 @@ def run_opt_pass(expr, opt_pass): """Execute a relay pass.""" assert isinstance(opt_pass, transform.Pass) - mod = relay.Module.from_expr(expr) + mod = tvm.IRModule.from_expr(expr) mod = opt_pass(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index 4cf08e93ba14a..3221c3b77b1fb 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -353,7 +353,7 @@ def tune_and_evaluate(tuning_opt): # Perform task extraction on Relay program print("Extract tasks...") relay_prog, params = compile_network(env, target, network, start_pack, stop_pack) - mod = relay.Module.from_expr(relay_prog) + mod = tvm.IRModule.from_expr(relay_prog) tasks = autotvm.task.extract_from_program(mod, params=params, ops=(tvm.relay.op.nn.conv2d, ),