
Commit 805bbc8

save

MarisaKirisame committed Aug 21, 2020
1 parent 2b29d7a commit 805bbc8

Showing 120 changed files with 288 additions and 266 deletions.
2 changes: 1 addition & 1 deletion apps/benchmark/util.py
@@ -70,7 +70,7 @@ def get_network(name, batch_size, dtype='float32'):
         net, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
         net = net["main"]
         net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
-        net = tvm.IRModule.from_expr(net)
+        net = tvm.IRModule.from_func(net)
     else:
         raise ValueError("Unsupported network: " + name)
2 changes: 1 addition & 1 deletion apps/bundle_deploy/build_model.py
@@ -67,7 +67,7 @@ def build_test_module(opts):
     for runtime_name, file_format_str in RUNTIMES.items():
         with tvm.transform.PassContext(opt_level=3, config={'tir.disable_vectorize': True}):
             graph, lib, lowered_params = relay.build(
-                tvm.IRModule.from_expr(func), f"llvm --runtime={runtime_name} --system-lib", params=params)
+                tvm.IRModule.from_func(func), f"llvm --runtime={runtime_name} --system-lib", params=params)

     build_dir = os.path.abspath(opts.out_dir)
     if not os.path.isdir(build_dir):
2 changes: 1 addition & 1 deletion docs/deploy/arm_compute_lib.rst
@@ -113,7 +113,7 @@ max_pool2d operator).
     data = relay.var('data', shape=data_shape, dtype=data_type)
     out = relay.nn.max_pool2d(data, pool_size=pool_size, strides=strides, layout=layout, padding=padding)
-    module = tvm.IRModule.from_expr(out)
+    module = tvm.IRModule.from_func(out)

 Annotate and partition the graph for ACL.
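A minimal end-to-end sketch of this step (not part of this commit): the shape and pooling parameters below are illustrative assumptions, and the partition helper is the one used elsewhere in this diff.

    import tvm
    from tvm import relay
    from tvm.relay.op.contrib import arm_compute_lib

    # Illustrative values for the variables in the docs snippet above.
    data_shape = (1, 14, 14, 512)
    data = relay.var('data', shape=data_shape, dtype='float32')
    out = relay.nn.max_pool2d(data, pool_size=(2, 2), strides=(2, 2),
                              layout='NHWC', padding=(0, 0))
    module = tvm.IRModule.from_func(out)  # renamed from from_expr in this commit

    # Annotate and partition: ACL-supported operators are moved into
    # external functions handled by the ACL runtime.
    module = arm_compute_lib.partition_for_arm_compute_lib(module)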
2 changes: 1 addition & 1 deletion docs/langref/relay_pattern.rst
@@ -182,7 +182,7 @@ if a specific parameter in a subgraph has been bound or not.
     conv2d = relay.op.nn.conv2d(x, w)
     out = relay.op.nn.bias_add(conv2d, b)
     func = relay.Function([x, w, b], out)
-    mod = tvm.IRModule.from_expr(func)
+    mod = tvm.IRModule.from_func(func)
     # Two inputs of the conv2d in the graph are VarNode by default, so no match.
     assert not pattern.match(mod['main'].body)
2 changes: 1 addition & 1 deletion python/tvm/autotvm/feature.py
@@ -46,7 +46,7 @@ def ana_lower(sch, args,
     bounds = schedule.InferBound(sch)
     stmt = schedule.ScheduleOps(sch, bounds, True)
     func = schedule.SchedulePostProcToPrimFunc(args, stmt, None)
-    mod = tvm.IRModule.from_expr(func._move())
+    mod = tvm.IRModule.from_func(func._move())
     mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
     mod = tvm.tir.transform.Simplify()(mod._move())
     assert simple_mode
4 changes: 2 additions & 2 deletions python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
@@ -72,7 +72,7 @@ def expr2graph(expr, target_ops, node_dict, node_list):

     def _infer_type(node):
         """A method to infer the type of a relay expression."""
-        mod = tvm.IRModule.from_expr(node)
+        mod = tvm.IRModule.from_func(node)
         mod = transform.InferType()(mod)
         entry = mod["main"]
         return entry if isinstance(node, relay.Function) else entry.body
@@ -125,7 +125,7 @@ def _traverse_expr(node):
             free_var = relay.Var("var_%d" % i, input_type)
             params.append(free_var)
             call = relay.Call(node.op, params, node.attrs)
-            mod = tvm.IRModule.from_expr(relay.Function(params, call))
+            mod = tvm.IRModule.from_func(relay.Function(params, call))
             relay.backend.compile_engine.get().clear()
             build_thread = threading.Thread(target=relay.build,
                                             args=(mod,
2 changes: 1 addition & 1 deletion python/tvm/autotvm/graph_tuner/utils/utils.py
@@ -142,7 +142,7 @@ def bind_inputs(expr, input_shapes=None, input_dtypes="float32"):
         rebind_dict[var] = updated_input_dict[var.name_hint]
     updated_expr = relay.expr.bind(expr, rebind_dict)

-    mod = tvm.IRModule.from_expr(updated_expr)
+    mod = tvm.IRModule.from_func(updated_expr)
     mod = transform.InferType()(mod)
     entry = mod["main"]
     return entry if isinstance(updated_expr, relay.Function) else entry.body
2 changes: 1 addition & 1 deletion python/tvm/autotvm/task/relay_integration.py
@@ -131,7 +131,7 @@ def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):

     for mod, param in zip(mods, params):
         if isinstance(mod, relay.function.Function):
-            mod = tvm.IRModule.from_expr(mod)
+            mod = tvm.IRModule.from_func(mod)
         assert isinstance(mod, tvm.IRModule), \
             "only support relay Module or Function to be tuned"
         relay.backend.compile_engine.get().clear()
2 changes: 1 addition & 1 deletion python/tvm/contrib/target/onnx.py
@@ -43,7 +43,7 @@ def get_onnx_version():

 def infer_type(node):
     """A method to infer the type of a relay expression."""
-    mod = tvm.IRModule.from_expr(node)
+    mod = tvm.IRModule.from_func(node)
     mod = relay.transform.InferType()(mod)
     entry = mod["main"]
     return entry if isinstance(node, relay.Function) else entry.body
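A usage sketch of this helper (not part of this commit; values illustrative):

    import tvm
    from tvm import relay

    x = relay.var('x', shape=(1, 4), dtype='float32')
    body = relay.nn.softmax(x)

    # infer_type wraps the expression in a fresh module, runs InferType,
    # and returns the typed body (since `body` is not a relay.Function).
    typed = infer_type(body)
    print(typed.checked_type)  # TensorType([1, 4], float32)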
26 changes: 24 additions & 2 deletions python/tvm/ir/module.py
@@ -217,10 +217,32 @@ def from_func(func, functions=None, type_defs=None):
             where func is set as the entry point
             (wrapped in a function if necessary)
         """
         funcs = functions if functions is not None else {}
         defs = type_defs if type_defs is not None else {}
         return _ffi_api.Module_FromFunc(func, funcs, defs)

+    @staticmethod
+    def from_graph(expr, functions=None, type_defs=None):
+        """Construct a module from a graph.
+
+        Parameters
+        ----------
+        expr: RelayExpr
+            The starting expression
+        functions: Optional[dict]
+            Map of global vars to function definitions
+        type_defs: Optional[dict]
+            Map of global type vars to type definitions
+
+        Returns
+        -------
+        mod: Module
+            A module containing the passed definitions,
+            where expr is set as the entry point
+            (wrapped in a function if necessary)
+        """
+        funcs = functions if functions is not None else {}
+        defs = type_defs if type_defs is not None else {}
+        return _ffi_api.Module_FromGraph(expr, funcs, defs)

     def _import(self, file_to_import):
         return _ffi_api.Module_Import(self, file_to_import)
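A minimal sketch of the two constructors after this change (not part of this commit; `from_graph` behavior is inferred from the docstring above, so treat it as an assumption):

    import tvm
    from tvm import relay

    x = relay.var('x', shape=(10,), dtype='float32')
    func = relay.Function([x], relay.nn.relu(x))

    # from_func (previously from_expr): use a function as the module entry point.
    mod = tvm.IRModule.from_func(func)

    # from_graph: start from a bare expression ("graph"); per the docstring,
    # it is wrapped in a function if necessary.
    mod_from_graph = tvm.IRModule.from_graph(relay.nn.relu(x))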
2 changes: 1 addition & 1 deletion python/tvm/relay/backend/interpreter.py
@@ -241,7 +241,7 @@ def _interp_wrapper(*args, **kwargs):
             if self.mod:
                 self.mod["main"] = func
             else:
-                self.mod = IRModule.from_expr(func)
+                self.mod = IRModule.from_func(func)

             mod = self.optimize()
             opt_expr = Call(mod["main"], relay_args)
4 changes: 2 additions & 2 deletions python/tvm/relay/build_module.py
@@ -229,7 +229,7 @@ def build(mod, target=None, target_host=None, params=None, mod_name='default'):
     if isinstance(mod, _function.Function):
         if params:
             mod = bind_params_by_name(mod, params)
-        mod = IRModule.from_expr(mod)
+        mod = IRModule.from_func(mod)
         warnings.warn(
             "Please use input parameter mod (tvm.IRModule) "
             "instead of deprecated parameter mod (tvm.relay.function.Function)",
@@ -288,7 +288,7 @@ def optimize(mod, target=None, params=None):
     if isinstance(mod, _function.Function):
         if params:
             mod = bind_params_by_name(mod, params)
-        mod = IRModule.from_expr(mod)
+        mod = IRModule.from_func(mod)
         warnings.warn(
             "Please use input parameter mod (tvm.IRModule) "
             "instead of deprecated parameter func (tvm.relay.function.Function)",
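Given the deprecation warning above, the preferred call sites wrap the function first; a sketch assuming this commit's rename (`func` and `params` stand in for a real relay.Function and its parameter dict):

    import tvm
    from tvm import relay

    # Deprecated: relay.build(func, ...) with a bare relay.Function.
    # Preferred: wrap the function in an IRModule first.
    mod = tvm.IRModule.from_func(func)
    graph, lib, params = relay.build(mod, target="llvm", params=params)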
2 changes: 1 addition & 1 deletion python/tvm/relay/data_dep_optimization/utils.py
@@ -35,6 +35,6 @@ def _run_opt_pass(expr, opt_pass):
         Optimized Expr by running opt_pass
     """
     assert isinstance(opt_pass, tvm.transform.Pass)
-    mod = tvm.IRModule.from_expr(expr)
+    mod = tvm.IRModule.from_func(expr)
     mod = opt_pass(mod)
     return mod["main"]
4 changes: 2 additions & 2 deletions python/tvm/relay/frontend/common.py
@@ -462,7 +462,7 @@ def infer_type(node, mod=None):
         entry = mod["main"]
         ret = entry.body
     else:
-        new_mod = IRModule.from_expr(node)
+        new_mod = IRModule.from_func(node)
         if mod is not None:
             new_mod.update(mod)
         new_mod = _transform.InferType()(new_mod)
@@ -516,7 +516,7 @@ def infer_value(input_val, params, mod=None):
     if isinstance(mod, IRModule):
         mod["main"] = _function.Function(analysis.free_vars(input_val), input_val)
     else:
-        mod = IRModule.from_expr(input_val)
+        mod = IRModule.from_func(input_val)
     exc = tvm.relay.create_executor("debug", mod=mod, ctx=tvm.cpu(), target="llvm")
     inputs = []
     for param in mod['main'].params:
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/coreml.py
@@ -562,4 +562,4 @@ def from_coreml(model, shape=None):
         outexpr = outexpr[0]
     func = _function.Function(analysis.free_vars(outexpr), outexpr)
     params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
-    return IRModule.from_expr(func), params
+    return IRModule.from_func(func), params
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/darknet.py
@@ -823,7 +823,7 @@ def from_darknet(self):
         outputs = _as_list(sym) + self._outs
         outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
         sym = _function.Function(analysis.free_vars(outputs), outputs)
-        return IRModule.from_expr(sym), self._tvmparams
+        return IRModule.from_func(sym), self._tvmparams

 def from_darknet(net,
                  shape=None,
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/keras.py
@@ -1103,4 +1103,4 @@ def _convert_input_layer(keras_layer):
     outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr)
     func = _function.Function(analysis.free_vars(outexpr), outexpr)
     params = {k:_nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
-    return IRModule.from_expr(func), params
+    return IRModule.from_func(func), params
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/onnx.py
@@ -2271,7 +2271,7 @@ def from_onnx(self, graph, opset):
         outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
         outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
         func = _function.Function(analysis.free_vars(outputs), outputs)
-        return IRModule.from_expr(func), self._params
+        return IRModule.from_func(func), self._params

     def _parse_value_proto(self, value_proto):
         """Parse ValueProto or raw str."""
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/tflite.py
@@ -3133,5 +3133,5 @@ def from_tflite(model, shape_dict, dtype_dict):
     outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs]
     outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
     func = _function.Function(analysis.free_vars(outputs), outputs)
-    mod = IRModule.from_expr(func)
+    mod = IRModule.from_func(func)
     return mod, params
2 changes: 1 addition & 1 deletion python/tvm/relay/qnn/transform.py
@@ -42,7 +42,7 @@ def CanonicalizeOps():
         # We want to utilize all the existing Relay infrastructure. So, instead of supporting this
         # QNN requantize op, we convert it into a sequence of existing Relay operators.
-        mod = tvm.IRModule.from_expr(qnn_expr)
+        mod = tvm.IRModule.from_func(qnn_expr)
         mod = relay.qnn.transform.CanonicalizeOps()(mod)
         relay_expr = mod['main']
         print(relay_expr)
2 changes: 1 addition & 1 deletion python/tvm/relay/quantize/_calibrate.py
@@ -145,7 +145,7 @@ def _make_const(val):
     for global_var, func in mod.functions.items():
         if global_var.name_hint != 'main':
             func_dict[global_var] = func
-    return IRModule.from_expr(main_func, func_dict)
+    return IRModule.from_func(main_func, func_dict)


 # weight scale functions
10 changes: 5 additions & 5 deletions python/tvm/relay/quantize/_partition_conversions.py
@@ -214,7 +214,7 @@ def partition_prefix(mod, quantized_dtypes):
     mid_func = relay.Function(
         relay.analysis.free_vars(mid_body),
         mid_body)
-    mid_mod = tvm.IRModule.from_expr(mid_func)
+    mid_mod = tvm.IRModule.from_func(mid_func)

     scope_builder = prefix_cutter.prefix_sb
     # make sure we pass through all inputs in the prefix function's return expr
@@ -234,7 +234,7 @@ def partition_prefix(mod, quantized_dtypes):
     scope_builder.ret(ret_expr)
     pre_func_body = scope_builder.get()
     pre_func = relay.Function(relay.analysis.free_vars(pre_func_body), pre_func_body)
-    pre_mod = tvm.IRModule.from_expr(pre_func)
+    pre_mod = tvm.IRModule.from_func(pre_func)

     return pre_mod, mid_mod

@@ -288,7 +288,7 @@ def partition_suffix(mod, quantized_dtypes):
     post_func = relay.Function(
         relay.analysis.free_vars(post_body),
         post_body,
         func.ret_type)
-    post_mod = tvm.IRModule.from_expr(post_func)
+    post_mod = tvm.IRModule.from_func(post_func)

     mid_body = suffix_cutter.mid_body
     if mid_body is None:
@@ -300,12 +300,12 @@ def partition_suffix(mod, quantized_dtypes):
         post_func = relay.Function(
             [post_body],
             post_body)
-        post_mod = tvm.IRModule.from_expr(post_func)
+        post_mod = tvm.IRModule.from_func(post_func)
     else:
         mid_func = relay.Function(
             func.params,
             mid_body)
-        mid_mod = tvm.IRModule.from_expr(mid_func)
+        mid_mod = tvm.IRModule.from_func(mid_func)

     return mid_mod, post_mod
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/__init__.py
@@ -50,7 +50,7 @@

 def run_opt_pass(expr, opt_pass, import_prelude=False):
     assert isinstance(opt_pass, tvm.transform.Pass)
-    mod = tvm.IRModule.from_expr(expr)
+    mod = tvm.IRModule.from_func(expr)
     if import_prelude:
         Prelude(mod)
     mod = opt_pass(mod)
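A usage sketch of the testing helper above (not part of this commit; the pass choice is illustrative):

    from tvm import relay
    from tvm.relay.testing import run_opt_pass

    # Fold a constant expression: the helper wraps it in a module, runs the
    # pass, and returns the optimized body of "main".
    expr = relay.add(relay.const(1.0), relay.const(2.0))
    folded = run_opt_pass(expr, relay.transform.FoldConstant())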
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/init.py
@@ -160,7 +160,7 @@ def create_workload(net, initializer=None, seed=0):
     params : dict of str to NDArray
         The parameters.
     """
-    mod = tvm.IRModule.from_expr(net)
+    mod = tvm.IRModule.from_func(net)
     mod = relay.transform.InferType()(mod)
     shape_dict = {
         v.name_hint : v.checked_type for v in mod["main"].params}
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/py_converter.py
@@ -91,7 +91,7 @@ def optimize(self, prog: Expr):
         # unwrap tuple wrappers (some op calls produce them)
         unwrapped = prog.astuple() if isinstance(prog, relay.TupleWrapper) else prog
         assert relay.analysis.well_formed(unwrapped)
-        mod = self.mod.from_expr(unwrapped, self.mod.functions, self.mod.type_definitions)
+        mod = self.mod.from_func(unwrapped, self.mod.functions, self.mod.type_definitions)

         # necessary pass: SimplifyInference (otherwise we can't generate code for some operators)
         # and fusion (to get primitive functions)
2 changes: 1 addition & 1 deletion python/tvm/relay/transform/memory_plan.py
@@ -161,7 +161,7 @@ def mk_let(bindings, body):
     return body

 def const_eval(mod, exp):
-    mod = IRModule.from_expr(exp, type_defs=mod.type_definitions)
+    mod = IRModule.from_func(exp, type_defs=mod.type_definitions)
     mod = transform.FoldConstant()(mod)
     return mod["main"]
2 changes: 1 addition & 1 deletion tests/micro/test_runtime_micro_on_arm.py
@@ -222,7 +222,7 @@ def test_conv2d():
                           padding=(1, 1),
                           channels=4)
     func = relay.Function(relay.analysis.free_vars(conv_expr), conv_expr)
-    mod = tvm.IRModule.from_expr(func)
+    mod = tvm.IRModule.from_func(func)
     mod = transform.InferType()(mod)

     x_shape = list(map(lambda x: x.value, mod['main'].params[0].checked_type.shape))
@@ -109,7 +109,7 @@ def skip_codegen_test():

 def build_module(mod, target, params=None, enable_acl=True, tvm_ops=0, acl_partitions=1):
     """Build module with option to build for ACL."""
     if isinstance(mod, tvm.relay.expr.Call):
-        mod = tvm.IRModule.from_expr(mod)
+        mod = tvm.IRModule.from_func(mod)
     with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
         if enable_acl:
             mod = arm_compute_lib.partition_for_arm_compute_lib(mod, params)
2 changes: 1 addition & 1 deletion tests/python/contrib/test_ethosn/infrastructure.py
@@ -37,7 +37,7 @@ def make_module(func, params):
     func = relay.Function(relay.analysis.free_vars(func), func)
     if params:
         relay.build_module.bind_params_by_name(func, params)
-    return tvm.IRModule.from_expr(func)
+    return tvm.IRModule.from_func(func)


 def make_ethosn_composite(ethosn_expr, name):
2 changes: 1 addition & 1 deletion tests/python/contrib/test_onnx_model.py
@@ -148,7 +148,7 @@ def test_partition():
     func = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)

     target = 'llvm'
-    mod = IRModule.from_expr(func)
+    mod = IRModule.from_func(func)
     mod = transform.PartitionGraph()(mod)

     with tvm.transform.PassContext(opt_level=3, disabled_pass=['FuseOps']):
2 changes: 1 addition & 1 deletion tests/python/frontend/mxnet/test_graph.py
@@ -102,7 +102,7 @@ def relay_compose(F, **kwargs):
         z = F.split(x, **kwargs)
         z = F.subtract(F.add(z[0], z[2]), y)
         func = relay.Function(relay.analysis.free_vars(z), z)
-        return tvm.IRModule.from_expr(func)
+        return tvm.IRModule.from_func(func)

     mx_sym = mx_compose(mx, num_outputs=3, axis=1)
     mod, _ = relay.frontend.from_mxnet(
4 changes: 2 additions & 2 deletions tests/python/frontend/mxnet/test_qnn_ops_utils.py
@@ -38,7 +38,7 @@ def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data):
                                          max_range=max_range,
                                          in_dtype=in_dtype)
     mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output)
-    mod = tvm.IRModule.from_expr(mod)
+    mod = tvm.IRModule.from_func(mod)
     with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, "llvm", params=None)
     rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
@@ -92,7 +92,7 @@ def quantize_test_driver(out_dtype, quant_args, in_data, verify_output_data):
                                      max_range=max_range,
                                      out_dtype=out_dtype)
     mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
-    mod = tvm.IRModule.from_expr(mod)
+    mod = tvm.IRModule.from_func(mod)
     with tvm.transform.PassContext(opt_level=3):
         graph, lib, params = relay.build(mod, "llvm", params=None)
     rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
4 changes: 2 additions & 2 deletions tests/python/relay/dyn/test_dynamic_op_level10.py
@@ -47,7 +47,7 @@ def test_dyn_broadcast_to():
     for target, ctx in ctx_list():
         if (target != 'cuda'): #skip cuda because we don't have dynamic support for GPU
             for kind in ["vm", "debug"]:
-                mod = tvm.ir.IRModule.from_expr(func)
+                mod = tvm.ir.IRModule.from_func(func)
                 intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                 op_res = intrp.evaluate(func)(x, np.array(dyn_shape).astype(shape_type))
                 tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
@@ -80,7 +80,7 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
     for target, ctx in ctx_list():
         if (target != 'cuda'): #skip cuda because we don't have dynamic support for GPU
             for kind in ["vm", "debug"]:
-                mod = tvm.ir.IRModule.from_expr(func)
+                mod = tvm.ir.IRModule.from_func(func)
                 intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                 out_relay = intrp.evaluate()(indices_np, np.array(depth).astype("int32"))
                 tvm.testing.assert_allclose(out_relay.asnumpy(), out_np)