Remove top-level exposures.
tqchen committed Feb 27, 2020
1 parent 130d194 commit 7595ed6
Showing 130 changed files with 719 additions and 720 deletions.
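The pattern across these files: names formerly exposed at the top level of the tvm package (create_schedule, ir_pass, ir_builder, build_config, expr, schedule, tensor, ...) now live under the tvm.te, tvm.tir, tvm.target, and tvm.driver namespaces. A minimal before/after sketch of downstream code under the new layout (illustrative only; the call sites below are assumed for this revision, not taken from the diff):

    import tvm
    from tvm import te

    n = te.var("n")
    A = te.placeholder((n,), name='A')
    B = te.placeholder((n,), name='B')
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')

    s = tvm.te.create_schedule(C.op)                 # was tvm.create_schedule(C.op)
    f = tvm.lower(s, [A, B, C], name="test_add")     # tvm.lower stays at the top level
    m = tvm.build(f, target="llvm")                  # as does tvm.build

    with tvm.target.build_config():                  # was tvm.build_config()
        m2 = tvm.build(s, [A, B, C], target="llvm")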
7 changes: 1 addition & 6 deletions python/tvm/__init__.py
@@ -47,10 +47,9 @@

# tvm.target
from . import target
from .target import build_config

# tvm.te
from .te import decl_tensor_intrin, create_schedule, tag_scope
from . import te

# tvm.testing
from . import testing
@@ -64,10 +63,6 @@
# others
from . import arith

# backward compact for topi, to be removed later
from .tir import expr, stmt, ir_builder, ir_pass, generic
from .te import tensor, schedule

# Contrib initializers
from .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel

12 changes: 8 additions & 4 deletions python/tvm/autotvm/measure/measure_methods.py
@@ -33,9 +33,13 @@

import numpy as np

from ... import ir_pass, build, build_config, nd, TVMError, register_func, \
rpc as _rpc, target as _target
from ...contrib import nvcc, ndk, tar
import tvm._ffi
from tvm import nd, rpc as _rpc, target as _target
from tvm.tir import ir_pass
from tvm.error import TVMError
from tvm.target import build_config
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar

from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
@@ -581,7 +585,7 @@ def _check():
return not t.is_alive()


@register_func
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
curr_cuda_target_arch = AutotvmGlobalScope.current.cuda_target_arch
2 changes: 1 addition & 1 deletion python/tvm/autotvm/task/code_hash.py
@@ -22,7 +22,7 @@
import inspect
import zlib

from tvm import schedule
from tvm.te import schedule

def attach_code_hash(s):
"""Decorator for attaching a code hash to a schedule
2 changes: 1 addition & 1 deletion python/tvm/autotvm/task/topi_integration.py
@@ -28,8 +28,8 @@
"""
import tvm.te._ffi_api
from tvm import target as _target
from tvm.te import tensor

from ... import tensor
from .task import args_to_workload, DispatchContext, \
register_task_compute, register_task_schedule, serialize_args

2 changes: 1 addition & 1 deletion python/tvm/autotvm/util.py
@@ -24,7 +24,7 @@

import numpy as np

from .. import expr, ir_pass
from tvm.tir import expr, ir_pass

logger = logging.getLogger('autotvm')

2 changes: 1 addition & 1 deletion python/tvm/contrib/peak.py
@@ -208,7 +208,7 @@ def measure_compute_mad(total_item, item_per_thread, base_type, bits, lanes,
def extern(ins, outs):
# pylint: disable=unused-argument
"""construct measurement function by building IR directly"""
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()

bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
12 changes: 6 additions & 6 deletions python/tvm/contrib/tedd.py
@@ -282,23 +282,23 @@ def get_leaf_itervar_index(itervar, leaf_iv):
def encode_itervar_relation(obj_manager, rel):
"""Extract and encode IterVar Relationship visualization data to a dictionary"""
rel_type = type(rel)
if rel_type is tvm.schedule.Split:
if rel_type is tvm.te.schedule.Split:
node_type = 'Split_Relation'
rel_dict = {
"type": node_type,
"parent": obj_manager.get_dom_path(rel.parent),
"outer": obj_manager.get_dom_path(rel.outer),
"inner": obj_manager.get_dom_path(rel.inner),
}
elif rel_type is tvm.schedule.Fuse:
elif rel_type is tvm.te.schedule.Fuse:
node_type = 'Fuse_Relation'
rel_dict = {
"type": node_type,
"fused": obj_manager.get_dom_path(rel.fused),
"outer": obj_manager.get_dom_path(rel.outer),
"inner": obj_manager.get_dom_path(rel.inner),
}
elif rel_type is tvm.schedule.Singleton:
elif rel_type is tvm.te.schedule.Singleton:
node_type = 'Singleton_Relation'
rel_dict = {
"type": node_type,
@@ -377,12 +377,12 @@ def encode_schedule(sch, need_range):
dict : dictionary
A nested dictionary
"""
assert isinstance(sch, tvm.schedule.Schedule
), 'Input is not a tvm.schedule.Schedule object.'
assert isinstance(sch, tvm.te.schedule.Schedule
), 'Input is not a tvm.te.schedule.Schedule object.'
range_map = None
if need_range:
try:
range_map = tvm.schedule.InferBound(sch)
range_map = tvm.te.schedule.InferBound(sch)
except tvm._ffi.base.TVMError as expt:
warnings.warn(
'Ranges are not available, because InferBound fails with the following error:\n'
10 changes: 5 additions & 5 deletions python/tvm/driver/build_module.py
@@ -89,7 +89,7 @@ def form_body(sch):
"""According to the given schedule, form the raw body
Parameters
----------
sch : tvm.schedule.Schedule
sch : tvm.te.schedule.Schedule
The given scheduler to form the raw body
Returns
@@ -113,7 +113,7 @@ def lower(sch,
Parameters
----------
sch : tvm.schedule.Schedule
sch : tvm.te.schedule.Schedule
The schedule to be built
args : list of Buffer or Tensor or Var
@@ -286,7 +286,7 @@ def build(inputs,
Parameters
----------
inputs : tvm.Schedule, LoweredFunc, or dict of target to LoweredFunc list
inputs : tvm.te.Schedule, LoweredFunc, or dict of target to LoweredFunc list
The schedule to be built
args : list of Buffer or Tensor or Var, optional
@@ -328,7 +328,7 @@ def build(inputs,
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s = tvm.create_schedule(C.op)
s = tvm.te.create_schedule(C.op)
f = tvm.lower(s, [A, B, C], name="test_add")
m = tvm.build(f, target="llvm")
@@ -340,7 +340,7 @@ def build(inputs,
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s1 = tvm.create_schedule(C.op)
s1 = tvm.te.create_schedule(C.op)
with tvm.target.cuda() as cuda_tgt:
s2 = topi.cuda.schedule_injective(cuda_tgt, [C])
f1 = tvm.lower(s1, [A, B, C], name="test_add1")
2 changes: 1 addition & 1 deletion python/tvm/hybrid/util.py
@@ -72,7 +72,7 @@ def _pruned_source(func):
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
# pylint: disable=import-outside-toplevel
from .. import ir_pass
from tvm.tir import ir_pass

def replace(op):
if isinstance(op, _stmt.Provide) and op.func in rmap.keys():
2 changes: 1 addition & 1 deletion python/tvm/relay/backend/_backend.py
@@ -26,7 +26,7 @@ def lower(sch, inputs, func_name, source_func):
Parameters
----------
sch : tvm.Schedule
sch : tvm.te.Schedule
The schedule.
inputs : List[tvm.te.Tensor]
8 changes: 4 additions & 4 deletions python/tvm/relay/backend/compile_engine.py
@@ -80,11 +80,11 @@ def get_shape(shape):
"""Convert the shape to correct dtype and vars."""
ret = []
for dim in shape:
if isinstance(dim, tvm.expr.IntImm):
if isinstance(dim, tvm.tir.IntImm):
val = int(dim)
assert val <= np.iinfo(np.int32).max
ret.append(tvm.expr.IntImm("int32", val))
elif isinstance(dim, tvm.expr.Any):
ret.append(tvm.tir.IntImm("int32", val))
elif isinstance(dim, tvm.tir.Any):
ret.append(te.var("any_dim", "int32"))
else:
ret.append(dim)
@@ -130,7 +130,7 @@ def get_valid_implementations(op, attrs, inputs, out_type, target):
flag = True
for clause in spec.condition.clauses:
clause = analyzer.canonical_simplify(clause)
if isinstance(clause, tvm.expr.IntImm) and clause.value:
if isinstance(clause, tvm.tir.IntImm) and clause.value:
continue
flag = False
break
2 changes: 1 addition & 1 deletion python/tvm/relay/backend/graph_runtime_codegen.py
@@ -36,7 +36,7 @@
from tvm.runtime.ndarray import empty
from tvm.relay import _build_module
from tvm import target as _target
from tvm import expr as _expr
from tvm.tir import expr as _expr

class GraphRuntimeCodegen(object):
"""The compiler from Relay to the TVM runtime system."""
2 changes: 1 addition & 1 deletion python/tvm/relay/build_module.py
@@ -23,7 +23,7 @@

from tvm.ir import IRModule

from tvm import expr as tvm_expr
from tvm.tir import expr as tvm_expr
from .. import nd as _nd, target as _target, autotvm
from ..contrib import graph_runtime as _graph_rt
from . import _build_module
2 changes: 1 addition & 1 deletion python/tvm/relay/op/op.py
@@ -183,7 +183,7 @@ def schedule(self, attrs, outs, target):
Returns
-------
schedule : tvm.Schedule
schedule : tvm.te.Schedule
The schedule.
"""
return _OpImplementationSchedule(self, attrs, outs, target)
2 changes: 1 addition & 1 deletion python/tvm/target/build_config.py
@@ -35,7 +35,7 @@ class DumpIR(object):
-----------
.. code-block:: python
with tvm.build_config(dump_pass_ir=True)
with tvm.target.build_config(dump_pass_ir=True)
run()
"""
scope_level = 0
4 changes: 2 additions & 2 deletions python/tvm/te/tag.py
@@ -80,11 +80,11 @@ def tag_scope(tag):
B = te.placeholder((m, l), name='B')
k = te.reduce_axis((0, l), name='k')
with tvm.tag_scope(tag='matmul'):
with tvm.te.tag_scope(tag='matmul'):
C = te.compute((n, m), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k))
# or use tag_scope as decorator
@tvm.tag_scope(tag="conv")
@tvm.te.tag_scope(tag="conv")
def compute_relu(data):
return te.compute(data.shape, lambda *i: tvm.select(data(*i) < 0, 0.0, data(*i)))
"""
2 changes: 1 addition & 1 deletion python/tvm/tir/generic.py
@@ -16,7 +16,7 @@
# under the License.
"""Generic opertors in TVM.
We follow the numpy naming convention for this interface
(e.g., tvm.generic.multitply ~ numpy.multiply).
(e.g., tvm.tir.generic.multitply ~ numpy.multiply).
The default implementation is used by tvm.ExprOp.
"""
# pylint: disable=unused-argument
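The generic operators referenced in the docstring above provide the default implementation behind tvm.ExprOp's arithmetic; a small sketch under the renamed path (illustrative; assumes tvm.tir.generic.multiply is available at this revision):

    import tvm
    from tvm import te

    a = te.var("a", dtype="float32")
    b = te.var("b", dtype="float32")

    # Two spellings of the same product expression:
    c1 = tvm.tir.generic.multiply(a, b)   # explicit generic operator
    c2 = a * b                            # ExprOp dispatches to the same implementation
    print(c1, c2)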
10 changes: 5 additions & 5 deletions python/tvm/tir/ir_builder.py
@@ -98,7 +98,7 @@ class IRBuilder(object):
--------
.. code-block:: python
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()
n = te.var("n")
A = ib.allocate("float32", n, name="A")
with ib.for_range(0, n, name="i") as i:
@@ -158,7 +158,7 @@ def scope_attr(self, node, attr_key, value):
--------
.. code-block:: python
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
ib.scope_attr(x, "storage_scope", "global")
@@ -200,7 +200,7 @@ def for_range(self, begin, end, name="i", dtype="int32", for_type="serial"):
--------
.. code-block:: python
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()
x = ib.pointer("float32")
with ib.for_range(1, 10, name="i") as i:
x[i] = x[i - 1] + 1
@@ -243,7 +243,7 @@ def if_scope(self, cond):
--------
.. code-block:: python
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
@@ -268,7 +268,7 @@ def else_scope(self):
--------
.. code-block:: python
ib = tvm.ir_builder.create()
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
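The ir_builder docstrings above now reach the builder through tvm.tir.ir_builder; a minimal end-to-end sketch of that path (illustrative; apart from the imports and ib.get(), the body mirrors the docstring and is an assumption for this revision):

    import tvm
    from tvm import te

    ib = tvm.tir.ir_builder.create()      # was tvm.ir_builder.create()
    n = te.var("n")
    A = ib.allocate("float32", n, name="A")
    with ib.for_range(0, n, name="i") as i:
        A[i] = A[i] + 1
    stmt = ib.get()                       # retrieve the constructed statement
    print(stmt)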
2 changes: 1 addition & 1 deletion rust/runtime/tests/test_tvm_basic/src/build_test_lib.py
@@ -29,7 +29,7 @@ def main():
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s = tvm.create_schedule(C.op)
s = tvm.te.create_schedule(C.op)
s[C].parallel(s[C].op.axis[0])
print(tvm.lower(s, [A, B, C], simple_mode=True))
tvm.build(s, [A, B, C], 'llvm --system-lib').save(osp.join(sys.argv[1], 'test.o'))
2 changes: 1 addition & 1 deletion rust/runtime/tests/test_tvm_dso/src/build_test_lib.py
@@ -30,7 +30,7 @@ def main():
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
s = tvm.create_schedule(C.op)
s = tvm.te.create_schedule(C.op)
s[C].parallel(s[C].op.axis[0])
print(tvm.lower(s, [A, B, C], simple_mode=True))
obj_file = osp.join(sys.argv[1], 'test.o')
10 changes: 5 additions & 5 deletions tests/python/integration/test_dot.py
@@ -30,11 +30,11 @@ def lower(s, args, name="mydot"):
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
stmt = tvm.ir_pass.StorageFlatten(stmt, binds, 16)
stmt = tvm.ir_pass.CanonicalSimplify(stmt)
stmt = tvm.ir_pass.Simplify(stmt)
fapi = tvm.ir_pass.MakeAPI(stmt, name, arg_list, 0, True)
fapi = tvm.ir_pass.LowerTVMBuiltin(fapi)
stmt = tvm.tir.ir_pass.StorageFlatten(stmt, binds, 16)
stmt = tvm.tir.ir_pass.CanonicalSimplify(stmt)
stmt = tvm.tir.ir_pass.Simplify(stmt)
fapi = tvm.tir.ir_pass.MakeAPI(stmt, name, arg_list, 0, True)
fapi = tvm.tir.ir_pass.LowerTVMBuiltin(fapi)
return fapi


2 changes: 1 addition & 1 deletion tests/python/relay/test_pass_fold_constant.py
@@ -55,7 +55,7 @@ def fail(x):
raise RuntimeError()

# the fold constant should work on any context.
with tvm.build_config(add_lower_pass=[(0, fail)]):
with tvm.target.build_config(add_lower_pass=[(0, fail)]):
with tvm.target.create("cuda"):
zz = run_opt_pass(before(), transform.FoldConstant())
zexpected = run_opt_pass(expected(), transform.InferType())
4 changes: 2 additions & 2 deletions tests/python/unittest/test_arith_canonical_simplify.py
@@ -23,7 +23,7 @@ def __init__(self):

def verify(self, data, expected):
res = self.analyzer.canonical_simplify(data)
assert tvm.ir_pass.Equal(res, expected), "\ndata={}\nres={}\nexpected={}".format(data, res, expected)
assert tvm.tir.ir_pass.Equal(res, expected), "\ndata={}\nres={}\nexpected={}".format(data, res, expected)


def test_mul_sum_simplify():
@@ -197,7 +197,7 @@ def test_reduce_combiner_simplify():

# Check that the remaining components are the expected ones.
for lhs, rhs in zip(simplified.source, reference_simplified_sources[j]):
assert tvm.ir_pass.Equal(lhs, rhs)
assert tvm.tir.ir_pass.Equal(lhs, rhs)

# Test that components with side effects are not removed
side_effect = lambda *xs: tvm.tir.Call("int32", "dummy", xs, tvm.tir.Call.Intrinsic, None, 0)