[REFACTOR][PY][API-CHANGE] Remove legacy python files. (apache#4943)
* [REFACTOR][PY][API-CHANGE] Remove legacy python files.

Remove legacy python files.
Use the te namespace for most of the tensor expression primitives.

- tvm.create_schedule -> tvm.te.create_schedule
- tvm.placeholder -> tvm.te.placeholder
- tvm.compute -> tvm.te.compute

* Remove top-level exposures.
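As an illustration of the rename, a minimal sketch based on the standard vector-add example from the TVM tutorials (not on any single file in this diff; variable names and the "addone" function name are illustrative):

import tvm
from tvm import te

# Legacy spellings removed by this commit:
#   tvm.var, tvm.placeholder, tvm.compute, tvm.create_schedule, tvm.convert
# New spellings after the refactor:
n = te.var("n")                                           # was tvm.var("n")
A = te.placeholder((n,), name="A")                        # was tvm.placeholder(...)
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")   # was tvm.compute(...)
s = te.create_schedule(B.op)                              # was tvm.create_schedule(B.op)
m = tvm.runtime.convert(1024)                             # was tvm.convert(1024)
# tvm.build itself is unchanged by this refactor.
fadd = tvm.build(s, [A, B], "llvm", name="addone")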
tqchen authored and zhiics committed Apr 17, 2020
1 parent a08c135 commit 6a05409
Showing 595 changed files with 9,038 additions and 8,687 deletions.
21 changes: 11 additions & 10 deletions apps/android_rpc/tests/android_rpc_test.py
@@ -22,6 +22,7 @@
"""

import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import util, ndk
@@ -44,9 +45,9 @@

def test_rpc_module():
# graph
n = tvm.convert(1024)
A = tvm.placeholder((n,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
n = tvm.runtime.convert(1024)
A = te.placeholder((n,), name='A')
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
a_np = np.random.uniform(size=1024).astype(A.dtype)
temp = util.tempdir()

@@ -56,7 +57,7 @@ def test_rpc_module():
session_timeout=60)

# Compile the Graph for CPU target
s = tvm.create_schedule(B.op)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].parallel(xi)
s[B].pragma(xo, "parallel_launch_point")
@@ -79,10 +80,10 @@ def test_rpc_module():

# Compile the Graph for OpenCL target
if test_opencl:
s = tvm.create_schedule(B.op)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "opencl", target_host=target, name="myadd")
@@ -102,10 +103,10 @@ def test_rpc_module():

# Compile the Graph for Vulkan target
if test_vulkan:
s = tvm.create_schedule(B.op)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "vulkan", target_host=target, name="myadd")
1 change: 1 addition & 0 deletions apps/benchmark/arm_cpu_imagenet_bench.py
@@ -22,6 +22,7 @@
import numpy as np

import tvm
from tvm import te
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
from tvm import relay
1 change: 1 addition & 0 deletions apps/benchmark/gpu_imagenet_bench.py
@@ -23,6 +23,7 @@
import numpy as np

import tvm
from tvm import te
import tvm.contrib.graph_runtime as runtime
from tvm import relay

1 change: 1 addition & 0 deletions apps/benchmark/mobile_gpu_imagenet_bench.py
@@ -22,6 +22,7 @@
import numpy as np

import tvm
from tvm import te
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
from tvm import relay
1 change: 1 addition & 0 deletions apps/bundle_deploy/build_model.py
@@ -20,6 +20,7 @@
import os
from tvm import relay
import tvm
from tvm import te
import logging


1 change: 1 addition & 0 deletions apps/dso_plugin_module/test_plugin_module.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import os

def test_plugin_module():
1 change: 1 addition & 0 deletions apps/extension/python/tvm_ext/__init__.py
@@ -21,6 +21,7 @@
import ctypes
# Import TVM first to get library symbols
import tvm
from tvm import te

def load_lib():
"""Load library, the functions will be registered into TVM"""
23 changes: 13 additions & 10 deletions apps/extension/tests/test_ext.py
@@ -16,6 +16,8 @@
# under the License.
import tvm_ext
import tvm
import tvm._ffi.registry
from tvm import te
import numpy as np

def test_bind_add():
@@ -26,9 +28,9 @@ def add(a, b):

def test_ext_dev():
n = 10
A = tvm.placeholder((n,), name='A')
B = tvm.compute((n,), lambda *i: A(*i) + 1.0, name='B')
s = tvm.create_schedule(B.op)
A = te.placeholder((n,), name='A')
B = te.compute((n,), lambda *i: A(*i) + 1.0, name='B')
s = te.create_schedule(B.op)
def check_llvm():
if not tvm.runtime.enabled("llvm"):
return
@@ -43,8 +45,8 @@ def check_llvm():


def test_sym_add():
a = tvm.var('a')
b = tvm.var('b')
a = te.var('a')
b = te.var('b')
c = tvm_ext.sym_add(a, b)
assert c.a == a and c.b == b

@@ -59,19 +61,20 @@ def ivec_cb(v2):
assert(isinstance(v2, tvm_ext.IntVec))
assert v2[2] == 3

tvm.convert(ivec_cb)(ivec)
tvm.runtime.convert(ivec_cb)(ivec)


def test_extract_ext():
fdict = tvm.extract_ext_funcs(tvm_ext._LIB.TVMExtDeclare)
fdict = tvm._ffi.registry.extract_ext_funcs(
tvm_ext._LIB.TVMExtDeclare)
assert fdict["mul"](3, 4) == 12


def test_extern_call():
n = 10
A = tvm.placeholder((n,), name='A')
B = tvm.compute((n,), lambda *i: tvm.call_extern("float32", "TVMTestAddOne", A(*i)), name='B')
s = tvm.create_schedule(B.op)
A = te.placeholder((n,), name='A')
B = te.compute((n,), lambda *i: tvm.tir.call_extern("float32", "TVMTestAddOne", A(*i)), name='B')
s = te.create_schedule(B.op)

def check_llvm():
if not tvm.runtime.enabled("llvm"):
9 changes: 5 additions & 4 deletions apps/howto_deploy/prepare_test_libs.py
@@ -16,13 +16,14 @@
# under the License.
"""Script to prepare test_addone.so"""
import tvm
from tvm import te
import os

def prepare_test_libs(base_path):
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
s = tvm.create_schedule(B.op)
n = te.var("n")
A = te.placeholder((n,), name='A')
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
s = te.create_schedule(B.op)
# Compile library as dynamic library
fadd_dylib = tvm.build(s, [A, B], "llvm", name="addone")
dylib_path = os.path.join(base_path, "test_addone_dll.so")
1 change: 1 addition & 0 deletions apps/howto_deploy/python_deploy.py
@@ -19,6 +19,7 @@
# file python_deploy.py

import tvm
from tvm import te
import numpy as np

def verify(mod, fname):
15 changes: 8 additions & 7 deletions apps/ios_rpc/tests/ios_rpc_test.py
@@ -21,6 +21,7 @@
"""

import tvm
from tvm import te
import os
import re
import sys
@@ -54,14 +55,14 @@ def compile_metal(src):

def test_rpc_module():
# graph
n = tvm.convert(1024)
A = tvm.placeholder((n,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
n = tvm.runtime.convert(1024)
A = te.placeholder((n,), name='A')
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
temp = util.tempdir()
s = tvm.create_schedule(B.op)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
# Build the dynamic lib.
# If we don't want to do metal and only use cpu, just set target to be target
f = tvm.build(s, [A, B], "metal", target_host=target, name="myadd")
@@ -70,7 +71,7 @@ def test_rpc_module():
arch=arch, sdk=sdk)
xcode.codesign(path_dso1)

s = tvm.create_schedule(B.op)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].parallel(xi)
s[B].pragma(xo, "parallel_launch_point")
1 change: 1 addition & 0 deletions apps/sgx/enclave/src/build_model.py
@@ -23,6 +23,7 @@
from tvm import relay
from tvm.relay import testing
import tvm
from tvm import te


def main():
1 change: 1 addition & 0 deletions apps/sgx/run_model.py
@@ -17,6 +17,7 @@
import os.path as osp
import numpy as np
import tvm
from tvm import te

CWD = osp.abspath(osp.dirname(__file__))

1 change: 1 addition & 0 deletions docs/api/python/te.rst
@@ -23,6 +23,7 @@ tvm.te
:members:
:imported-members:
:exclude-members:
any, all, min_value, max_value, trace,
exp, erf, tanh, sigmoid, log, cos, sin, atan, sqrt, rsqrt, floor, ceil,
trunc, abs, round, nearbyint, isnan, power, popcount, fmod, if_then_else,
div, indexdiv, indexmod, truncdiv, truncmod, floordiv, floormod,
2 changes: 1 addition & 1 deletion docs/api/python/tir.rst
@@ -20,5 +20,5 @@ tvm.tir
.. automodule:: tvm.tir
:members:
:imported-members:
:exclude-members: PrimExpr
:exclude-members: PrimExpr, const
:autosummary:
1 change: 1 addition & 0 deletions docs/conf.py
@@ -61,6 +61,7 @@
os.environ['TVM_BUILD_DOC'] = '1'
# Version information.
import tvm
from tvm import te
version = tvm.__version__
release = tvm.__version__

11 changes: 6 additions & 5 deletions golang/sample/deploy.py
@@ -21,6 +21,7 @@
from __future__ import absolute_import, print_function

import tvm
from tvm import te
import numpy as np

# Global declarations of environment.
@@ -31,15 +32,15 @@
######################################################################
# Describe the Computation
# ------------------------
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((n,), name='B')
C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
n = te.var("n")
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")

######################################################################
# Schedule the Computation
# ------------------------
s = tvm.create_schedule(C.op)
s = te.create_schedule(C.op)

######################################################################
# Compilation
11 changes: 6 additions & 5 deletions jvm/core/src/test/scripts/test_add_cpu.py
@@ -17,14 +17,15 @@
import os

import tvm
from tvm import te
from tvm.contrib import cc, util

def test_add(target_dir):
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((n,), name='B')
C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = tvm.create_schedule(C.op)
n = te.var("n")
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", target_host="llvm", name="myadd")

fadd.save(os.path.join(target_dir, "add_cpu.o"))
15 changes: 8 additions & 7 deletions jvm/core/src/test/scripts/test_add_gpu.py
@@ -17,22 +17,23 @@
import os

import tvm
from tvm import te
from tvm.contrib import cc, util

def test_add(target_dir):
if not tvm.runtime.enabled("cuda"):
print("skip %s because cuda is not enabled..." % __file__)
return
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((n,), name='B')
C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
n = te.var("n")
A = te.placeholder((n,), name='A')
B = te.placeholder((n,), name='B')
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")

s = tvm.create_schedule(C.op)
s = te.create_schedule(C.op)

bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, tvm.thread_axis("blockIdx.x"))
s[C].bind(tx, tvm.thread_axis("threadIdx.x"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
fadd_cuda = tvm.build(s, [A, B, C], "cuda", target_host="llvm", name="myadd")

fadd_cuda.save(os.path.join(target_dir, "add_gpu.o"))
7 changes: 4 additions & 3 deletions jvm/core/src/test/scripts/test_graph_runtime.py
@@ -17,14 +17,15 @@
import os

import tvm
from tvm import te
import json
from tvm.contrib import graph_runtime

def dump_graph_lib(target_dir):
dim = 4
A = tvm.placeholder((dim,), name='A')
B = tvm.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
sched = tvm.create_schedule(B.op)
A = te.placeholder((dim,), name='A')
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
sched = te.create_schedule(B.op)

node0 = {"op": "null", "name": "x", "inputs": []}
node1 = {"op": "tvm_op", "name": "add",