[Unity] Update tests again to adapt to latest TVMScript syntax (#14115)
* finished

* fix

* rollback merge_composite_functions
Ubospica authored and tqchen committed Mar 13, 2023
1 parent bd2f63a commit d6733d6
Showing 11 changed files with 80 additions and 84 deletions.
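For reference, the TVMScript syntax updates these tests adopt (a minimal sketch distilled from the diffs below; illustrative only, not an exhaustive list of the new syntax):

# Symbolic TIR variables
n = T.var("int64")              # old spelling
n = T.int64()                   # new spelling

# Buffer annotations in prim_func signatures
A: T.Buffer[(2, 3), "float32"]  # old spelling (subscript form)
A: T.Buffer((2, 3), "float32")  # new spelling (call form)
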
4 changes: 2 additions & 2 deletions python/tvm/relax/block_builder.py
@@ -516,8 +516,8 @@ def te_func(var_rxplaceholder: T.handle, var_rxplaceholder_1: T.handle,
var_compute: T.handle) -> None:
# function attr dict
T.func_attr({"tir.noalias": True})
- m = T.var("int64")
- n = T.var("int64")
+ m = T.int64()
+ n = T.int64()
rxplaceholder = T.match_buffer(var_rxplaceholder, [n, m], dtype="float32")
rxplaceholder_1 = T.match_buffer(var_rxplaceholder_1, [n, m], dtype="float32")
compute = T.match_buffer(var_compute, [128, 128], dtype="float32")
12 changes: 6 additions & 6 deletions python/tvm/relax/transform/transform.py
@@ -422,9 +422,9 @@ def main(
@T.prim_func
def add(
- A: T.Buffer[(2, 3), "float32"],
- B: T.Buffer[(2, 3), "float32"],
- T_add: T.Buffer[(2, 3), "float32"],
+ A: T.Buffer((2, 3), "float32"),
+ B: T.Buffer((2, 3), "float32"),
+ T_add: T.Buffer((2, 3), "float32"),
):
T.func_attr({"tir.noalias": True})
for ax0, ax1 in T.grid(2, 3):
@@ -436,9 +436,9 @@ def add(
@T.prim_func
def multiply(
- A: T.Buffer[(2, 3), "float32"],
- B: T.Buffer[(2, 3), "float32"],
- T_multiply: T.Buffer[(2, 3), "float32"],
+ A: T.Buffer((2, 3), "float32"),
+ B: T.Buffer((2, 3), "float32"),
+ T_multiply: T.Buffer((2, 3), "float32"),
):
T.func_attr({"tir.noalias": True})
for ax0, ax1 in T.grid(2, 3):
7 changes: 1 addition & 6 deletions src/script/printer/relax/tir.cc
@@ -53,12 +53,7 @@ Doc PrintTIRVar(tir::Var n, ObjectPath n_p, IRDocsifier d) {
}
IdDoc var = d->Define(n, GetRef<Frame>(f), n->name_hint.empty() ? "v" : n->name_hint);
var->source_paths.push_back(n_p);
- f->stmts.push_back(AssignDoc(var,
-     TIR(d, "Var")->Call({
-         LiteralDoc::Str(var->name, n_p->Attr("name_hint")),
-         LiteralDoc::DataType(n->dtype, n_p->Attr("dtype")),
-     }),
-     NullOpt));
+ f->stmts.push_back(AssignDoc(var, TIR(d, DType2Str(n->dtype))->Call({}), NullOpt));
}
if (Optional<ExprDoc> doc = d->GetVarDoc(n)) {
return doc.value();
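In effect, TIR variables defined inside Relax functions are now printed with the dtype-constructor shorthand instead of an explicit T.Var call, for example:

n = T.Var("n", "int64")  # printed before this change
n = T.int64()            # printed after this change

This is the form the updated test expectations below check against.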
8 changes: 4 additions & 4 deletions tests/python/relax/test_backend_transform_shape_lower.py
@@ -164,8 +164,8 @@ class Before:
def main(
x: R.Tensor(["n", "m"], "float32"), y: R.Tensor(ndim=3, dtype=None)
) -> R.Shape(ndim=3):
- n = T.Var("n", "int64")
- k = T.Var("k", "int64")
+ n = T.int64()
+ k = T.int64()
z = R.match_cast(y, R.Tensor([k, m, k + 1], dtype=None))
return R.shape([k + 1, m, 2])

@@ -185,8 +185,8 @@ def shape_func(H: T.Buffer(T.int64(4), "int64")):
def main(
x: R.Tensor(["n", "m"], "float32"), y: R.Tensor(ndim=3, dtype=None)
) -> R.Shape(ndim=3):
- n = T.Var("n", "int64")
- k = T.Var("k", "int64")
+ n = T.int64()
+ k = T.int64()
shape_heap = R.call_builtin_with_ctx(
"vm.builtin.alloc_shape_heap",
[R.prim_value(4)],
12 changes: 6 additions & 6 deletions tests/python/relax/test_transform_canonicalize_bindings.py
@@ -142,7 +142,7 @@ class TestMatchCast:
@R.function
def main(x: R.Tensor):
q = x
- m, n = T.var("int64"), T.var("int64")
+ m, n = T.int64(), T.int64()
z = R.match_cast(q, R.Tensor((m, n)))
w = z
return w
@@ -153,7 +153,7 @@ class Expected:
def main(x: R.Tensor):
q = x
# can't get rid of z because its shape_ is different from x's
- m, n = T.var("int64"), T.var("int64")
+ m, n = T.int64(), T.int64()
z = R.match_cast(x, R.Tensor((m, n)))
w = z
return z
@@ -167,7 +167,7 @@ def test_same_shape():
class TestSameShape:
@R.function
def main(x: R.Tensor(("m", "n"), "float32")):
- m, n = T.var("int64"), T.var("int64")
+ m, n = T.int64(), T.int64()
y = x
# trivial check
z = R.match_cast(x, R.Tensor((m, n), "float32"))
@@ -179,7 +179,7 @@ def main(x: R.Tensor(("m", "n"), "float32")):
class Expected:
@R.function
def main(x: R.Tensor(("m", "n"), "float32")):
- m, n = T.var("int64"), T.var("int64")
+ m, n = T.int64(), T.int64()
y = x
# canonicalized into a var binding
z = x
@@ -198,7 +198,7 @@ class TestChangeShape:
def main(x: R.Tensor(("m", "n"))):
y = x
# not trivial: introduces new shape vars
- o, p = T.var("int64"), T.var("int64")
+ o, p = T.int64(), T.int64()
z = R.match_cast(x, R.Tensor((o, p)))
w = z
q = R.add(w, y)
@@ -209,7 +209,7 @@ class Expected:
@R.function
def main(x: R.Tensor(("m", "n"))):
y = x
- o, p = T.var("int64"), T.var("int64")
+ o, p = T.int64(), T.int64()
z = R.match_cast(x, R.Tensor((o, p)))
w = z
# the shape_ field on q will need to be updated
8 changes: 4 additions & 4 deletions tests/python/relax/test_transform_legalize_ops_manipulate.py
@@ -802,7 +802,7 @@ def main(x: R.Tensor((2, 3), "float32"), y: R.Tensor((1, 3), "float32")) -> R.Te
return gv

@T.prim_func
- def collapse_sum(rxplaceholder: T.Buffer[(T.int64(2), T.int64(3)), "float32"], rxplaceholder_red: T.Buffer[(T.int64(1), T.int64(3)), "float32"]):
+ def collapse_sum(rxplaceholder: T.Buffer((T.int64(2), T.int64(3)), "float32"), rxplaceholder_red: T.Buffer((T.int64(1), T.int64(3)), "float32")):
T.func_attr({"tir.noalias": True})
for i0, i1, i2 in T.grid(T.int64(1), T.int64(3), T.int64(2)):
with T.block("rxplaceholder_red"):
@@ -825,7 +825,7 @@ def test_collapse_sum_like_symbolic():
class CollapseSumLike:
@R.function
def main(x: R.Tensor(("a", "b", "a"), "float32"), y: R.Tensor(("b", 1), "float32")) -> R.Tensor(("b", 1), "float32"):
- b = T.var("int64")
+ b = T.int64()
gv: R.Tensor((b, 1), "float32") = R.collapse_sum_like(x, y)
return gv

@@ -855,7 +855,7 @@ def main(
return gv

@T.prim_func
- def collapse_sum(rxplaceholder: T.Buffer[(T.int64(3), T.int64(2), T.int64(3)), "float32"], rxplaceholder_red: T.Buffer[(T.int64(2), T.int64(1)), "float32"]):
+ def collapse_sum(rxplaceholder: T.Buffer((T.int64(3), T.int64(2), T.int64(3)), "float32"), rxplaceholder_red: T.Buffer((T.int64(2), T.int64(1)), "float32")):
T.func_attr({"tir.noalias": True})
for ax0, ax1, k0, k2 in T.grid(T.int64(2), T.int64(1), T.int64(3), T.int64(3)):
with T.block("rxplaceholder_red"):
@@ -878,7 +878,7 @@ def test_collapse_sum_to_symbolic():
class CollapseSumTo:
@R.function
def main(x: R.Tensor(("a", "b", "c"), "float32")) -> R.Tensor(("b", 1), "float32"):
- b = T.var("int64")
+ b = T.int64()
gv: R.Tensor((b, 1), "float32") = R.collapse_sum_to(x, (b, 1))
return gv

34 changes: 17 additions & 17 deletions tests/python/relax/test_transform_legalize_ops_nn.py
@@ -868,7 +868,7 @@ def main(x: R.Tensor((2, 3, 16, 32), dtype="float32")) -> R.Tensor((2, 3, 16, 32
return gv

@T.prim_func
- def log_softmax(rxplaceholder: T.Buffer[(T.int64(2), T.int64(3), T.int64(16), T.int64(32)), "float32"], compute: T.Buffer[(T.int64(2), T.int64(3), T.int64(16), T.int64(32)), "float32"],):
+ def log_softmax(rxplaceholder: T.Buffer((T.int64(2), T.int64(3), T.int64(16), T.int64(32)), "float32"), compute: T.Buffer((T.int64(2), T.int64(3), T.int64(16), T.int64(32)), "float32"),):
T.func_attr({"tir.noalias": True})
T_softmax_maxelem = T.alloc_buffer([T.int64(2), T.int64(3), T.int64(32)], dtype="float32")
compute_1 = T.alloc_buffer([T.int64(2), T.int64(3), T.int64(32)], dtype="float32")
@@ -907,29 +907,29 @@ def test_log_softmax_symbolic():
class LogSoftmax:
@R.function
def main(x: R.Tensor(("a", "b", "c"), "float32")) -> R.Tensor(("a", "b", "c"), "float32"):
- a = T.var("int64")
- b = T.var("int64")
- c = T.var("int64")
+ a = T.int64()
+ b = T.int64()
+ c = T.int64()
gv: R.Tensor((a, b, c), "float32") = R.nn.log_softmax(x)
return gv

@tvm.script.ir_module
class Expected:
@R.function
def main(x: R.Tensor(("a", "b", "c"), dtype="float32")) -> R.Tensor(("a", "b", "c"), dtype="float32"):
- a = T.var("int64")
- b = T.var("int64")
- c = T.var("int64")
+ a = T.int64()
+ b = T.int64()
+ c = T.int64()
# block 0
gv = R.call_tir(log_softmax, (x,), R.Tensor((a, b, c), dtype="float32"))
return gv

@T.prim_func
def log_softmax(var_rxplaceholder: T.handle, var_compute: T.handle):
T.func_attr({"tir.noalias": True})
- a = T.var("int64")
- b = T.var("int64")
- c = T.var("int64")
+ a = T.int64()
+ b = T.int64()
+ c = T.int64()
rxplaceholder = T.match_buffer(var_rxplaceholder, [a, b, c], dtype="float32")
compute = T.match_buffer(var_compute, [a, b, c], dtype="float32")
T_softmax_maxelem = T.alloc_buffer([a, b], dtype="float32")
@@ -980,7 +980,7 @@ def main(x: R.Tensor((3,), dtype="float32"), y: R.Tensor((3,), dtype="float32"))
return gv

@T.prim_func
- def cross_entropy_with_logits(rxplaceholder: T.Buffer[T.int64(3), "float32"], rxplaceholder_1: T.Buffer[T.int64(3), "float32"], T_multiply: T.Buffer[(), "float32"]):
+ def cross_entropy_with_logits(rxplaceholder: T.Buffer(T.int64(3), "float32"), rxplaceholder_1: T.Buffer(T.int64(3), "float32"), T_multiply: T.Buffer((), "float32")):
T.func_attr({"tir.noalias": True})
T_multiply_1 = T.alloc_buffer([T.int64(3)], dtype="float32")
T_multiply_red = T.alloc_buffer([], dtype="float32")
@@ -1026,7 +1026,7 @@ def main(x: R.Tensor((2, 3), dtype="float32"), y: R.Tensor((2, 3), dtype="float3
return gv

@T.prim_func
- def cross_entropy_with_logits(rxplaceholder: T.Buffer[(T.int64(2), T.int64(3)), "float32"], rxplaceholder_1: T.Buffer[(T.int64(2), T.int64(3)), "float32"], T_divide: T.Buffer[(), "float32"]):
+ def cross_entropy_with_logits(rxplaceholder: T.Buffer((T.int64(2), T.int64(3)), "float32"), rxplaceholder_1: T.Buffer((T.int64(2), T.int64(3)), "float32"), T_divide: T.Buffer((), "float32")):
T.func_attr({"tir.noalias": True})
T_multiply = T.alloc_buffer([T.int64(2), T.int64(3)], dtype="float32")
T_multiply_red = T.alloc_buffer([], dtype="float32")
@@ -1067,8 +1067,8 @@ def test_cross_entropy_with_logits_batch_symbolic():
class CrossEntropyWithLogits:
@R.function
def main(x: R.Tensor(("n", "m"), "float32"), y: R.Tensor(("n", "m"), "float32")) -> R.Tensor(None, "float32", ndim=2):
- n = T.var("int64")
- m = T.var("int64")
+ n = T.int64()
+ m = T.int64()
gv: R.Tensor((), "float32") = R.nn.cross_entropy_with_logits(x, y)
return gv

@@ -1080,10 +1080,10 @@ def main(x: R.Tensor(("n", "m"), dtype="float32"), y: R.Tensor(("n", "m"), dtype
return gv

@T.prim_func
- def cross_entropy_with_logits(var_rxplaceholder: T.handle, var_rxplaceholder_1: T.handle, T_divide: T.Buffer[(), "float32"]):
+ def cross_entropy_with_logits(var_rxplaceholder: T.handle, var_rxplaceholder_1: T.handle, T_divide: T.Buffer((), "float32")):
T.func_attr({"tir.noalias": True})
- m = T.var("int64")
- n = T.var("int64")
+ m = T.int64()
+ n = T.int64()
rxplaceholder = T.match_buffer(var_rxplaceholder, [n, m], dtype="float32")
rxplaceholder_1 = T.match_buffer(var_rxplaceholder_1, [n, m], dtype="float32")
T_multiply = T.alloc_buffer([n, m], dtype="float32")
32 changes: 16 additions & 16 deletions tests/python/relax/test_transform_remove_unused_funcs.py
@@ -34,9 +34,9 @@ def test_unused_relax_func():
class InputModule:
@T.prim_func
def tir_add(
- x: T.Buffer[(16, 16), "float32"],
- y: T.Buffer[(16, 16), "float32"],
- z: T.Buffer[(16, 16), "float32"],
+ x: T.Buffer((16, 16), "float32"),
+ y: T.Buffer((16, 16), "float32"),
+ z: T.Buffer((16, 16), "float32"),
) -> None:
for i, j in T.grid(16, 16):
with T.block("add"):
@@ -68,9 +68,9 @@ def test_unused_relax_func_custom_entry_func():
class InputModule:
@T.prim_func
def tir_add(
- x: T.Buffer[(16, 16), "float32"],
- y: T.Buffer[(16, 16), "float32"],
- z: T.Buffer[(16, 16), "float32"],
+ x: T.Buffer((16, 16), "float32"),
+ y: T.Buffer((16, 16), "float32"),
+ z: T.Buffer((16, 16), "float32"),
) -> None:
for i, j in T.grid(16, 16):
with T.block("add"):
@@ -105,9 +105,9 @@ def test_unused_relax_func_symbolic_shape():
class InputModule:
@T.prim_func
def tir_add(
- x: T.Buffer[(16, 16), "float32"],
- y: T.Buffer[(16, 16), "float32"],
- z: T.Buffer[(16, 16), "float32"],
+ x: T.Buffer((16, 16), "float32"),
+ y: T.Buffer((16, 16), "float32"),
+ z: T.Buffer((16, 16), "float32"),
) -> None:
for i, j in T.grid(16, 16):
with T.block("add"):
@@ -121,7 +121,7 @@ def unused_func(x: R.Tensor(("m", "n"), "float32"), w: R.Tensor(("n", "k"), "flo

@R.function
def main(x: R.Tensor(("m", "n"), "float32"), w: R.Tensor(("n", "k"), "float32")):
- m, k = T.var("int64"), T.var("int64")
+ m, k = T.int64(), T.int64()
gv0 = R.call_tir(tir_add, (x, w), R.Tensor((m + 1, k), dtype="float32"))
return gv0

@@ -139,9 +139,9 @@ def test_unused_prim_func():
class InputModule:
@T.prim_func
def unused_func(
- x: T.Buffer[(16, 16), "float32"],
- y: T.Buffer[(16, 16), "float32"],
- z: T.Buffer[(16, 16), "float32"],
+ x: T.Buffer((16, 16), "float32"),
+ y: T.Buffer((16, 16), "float32"),
+ z: T.Buffer((16, 16), "float32"),
) -> None:
T.func_attr({"global_symbol": "tir_unused"})
for i, j in T.grid(16, 16):
@@ -175,9 +175,9 @@ def test_multiple_unused_funcs():
class InputModule:
@T.prim_func
def unused_func1(
- x: T.Buffer[(16, 16), "float32"],
- y: T.Buffer[(16, 16), "float32"],
- z: T.Buffer[(16, 16), "float32"],
+ x: T.Buffer((16, 16), "float32"),
+ y: T.Buffer((16, 16), "float32"),
+ z: T.Buffer((16, 16), "float32"),
) -> None:
T.func_attr({"global_symbol": "tir_unused"})
for i, j in T.grid(16, 16):
(Diffs for the remaining 3 changed files did not load and are not shown.)
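
As a quick sanity check of the new syntax (a hedged sketch, assuming a TVM build from this point in history with the Unity TVMScript frontend; add_one is an illustrative name, not part of this commit), a prim_func written in the updated form should parse and print back in the same style:

from tvm.script import tir as T

@T.prim_func
def add_one(A: T.Buffer((2, 3), "float32"), B: T.Buffer((2, 3), "float32")):
    T.func_attr({"tir.noalias": True})
    for i, j in T.grid(2, 3):
        with T.block("add_one"):
            vi, vj = T.axis.remap("SS", [i, j])
            # element-wise add of a float32 constant
            B[vi, vj] = A[vi, vj] + T.float32(1)

# Round-tripping through the printer should reproduce the T.Buffer((...), dtype)
# and T.int64()-style output that the updated tests above expect.
print(add_one.script())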
