Commit dfc841c

Author: Siyuan Feng
Commit message: update
1 parent: e91677c

17 files changed: +83 additions, -90 deletions

docs/deep_dive/tensor_ir/tutorials/tir_transformation.py

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@ def main(
 
 
 def evaluate(mod: tvm.IRModule):
-    lib = tvm.compile(mod, target="llvm")
+    lib = tvm.tir.build(mod, target="llvm")
     # check correctness
     lib(a_nd, b_nd, c_nd)
     np.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-5)
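
The tutorial's evaluate helper now goes through tvm.tir.build. Below is a minimal, self-contained sketch of that pattern on a toy vector-add; the workload and array names are illustrative stand-ins for the tutorial's actual module, and it assumes a stock TVM build with the llvm target enabled.

import numpy as np
import tvm
from tvm import te

n = 128
A = te.placeholder((n,), dtype="float32", name="A")
B = te.placeholder((n,), dtype="float32", name="B")
C = te.compute((n,), lambda i: A[i] + B[i], name="C")
mod = tvm.IRModule({"main": te.create_prim_func([A, B, C])})

# build the TIR-level module directly (previously: tvm.compile(mod, target="llvm"))
lib = tvm.tir.build(mod, target="llvm")

a_nd = tvm.nd.array(np.random.rand(n).astype("float32"))
b_nd = tvm.nd.array(np.random.rand(n).astype("float32"))
c_nd = tvm.nd.empty((n,), "float32")
lib(a_nd, b_nd, c_nd)  # check correctness against numpy
np.testing.assert_allclose(c_nd.numpy(), a_nd.numpy() + b_nd.numpy(), rtol=1e-5)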

python/tvm/dlight/benchmark/bench.py

Lines changed: 1 addition & 1 deletion

@@ -121,7 +121,7 @@ def benchmark(
     # append scalar input tensors for rotary embedding
     input_tensors.extend(scalar_input_tensors)
     # build locally
-    rt_mod = tvm.compile(mod, target=target)
+    rt_mod = tvm.tir.build(mod, target=target)
     # set up evaluator config
     evaluator_config = EvaluatorConfig._normalized( # pylint: disable=protected-access
         evaluator_config
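
The benchmark path builds locally and then times the result. A hedged, self-contained sketch of that build-then-profile flow is below; the toy workload, the "main" function name, and the repeat counts are placeholders rather than dlight's actual benchmark configuration.

import numpy as np
import tvm
from tvm import te

n = 1024
A = te.placeholder((n,), "float32", name="A")
B = te.compute((n,), lambda i: A[i] * 2.0, name="B")
mod = tvm.IRModule({"main": te.create_prim_func([A, B])})

rt_mod = tvm.tir.build(mod, target="llvm")  # build locally
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.rand(n).astype("float32"), dev)
b = tvm.nd.empty((n,), "float32", dev)

# time_evaluator returns a callable that reports averaged runtimes
timer = rt_mod.time_evaluator("main", dev, number=10, repeat=3)
print(f"mean runtime: {timer(a, b).mean * 1e6:.2f} us")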

src/target/source/codegen_c_host.cc

Lines changed: 1 addition & 13 deletions

@@ -31,10 +31,6 @@
 #include <utility>
 #include <vector>
 
-#include "../../support/str_escape.h"
-#include "../build_common.h"
-#include "codegen_params.h"
-
 namespace tvm {
 namespace codegen {
 
@@ -51,6 +47,7 @@ void CodeGenCHost::Init(bool output_ssa, bool emit_asserts, bool emit_fwd_func_d
   decl_stream << "#include \"tvm/runtime/c_backend_api.h\"\n";
   decl_stream << "#include <math.h>\n";
   decl_stream << "#include <stdbool.h>\n";
+  CodeGenCHost::InitGlobalContext();
   CodeGenC::Init(output_ssa);
 }
 
@@ -92,7 +89,6 @@ void CodeGenCHost::AddFunction(const GlobalVar& gvar, const PrimFunc& func,
 }
 
 void CodeGenCHost::GenerateForwardFunctionDeclarations(String global_symbol,
-
                                                        const Array<Type>& arg_types,
                                                        const Type& ret_type) {
   if (!emit_fwd_func_decl_) {
@@ -443,9 +439,6 @@ runtime::Module BuildCHost(IRModule mod, Target target) {
     return sort_key(kv_a) < sort_key(kv_b);
   });
 
-  // Declare all functions first. This ensures that all functions,
-  // including the __tvm_main__ used in AOT, have access to forward
-  // declarations of other functions in the IRModule.
   for (const auto& [gvar, prim_func] : funcs) {
     cg.DeclareFunction(gvar, prim_func);
   }
@@ -457,11 +450,6 @@ runtime::Module BuildCHost(IRModule mod, Target target) {
     cg.AddFunction(gvar, prim_func, emit_fwd_func_decl);
   }
 
-  if (target->GetAttr<Bool>("system-lib").value_or(Bool(false))) {
-    ICHECK_EQ(target->GetAttr<String>("runtime").value_or(""), "c")
-        << "c target only supports generating C runtime SystemLibs";
-  }
-
   std::string code = cg.Finish();
   return CSourceModuleCreate(code, "c", cg.GetFunctionNames());
 }
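
BuildCHost is the code path behind the "c" target, and its output is wrapped into a C source module by CSourceModuleCreate. A hedged sketch of observing that from the Python side is below; it assumes get_source is exposed on the module that tvm.tir.build returns for target="c", and the toy workload is illustrative.

import tvm
from tvm import te

n = 16
A = te.placeholder((n,), "float32", name="A")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
mod = tvm.IRModule({"main": te.create_prim_func([A, B])})

# target="c" routes through BuildCHost / CodeGenCHost
lib = tvm.tir.build(mod, target="c")
print(lib.get_source())  # emitted C source, including the injected #includes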

tests/python/codegen/test_target_codegen_cuda_fp4.py

Lines changed: 3 additions & 3 deletions

@@ -129,7 +129,7 @@ def add(
     sch.bind(tx, "threadIdx.x")
     sch.vectorize(vec)
 
-    fadd = tvm.build(sch.mod, target=target)
+    fadd = tvm.compile(sch.mod, target=target)
 
     numpytype = "float4_e2m1fn"
     promoted_base_dtype = promoted_dtype
@@ -189,7 +189,7 @@ def reinterpret(
     # Part 1. reinterpret float4_e2m1fn to uint8
     for vector_length in [1, 2, 4]:
         mod = get_reinterpret_mod("float4_e2m1fn", "uint8", vector_length)
-        f = tvm.build(mod, target=target)
+        f = tvm.compile(mod, target=target)
         a_np = np.random.uniform(low=-6, high=6, size=(n,)).astype("float4_e2m1fn")
         a = tvm.nd.empty(shape=(n,), dtype="float4_e2m1fn", device=dev)
         a.copyfrom(a_np)
@@ -200,7 +200,7 @@ def reinterpret(
     # Part 2. reinterpret uint8 to float4_e2m1fn
     for vector_length in [1, 2, 4]:
         mod = get_reinterpret_mod("uint8", "float4_e2m1fn", vector_length)
-        f = tvm.build(mod, target=target)
+        f = tvm.compile(mod, target=target)
         a_np = np.random.uniform(low=-6, high=6, size=(n,)).astype("uint8")
         a = tvm.nd.empty(shape=(n,), dtype="uint8", device=dev)
         a.copyfrom(a_np)
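
These hunks only swap the removed tvm.build for tvm.compile; the schedule-then-build pattern around them is unchanged. A hedged sketch of that pattern on a toy float32 kernel (standing in for the test's fp4 types, and assuming a CUDA-enabled TVM build):

import tvm
from tvm import te

n = 1024
A = te.placeholder((n,), "float32", name="A")
B = te.placeholder((n,), "float32", name="B")
C = te.compute((n,), lambda i: A[i] + B[i], name="C")
mod = tvm.IRModule({"main": te.create_prim_func([A, B, C])})

sch = tvm.tir.Schedule(mod)
(x,) = sch.get_loops(sch.get_block("C"))
bx, tx, vec = sch.split(x, factors=[None, 64, 4])
sch.bind(bx, "blockIdx.x")
sch.bind(tx, "threadIdx.x")
sch.vectorize(vec)

fadd = tvm.compile(sch.mod, target="cuda")  # previously tvm.build(sch.mod, ...)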

tests/python/codegen/test_target_codegen_opencl.py

Lines changed: 7 additions & 7 deletions

@@ -38,7 +38,7 @@ def check_if_then_else(dev, n, dtype):
     sch = tvm.tir.Schedule(func)
     (x,) = sch.get_loops(sch.get_block("C"))
     sch.bind(x, "threadIdx.x")
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
     a = tvm.nd.empty((n,), A.dtype, dev)
     c = tvm.nd.empty((n,), A.dtype, dev)
     # Only need to test compiling here
@@ -55,7 +55,7 @@ def check_select(dev, n, dtype):
     sch = tvm.tir.Schedule(func)
     (x,) = sch.get_loops(sch.get_block("C"))
     sch.bind(x, "threadIdx.x")
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
 
     a = tvm.nd.empty((n,), A.dtype, dev)
     c = tvm.nd.empty((n,), A.dtype, dev)
@@ -85,7 +85,7 @@ def check_inf_nan(dev, n, value, dtype):
     sch = tvm.tir.Schedule(func)
     (x,) = sch.get_loops(sch.get_block("C"))
     sch.bind(x, "threadIdx.x")
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
     a = tvm.nd.empty((n,), A.dtype, dev)
     c = tvm.nd.empty((n,), A.dtype, dev)
     # Only need to test compiling here
@@ -113,7 +113,7 @@ def check_max(dev, n, dtype):
     sch = tvm.tir.Schedule(func)
     (x,) = sch.get_loops(sch.get_block("C"))
     sch.bind(x, "threadIdx.x")
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
 
     a = tvm.nd.empty((n,), A.dtype, dev)
     c = tvm.nd.empty((n,), A.dtype, dev)
@@ -178,7 +178,7 @@ def check_type_casting(ctx, n, dtype):
     sch.bind(tx, "threadIdx.x")
     sch.vectorize(vx)
 
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
     c = tvm.nd.empty((n,), dtype, ctx)
     assembly = fun.imported_modules[0].get_source()
     lcond = "convert_int4(((convert_uint4(((uint4)(((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3)))))"
@@ -210,7 +210,7 @@ def _check(target, n, dtype):
     (x,) = sch.get_loops(sch.get_block("C"))
     sch.bind(x, "threadIdx.x")
 
-    fun = tvm.compile(sch.mod, target=target)
+    fun = tvm.tir.build(sch.mod, target=target)
     assembly = fun.imported_modules[0].get_source()
     if "adreno" in target:
         pattern = "convert_float"
@@ -225,7 +225,7 @@ def _get_maximum_kernel_args(source):
     def get_kernel_args(source):
         import re
 
-        p = re.compile(r"__kernel void .+\((.*)\)")
+        p = re.tir.build(r"__kernel void .+\((.*)\)")
         args = p.findall(source)
         return args
 
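
Several of these tests read back the generated kernel source rather than running it. A hedged, self-contained sketch of that inspection step (assuming an OpenCL-enabled TVM build; the kernel itself is a toy):

import tvm
from tvm import te

n = 256
A = te.placeholder((n,), "float32", name="A")
C = te.compute((n,), lambda i: A[i] * 2.0, name="C")
mod = tvm.IRModule({"main": te.create_prim_func([A, C])})

sch = tvm.tir.Schedule(mod)
(x,) = sch.get_loops(sch.get_block("C"))
sch.bind(x, "threadIdx.x")

fun = tvm.tir.build(sch.mod, target="opencl")
# the device code lives in an imported module, not the host module
source = fun.imported_modules[0].get_source()
assert "__kernel" in source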

tests/python/codegen/test_target_codegen_vulkan.py

Lines changed: 1 addition & 1 deletion

@@ -82,7 +82,7 @@ def test_vector_comparison(target, dev, dtype):
     sch.vectorize(vx)
 
     # Build
-    f = tvm.compile(sch.mod, target=target)
+    f = tvm.tir.build(sch.mod, target=target)
 
     # Verify we generate the boolx4 type declaration and the OpSelect
     # v4{float,half,int} instruction

tests/python/contrib/test_cblas.py

Lines changed: 3 additions & 1 deletion

@@ -230,7 +230,9 @@ def verify(target="llvm"):
            return
        dev = tvm.cpu(0)
        name = "test_batch_matmul"
-       f = tvm.compile(te.create_prim_func([input1_data, input2_data, final_result]), target=target)
+       f = tvm.compile(
+           te.create_prim_func([input1_data, input2_data, final_result]), target=target
+       )
        if target == "c":
            f = compiling(f, name)
        matrix_input1 = tvm.nd.array(np.random.uniform(size=ashape).astype(input1_data.dtype), dev)
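
The change above only re-wraps a long line: te.create_prim_func turns TE tensors into a PrimFunc that tvm.compile builds directly. A hedged sketch of that call on a toy batch matmul (shapes and names are placeholders for the test's actual inputs):

import numpy as np
import tvm
from tvm import te

batch, n, m, k = 4, 8, 8, 8
X = te.placeholder((batch, n, k), "float32", name="X")
Y = te.placeholder((batch, k, m), "float32", name="Y")
r = te.reduce_axis((0, k), name="r")
Z = te.compute(
    (batch, n, m),
    lambda b, i, j: te.sum(X[b, i, r] * Y[b, r, j], axis=r),
    name="Z",
)

f = tvm.compile(te.create_prim_func([X, Y, Z]), target="llvm")

dev = tvm.cpu(0)
x = tvm.nd.array(np.random.rand(batch, n, k).astype("float32"), dev)
y = tvm.nd.array(np.random.rand(batch, k, m).astype("float32"), dev)
z = tvm.nd.empty((batch, n, m), "float32", dev)
f(x, y, z)
np.testing.assert_allclose(z.numpy(), x.numpy() @ y.numpy(), rtol=1e-5)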

tests/python/relax/test_relax_operators.py

Lines changed: 1 addition & 1 deletion

@@ -47,7 +47,7 @@ def run_cpu(mod, func_name, *args, exec_mode):
        mod = tvm.IRModule.from_expr(func)

    target = tvm.target.Target("llvm")
-   ex = tvm.compile(mod, target, exec_mode=exec_mode)
+   ex = relax.build(mod, target, exec_mode=exec_mode)
    vm = relax.VirtualMachine(ex, tvm.cpu())

    return vm[func_name](*args)
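
Relax modules now go through relax.build rather than tvm.compile in this helper. A hedged, self-contained sketch of that build-and-run flow; the identity function is a stand-in for the operators under test, and the "bytecode" exec_mode is one of the two modes the helper is parameterized over:

import numpy as np
import tvm
from tvm import relax
from tvm.script import relax as R

@tvm.script.ir_module
class Module:
    @R.function
    def main(x: R.Tensor((4,), "float32")) -> R.Tensor((4,), "float32"):
        return x

target = tvm.target.Target("llvm")
ex = relax.build(Module, target, exec_mode="bytecode")  # previously tvm.compile(...)
vm = relax.VirtualMachine(ex, tvm.cpu())

arg = tvm.nd.array(np.arange(4, dtype="float32"))
out = vm["main"](arg)
np.testing.assert_allclose(out.numpy(), arg.numpy())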

tests/python/relax/test_vm_alloc_storage_with_scope.py

Lines changed: 5 additions & 1 deletion

@@ -62,7 +62,7 @@ def test_alloc_storage_with_scope_global():
    mod = Module
    target = "llvm"
    with tvm.transform.PassContext(opt_level=3):
-       lib = tvm.compile(mod, target, exec_mode="compiled")
+       lib = tvm.relax.build(mod, target=target, exec_mode="compiled")

    dev = tvm.cpu()
    # This is the important line which tests nd allocator
@@ -72,3 +72,7 @@ def test_alloc_storage_with_scope_global():
    vm_rt.invoke_stateful("main")
    output = vm_rt.get_outputs("main").numpy()
    tvm.testing.assert_allclose(output_ref, output)
+
+
+if __name__ == "__main__":
+    tvm.testing.main()
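
This test drives the VM through its stateful calling convention (invoke_stateful / get_outputs) instead of a direct call. A hedged sketch of that flow is below; the identity module is a stand-in for the test's Module with storage scopes, and the set_input step is my assumption about how arguments reach the stateful entry point.

import numpy as np
import tvm
from tvm import relax
from tvm.script import relax as R

@tvm.script.ir_module
class Module:
    @R.function
    def main(x: R.Tensor((4,), "float32")) -> R.Tensor((4,), "float32"):
        return x

with tvm.transform.PassContext(opt_level=3):
    lib = tvm.relax.build(Module, target="llvm", exec_mode="compiled")

dev = tvm.cpu()
vm_rt = relax.VirtualMachine(lib, dev)
x = tvm.nd.array(np.ones((4,), "float32"), dev)
vm_rt.set_input("main", x)      # assumed setup step, not shown in the hunk
vm_rt.invoke_stateful("main")
print(vm_rt.get_outputs("main").numpy())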
