diff --git a/ffi/python/tvm_ffi/cpp/load_inline.py b/ffi/python/tvm_ffi/cpp/load_inline.py
index 754a9d74652f..111dee8d5276 100644
--- a/ffi/python/tvm_ffi/cpp/load_inline.py
+++ b/ffi/python/tvm_ffi/cpp/load_inline.py
@@ -140,6 +140,9 @@ def _generate_ninja_build(
     """Generate the content of build.ninja for building the module."""
     default_include_paths = [find_include_path(), find_dlpack_include_path()]
 
+    tvm_ffi_lib = find_libtvm_ffi()
+    tvm_ffi_lib_path = os.path.dirname(tvm_ffi_lib)
+    tvm_ffi_lib_name = os.path.splitext(os.path.basename(tvm_ffi_lib))[0]
     if IS_WINDOWS:
         default_cflags = [
             "/std:c++17",
@@ -157,17 +160,11 @@ def _generate_ninja_build(
             "/EHsc",
         ]
         default_cuda_cflags = ["-Xcompiler", "/std:c++17", "/O2"]
-        # Find the TVM FFI library for linking
-        tvm_ffi_lib = find_libtvm_ffi()
-        tvm_ffi_lib_path = os.path.dirname(tvm_ffi_lib)
-        tvm_ffi_lib_name = os.path.splitext(os.path.basename(tvm_ffi_lib))[
-            0
-        ]  # Remove .dll extension
         default_ldflags = ["/DLL", f"/LIBPATH:{tvm_ffi_lib_path}", f"{tvm_ffi_lib_name}.lib"]
     else:
         default_cflags = ["-std=c++17", "-fPIC", "-O2"]
         default_cuda_cflags = ["-Xcompiler", "-fPIC", "-std=c++17", "-O2"]
-        default_ldflags = ["-shared"]
+        default_ldflags = ["-shared", "-L{}".format(tvm_ffi_lib_path), "-ltvm_ffi"]
 
     if with_cuda:
         # determine the compute capability of the current GPU
diff --git a/ffi/tests/python/test_load_inline.py b/ffi/tests/python/test_load_inline.py
index dbaf4394081c..6510cca540bf 100644
--- a/ffi/tests/python/test_load_inline.py
+++ b/ffi/tests/python/test_load_inline.py
@@ -28,10 +28,6 @@
 from tvm_ffi.module import Module
 
 
-@pytest.mark.xfail(
-    not sys.platform.startswith("linux") and not sys.platform.startswith("win32"),
-    reason="need to support other platforms",
-)
 def test_load_inline_cpp():
     mod: Module = tvm_ffi.cpp.load_inline(
         name="hello",
@@ -58,10 +54,6 @@ def test_load_inline_cpp():
     numpy.testing.assert_equal(x + 1, y)
 
 
-@pytest.mark.xfail(
-    not sys.platform.startswith("linux") and not sys.platform.startswith("win32"),
-    reason="need to support other platforms",
-)
 def test_load_inline_cpp_with_docstrings():
     mod: Module = tvm_ffi.cpp.load_inline(
         name="hello",
@@ -88,10 +80,6 @@ def test_load_inline_cpp_with_docstrings():
     numpy.testing.assert_equal(x + 1, y)
 
 
-@pytest.mark.xfail(
-    not sys.platform.startswith("linux") and not sys.platform.startswith("win32"),
-    reason="need to support other platforms",
-)
 def test_load_inline_cpp_multiple_sources():
     mod: Module = tvm_ffi.cpp.load_inline(
         name="hello",
@@ -134,10 +122,6 @@ def test_load_inline_cpp_multiple_sources():
     numpy.testing.assert_equal(x + 1, y)
 
 
-@pytest.mark.xfail(
-    not sys.platform.startswith("linux") and not sys.platform.startswith("win32"),
-    reason="need to support other platforms",
-)
 def test_load_inline_cpp_build_dir():
     mod: Module = tvm_ffi.cpp.load_inline(
         name="hello",