2 changes: 1 addition & 1 deletion ffi/pyproject.toml
@@ -17,7 +17,7 @@

[project]
name = "apache-tvm-ffi"
-version = "0.1.0a11"
+version = "0.1.0a12"
description = "tvm ffi"

authors = [{ name = "TVM FFI team" }]
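The pyproject change is just the pre-release bump. As a quick, illustrative check (not part of the PR), the installed build can be confirmed from Python:

```python
# Illustrative check, not part of this PR: confirm the installed pre-release.
from importlib.metadata import version

print(version("apache-tvm-ffi"))  # "0.1.0a12" once this version is published
```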
22 changes: 12 additions & 10 deletions ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -117,9 +117,11 @@ def load_torch_c_dlpack_extension():
case ScalarType::Float8_e8m0fnu:
dtype.code = DLDataTypeCode::kDLFloat8_e8m0fnu;
break;
+#if TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 8
case ScalarType::Float4_e2m1fn_x2:
dtype.code = DLDataTypeCode::kDLFloat4_e2m1fn;
break;
+#endif
default:
TORCH_CHECK(false, "Unsupported scalar type: ");
}
@@ -311,7 +313,7 @@ def load_torch_c_dlpack_extension():
} // namespace
} // namespace at

-int TorchDLPackPyObjectExporter(void* py_obj, DLManagedTensorVersioned** out, void** env_stream) {
+int TorchDLPackFromPyObject(void* py_obj, DLManagedTensorVersioned** out, void** env_stream) {
try {
py::handle handle(static_cast<PyObject*>(py_obj));
at::Tensor tensor = handle.cast<at::Tensor>();
@@ -326,7 +328,7 @@ def load_torch_c_dlpack_extension():
}
}

-int TorchDLPackPyObjectImporter(DLManagedTensorVersioned* src, void** py_obj_out) {
+int TorchDLPackToPyObject(DLManagedTensorVersioned* src, void** py_obj_out) {
try {
at::Tensor tensor = at::fromDLPackImpl<DLManagedTensorVersioned>(src, nullptr);
*py_obj_out = THPVariable_Wrap(tensor);
@@ -355,12 +357,12 @@ def load_torch_c_dlpack_extension():
}
}

-int64_t TorchDLPackPyObjectExporterPtr() {
-return reinterpret_cast<int64_t>(TorchDLPackPyObjectExporter);
+int64_t TorchDLPackFromPyObjectPtr() {
+return reinterpret_cast<int64_t>(TorchDLPackFromPyObject);
}

-int64_t TorchDLPackPyObjectImporterPtr() {
-return reinterpret_cast<int64_t>(TorchDLPackPyObjectImporter);
+int64_t TorchDLPackToPyObjectPtr() {
+return reinterpret_cast<int64_t>(TorchDLPackToPyObject);
}

int64_t TorchDLPackTensorAllocatorPtr() {
@@ -376,17 +378,17 @@
name="to_dlpack",
cpp_sources=cpp_source,
functions=[
"TorchDLPackPyObjectExporterPtr",
"TorchDLPackPyObjectImporterPtr",
"TorchDLPackFromPyObjectPtr",
"TorchDLPackToPyObjectPtr",
"TorchDLPackTensorAllocatorPtr",
],
extra_cflags=["-O3"],
extra_include_paths=libinfo.include_paths() + cpp_extension.include_paths("cuda"),
verbose=True,
)
# set the dlpack related flags
-torch.Tensor.__c_dlpack_exporter__ = mod.TorchDLPackPyObjectExporterPtr()
-torch.Tensor.__c_dlpack_importer__ = mod.TorchDLPackPyObjectImporterPtr()
+torch.Tensor.__c_dlpack_from_pyobject__ = mod.TorchDLPackFromPyObjectPtr()
+torch.Tensor.__c_dlpack_to_pyobject__ = mod.TorchDLPackToPyObjectPtr()
torch.Tensor.__c_dlpack_tensor_allocator__ = mod.TorchDLPackTensorAllocatorPtr()
return mod
except ImportError:
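After the JIT build succeeds, the renamed pointer getters are called once and their addresses are attached to `torch.Tensor` as plain integers. A sketch that probes those attributes (it assumes `load_torch_c_dlpack_extension()` returns None when the extension cannot be built; the attribute names come from this PR):

```python
# Sketch, assuming load_torch_c_dlpack_extension() returns None when the
# torch C++ extension cannot be built; the attribute names come from this PR.
import torch
from tvm_ffi._optional_torch_c_dlpack import load_torch_c_dlpack_extension

mod = load_torch_c_dlpack_extension()
if mod is not None:
    for attr in ("__c_dlpack_from_pyobject__",
                 "__c_dlpack_to_pyobject__",
                 "__c_dlpack_tensor_allocator__"):
        ptr = getattr(torch.Tensor, attr)
        # each attribute is a raw C function pointer carried as a Python int
        assert isinstance(ptr, int) and ptr != 0, attr
```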
12 changes: 6 additions & 6 deletions ffi/python/tvm_ffi/cython/base.pxi
@@ -247,11 +247,11 @@ cdef extern from "tvm/ffi/extra/c_env_api.h":
cdef extern from "tvm_ffi_python_helpers.h":
# no need to expose fields of the call context
# setter data structure
-ctypedef int (*DLPackPyObjectExporter)(
+ctypedef int (*DLPackFromPyObject)(
void* py_obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
) except -1

-ctypedef int (*DLPackPyObjectImporter)(
+ctypedef int (*DLPackToPyObject)(
DLManagedTensorVersioned* tensor, void** py_obj_out
) except -1
ctypedef int (*DLPackTensorAllocator)(
@@ -263,13 +263,13 @@ cdef extern from "tvm_ffi_python_helpers.h":
int device_type
int device_id
TVMFFIStreamHandle stream
-DLPackPyObjectImporter c_dlpack_importer
+DLPackToPyObject c_dlpack_to_pyobject
DLPackTensorAllocator c_dlpack_tensor_allocator

ctypedef struct TVMFFIPyArgSetter:
int (*func)(TVMFFIPyArgSetter* handle, TVMFFIPyCallContext* ctx, PyObject* py_arg, TVMFFIAny* out) except -1
-DLPackPyObjectExporter c_dlpack_exporter
-DLPackPyObjectImporter c_dlpack_importer
+DLPackFromPyObject c_dlpack_from_pyobject
+DLPackToPyObject c_dlpack_to_pyobject
DLPackTensorAllocator c_dlpack_tensor_allocator

ctypedef int (*TVMFFIPyArgSetterFactory)(PyObject* value, TVMFFIPyArgSetter* out) except -1
@@ -281,7 +281,7 @@ cdef extern from "tvm_ffi_python_helpers.h":
TVMFFIAny* result,
int* c_api_ret_code,
int release_gil,
-DLPackPyObjectImporter* out_dlpack_importer
+DLPackToPyObject* out_dlpack_importer
) except -1

int TVMFFIPyCallFieldSetter(
36 changes: 18 additions & 18 deletions ffi/python/tvm_ffi/cython/function.pxi
@@ -47,13 +47,13 @@ cdef inline object make_ret_small_bytes(TVMFFIAny result):
return PyBytes_FromStringAndSize(bytes.data, bytes.size)


-cdef inline object make_ret(TVMFFIAny result, DLPackPyObjectImporter c_dlpack_importer = NULL):
+cdef inline object make_ret(TVMFFIAny result, DLPackToPyObject c_dlpack_to_pyobject = NULL):
"""convert result to return value."""
cdef int32_t type_index
type_index = result.type_index
if type_index == kTVMFFITensor:
# specially handle Tensor as it needs a special dltensor field
-return make_tensor_from_any(result, c_dlpack_importer)
+return make_tensor_from_any(result, c_dlpack_to_pyobject)
elif type_index == kTVMFFIOpaquePyObject:
return make_ret_opaque_object(result)
elif type_index >= kTVMFFIStaticObjectBegin:
@@ -121,18 +121,18 @@ cdef int TVMFFIPyArgSetterDLPackCExporter_(
cdef TVMFFIObjectHandle temp_chandle
cdef TVMFFIStreamHandle env_stream = NULL

-if this.c_dlpack_importer != NULL:
-ctx.c_dlpack_importer = this.c_dlpack_importer
+if this.c_dlpack_to_pyobject != NULL:
+ctx.c_dlpack_to_pyobject = this.c_dlpack_to_pyobject
if this.c_dlpack_tensor_allocator != NULL:
ctx.c_dlpack_tensor_allocator = this.c_dlpack_tensor_allocator

if ctx.device_id != -1:
# already queried device, do not do it again, pass NULL to stream
-if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, NULL) != 0:
+if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, NULL) != 0:
return -1
else:
# query the stream on the environment
-if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, &env_stream) != 0:
+if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, &env_stream) != 0:
return -1
# If device is not CPU, we should set the device type and id
if temp_managed_tensor.dl_tensor.device.device_type != kDLCPU:
@@ -148,7 +148,7 @@
return 0


-cdef int TorchDLPackPyObjectImporterFallback_(
+cdef int TorchDLPackToPyObjectFallback_(
DLManagedTensorVersioned* dltensor, void** py_obj_out
) except -1:
# a bit convoluted but ok as a fallback
@@ -173,7 +173,7 @@ cdef int TVMFFIPyArgSetterTorchFallback_(
out.type_index = kTVMFFITensor
out.v_ptr = (<Tensor>arg).chandle
temp_dltensor = TVMFFITensorGetDLTensorPtr((<Tensor>arg).chandle)
-ctx.c_dlpack_importer = TorchDLPackPyObjectImporterFallback_
+ctx.c_dlpack_to_pyobject = TorchDLPackToPyObjectFallback_
# record the stream and device for torch context
if is_cuda and ctx.device_type != -1:
ctx.device_type = temp_dltensor.device.device_type
@@ -370,15 +370,15 @@ cdef int TVMFFIPyArgSetterFactory_(PyObject* value, TVMFFIPyArgSetter* out) except -1:
if isinstance(arg, ObjectRValueRef):
out.func = TVMFFIPyArgSetterObjectRValueRef_
return 0
-if os.environ.get("TVM_FFI_SKIP_C_DLPACK_EXPORTER", "0") != "1":
+if os.environ.get("TVM_FFI_SKIP_c_dlpack_from_pyobject", "0") != "1":
# external tensors
-if hasattr(arg, "__c_dlpack_exporter__"):
+if hasattr(arg, "__c_dlpack_from_pyobject__"):
out.func = TVMFFIPyArgSetterDLPackCExporter_
-temp_ptr = arg.__c_dlpack_exporter__
-out.c_dlpack_exporter = <DLPackPyObjectExporter>temp_ptr
-if hasattr(arg, "__c_dlpack_importer__"):
-temp_ptr = arg.__c_dlpack_importer__
-out.c_dlpack_importer = <DLPackPyObjectImporter>temp_ptr
+temp_ptr = arg.__c_dlpack_from_pyobject__
+out.c_dlpack_from_pyobject = <DLPackFromPyObject>temp_ptr
+if hasattr(arg, "__c_dlpack_to_pyobject__"):
+temp_ptr = arg.__c_dlpack_to_pyobject__
+out.c_dlpack_to_pyobject = <DLPackToPyObject>temp_ptr
if hasattr(arg, "__c_dlpack_tensor_allocator__"):
temp_ptr = arg.__c_dlpack_tensor_allocator__
out.c_dlpack_tensor_allocator = <DLPackTensorAllocator>temp_ptr
@@ -470,7 +470,7 @@ cdef class Function(Object):
def __call__(self, *args):
cdef TVMFFIAny result
cdef int c_api_ret_code
-cdef DLPackPyObjectImporter c_dlpack_importer = NULL
+cdef DLPackToPyObject c_dlpack_to_pyobject = NULL
# IMPORTANT: caller needs to initialize result->type_index to kTVMFFINone
result.type_index = kTVMFFINone
result.v_int64 = 0
@@ -480,12 +480,12 @@
&result,
&c_api_ret_code,
self.release_gil,
-&c_dlpack_importer
+&c_dlpack_to_pyobject
)
# NOTE: logic is same as check_call
# directly inline here to simplify traceback
if c_api_ret_code == 0:
-return make_ret(result, c_dlpack_importer)
+return make_ret(result, c_dlpack_to_pyobject)
elif c_api_ret_code == -2:
raise_existing_error()
raise move_from_last_error().py_error()
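The setter factory above also renames the opt-out environment variable. A sketch of the escape hatch this hunk implements (note the variable's mixed casing is exactly as renamed in the diff):

```python
# Sketch of the opt-out: setting the renamed, case-sensitive variable to "1"
# makes the setter factory skip the __c_dlpack_from_pyobject__ fast path and
# fall back to generic DLPack handling. It is consulted when the setter is
# constructed, so set it before the first tvm_ffi call.
import os

os.environ["TVM_FFI_SKIP_c_dlpack_from_pyobject"] = "1"
```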
18 changes: 9 additions & 9 deletions ffi/python/tvm_ffi/cython/tensor.pxi
@@ -275,7 +275,7 @@ _set_class_tensor(Tensor)
_register_object_by_index(kTVMFFITensor, Tensor)


-cdef int _dltensor_test_wrapper_c_dlpack_exporter(
+cdef int _dltensor_test_wrapper_c_dlpack_from_pyobject(
void* obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
) except -1:
cdef PyObject* py_obj = <PyObject*>obj
@@ -291,8 +291,8 @@ cdef int _dltensor_test_wrapper_c_dlpack_exporter(
return TVMFFITensorToDLPackVersioned(wrapper.tensor.chandle, out)


-def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
-cdef DLPackPyObjectExporter converter_func = _dltensor_test_wrapper_c_dlpack_exporter
+def _dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr():
+cdef DLPackFromPyObject converter_func = _dltensor_test_wrapper_c_dlpack_from_pyobject
cdef void* temp_ptr = <void*>converter_func
cdef long long temp_int_ptr = <long long>temp_ptr
return temp_int_ptr
@@ -301,7 +301,7 @@ def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
cdef class DLTensorTestWrapper:
"""Wrapper of a Tensor that exposes DLPack protocol, only for testing purpose.
"""
-__c_dlpack_exporter__ = _dltensor_test_wrapper_c_dlpack_exporter_as_intptr()
+__c_dlpack_from_pyobject__ = _dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr()

cdef Tensor tensor
cdef dict __dict__
@@ -333,19 +333,19 @@ cdef inline object make_ret_dltensor(TVMFFIAny result):
return tensor


-cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackPyObjectImporter c_dlpack_importer = NULL):
+cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackToPyObject c_dlpack_to_pyobject = NULL):
# TODO: Implement
cdef Tensor tensor
cdef void* py_obj
cdef DLManagedTensorVersioned* dlpack

-if c_dlpack_importer != NULL:
+if c_dlpack_to_pyobject != NULL:
# try to convert and import into the environment array if possible
if TVMFFITensorToDLPackVersioned(chandle, &dlpack) == 0:
try:
# note that py_obj already holds an extra reference to the tensor
# so we need to decref it after the conversion
-c_dlpack_importer(dlpack, &py_obj)
+c_dlpack_to_pyobject(dlpack, &py_obj)
tensor = <Tensor>(<PyObject*>py_obj)
Py_DECREF(tensor)
return tensor
@@ -358,5 +358,5 @@ cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackPyObjectImporter c_dlpack_importer = NULL):
return tensor


-cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackPyObjectImporter c_dlpack_importer):
-return make_tensor_from_chandle(any.v_ptr, c_dlpack_importer)
+cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackToPyObject c_dlpack_to_pyobject):
+return make_tensor_from_chandle(any.v_ptr, c_dlpack_to_pyobject)
19 changes: 9 additions & 10 deletions ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
@@ -44,8 +44,7 @@
* \note We use void* to avoid dependency on Python.h so this specific type is
* not dependent on Python.h and can be copied to dlpack.h
*/
-typedef int (*DLPackPyObjectExporter)(void* py_obj, DLManagedTensorVersioned** out,
-void** env_stream);
+typedef int (*DLPackFromPyObject)(void* py_obj, DLManagedTensorVersioned** out, void** env_stream);
/*!
* \brief C-style function pointer to speed up converting a DLManagedTensorVersioned to a PyObject Tensor.
* \param tensor The DLManagedTensorVersioned to convert.
@@ -54,7 +53,7 @@ typedef int (*DLPackPyObjectExporter)(void* py_obj, DLManagedTensorVersioned** out,
* \note We use void* to avoid dependency on Python.h so this specific type is
* not dependent on Python.h and can be copied to dlpack.h
*/
-typedef int (*DLPackPyObjectImporter)(DLManagedTensorVersioned* tensor, void** py_obj_out);
+typedef int (*DLPackToPyObject)(DLManagedTensorVersioned* tensor, void** py_obj_out);

///--------------------------------------------------------------------------------
/// We deliberately designed the data structure and function to be C-style
@@ -82,7 +81,7 @@ struct TVMFFIPyCallContext {
/*! \brief the number of temporary arguments */
int num_temp_py_objects = 0;
/*! \brief the DLPack to-PyObject converter, if any */
-DLPackPyObjectImporter c_dlpack_importer{nullptr};
+DLPackToPyObject c_dlpack_to_pyobject{nullptr};
/*! \brief the DLPack allocator, if any */
DLPackTensorAllocator c_dlpack_tensor_allocator{nullptr};
};
@@ -102,11 +101,11 @@ struct TVMFFIPyArgSetter {
/*!
* \brief Optional DLPack exporter for setters that leverage the DLPack protocol.
*/
-DLPackPyObjectExporter c_dlpack_exporter{nullptr};
+DLPackFromPyObject c_dlpack_from_pyobject{nullptr};
/*!
* \brief Optional DLPack importer for setters that leverage the DLPack protocol.
*/
-DLPackPyObjectImporter c_dlpack_importer{nullptr};
+DLPackToPyObject c_dlpack_to_pyobject{nullptr};
/*!
* \brief Optional DLPack allocator for setters that leverage the DLPack protocol.
*/
@@ -273,7 +272,7 @@ class TVMFFIPyCallManager {
*/
int Call(TVMFFIPyArgSetterFactory setter_factory, void* func_handle, PyObject* py_arg_tuple,
TVMFFIAny* result, int* c_api_ret_code, bool release_gil,
-DLPackPyObjectImporter* optional_out_dlpack_importer) {
+DLPackToPyObject* optional_out_dlpack_importer) {
int64_t num_args = PyTuple_Size(py_arg_tuple);
if (num_args == -1) return -1;
try {
@@ -321,8 +320,8 @@
c_api_ret_code[0] = TVMFFIEnvSetTensorAllocator(prev_tensor_allocator, 0, nullptr);
if (c_api_ret_code[0] != 0) return 0;
}
-if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_importer != nullptr) {
-*optional_out_dlpack_importer = ctx.c_dlpack_importer;
+if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_to_pyobject != nullptr) {
+*optional_out_dlpack_importer = ctx.c_dlpack_to_pyobject;
}
return 0;
} catch (const std::exception& ex) {
@@ -430,7 +429,7 @@ inline int TVMFFIPyFuncCall(TVMFFIPyArgSetterFactory setter_factory, void* func_handle,
inline int TVMFFIPyFuncCall(TVMFFIPyArgSetterFactory setter_factory, void* func_handle,
PyObject* py_arg_tuple, TVMFFIAny* result, int* c_api_ret_code,
bool release_gil = true,
-DLPackPyObjectImporter* out_dlpack_importer = nullptr) {
+DLPackToPyObject* out_dlpack_importer = nullptr) {
return TVMFFIPyCallManager::ThreadLocal()->Call(setter_factory, func_handle, py_arg_tuple, result,
c_api_ret_code, release_gil, out_dlpack_importer);
}
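For readers following the header, the `__c_dlpack_*__` integers are simply addresses of C functions with the renamed typedefs above. An illustrative ctypes sketch of that convention (`DLManagedTensorVersioned` treated as opaque; tvm_ffi itself consumes the integers in Cython, not via ctypes):

```python
# Illustrative only: rebuild callables from the integer addresses exposed as
# __c_dlpack_from_pyobject__ / __c_dlpack_to_pyobject__ attributes.
import ctypes

# int (*DLPackFromPyObject)(void* py_obj, DLManagedTensorVersioned** out, void** env_stream)
DLPackFromPyObject = ctypes.CFUNCTYPE(
    ctypes.c_int,                      # 0 on success, -1 on error
    ctypes.c_void_p,                   # py_obj
    ctypes.POINTER(ctypes.c_void_p),   # out (opaque DLManagedTensorVersioned**)
    ctypes.POINTER(ctypes.c_void_p),   # env_stream
)

# int (*DLPackToPyObject)(DLManagedTensorVersioned* tensor, void** py_obj_out)
DLPackToPyObject = ctypes.CFUNCTYPE(
    ctypes.c_int,
    ctypes.c_void_p,                   # tensor
    ctypes.POINTER(ctypes.c_void_p),   # py_obj_out
)

def as_callable(ptr_as_int, functype):
    """Turn an integer function-pointer attribute back into a ctypes callable."""
    return functype(ptr_as_int)
```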