diff --git a/ffi/pyproject.toml b/ffi/pyproject.toml
index 11e65a9065d2..8c146f41c4e2 100644
--- a/ffi/pyproject.toml
+++ b/ffi/pyproject.toml
@@ -17,7 +17,7 @@
 
 [project]
 name = "apache-tvm-ffi"
-version = "0.1.0a11"
+version = "0.1.0a12"
 description = "tvm ffi"
 authors = [{ name = "TVM FFI team" }]
diff --git a/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py b/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
index f4af39302521..fc5851af170d 100644
--- a/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
+++ b/ffi/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -117,9 +117,11 @@ def load_torch_c_dlpack_extension():
       case ScalarType::Float8_e8m0fnu:
         dtype.code = DLDataTypeCode::kDLFloat8_e8m0fnu;
         break;
+#if TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 8
       case ScalarType::Float4_e2m1fn_x2:
         dtype.code = DLDataTypeCode::kDLFloat4_e2m1fn;
         break;
+#endif
       default:
         TORCH_CHECK(false, "Unsupported scalar type: ");
     }
@@ -311,7 +313,7 @@ def load_torch_c_dlpack_extension():
 }  // namespace
 }  // namespace at
 
-int TorchDLPackPyObjectExporter(void* py_obj, DLManagedTensorVersioned** out, void** env_stream) {
+int TorchDLPackFromPyObject(void* py_obj, DLManagedTensorVersioned** out, void** env_stream) {
   try {
     py::handle handle(static_cast<PyObject*>(py_obj));
     at::Tensor tensor = handle.cast<at::Tensor>();
@@ -326,7 +328,7 @@ def load_torch_c_dlpack_extension():
   }
 }
 
-int TorchDLPackPyObjectImporter(DLManagedTensorVersioned* src, void** py_obj_out) {
+int TorchDLPackToPyObject(DLManagedTensorVersioned* src, void** py_obj_out) {
   try {
     at::Tensor tensor = at::fromDLPackImpl(src, nullptr);
     *py_obj_out = THPVariable_Wrap(tensor);
@@ -355,12 +357,12 @@ def load_torch_c_dlpack_extension():
   }
 }
 
-int64_t TorchDLPackPyObjectExporterPtr() {
-  return reinterpret_cast<int64_t>(TorchDLPackPyObjectExporter);
+int64_t TorchDLPackFromPyObjectPtr() {
+  return reinterpret_cast<int64_t>(TorchDLPackFromPyObject);
 }
 
-int64_t TorchDLPackPyObjectImporterPtr() {
-  return reinterpret_cast<int64_t>(TorchDLPackPyObjectImporter);
+int64_t TorchDLPackToPyObjectPtr() {
+  return reinterpret_cast<int64_t>(TorchDLPackToPyObject);
 }
 
 int64_t TorchDLPackTensorAllocatorPtr() {
@@ -376,8 +378,8 @@ def load_torch_c_dlpack_extension():
             name="to_dlpack",
             cpp_sources=cpp_source,
             functions=[
-                "TorchDLPackPyObjectExporterPtr",
-                "TorchDLPackPyObjectImporterPtr",
+                "TorchDLPackFromPyObjectPtr",
+                "TorchDLPackToPyObjectPtr",
                 "TorchDLPackTensorAllocatorPtr",
             ],
             extra_cflags=["-O3"],
@@ -385,8 +387,8 @@ def load_torch_c_dlpack_extension():
             verbose=True,
         )
         # set the dlpack related flags
-        torch.Tensor.__c_dlpack_exporter__ = mod.TorchDLPackPyObjectExporterPtr()
-        torch.Tensor.__c_dlpack_importer__ = mod.TorchDLPackPyObjectImporterPtr()
+        torch.Tensor.__c_dlpack_from_pyobject__ = mod.TorchDLPackFromPyObjectPtr()
+        torch.Tensor.__c_dlpack_to_pyobject__ = mod.TorchDLPackToPyObjectPtr()
         torch.Tensor.__c_dlpack_tensor_allocator__ = mod.TorchDLPackTensorAllocatorPtr()
         return mod
     except ImportError:
diff --git a/ffi/python/tvm_ffi/cython/base.pxi b/ffi/python/tvm_ffi/cython/base.pxi
index a1de1de1cd89..fdb06f51055e 100644
--- a/ffi/python/tvm_ffi/cython/base.pxi
+++ b/ffi/python/tvm_ffi/cython/base.pxi
@@ -247,11 +247,11 @@ cdef extern from "tvm/ffi/extra/c_env_api.h":
 
 cdef extern from "tvm_ffi_python_helpers.h":
     # no need to expose fields of the call context
    # setter data structure
-    ctypedef int (*DLPackPyObjectExporter)(
+    ctypedef int (*DLPackFromPyObject)(
         void* py_obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
     ) except -1
-    ctypedef int (*DLPackPyObjectImporter)(
+    ctypedef int (*DLPackToPyObject)(
         DLManagedTensorVersioned* tensor, void** py_obj_out
     ) except -1
     ctypedef int (*DLPackTensorAllocator)(
@@ -263,13 +263,13 @@ cdef extern from "tvm_ffi_python_helpers.h":
         int device_type
         int device_id
         TVMFFIStreamHandle stream
-        DLPackPyObjectImporter c_dlpack_importer
+        DLPackToPyObject c_dlpack_to_pyobject
         DLPackTensorAllocator c_dlpack_tensor_allocator
 
     ctypedef struct TVMFFIPyArgSetter:
         int (*func)(TVMFFIPyArgSetter* handle, TVMFFIPyCallContext* ctx, PyObject* py_arg, TVMFFIAny* out) except -1
-        DLPackPyObjectExporter c_dlpack_exporter
-        DLPackPyObjectImporter c_dlpack_importer
+        DLPackFromPyObject c_dlpack_from_pyobject
+        DLPackToPyObject c_dlpack_to_pyobject
         DLPackTensorAllocator c_dlpack_tensor_allocator
 
     ctypedef int (*TVMFFIPyArgSetterFactory)(PyObject* value, TVMFFIPyArgSetter* out) except -1
@@ -281,7 +281,7 @@ cdef extern from "tvm_ffi_python_helpers.h":
         TVMFFIAny* result,
         int* c_api_ret_code,
         int release_gil,
-        DLPackPyObjectImporter* out_dlpack_importer
+        DLPackToPyObject* out_dlpack_importer
     ) except -1
 
     int TVMFFIPyCallFieldSetter(
diff --git a/ffi/python/tvm_ffi/cython/function.pxi b/ffi/python/tvm_ffi/cython/function.pxi
index bd486c5f77f5..9b86054b7102 100644
--- a/ffi/python/tvm_ffi/cython/function.pxi
+++ b/ffi/python/tvm_ffi/cython/function.pxi
@@ -47,13 +47,13 @@ cdef inline object make_ret_small_bytes(TVMFFIAny result):
     return PyBytes_FromStringAndSize(bytes.data, bytes.size)
 
 
-cdef inline object make_ret(TVMFFIAny result, DLPackPyObjectImporter c_dlpack_importer = NULL):
+cdef inline object make_ret(TVMFFIAny result, DLPackToPyObject c_dlpack_to_pyobject = NULL):
     """convert result to return value."""
     cdef int32_t type_index
     type_index = result.type_index
     if type_index == kTVMFFITensor:
         # specially handle Tensor as it needs a special dltensor field
-        return make_tensor_from_any(result, c_dlpack_importer)
+        return make_tensor_from_any(result, c_dlpack_to_pyobject)
     elif type_index == kTVMFFIOpaquePyObject:
         return make_ret_opaque_object(result)
     elif type_index >= kTVMFFIStaticObjectBegin:
@@ -121,18 +121,18 @@ cdef int TVMFFIPyArgSetterDLPackCExporter_(
     cdef TVMFFIObjectHandle temp_chandle
     cdef TVMFFIStreamHandle env_stream = NULL
 
-    if this.c_dlpack_importer != NULL:
-        ctx.c_dlpack_importer = this.c_dlpack_importer
+    if this.c_dlpack_to_pyobject != NULL:
+        ctx.c_dlpack_to_pyobject = this.c_dlpack_to_pyobject
     if this.c_dlpack_tensor_allocator != NULL:
         ctx.c_dlpack_tensor_allocator = this.c_dlpack_tensor_allocator
 
     if ctx.device_id != -1:
         # already queried device, do not do it again, pass NULL to stream
-        if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, NULL) != 0:
+        if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, NULL) != 0:
             return -1
     else:
         # query string on the envrionment stream
-        if (this.c_dlpack_exporter)(arg, &temp_managed_tensor, &env_stream) != 0:
+        if (this.c_dlpack_from_pyobject)(arg, &temp_managed_tensor, &env_stream) != 0:
             return -1
     # If device is not CPU, we should set the device type and id
     if temp_managed_tensor.dl_tensor.device.device_type != kDLCPU:
@@ -148,7 +148,7 @@ cdef int TVMFFIPyArgSetterDLPackCExporter_(
     return 0
 
 
-cdef int TorchDLPackPyObjectImporterFallback_(
+cdef int TorchDLPackToPyObjectFallback_(
     DLManagedTensorVersioned* dltensor, void** py_obj_out
 ) except -1:
     # a bit convoluted but ok as a fallback
@@ -173,7 +173,7 @@ cdef int TVMFFIPyArgSetterTorchFallback_(
     out.type_index = kTVMFFITensor
     out.v_ptr = (<Tensor>arg).chandle
     temp_dltensor = TVMFFITensorGetDLTensorPtr((<Tensor>arg).chandle)
-    ctx.c_dlpack_importer = TorchDLPackPyObjectImporterFallback_
+    ctx.c_dlpack_to_pyobject = TorchDLPackToPyObjectFallback_
     # record the stream and device for torch context
     if is_cuda and ctx.device_type != -1:
         ctx.device_type = temp_dltensor.device.device_type
@@ -370,15 +370,15 @@ cdef int TVMFFIPyArgSetterFactory_(PyObject* value, TVMFFIPyArgSetter* out) exce
     if isinstance(arg, ObjectRValueRef):
         out.func = TVMFFIPyArgSetterObjectRValueRef_
         return 0
-    if os.environ.get("TVM_FFI_SKIP_C_DLPACK_EXPORTER", "0") != "1":
+    if os.environ.get("TVM_FFI_SKIP_c_dlpack_from_pyobject", "0") != "1":
         # external tensors
-        if hasattr(arg, "__c_dlpack_exporter__"):
+        if hasattr(arg, "__c_dlpack_from_pyobject__"):
             out.func = TVMFFIPyArgSetterDLPackCExporter_
-            temp_ptr = arg.__c_dlpack_exporter__
-            out.c_dlpack_exporter = temp_ptr
-            if hasattr(arg, "__c_dlpack_importer__"):
-                temp_ptr = arg.__c_dlpack_importer__
-                out.c_dlpack_importer = temp_ptr
+            temp_ptr = arg.__c_dlpack_from_pyobject__
+            out.c_dlpack_from_pyobject = temp_ptr
+            if hasattr(arg, "__c_dlpack_to_pyobject__"):
+                temp_ptr = arg.__c_dlpack_to_pyobject__
+                out.c_dlpack_to_pyobject = temp_ptr
             if hasattr(arg, "__c_dlpack_tensor_allocator__"):
                 temp_ptr = arg.__c_dlpack_tensor_allocator__
                 out.c_dlpack_tensor_allocator = temp_ptr
@@ -470,7 +470,7 @@ cdef class Function(Object):
     def __call__(self, *args):
         cdef TVMFFIAny result
         cdef int c_api_ret_code
-        cdef DLPackPyObjectImporter c_dlpack_importer = NULL
+        cdef DLPackToPyObject c_dlpack_to_pyobject = NULL
         # IMPORTANT: caller need to initialize result->type_index to kTVMFFINone
         result.type_index = kTVMFFINone
         result.v_int64 = 0
@@ -480,12 +480,12 @@ cdef class Function(Object):
             &result,
             &c_api_ret_code,
             self.release_gil,
-            &c_dlpack_importer
+            &c_dlpack_to_pyobject
         )
         # NOTE: logic is same as check_call
         # directly inline here to simplify traceback
         if c_api_ret_code == 0:
-            return make_ret(result, c_dlpack_importer)
+            return make_ret(result, c_dlpack_to_pyobject)
         elif c_api_ret_code == -2:
             raise_existing_error()
         raise move_from_last_error().py_error()
diff --git a/ffi/python/tvm_ffi/cython/tensor.pxi b/ffi/python/tvm_ffi/cython/tensor.pxi
index 2fd80bc1a6c8..1255f0b0c3ff 100644
--- a/ffi/python/tvm_ffi/cython/tensor.pxi
+++ b/ffi/python/tvm_ffi/cython/tensor.pxi
@@ -275,7 +275,7 @@ _set_class_tensor(Tensor)
 _register_object_by_index(kTVMFFITensor, Tensor)
 
 
-cdef int _dltensor_test_wrapper_c_dlpack_exporter(
+cdef int _dltensor_test_wrapper_c_dlpack_from_pyobject(
     void* obj, DLManagedTensorVersioned** out, TVMFFIStreamHandle* env_stream
 ) except -1:
     cdef PyObject* py_obj = <PyObject*>obj
@@ -291,8 +291,8 @@ cdef int _dltensor_test_wrapper_c_dlpack_exporter(
     return TVMFFITensorToDLPackVersioned(wrapper.tensor.chandle, out)
 
 
-def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
-    cdef DLPackPyObjectExporter converter_func = _dltensor_test_wrapper_c_dlpack_exporter
+def _dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr():
+    cdef DLPackFromPyObject converter_func = _dltensor_test_wrapper_c_dlpack_from_pyobject
     cdef void* temp_ptr = <void*>converter_func
     cdef long long temp_int_ptr = <long long>temp_ptr
     return temp_int_ptr
@@ -301,7 +301,7 @@ def _dltensor_test_wrapper_c_dlpack_exporter_as_intptr():
 cdef class DLTensorTestWrapper:
     """Wrapper of a Tensor that exposes DLPack protocol, only for testing purpose.
     """
-    __c_dlpack_exporter__ = _dltensor_test_wrapper_c_dlpack_exporter_as_intptr()
+    __c_dlpack_from_pyobject__ = _dltensor_test_wrapper_c_dlpack_from_pyobject_as_intptr()
 
     cdef Tensor tensor
     cdef dict __dict__
@@ -333,19 +333,19 @@ cdef inline object make_ret_dltensor(TVMFFIAny result):
     return tensor
 
 
-cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackPyObjectImporter c_dlpack_importer = NULL):
+cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackToPyObject c_dlpack_to_pyobject = NULL):
     # TODO: Implement
     cdef Tensor tensor
     cdef void* py_obj
     cdef DLManagedTensorVersioned* dlpack
 
-    if c_dlpack_importer != NULL:
+    if c_dlpack_to_pyobject != NULL:
         # try convert and import into the environment array if possible
         if TVMFFITensorToDLPackVersioned(chandle, &dlpack) == 0:
             try:
                 # note that py_obj already holds an extra reference to the tensor
                 # so we need to decref it after the conversion
-                c_dlpack_importer(dlpack, &py_obj)
+                c_dlpack_to_pyobject(dlpack, &py_obj)
                 tensor = <Tensor>py_obj
                 Py_DECREF(tensor)
                 return tensor
@@ -358,5 +358,5 @@ cdef inline object make_tensor_from_chandle(TVMFFIObjectHandle chandle, DLPackPy
     return tensor
 
 
-cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackPyObjectImporter c_dlpack_importer):
-    return make_tensor_from_chandle(any.v_ptr, c_dlpack_importer)
+cdef inline object make_tensor_from_any(TVMFFIAny any, DLPackToPyObject c_dlpack_to_pyobject):
+    return make_tensor_from_chandle(any.v_ptr, c_dlpack_to_pyobject)
diff --git a/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h b/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
index c7d847b85780..87b426829d1a 100644
--- a/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
+++ b/ffi/python/tvm_ffi/cython/tvm_ffi_python_helpers.h
@@ -44,8 +44,7 @@
  * \note We use void* to avoid dependency on Python.h so this specific type is
  *       not dependent on Python.h and can be copied to dlpack.h
  */
-typedef int (*DLPackPyObjectExporter)(void* py_obj, DLManagedTensorVersioned** out,
-                                      void** env_stream);
+typedef int (*DLPackFromPyObject)(void* py_obj, DLManagedTensorVersioned** out, void** env_stream);
 /*!
  * \brief C-style function pointer to speed convert a DLManagedTensorVersioned to a PyObject Tensor.
  * \param tensor The DLManagedTensorVersioned to convert.
@@ -54,7 +53,7 @@
  * \note We use void* to avoid dependency on Python.h so this specific type is
 *       not dependent on Python.h and can be copied to dlpack.h
 */
-typedef int (*DLPackPyObjectImporter)(DLManagedTensorVersioned* tensor, void** py_obj_out);
+typedef int (*DLPackToPyObject)(DLManagedTensorVersioned* tensor, void** py_obj_out);
 
 ///--------------------------------------------------------------------------------
 /// We deliberately designed the data structure and function to be C-style
@@ -82,7 +81,7 @@ struct TVMFFIPyCallContext {
   /*! \brief the number of temporary arguments */
   int num_temp_py_objects = 0;
   /*! \brief the DLPack exporter, if any */
-  DLPackPyObjectImporter c_dlpack_importer{nullptr};
+  DLPackToPyObject c_dlpack_to_pyobject{nullptr};
   /*! \brief the DLPack allocator, if any */
   DLPackTensorAllocator c_dlpack_tensor_allocator{nullptr};
 };
@@ -102,11 +101,11 @@ struct TVMFFIPyArgSetter {
   /*!
    * \brief Optional DLPack exporter for for setters that leverages DLPack protocol.
   */
-  DLPackPyObjectExporter c_dlpack_exporter{nullptr};
+  DLPackFromPyObject c_dlpack_from_pyobject{nullptr};
   /*!
    * \brief Optional DLPack importer for for setters that leverages DLPack protocol.
   */
-  DLPackPyObjectImporter c_dlpack_importer{nullptr};
+  DLPackToPyObject c_dlpack_to_pyobject{nullptr};
   /*!
    * \brief Optional DLPack allocator for for setters that leverages DLPack protocol.
   */
@@ -273,7 +272,7 @@ class TVMFFIPyCallManager {
    */
  int Call(TVMFFIPyArgSetterFactory setter_factory, void* func_handle, PyObject* py_arg_tuple,
           TVMFFIAny* result, int* c_api_ret_code, bool release_gil,
-           DLPackPyObjectImporter* optional_out_dlpack_importer) {
+           DLPackToPyObject* optional_out_dlpack_importer) {
     int64_t num_args = PyTuple_Size(py_arg_tuple);
     if (num_args == -1) return -1;
     try {
@@ -321,8 +320,8 @@ class TVMFFIPyCallManager {
         c_api_ret_code[0] = TVMFFIEnvSetTensorAllocator(prev_tensor_allocator, 0, nullptr);
         if (c_api_ret_code[0] != 0) return 0;
       }
-      if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_importer != nullptr) {
-        *optional_out_dlpack_importer = ctx.c_dlpack_importer;
+      if (optional_out_dlpack_importer != nullptr && ctx.c_dlpack_to_pyobject != nullptr) {
+        *optional_out_dlpack_importer = ctx.c_dlpack_to_pyobject;
       }
       return 0;
     } catch (const std::exception& ex) {
@@ -430,7 +429,7 @@ class TVMFFIPyCallManager {
 inline int TVMFFIPyFuncCall(TVMFFIPyArgSetterFactory setter_factory, void* func_handle,
                             PyObject* py_arg_tuple, TVMFFIAny* result, int* c_api_ret_code,
                             bool release_gil = true,
-                            DLPackPyObjectImporter* out_dlpack_importer = nullptr) {
+                            DLPackToPyObject* out_dlpack_importer = nullptr) {
   return TVMFFIPyCallManager::ThreadLocal()->Call(setter_factory, func_handle, py_arg_tuple,
                                                   result, c_api_ret_code, release_gil, out_dlpack_importer);
 }