diff --git a/executorch-sys/build.rs b/executorch-sys/build.rs
index f187978..8b9a65e 100644
--- a/executorch-sys/build.rs
+++ b/executorch-sys/build.rs
@@ -37,17 +37,16 @@ fn build_c_extension() {
 fn generate_bindings() {
     let c_ext_dir = cpp_ext_dir();
+    let cpp_dir = Path::new(&env!("CARGO_MANIFEST_DIR")).join("cpp");
+    println!("cargo::rerun-if-changed={}", cpp_dir.to_str().unwrap());
 
-    let bindings_h = Path::new(&env!("CARGO_MANIFEST_DIR"))
-        .join("cpp")
-        .join("bindings.hpp");
+    let bindings_h = cpp_dir.join("bindings.hpp");
     let bindings_defines_h = c_ext_dir.parent().unwrap().join("executorch_rs_defines.h");
     let mut bindings_defines = String::from("#pragma once\n");
     for define in cpp_defines() {
         bindings_defines.push_str(&format!("#define {}\n", define));
     }
 
-    println!("cargo::rerun-if-changed={}", bindings_h.to_str().unwrap());
 
     let bindings = bindgen::Builder::default()
         .clang_arg(format!(
             "-I{}",
@@ -114,12 +113,9 @@ fn generate_bindings() {
         .opaque_type("torch::executor::util::BufferDataLoader")
         // feature module
         .opaque_type("torch::executor::Module")
-        .rustified_enum("torch::executor::Error")
-        .rustified_enum("torch::executor::ScalarType")
-        .rustified_enum("torch::executor::Tag")
-        .rustified_enum("torch::executor::Program_Verification")
-        .rustified_enum("torch::executor::Program_HeaderStatus")
-        .rustified_enum("torch::executor::TensorShapeDynamism")
+        .default_enum_style(bindgen::EnumVariation::Rust {
+            non_exhaustive: false,
+        })
         // feature data-loader
         .rustified_enum("torch::executor::util::MmapDataLoader_MlockConfig")
         // feature module
diff --git a/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/dim_order_util.h b/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/dim_order_util.h
index 33aa4f8..3bfc9b5 100644
--- a/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/dim_order_util.h
+++ b/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/dim_order_util.h
@@ -19,7 +19,7 @@ namespace executor {
 namespace {
 template <typename DimOrderType>
 bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
-  for (int32_t i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] >= dims) {
       return false;
     }
@@ -39,7 +39,7 @@ template <typename DimOrderType>
 inline bool is_contiguous_dim_order(
     const DimOrderType* dim_order,
     const size_t dims) {
-  for (int i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] != i) {
       return false;
     }
@@ -71,7 +71,7 @@ bool is_channels_last_dim_order(
   if (dim_order[0] != 0) {
     return false;
   }
-  int d = 1;
+  size_t d = 1;
   while (d < dims - 1) {
     if (dim_order[d] != d + 1) {
       return false;
@@ -246,7 +246,7 @@ __ET_NODISCARD inline Error stride_to_dim_order(
 
   sorter.quick_sort(array, 0, dims - 1);
 
-  for (auto i = 0; i < dims; i++) {
+  for (size_t i = 0; i < dims; i++) {
     dim_order[i] = array[i].dim_order;
   }
   return Error::Ok;
diff --git a/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/tensor_util.h b/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/tensor_util.h
index 196e7d9..aebbcb4 100644
--- a/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/tensor_util.h
+++ b/executorch-sys/cpp/executorch/runtime/core/exec_aten/util/tensor_util.h
@@ -556,7 +556,7 @@ inline bool tensors_have_same_dtype(
 
 inline bool tensor_is_rank(exec_aten::Tensor t, size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() == rank,
+      static_cast<size_t>(t.dim()) == rank,
       "Expected tensor.dim() to be %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -568,7 +568,7 @@ inline bool tensor_has_rank_greater_or_equal_to(
     exec_aten::Tensor t,
     size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() >= rank,
+      static_cast<size_t>(t.dim()) >= rank,
       "Expected tensor.dim() to be >= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -580,7 +580,7 @@ inline bool tensor_has_rank_smaller_or_equal_to(
     exec_aten::Tensor t,
     size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() <= rank,
+      static_cast<size_t>(t.dim()) <= rank,
       "Expected tensor.dim() to be <= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -636,12 +636,12 @@ inline bool tensors_have_same_size_at_dims(
     exec_aten::Tensor b,
     size_t dim_b) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      dim_a < a.dim(),
+      dim_a < static_cast<size_t>(a.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_a),
       static_cast<size_t>(a.dim()));
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      dim_b < b.dim(),
+      dim_b < static_cast<size_t>(b.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_b),
       static_cast<size_t>(b.dim()));
@@ -671,7 +671,7 @@ inline bool tensors_have_same_shape(exec_aten::Tensor a, exec_aten::Tensor b) {
         static_cast<size_t>(b.numel()),
         static_cast<size_t>(a.dim()),
         static_cast<size_t>(b.dim()));
-    for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+    for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
       ET_LOG(
           Error,
           "    size(%zu): (%zu, %zu)",
@@ -708,7 +708,7 @@ inline bool tensors_have_same_shape(
         static_cast<size_t>(a.dim()),
         static_cast<size_t>(b.dim()),
         static_cast<size_t>(c.dim()));
-    for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+    for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
       ET_LOG(
           Error,
           "    size(%zu): (%zu, %zu, %zu)",
@@ -771,7 +771,7 @@ inline bool tensors_have_same_strides(
         ET_TENSOR_CHECK_PREFIX__ ": dim=(%zu, %zu)",
         static_cast<size_t>(a.dim()),
         static_cast<size_t>(b.dim()));
-    for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+    for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
       ET_LOG(
           Error,
           "    stride(%zu): (%zu, %zu)",
@@ -796,7 +796,7 @@ inline bool tensors_have_same_strides(
         static_cast<size_t>(a.dim()),
         static_cast<size_t>(b.dim()),
         static_cast<size_t>(c.dim()));
-    for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+    for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
       ET_LOG(
           Error,
           "    stride(%zu): (%zu, %zu, %zu)",
@@ -869,7 +869,7 @@ inline size_t getLeadingDims(const Tensor& tensor, int64_t dim) {
       dim,
       ssize_t(tensor.dim()));
   size_t dims = 1;
-  for (size_t i = 0; i < dim; ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(dim); ++i) {
     dims *= static_cast<size_t>(tensor.size(i));
   }
   return dims;
@@ -884,7 +884,7 @@ inline size_t getTrailingDims(const Tensor& tensor, int64_t dim) {
       dim,
       ssize_t(tensor.dim()));
   size_t dims = 1;
-  for (size_t i = dim + 1; i < tensor.dim(); ++i) {
+  for (size_t i = dim + 1; i < static_cast<size_t>(tensor.dim()); ++i) {
     dims *= static_cast<size_t>(tensor.size(i));
   }
   return dims;
@@ -923,7 +923,7 @@ inline size_t coordinateToIndex(
  */
 inline void
 indexToCoordinate(const Tensor& tensor, size_t index, size_t* coordinate) {
-  ET_CHECK(index < tensor.numel());
+  ET_CHECK(index < static_cast<size_t>(tensor.numel()));
   for (auto i = 0; i < tensor.dim(); ++i) {
     auto dim = tensor.dim() - 1 - i;
     size_t dim_size = tensor.size(dim);
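Two notes on the patch above, with sketches that are not part of the diff. First, the build.rs change: watching the whole cpp/ directory replaces the single-file watch on bindings.hpp (the vendored headers below are patched in-tree, so presumably edits to any of them should retrigger the build), and the six per-type rustified_enum calls collapse into a single default_enum_style. A minimal, standalone sketch of that builder setting; the wrapper.hpp input and bindings.rs output paths are hypothetical, not the crate's real layout:

fn main() {
    let bindings = bindgen::Builder::default()
        .header("wrapper.hpp") // hypothetical input header
        // Every C/C++ enum in the header now maps to a real Rust enum by
        // default, replacing the per-type `rustified_enum` calls.
        // `non_exhaustive: false` keeps the generated enums exhaustively
        // matchable (no hidden catch-all variant).
        .default_enum_style(bindgen::EnumVariation::Rust {
            non_exhaustive: false,
        })
        .generate()
        .expect("failed to generate bindings");
    bindings
        .write_to_file("bindings.rs") // hypothetical output path
        .expect("failed to write bindings");
}

Note that a Rust-style enum is only sound if the C++ side never hands back an out-of-range value, so this default assumes all the bound enums are closed; the explicit rustified_enum left for MmapDataLoader_MlockConfig appears to be subsumed by the new default, but is harmless.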
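Second, every hunk in the two vendored headers fixes a signed/unsigned mismatch: loop counters declared int, int32_t, or auto compared against size_t bounds, and the signed results of Tensor::dim() and Tensor::numel() compared against size_t values. Such comparisons trip -Wsign-compare, and the implicit conversion they rely on is genuinely hazardous, because the signed operand is converted to unsigned before comparing. A sketch of the failure mode in Rust, where the conversion must be written out (C++ performs the same wrap-around silently):

fn main() {
    let dims: usize = 4; // plays the role of a size_t bound
    let i: i64 = -1; // a signed index, e.g. something derived from dim()

    // C++'s `i < dims` implicitly converts `i` to unsigned, so -1 wraps
    // to the maximum value and the bound check silently fails. Rust
    // forces the cast, which makes the same bug visible:
    assert!(!((i as usize) < dims)); // -1 wraps to usize::MAX

    // A checked conversion rejects negative values outright. The patched
    // headers instead use size_t counters from the start, or cast values
    // known to be non-negative (tensor dims, numel) via static_cast<size_t>.
    assert!(usize::try_from(i).is_err());
    println!("wrap-around demonstrated");
}

This is why the casts in the patch go in the unsigned direction: dim() and numel() are non-negative by contract, so widening them to size_t is lossless, whereas leaving the comparison mixed would convert a signed operand implicitly.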