
Commit

Fix some cpp warnings
barakugav committed Aug 31, 2024
1 parent f5d6926 commit 675399a
Showing 3 changed files with 22 additions and 26 deletions.
16 changes: 6 additions & 10 deletions executorch-sys/build.rs
@@ -37,17 +37,16 @@ fn build_c_extension() {
 
 fn generate_bindings() {
     let c_ext_dir = cpp_ext_dir();
+    let cpp_dir = Path::new(&env!("CARGO_MANIFEST_DIR")).join("cpp");
+    println!("cargo::rerun-if-changed={}", cpp_dir.to_str().unwrap());
 
-    let bindings_h = Path::new(&env!("CARGO_MANIFEST_DIR"))
-        .join("cpp")
-        .join("bindings.hpp");
+    let bindings_h = cpp_dir.join("bindings.hpp");
     let bindings_defines_h = c_ext_dir.parent().unwrap().join("executorch_rs_defines.h");
     let mut bindings_defines = String::from("#pragma once\n");
     for define in cpp_defines() {
         bindings_defines.push_str(&format!("#define {}\n", define));
     }
 
-    println!("cargo::rerun-if-changed={}", bindings_h.to_str().unwrap());
     let bindings = bindgen::Builder::default()
         .clang_arg(format!(
             "-I{}",
@@ -114,12 +113,9 @@ fn generate_bindings() {
         .opaque_type("torch::executor::util::BufferDataLoader")
         // feature module
         .opaque_type("torch::executor::Module")
-        .rustified_enum("torch::executor::Error")
-        .rustified_enum("torch::executor::ScalarType")
-        .rustified_enum("torch::executor::Tag")
-        .rustified_enum("torch::executor::Program_Verification")
-        .rustified_enum("torch::executor::Program_HeaderStatus")
-        .rustified_enum("torch::executor::TensorShapeDynamism")
+        .default_enum_style(bindgen::EnumVariation::Rust {
+            non_exhaustive: false,
+        })
         // feature data-loader
         .rustified_enum("torch::executor::util::MmapDataLoader_MlockConfig")
         // feature module
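
For context on the bindings_defines loop in the first hunk: the build script emits a small C header of feature defines next to the generated bindings, presumably so the same configuration is visible to both bindgen and the C++ sources. A minimal sketch of what the generated executorch_rs_defines.h might contain; the define names are placeholders, since cpp_defines() is not shown in this diff:

// Hypothetical contents of the generated executorch_rs_defines.h.
// The "#pragma once" line comes from the String the loop starts with;
// each entry returned by cpp_defines() then adds one "#define" line.
// The names below are made up for illustration.
#pragma once
#define EXECUTORCH_RS_FEATURE_MODULE
#define EXECUTORCH_RS_FEATURE_DATA_LOADER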
8 changes: 4 additions & 4 deletions (file name not shown)
@@ -19,7 +19,7 @@ namespace executor {
 namespace {
 template <typename DimOrderType>
 bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
-  for (int32_t i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] >= dims) {
       return false;
     }
@@ -39,7 +39,7 @@ template <typename DimOrderType>
 inline bool is_contiguous_dim_order(
     const DimOrderType* dim_order,
     const size_t dims) {
-  for (int i = 0; i < dims; ++i) {
+  for (size_t i = 0; i < dims; ++i) {
     if (dim_order[i] != i) {
       return false;
     }
@@ -71,7 +71,7 @@ bool is_channels_last_dim_order(
   if (dim_order[0] != 0) {
     return false;
   }
-  int d = 1;
+  size_t d = 1;
   while (d < dims - 1) {
     if (dim_order[d] != d + 1) {
       return false;
@@ -246,7 +246,7 @@ __ET_NODISCARD inline Error stride_to_dim_order(
 
   sorter.quick_sort(array, 0, dims - 1);
 
-  for (auto i = 0; i < dims; i++) {
+  for (size_t i = 0; i < dims; i++) {
     dim_order[i] = array[i].dim_order;
   }
   return Error::Ok;
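
All four hunks in this file fix the same warning class: a signed loop index compared against the unsigned size_t bound dims triggers -Wsign-compare. A minimal self-contained sketch of the before/after, using a hypothetical validate() helper rather than the exact functions above:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the dim-order checks above.
bool validate(const std::uint8_t* dim_order, std::size_t dims) {
  // Before: `for (int32_t i = 0; i < dims; ++i)` compares a signed int32_t
  // with the unsigned size_t `dims` and warns under -Wsign-compare (and
  // would be incorrect for dims > INT32_MAX). Matching the index type to
  // the bound's type fixes both.
  for (std::size_t i = 0; i < dims; ++i) {
    if (dim_order[i] >= dims) {
      return false;
    }
  }
  return true;
}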
24 changes: 12 additions & 12 deletions (file name not shown)
@@ -556,7 +556,7 @@ inline bool tensors_have_same_dtype(
 
 inline bool tensor_is_rank(exec_aten::Tensor t, size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() == rank,
+      static_cast<size_t>(t.dim()) == rank,
       "Expected tensor.dim() to be %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -568,7 +568,7 @@ inline bool tensor_has_rank_greater_or_equal_to(
     exec_aten::Tensor t,
     size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() >= rank,
+      static_cast<size_t>(t.dim()) >= rank,
       "Expected tensor.dim() to be >= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -580,7 +580,7 @@ inline bool tensor_has_rank_smaller_or_equal_to(
     exec_aten::Tensor t,
     size_t rank) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      t.dim() <= rank,
+      static_cast<size_t>(t.dim()) <= rank,
       "Expected tensor.dim() to be <= %zu, but got %zu",
       static_cast<size_t>(rank),
       static_cast<size_t>(t.dim()));
@@ -636,12 +636,12 @@ inline bool tensors_have_same_size_at_dims(
     exec_aten::Tensor b,
     size_t dim_b) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      dim_a < a.dim(),
+      dim_a < static_cast<size_t>(a.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_a),
       static_cast<size_t>(a.dim()));
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      dim_b < b.dim(),
+      dim_b < static_cast<size_t>(b.dim()),
       "Cannot retrieve dim %zu from tensor with dim %zu",
       static_cast<size_t>(dim_b),
       static_cast<size_t>(b.dim()));
@@ -671,7 +671,7 @@ inline bool tensors_have_same_shape(exec_aten::Tensor a, exec_aten::Tensor b) {
       static_cast<size_t>(b.numel()),
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()));
-  for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
     ET_LOG(
         Error,
         " size(%zu): (%zu, %zu)",
@@ -708,7 +708,7 @@ inline bool tensors_have_same_shape(
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()),
       static_cast<size_t>(c.dim()));
-  for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
     ET_LOG(
         Error,
         " size(%zu): (%zu, %zu, %zu)",
@@ -771,7 +771,7 @@ inline bool tensors_have_same_strides(
       ET_TENSOR_CHECK_PREFIX__ ": dim=(%zu, %zu)",
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()));
-  for (size_t d = 0; d < ET_MIN2(a.dim(), b.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN2(a.dim(), b.dim())); ++d) {
     ET_LOG(
         Error,
         " stride(%zu): (%zu, %zu)",
@@ -796,7 +796,7 @@
       static_cast<size_t>(a.dim()),
       static_cast<size_t>(b.dim()),
       static_cast<size_t>(c.dim()));
-  for (size_t d = 0; d < ET_MIN3(a.dim(), b.dim(), c.dim()); ++d) {
+  for (size_t d = 0; d < static_cast<size_t>(ET_MIN3(a.dim(), b.dim(), c.dim())); ++d) {
     ET_LOG(
         Error,
         " stride(%zu): (%zu, %zu, %zu)",
@@ -869,7 +869,7 @@ inline size_t getLeadingDims(const Tensor& tensor, int64_t dim) {
       dim,
       ssize_t(tensor.dim()));
   size_t dims = 1;
-  for (size_t i = 0; i < dim; ++i) {
+  for (size_t i = 0; i < static_cast<size_t>(dim); ++i) {
     dims *= static_cast<size_t>(tensor.size(i));
   }
   return dims;
@@ -884,7 +884,7 @@ inline size_t getTrailingDims(const Tensor& tensor, int64_t dim) {
       dim,
       ssize_t(tensor.dim()));
   size_t dims = 1;
-  for (size_t i = dim + 1; i < tensor.dim(); ++i) {
+  for (size_t i = dim + 1; i < static_cast<size_t>(tensor.dim()); ++i) {
     dims *= static_cast<size_t>(tensor.size(i));
   }
   return dims;
@@ -923,7 +923,7 @@ inline size_t coordinateToIndex(
  */
 inline void
 indexToCoordinate(const Tensor& tensor, size_t index, size_t* coordinate) {
-  ET_CHECK(index < tensor.numel());
+  ET_CHECK(index < static_cast<size_t>(tensor.numel()));
   for (auto i = 0; i < tensor.dim(); ++i) {
     auto dim = tensor.dim() - 1 - i;
     size_t dim_size = tensor.size(dim);
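
The remaining hunks in this file all apply one pattern: dim() and numel() return a signed value while ranks and indices are size_t, so the signed side is cast to size_t before comparing. Casting in this direction is safe because a valid tensor's rank and element count are never negative. A minimal sketch, with a stand-in struct rather than the real exec_aten::Tensor:

#include <cstddef>

// Stand-in for exec_aten::Tensor; the real dim() also returns a signed type.
struct Tensor {
  std::ptrdiff_t n_dim;
  std::ptrdiff_t dim() const { return n_dim; }
};

// Mirrors tensor_is_rank above: casting the non-negative signed dim() to
// size_t makes the comparison unsigned-vs-unsigned and warning-free.
bool tensor_is_rank(const Tensor& t, std::size_t rank) {
  return static_cast<std::size_t>(t.dim()) == rank;
}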
