diff --git a/diesel/src/lib.rs b/diesel/src/lib.rs index 6e0dfd2edd73..3283e62ac2ae 100644 --- a/diesel/src/lib.rs +++ b/diesel/src/lib.rs @@ -244,7 +244,10 @@ clippy::enum_glob_use, clippy::if_not_else, clippy::items_after_statements, - clippy::used_underscore_binding + clippy::used_underscore_binding, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::cast_sign_loss )] #![deny(unsafe_code)] #![cfg_attr(test, allow(clippy::map_unwrap_or, clippy::unwrap_used))] diff --git a/diesel/src/mysql/connection/bind.rs b/diesel/src/mysql/connection/bind.rs index 3aed940799bd..0bbceede6045 100644 --- a/diesel/src/mysql/connection/bind.rs +++ b/diesel/src/mysql/connection/bind.rs @@ -179,7 +179,10 @@ impl Clone for BindData { // written. At the time of writing this comment, the `BindData::bind_for_truncated_data` // function is only called by `Binds::populate_dynamic_buffers` which ensures the corresponding // invariant. - std::slice::from_raw_parts(ptr.as_ptr(), self.length as usize) + std::slice::from_raw_parts( + ptr.as_ptr(), + self.length.try_into().expect("usize is at least 32bit"), + ) }; let mut vec = slice.to_owned(); let ptr = NonNull::new(vec.as_mut_ptr()); @@ -416,7 +419,10 @@ impl BindData { // written. At the time of writing this comment, the `BindData::bind_for_truncated_data` // function is only called by `Binds::populate_dynamic_buffers` which ensures the corresponding // invariant. - std::slice::from_raw_parts(data.as_ptr(), self.length as usize) + std::slice::from_raw_parts( + data.as_ptr(), + self.length.try_into().expect("Usize is at least 32 bit"), + ) }; Some(MysqlValue::new_internal(slice, tpe)) } @@ -429,7 +435,10 @@ impl BindData { fn update_buffer_length(&mut self) { use std::cmp::min; - let actual_bytes_in_buffer = min(self.capacity, self.length as usize); + let actual_bytes_in_buffer = min( + self.capacity, + self.length.try_into().expect("Usize is at least 32 bit"), + ); self.length = actual_bytes_in_buffer as libc::c_ulong; } @@ -475,7 +484,8 @@ impl BindData { self.bytes = None; let offset = self.capacity; - let truncated_amount = self.length as usize - offset; + let truncated_amount = + usize::try_from(self.length).expect("Usize is at least 32 bit") - offset; debug_assert!( truncated_amount > 0, @@ -505,7 +515,7 @@ impl BindData { // offset is zero here as we don't have a buffer yet // we know the requested length here so we can just request // the correct size - let mut vec = vec![0_u8; self.length as usize]; + let mut vec = vec![0_u8; self.length.try_into().expect("usize is at least 32 bit")]; self.capacity = vec.capacity(); self.bytes = NonNull::new(vec.as_mut_ptr()); mem::forget(vec); diff --git a/diesel/src/mysql/connection/mod.rs b/diesel/src/mysql/connection/mod.rs index 992d92c4f915..1dacabc69f27 100644 --- a/diesel/src/mysql/connection/mod.rs +++ b/diesel/src/mysql/connection/mod.rs @@ -187,7 +187,7 @@ impl Connection for MysqlConnection { // we have not called result yet, so calling `execute` is // fine let stmt_use = unsafe { stmt.execute() }?; - Ok(stmt_use.affected_rows()) + stmt_use.affected_rows() }), &mut self.transaction_state, &mut self.instrumentation, diff --git a/diesel/src/mysql/connection/stmt/mod.rs b/diesel/src/mysql/connection/stmt/mod.rs index 8bb527b34645..986a344ed5af 100644 --- a/diesel/src/mysql/connection/stmt/mod.rs +++ b/diesel/src/mysql/connection/stmt/mod.rs @@ -153,9 +153,11 @@ pub(super) struct StatementUse<'a> { } impl<'a> StatementUse<'a> { - pub(in crate::mysql::connection) fn affected_rows(&self) -> 
usize {
+    pub(in crate::mysql::connection) fn affected_rows(&self) -> QueryResult<usize> {
         let affected_rows = unsafe { ffi::mysql_stmt_affected_rows(self.inner.stmt.as_ptr()) };
-        affected_rows as usize
+        affected_rows
+            .try_into()
+            .map_err(|e| Error::DeserializationError(Box::new(e)))
     }
     /// This function should be called after `execute` only
@@ -167,14 +169,19 @@ impl<'a> StatementUse<'a> {
     pub(super) fn populate_row_buffers(&self, binds: &mut OutputBinds) -> QueryResult<Option<()>> {
         let next_row_result = unsafe { ffi::mysql_stmt_fetch(self.inner.stmt.as_ptr()) };
-        match next_row_result as libc::c_uint {
-            ffi::MYSQL_NO_DATA => Ok(None),
-            ffi::MYSQL_DATA_TRUNCATED => binds.populate_dynamic_buffers(self).map(Some),
-            0 => {
-                binds.update_buffer_lengths();
-                Ok(Some(()))
+        if next_row_result < 0 {
+            self.inner.did_an_error_occur().map(Some)
+        } else {
+            #[allow(clippy::cast_sign_loss)] // that's how it's supposed to be based on the API
+            match next_row_result as libc::c_uint {
+                ffi::MYSQL_NO_DATA => Ok(None),
+                ffi::MYSQL_DATA_TRUNCATED => binds.populate_dynamic_buffers(self).map(Some),
+                0 => {
+                    binds.update_buffer_lengths();
+                    Ok(Some(()))
+                }
+                _error => self.inner.did_an_error_occur().map(Some),
             }
-            _error => self.inner.did_an_error_occur().map(Some),
         }
     }
@@ -187,7 +194,8 @@ impl<'a> StatementUse<'a> {
             ffi::mysql_stmt_fetch_column(
                 self.inner.stmt.as_ptr(),
                 bind,
-                idx as libc::c_uint,
+                idx.try_into()
+                    .map_err(|e| Error::DeserializationError(Box::new(e)))?,
                 offset as libc::c_ulong,
             );
             self.inner.did_an_error_occur()
diff --git a/diesel/src/mysql/types/date_and_time/chrono.rs b/diesel/src/mysql/types/date_and_time/chrono.rs
index 786dcd39b9f3..80f188f79ff6 100644
--- a/diesel/src/mysql/types/date_and_time/chrono.rs
+++ b/diesel/src/mysql/types/date_and_time/chrono.rs
@@ -26,7 +26,7 @@ impl FromSql for NaiveDateTime {
 impl ToSql for NaiveDateTime {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result {
         let mysql_time = MysqlTime {
-            year: self.year() as libc::c_uint,
+            year: self.year().try_into()?,
             month: self.month() as libc::c_uint,
             day: self.day() as libc::c_uint,
             hour: self.hour() as libc::c_uint,
@@ -48,16 +48,16 @@ impl FromSql for NaiveDateTime {
     fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result<Self> {
         let mysql_time = >::from_sql(bytes)?;
-        NaiveDate::from_ymd_opt(mysql_time.year as i32, mysql_time.month, mysql_time.day)
-            .and_then(|v| {
-                v.and_hms_micro_opt(
-                    mysql_time.hour,
-                    mysql_time.minute,
-                    mysql_time.second,
-                    mysql_time.second_part as u32,
-                )
-            })
-            .ok_or_else(|| format!("Cannot parse this date: {mysql_time:?}").into())
+        let micro = mysql_time.second_part.try_into()?;
+        NaiveDate::from_ymd_opt(
+            mysql_time.year.try_into()?,
+            mysql_time.month,
+            mysql_time.day,
+        )
+        .and_then(|v| {
+            v.and_hms_micro_opt(mysql_time.hour, mysql_time.minute, mysql_time.second, micro)
+        })
+        .ok_or_else(|| format!("Cannot parse this date: {mysql_time:?}").into())
     }
 }
@@ -94,7 +94,7 @@ impl FromSql for NaiveTime {
 impl ToSql for NaiveDate {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result {
         let mysql_time = MysqlTime {
-            year: self.year() as libc::c_uint,
+            year: self.year().try_into()?,
             month: self.month() as libc::c_uint,
             day: self.day() as libc::c_uint,
             hour: 0,
@@ -114,8 +114,12 @@ impl ToSql for NaiveDate {
 impl FromSql for NaiveDate {
     fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result<Self> {
         let mysql_time = >::from_sql(bytes)?;
-        NaiveDate::from_ymd_opt(mysql_time.year as i32, mysql_time.month, mysql_time.day)
-
.ok_or_else(|| format!("Unable to convert {mysql_time:?} to chrono").into()) + NaiveDate::from_ymd_opt( + mysql_time.year.try_into()?, + mysql_time.month, + mysql_time.day, + ) + .ok_or_else(|| format!("Unable to convert {mysql_time:?} to chrono").into()) } } diff --git a/diesel/src/mysql/types/date_and_time/time.rs b/diesel/src/mysql/types/date_and_time/time.rs index 0877e8734b8c..e4bec4c9a5f7 100644 --- a/diesel/src/mysql/types/date_and_time/time.rs +++ b/diesel/src/mysql/types/date_and_time/time.rs @@ -15,7 +15,7 @@ fn to_time(dt: MysqlTime) -> Result> { ("year", dt.year), ("month", dt.month), ("day", dt.day), - ("offset", dt.time_zone_displacement as u32), + ("offset", dt.time_zone_displacement.try_into()?), ] { if field != 0 { return Err(format!("Unable to convert {dt:?} to time: {name} must be 0").into()); @@ -63,7 +63,7 @@ fn to_primitive_datetime(dt: OffsetDateTime) -> PrimitiveDateTime { impl ToSql for PrimitiveDateTime { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: self.hour() as libc::c_uint, @@ -171,7 +171,7 @@ impl FromSql for NaiveTime { impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: 0, diff --git a/diesel/src/mysql/types/mod.rs b/diesel/src/mysql/types/mod.rs index d52d32afe228..afd475f780cb 100644 --- a/diesel/src/mysql/types/mod.rs +++ b/diesel/src/mysql/types/mod.rs @@ -25,7 +25,7 @@ impl ToSql for i8 { impl FromSql for i8 { fn from_sql(value: MysqlValue<'_>) -> deserialize::Result { let bytes = value.as_bytes(); - Ok(bytes[0] as i8) + Ok(i8::from_be_bytes([bytes[0]])) } } @@ -69,12 +69,14 @@ where #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u8 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i8), &mut out.reborrow()) + out.write_u8(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u8 { + #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i8 = FromSql::::from_sql(bytes)?; Ok(signed as u8) @@ -84,12 +86,18 @@ impl FromSql, Mysql> for u8 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u16 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i16), &mut out.reborrow()) + out.write_u16::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u16 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i32 = FromSql::::from_sql(bytes)?; Ok(signed as u16) @@ -99,12 +107,18 @@ impl FromSql, Mysql> for u16 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u32 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i32), &mut out.reborrow()) + out.write_u32::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u32 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + 
clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i64 = FromSql::::from_sql(bytes)?; Ok(signed as u32) @@ -114,12 +128,18 @@ impl FromSql, Mysql> for u32 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u64 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i64), &mut out.reborrow()) + out.write_u64::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u64 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i64 = FromSql::::from_sql(bytes)?; Ok(signed as u64) diff --git a/diesel/src/mysql/types/primitives.rs b/diesel/src/mysql/types/primitives.rs index 7a680018775f..2de855a4dbd5 100644 --- a/diesel/src/mysql/types/primitives.rs +++ b/diesel/src/mysql/types/primitives.rs @@ -22,6 +22,7 @@ where } } +#[allow(clippy::cast_possible_truncation)] // that's what we want here fn f32_to_i64(f: f32) -> deserialize::Result { if f <= i64::MAX as f32 && f >= i64::MIN as f32 { Ok(f.trunc() as i64) @@ -32,6 +33,7 @@ fn f32_to_i64(f: f32) -> deserialize::Result { } } +#[allow(clippy::cast_possible_truncation)] // that's what we want here fn f64_to_i64(f: f64) -> deserialize::Result { if f <= i64::MAX as f64 && f >= i64::MIN as f64 { Ok(f.trunc() as i64) @@ -128,6 +130,8 @@ impl FromSql for f32 { NumericRepresentation::Medium(x) => Ok(x as Self), NumericRepresentation::Big(x) => Ok(x as Self), NumericRepresentation::Float(x) => Ok(x), + // there is currently no way to do this in a better way + #[allow(clippy::cast_possible_truncation)] NumericRepresentation::Double(x) => Ok(x as Self), NumericRepresentation::Decimal(bytes) => Ok(str::from_utf8(bytes)?.parse()?), } diff --git a/diesel/src/mysql/value.rs b/diesel/src/mysql/value.rs index 98550bf8bae7..e72c6a926b74 100644 --- a/diesel/src/mysql/value.rs +++ b/diesel/src/mysql/value.rs @@ -60,7 +60,7 @@ impl<'a> MysqlValue<'a> { pub(crate) fn numeric_value(&self) -> deserialize::Result> { Ok(match self.tpe { MysqlType::UnsignedTiny | MysqlType::Tiny => { - NumericRepresentation::Tiny(self.raw[0] as i8) + NumericRepresentation::Tiny(self.raw[0].try_into()?) } MysqlType::UnsignedShort | MysqlType::Short => { NumericRepresentation::Small(i16::from_ne_bytes((&self.raw[..2]).try_into()?)) diff --git a/diesel/src/pg/connection/copy.rs b/diesel/src/pg/connection/copy.rs index 8c0002fca5a2..d8d14614cf26 100644 --- a/diesel/src/pg/connection/copy.rs +++ b/diesel/src/pg/connection/copy.rs @@ -102,7 +102,10 @@ impl<'conn> BufRead for CopyToBuffer<'conn> { let len = pq_sys::PQgetCopyData(self.conn.internal_connection.as_ptr(), &mut self.ptr, 0); match len { - len if len >= 0 => self.len = len as usize + 1, + len if len >= 0 => { + self.len = 1 + usize::try_from(len) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))? 
+ } -1 => self.len = 0, _ => { let error = self.conn.last_error_message(); diff --git a/diesel/src/pg/connection/raw.rs b/diesel/src/pg/connection/raw.rs index 8ae3e499f226..b62791962204 100644 --- a/diesel/src/pg/connection/raw.rs +++ b/diesel/src/pg/connection/raw.rs @@ -147,7 +147,9 @@ impl RawConnection { pq_sys::PQputCopyData( self.internal_connection.as_ptr(), c.as_ptr() as *const libc::c_char, - c.len() as libc::c_int, + c.len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?, ) }; if res != 1 { diff --git a/diesel/src/pg/connection/result.rs b/diesel/src/pg/connection/result.rs index b4f501669f3d..a5542d0a51ce 100644 --- a/diesel/src/pg/connection/result.rs +++ b/diesel/src/pg/connection/result.rs @@ -16,8 +16,8 @@ use std::cell::OnceCell; #[allow(missing_debug_implementations)] pub struct PgResult { internal_result: RawResult, - column_count: usize, - row_count: usize, + column_count: libc::c_int, + row_count: libc::c_int, // We store field names as pointer // as we cannot put a correct lifetime here // The value is valid as long as we haven't freed `RawResult` @@ -34,8 +34,8 @@ impl PgResult { | ExecStatusType::PGRES_COPY_IN | ExecStatusType::PGRES_COPY_OUT | ExecStatusType::PGRES_TUPLES_OK => { - let column_count = unsafe { PQnfields(internal_result.as_ptr()) as usize }; - let row_count = unsafe { PQntuples(internal_result.as_ptr()) as usize }; + let column_count = unsafe { PQnfields(internal_result.as_ptr()) }; + let row_count = unsafe { PQntuples(internal_result.as_ptr()) }; Ok(PgResult { internal_result, column_count, @@ -108,7 +108,10 @@ impl PgResult { } pub(super) fn num_rows(&self) -> usize { - self.row_count + self.row_count.try_into().expect( + "Diesel expects to run on a >= 32 bit OS \ + (or libpq is giving out negative row count)", + ) } pub(super) fn get_row(self: Rc, idx: usize) -> PgRow { @@ -119,29 +122,38 @@ impl PgResult { if self.is_null(row_idx, col_idx) { None } else { - let row_idx = row_idx as libc::c_int; - let col_idx = col_idx as libc::c_int; + let row_idx = row_idx.try_into().ok()?; + let col_idx = col_idx.try_into().ok()?; unsafe { let value_ptr = PQgetvalue(self.internal_result.as_ptr(), row_idx, col_idx) as *const u8; let num_bytes = PQgetlength(self.internal_result.as_ptr(), row_idx, col_idx); - Some(slice::from_raw_parts(value_ptr, num_bytes as usize)) + Some(slice::from_raw_parts( + value_ptr, + num_bytes + .try_into() + .expect("Diesel expects at least a 32 bit operating system"), + )) } } } pub(super) fn is_null(&self, row_idx: usize, col_idx: usize) -> bool { - unsafe { - 0 != PQgetisnull( - self.internal_result.as_ptr(), - row_idx as libc::c_int, - col_idx as libc::c_int, - ) - } + let row_idx = row_idx + .try_into() + .expect("Row indices are expected to fit into 32 bit"); + let col_idx = col_idx + .try_into() + .expect("Column indices are expected to fit into 32 bit"); + + unsafe { 0 != PQgetisnull(self.internal_result.as_ptr(), row_idx, col_idx) } } pub(in crate::pg) fn column_type(&self, col_idx: usize) -> NonZeroU32 { - let type_oid = unsafe { PQftype(self.internal_result.as_ptr(), col_idx as libc::c_int) }; + let col_idx: i32 = col_idx + .try_into() + .expect("Column indices are expected to fit into 32 bit"); + let type_oid = unsafe { PQftype(self.internal_result.as_ptr(), col_idx) }; NonZeroU32::new(type_oid).expect( "Got a zero oid from postgres. 
If you see this error message \
             please report it as issue on the diesel github bug tracker.",
@@ -180,7 +192,10 @@ impl PgResult {
     }
     pub(super) fn column_count(&self) -> usize {
-        self.column_count
+        self.column_count.try_into().expect(
+            "Diesel expects to run on a >= 32 bit OS \
+             (or libpq is giving out negative column count)",
+        )
     }
 }
diff --git a/diesel/src/pg/connection/stmt/mod.rs b/diesel/src/pg/connection/stmt/mod.rs
index 5e2a5c36ec89..624574a7b973 100644
--- a/diesel/src/pg/connection/stmt/mod.rs
+++ b/diesel/src/pg/connection/stmt/mod.rs
@@ -33,12 +33,17 @@ impl Statement {
             .collect::<Vec<_>>();
         let param_lengths = param_data
             .iter()
-            .map(|data| data.as_ref().map(|d| d.len() as libc::c_int).unwrap_or(0))
-            .collect::<Vec<_>>();
+            .map(|data| data.as_ref().map(|d| d.len().try_into()).unwrap_or(Ok(0)))
+            .collect::<Result<Vec<libc::c_int>, _>>()
+            .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?;
+        let param_count: libc::c_int = params_pointer
+            .len()
+            .try_into()
+            .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?;
         unsafe {
             raw_connection.send_query_prepared(
                 self.name.as_ptr(),
-                params_pointer.len() as libc::c_int,
+                param_count,
                 params_pointer.as_ptr(),
                 param_lengths.as_ptr(),
                 self.param_formats.as_ptr(),
@@ -66,10 +71,14 @@ impl Statement {
             .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?;
         let internal_result = unsafe {
+            let param_count: libc::c_int = param_types
+                .len()
+                .try_into()
+                .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?;
             raw_connection.prepare(
                 name.as_ptr(),
                 sql.as_ptr(),
-                param_types.len() as libc::c_int,
+                param_count,
                 param_types_to_ptr(Some(&param_types_vec)),
             )
         };
diff --git a/diesel/src/pg/expression/extensions/interval_dsl.rs b/diesel/src/pg/expression/extensions/interval_dsl.rs
index 6cf5df0d24d1..5a5acc6406ba 100644
--- a/diesel/src/pg/expression/extensions/interval_dsl.rs
+++ b/diesel/src/pg/expression/extensions/interval_dsl.rs
@@ -213,14 +213,19 @@ impl IntervalDsl for i64 {
     }
     fn days(self) -> PgInterval {
-        (self as i32).days()
+        i32::try_from(self)
+            .expect("Maximal supported day interval size is 32 bit")
+            .days()
     }
     fn months(self) -> PgInterval {
-        (self as i32).months()
+        i32::try_from(self)
+            .expect("Maximal supported month interval size is 32 bit")
+            .months()
     }
 }
+#[allow(clippy::cast_possible_truncation)] // we want to truncate
 impl IntervalDsl for f64 {
     fn microseconds(self) -> PgInterval {
         (self.round() as i64).microseconds()
diff --git a/diesel/src/pg/query_builder/copy/copy_from.rs b/diesel/src/pg/query_builder/copy/copy_from.rs
index a5dfe7c08e15..baa3e19ac03c 100644
--- a/diesel/src/pg/query_builder/copy/copy_from.rs
+++ b/diesel/src/pg/query_builder/copy/copy_from.rs
@@ -203,6 +203,10 @@ macro_rules! impl_copy_from_insertable_helper_for_values_clause {
         $($TT: ToSql<$T, Pg>,)*
     {
         type Target = ($($ST,)*);
+
+        // statically known to always fit
+        // as we don't support more than 128 columns
+        #[allow(clippy::cast_possible_truncation)]
         const COLUMN_COUNT: i16 = $Tuple as i16;
         fn write_to_buffer(&self, idx: i16, out: &mut Vec<u8>) -> QueryResult<IsNull> {
@@ -234,6 +238,10 @@ macro_rules!
impl_copy_from_insertable_helper_for_values_clause { $($TT: ToSql<$T, Pg>,)* { type Target = ($($ST,)*); + + // statically known to always fit + // as we don't support more than 128 columns + #[allow(clippy::cast_possible_truncation)] const COLUMN_COUNT: i16 = $Tuple as i16; fn write_to_buffer(&self, idx: i16, out: &mut Vec) -> QueryResult { @@ -309,7 +317,9 @@ where if is_null == IsNull::No { // fill in the length afterwards let len_after = buffer.len(); - let diff = (len_after - len_before) as i32; + let diff = (len_after - len_before) + .try_into() + .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; let bytes = i32::to_be_bytes(diff); for (b, t) in bytes.into_iter().zip(&mut buffer[len_before - 4..]) { *t = b; diff --git a/diesel/src/pg/query_builder/copy/copy_to.rs b/diesel/src/pg/query_builder/copy/copy_to.rs index 49778514f607..2ffd3f664dc6 100644 --- a/diesel/src/pg/query_builder/copy/copy_to.rs +++ b/diesel/src/pg/query_builder/copy/copy_to.rs @@ -273,10 +273,7 @@ where C::get_buffer(out) } - fn execute<'conn, T>( - &'conn mut self, - command: CopyToCommand, - ) -> QueryResult> + fn execute(&mut self, command: CopyToCommand) -> QueryResult> where T: CopyTarget, { @@ -335,12 +332,13 @@ where format!("Unexpected flag value: {flags_backward_incompatible:x}").into(), )); } - let header_size = i32::from_be_bytes( + let header_size = usize::try_from(i32::from_be_bytes( (&buffer[super::COPY_MAGIC_HEADER.len() + 4..super::COPY_MAGIC_HEADER.len() + 8]) .try_into() .expect("Exactly 4 byte"), - ); - out.consume(super::COPY_MAGIC_HEADER.len() + 8 + header_size as usize); + )) + .map_err(|e| crate::result::Error::DeserializationError(Box::new(e)))?; + out.consume(super::COPY_MAGIC_HEADER.len() + 8 + header_size); let mut len = None; Ok(std::iter::from_fn(move || { if let Some(len) = len { @@ -354,7 +352,13 @@ where let tuple_count = i16::from_be_bytes((&buffer[..2]).try_into().expect("Exactly 2 bytes")); if tuple_count > 0 { - let mut buffers = Vec::with_capacity(tuple_count as usize); + let tuple_count = match usize::try_from(tuple_count) { + Ok(o) => o, + Err(e) => { + return Some(Err(crate::result::Error::DeserializationError(Box::new(e)))) + } + }; + let mut buffers = Vec::with_capacity(tuple_count); let mut offset = 2; for _t in 0..tuple_count { let data_size = i32::from_be_bytes( @@ -362,11 +366,21 @@ where .try_into() .expect("Exactly 4 bytes"), ); + if data_size < 0 { buffers.push(None); } else { - buffers.push(Some(&buffer[offset + 4..offset + 4 + data_size as usize])); - offset = offset + 4 + data_size as usize; + match usize::try_from(data_size) { + Ok(data_size) => { + buffers.push(Some(&buffer[offset + 4..offset + 4 + data_size])); + offset = offset + 4 + data_size; + } + Err(e) => { + return Some(Err(crate::result::Error::DeserializationError( + Box::new(e), + ))); + } + } } } diff --git a/diesel/src/pg/types/array.rs b/diesel/src/pg/types/array.rs index c3141cb5ca7d..354ce1393a3d 100644 --- a/diesel/src/pg/types/array.rs +++ b/diesel/src/pg/types/array.rs @@ -49,7 +49,7 @@ where if has_null && elem_size == -1 { T::from_nullable_sql(None) } else { - let (elem_bytes, new_bytes) = bytes.split_at(elem_size as usize); + let (elem_bytes, new_bytes) = bytes.split_at(elem_size.try_into()?); bytes = new_bytes; T::from_sql(PgValue::new_internal(elem_bytes, &value)) } @@ -116,7 +116,7 @@ where }; if let IsNull::No = is_null { - out.write_i32::(buffer.len() as i32)?; + out.write_i32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } else { diff --git 
a/diesel/src/pg/types/date_and_time/chrono.rs b/diesel/src/pg/types/date_and_time/chrono.rs index bcbac6ec0543..756e0bf7814f 100644 --- a/diesel/src/pg/types/date_and_time/chrono.rs +++ b/diesel/src/pg/types/date_and_time/chrono.rs @@ -116,7 +116,7 @@ fn pg_epoch_date() -> NaiveDate { impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let days_since_epoch = self.signed_duration_since(pg_epoch_date()).num_days(); - ToSql::::to_sql(&PgDate(days_since_epoch as i32), &mut out.reborrow()) + ToSql::::to_sql(&PgDate(days_since_epoch.try_into()?), &mut out.reborrow()) } } diff --git a/diesel/src/pg/types/date_and_time/quickcheck_impls.rs b/diesel/src/pg/types/date_and_time/quickcheck_impls.rs index ae1d7438c8a5..259de285dab9 100644 --- a/diesel/src/pg/types/date_and_time/quickcheck_impls.rs +++ b/diesel/src/pg/types/date_and_time/quickcheck_impls.rs @@ -1,6 +1,10 @@ -extern crate quickcheck; - -use self::quickcheck::{Arbitrary, Gen}; +// it's test code +#![allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation +)] +use quickcheck::{Arbitrary, Gen}; use super::{PgDate, PgInterval, PgTime, PgTimestamp}; diff --git a/diesel/src/pg/types/date_and_time/std_time.rs b/diesel/src/pg/types/date_and_time/std_time.rs index b019c59b5be7..90910f27e597 100644 --- a/diesel/src/pg/types/date_and_time/std_time.rs +++ b/diesel/src/pg/types/date_and_time/std_time.rs @@ -18,9 +18,9 @@ impl ToSql for SystemTime { Err(time_err) => (true, time_err.duration()), }; let time_since_epoch = if before_epoch { - -(duration_to_usecs(duration) as i64) + -(i64::try_from(duration_to_usecs(duration))?) } else { - duration_to_usecs(duration) as i64 + duration_to_usecs(duration).try_into()? }; ToSql::::to_sql(&time_since_epoch, &mut out.reborrow()) } diff --git a/diesel/src/pg/types/date_and_time/time.rs b/diesel/src/pg/types/date_and_time/time.rs index ac0770281ce5..d1d1affab937 100644 --- a/diesel/src/pg/types/date_and_time/time.rs +++ b/diesel/src/pg/types/date_and_time/time.rs @@ -39,8 +39,7 @@ impl ToSql for PrimitiveDateTime { let error_message = format!("{self:?} as microseconds is too large to fit in an i64"); return Err(error_message.into()); } - let micros = micros as i64; - ToSql::::to_sql(&PgTimestamp(micros), &mut out.reborrow()) + ToSql::::to_sql(&PgTimestamp(micros.try_into()?), &mut out.reborrow()) } } @@ -79,9 +78,10 @@ impl ToSql for OffsetDateTime { impl ToSql for NaiveTime { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let duration = *self - NaiveTime::MIDNIGHT; - // microseconds in a day cannot overflow i64 - let micros = duration.whole_microseconds() as i64; - ToSql::::to_sql(&PgTime(micros), &mut out.reborrow()) + ToSql::::to_sql( + &PgTime(duration.whole_microseconds().try_into()?), + &mut out.reborrow(), + ) } } @@ -100,7 +100,7 @@ const PG_EPOCH_DATE: NaiveDate = date!(2000 - 1 - 1); impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let days_since_epoch = (*self - PG_EPOCH_DATE).whole_days(); - ToSql::::to_sql(&PgDate(days_since_epoch as i32), &mut out.reborrow()) + ToSql::::to_sql(&PgDate(days_since_epoch.try_into()?), &mut out.reborrow()) } } diff --git a/diesel/src/pg/types/floats/mod.rs b/diesel/src/pg/types/floats/mod.rs index b35ee0467966..da6256937eff 100644 --- a/diesel/src/pg/types/floats/mod.rs +++ b/diesel/src/pg/types/floats/mod.rs @@ -101,7 +101,7 @@ impl ToSql for PgNumeric { PgNumeric::Positive { scale, .. 
} | PgNumeric::Negative { scale, .. } => scale, PgNumeric::NaN => 0, }; - out.write_u16::(digits.len() as u16)?; + out.write_u16::(digits.len().try_into()?)?; out.write_i16::(weight)?; out.write_u16::(sign)?; out.write_u16::(scale)?; diff --git a/diesel/src/pg/types/floats/quickcheck_impls.rs b/diesel/src/pg/types/floats/quickcheck_impls.rs index b32d0ba87383..6211d5b9e9d5 100644 --- a/diesel/src/pg/types/floats/quickcheck_impls.rs +++ b/diesel/src/pg/types/floats/quickcheck_impls.rs @@ -1,6 +1,6 @@ -extern crate quickcheck; +#![allow(clippy::cast_sign_loss)] // test code -use self::quickcheck::{Arbitrary, Gen}; +use quickcheck::{Arbitrary, Gen}; use super::PgNumeric; diff --git a/diesel/src/pg/types/ipnet_address.rs b/diesel/src/pg/types/ipnet_address.rs index 10a5e6b8c84d..177074b9b59b 100644 --- a/diesel/src/pg/types/ipnet_address.rs +++ b/diesel/src/pg/types/ipnet_address.rs @@ -16,6 +16,8 @@ const AF_INET: u8 = 2; // Maybe not used, but defining to follow Rust's libstd/net/sys #[cfg(target_os = "redox")] const AF_INET: u8 = 1; + +#[allow(clippy::cast_possible_truncation)] // it's 2 #[cfg(not(any(windows, target_os = "redox")))] const AF_INET: u8 = libc::AF_INET as u8; diff --git a/diesel/src/pg/types/network_address.rs b/diesel/src/pg/types/network_address.rs index a25e64a70e94..fa215ef59bfd 100644 --- a/diesel/src/pg/types/network_address.rs +++ b/diesel/src/pg/types/network_address.rs @@ -17,6 +17,8 @@ const AF_INET: u8 = 2; // Maybe not used, but defining to follow Rust's libstd/net/sys #[cfg(target_os = "redox")] const AF_INET: u8 = 1; + +#[allow(clippy::cast_possible_truncation)] // it's 2 #[cfg(not(any(windows, target_os = "redox")))] const AF_INET: u8 = libc::AF_INET as u8; diff --git a/diesel/src/pg/types/numeric.rs b/diesel/src/pg/types/numeric.rs index 7530b5917b8a..18a7f9778def 100644 --- a/diesel/src/pg/types/numeric.rs +++ b/diesel/src/pg/types/numeric.rs @@ -58,10 +58,10 @@ mod bigdecimal { }; let mut result = BigUint::default(); - let count = digits.len() as i64; + let count = i64::try_from(digits.len())?; for digit in digits { result *= BigUint::from(10_000u64); - result += BigUint::from(*digit as u64); + result += BigUint::from(u64::try_from(*digit)?); } // First digit got factor 10_000^(digits.len() - 1), but should get 10_000^weight let correction_exp = 4 * (i64::from(weight) - count + 1); @@ -80,6 +80,8 @@ mod bigdecimal { } } + // that should likely be a `TryFrom` impl + // TODO: diesel 3.0 #[cfg(all(feature = "postgres_backend", feature = "numeric"))] impl<'a> From<&'a BigDecimal> for PgNumeric { // NOTE(clippy): No `std::ops::MulAssign` impl for `BigInt` @@ -98,7 +100,9 @@ mod bigdecimal { } 0 } else { - scale as u16 + scale + .try_into() + .expect("Scale is expected to be 16bit large") }; integer = integer.abs(); @@ -112,7 +116,11 @@ mod bigdecimal { let mut digits = ToBase10000(Some(integer)).collect::>(); digits.reverse(); let digits_after_decimal = scale / 4 + 1; - let weight = digits.len() as i16 - digits_after_decimal as i16 - 1; + let weight = i16::try_from(digits.len()) + .expect("Max digit number is expected to fit into 16 bit") + - i16::try_from(digits_after_decimal) + .expect("Max digit number is expected to fit into 16 bit") + - 1; let unnecessary_zeroes = digits.iter().rev().take_while(|i| i.is_zero()).count(); diff --git a/diesel/src/pg/types/ranges.rs b/diesel/src/pg/types/ranges.rs index 77fd2548d853..33602a17ccb9 100644 --- a/diesel/src/pg/types/ranges.rs +++ b/diesel/src/pg/types/ranges.rs @@ -140,7 +140,7 @@ where Output::new(ByteWrapper(&mut 
buffer), out.metadata_lookup()); value.to_sql(&mut inner_buffer)?; } - out.write_u32::(buffer.len() as u32)?; + out.write_u32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } @@ -154,7 +154,7 @@ where Output::new(ByteWrapper(&mut buffer), out.metadata_lookup()); value.to_sql(&mut inner_buffer)?; } - out.write_u32::(buffer.len() as u32)?; + out.write_u32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; } Bound::Unbounded => {} diff --git a/diesel/src/pg/types/record.rs b/diesel/src/pg/types/record.rs index 9118c686d698..d589f3caaa0c 100644 --- a/diesel/src/pg/types/record.rs +++ b/diesel/src/pg/types/record.rs @@ -52,7 +52,7 @@ macro_rules! tuple_impls { if num_bytes == -1 { $T::from_nullable_sql(None)? } else { - let (elem_bytes, new_bytes) = bytes.split_at(num_bytes as usize); + let (elem_bytes, new_bytes) = bytes.split_at(num_bytes.try_into()?); bytes = new_bytes; $T::from_sql(PgValue::new_internal( elem_bytes, @@ -117,7 +117,7 @@ macro_rules! tuple_impls { }; if let IsNull::No = is_null { - out.write_i32::(buffer.len() as i32)?; + out.write_i32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } else { diff --git a/diesel/src/sqlite/connection/bind_collector.rs b/diesel/src/sqlite/connection/bind_collector.rs index df4306632755..b95b755d4b4d 100644 --- a/diesel/src/sqlite/connection/bind_collector.rs +++ b/diesel/src/sqlite/connection/bind_collector.rs @@ -125,7 +125,10 @@ impl std::fmt::Display for InternalSqliteBindValue<'_> { impl InternalSqliteBindValue<'_> { #[allow(unsafe_code)] // ffi function calls - pub(in crate::sqlite) fn result_of(self, ctx: &mut libsqlite3_sys::sqlite3_context) { + pub(in crate::sqlite) fn result_of( + self, + ctx: &mut libsqlite3_sys::sqlite3_context, + ) -> Result<(), std::num::TryFromIntError> { use libsqlite3_sys as ffi; use std::os::raw as libc; // This unsafe block assumes the following invariants: @@ -136,25 +139,25 @@ impl InternalSqliteBindValue<'_> { InternalSqliteBindValue::BorrowedString(s) => ffi::sqlite3_result_text( ctx, s.as_ptr() as *const libc::c_char, - s.len() as libc::c_int, + s.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::String(s) => ffi::sqlite3_result_text( ctx, s.as_ptr() as *const libc::c_char, - s.len() as libc::c_int, + s.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::Binary(b) => ffi::sqlite3_result_blob( ctx, b.as_ptr() as *const libc::c_void, - b.len() as libc::c_int, + b.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::BorrowedBinary(b) => ffi::sqlite3_result_blob( ctx, b.as_ptr() as *const libc::c_void, - b.len() as libc::c_int, + b.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::I32(i) => ffi::sqlite3_result_int(ctx, i as libc::c_int), @@ -165,6 +168,7 @@ impl InternalSqliteBindValue<'_> { InternalSqliteBindValue::Null => ffi::sqlite3_result_null(ctx), } } + Ok(()) } } diff --git a/diesel/src/sqlite/connection/functions.rs b/diesel/src/sqlite/connection/functions.rs index db330ee23998..454a6bcd3c57 100644 --- a/diesel/src/sqlite/connection/functions.rs +++ b/diesel/src/sqlite/connection/functions.rs @@ -202,10 +202,10 @@ impl<'a> Row<'a, Sqlite> for FunctionRow<'a> { 'a: 'b, Self: crate::row::RowIndex, { - let idx = self.idx(idx)?; + let col_idx = self.idx(idx)?; Some(FunctionArgument { args: self.args.borrow(), - col_idx: idx as i32, + col_idx, }) } @@ -232,7 +232,7 @@ impl<'a, 'b> RowIndex<&'a str> for FunctionRow<'b> { struct FunctionArgument<'a> { args: Ref<'a, 
ManuallyDrop>>, - col_idx: i32, + col_idx: usize, } impl<'a> Field<'a, Sqlite> for FunctionArgument<'a> { diff --git a/diesel/src/sqlite/connection/mod.rs b/diesel/src/sqlite/connection/mod.rs index fa77c0571a81..12c59ccc960d 100644 --- a/diesel/src/sqlite/connection/mod.rs +++ b/diesel/src/sqlite/connection/mod.rs @@ -185,9 +185,11 @@ impl Connection for SqliteConnection { T: QueryFragment + QueryId, { let statement_use = self.prepared_query(source)?; - statement_use - .run() - .map(|_| self.raw_connection.rows_affected_by_last_query()) + statement_use.run().and_then(|_| { + self.raw_connection + .rows_affected_by_last_query() + .map_err(Error::DeserializationError) + }) } fn transaction_state(&mut self) -> &mut AnsiTransactionManager diff --git a/diesel/src/sqlite/connection/owned_row.rs b/diesel/src/sqlite/connection/owned_row.rs index 43e225d0e2ae..8de0ac9c28bc 100644 --- a/diesel/src/sqlite/connection/owned_row.rs +++ b/diesel/src/sqlite/connection/owned_row.rs @@ -41,7 +41,7 @@ impl<'a> Row<'a, Sqlite> for OwnedSqliteRow { let idx = self.idx(idx)?; Some(OwnedSqliteField { row: self, - col_idx: i32::try_from(idx).ok()?, + col_idx: idx, }) } @@ -71,14 +71,14 @@ impl<'idx> RowIndex<&'idx str> for OwnedSqliteRow { #[allow(missing_debug_implementations)] pub struct OwnedSqliteField<'row> { pub(super) row: &'row OwnedSqliteRow, - pub(super) col_idx: i32, + pub(super) col_idx: usize, } impl<'row> Field<'row, Sqlite> for OwnedSqliteField<'row> { fn field_name(&self) -> Option<&str> { self.row .column_names - .get(self.col_idx as usize) + .get(self.col_idx) .and_then(|o| o.as_ref().map(|s| s.as_ref())) } diff --git a/diesel/src/sqlite/connection/raw.rs b/diesel/src/sqlite/connection/raw.rs index bf5e910d3fab..57d6a5fbcf40 100644 --- a/diesel/src/sqlite/connection/raw.rs +++ b/diesel/src/sqlite/connection/raw.rs @@ -80,8 +80,12 @@ impl RawConnection { ensure_sqlite_ok(result, self.internal_connection.as_ptr()) } - pub(super) fn rows_affected_by_last_query(&self) -> usize { - unsafe { ffi::sqlite3_changes(self.internal_connection.as_ptr()) as usize } + pub(super) fn rows_affected_by_last_query( + &self, + ) -> Result> { + let r = unsafe { ffi::sqlite3_changes(self.internal_connection.as_ptr()) }; + + Ok(r.try_into()?) 
} pub(super) fn register_sql_function( @@ -105,12 +109,15 @@ impl RawConnection { })); let fn_name = Self::get_fn_name(fn_name)?; let flags = Self::get_flags(deterministic); + let num_args = num_args + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; let result = unsafe { ffi::sqlite3_create_function_v2( self.internal_connection.as_ptr(), fn_name.as_ptr(), - num_args as _, + num_args, flags, callback_fn as *mut _, Some(run_custom_function::), @@ -136,12 +143,15 @@ impl RawConnection { { let fn_name = Self::get_fn_name(fn_name)?; let flags = Self::get_flags(false); + let num_args = num_args + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; let result = unsafe { ffi::sqlite3_create_function_v2( self.internal_connection.as_ptr(), fn_name.as_ptr(), - num_args as _, + num_args, flags, ptr::null_mut(), None, @@ -195,11 +205,19 @@ impl RawConnection { &mut size as *mut _, 0, ); - SerializedDatabase::new(data_ptr, size as usize) + SerializedDatabase::new( + data_ptr, + size.try_into() + .expect("Cannot fit the serialized database into memory"), + ) } } pub(super) fn deserialize(&mut self, data: &[u8]) -> QueryResult<()> { + let db_size = data + .len() + .try_into() + .map_err(|e| Error::DeserializationError(Box::new(e)))?; // the cast for `ffi::SQLITE_DESERIALIZE_READONLY` is required for old libsqlite3-sys versions #[allow(clippy::unnecessary_cast)] unsafe { @@ -207,8 +225,8 @@ impl RawConnection { self.internal_connection.as_ptr(), std::ptr::null(), data.as_ptr() as *mut u8, - data.len() as i64, - data.len() as i64, + db_size, + db_size, ffi::SQLITE_DESERIALIZE_READONLY as u32, ); @@ -402,6 +420,9 @@ where static NULL_CTX_ERR: &str = "We've written the aggregator to the aggregate context, but it could not be retrieved."; + let n_bytes: i32 = std::mem::size_of::>() + .try_into() + .expect("Aggregate context should be larger than 2^32"); let aggregate_context = unsafe { // This block of unsafe code makes the following assumptions: // @@ -424,7 +445,7 @@ where // the memory will have a correct alignment. 
// (Note I(weiznich): would assume that it is aligned correctly, but we // we cannot guarantee it, so better be safe than sorry) - ffi::sqlite3_aggregate_context(ctx, std::mem::size_of::>() as i32) + ffi::sqlite3_aggregate_context(ctx, n_bytes) }; let aggregate_context = NonNull::new(aggregate_context as *mut OptionalAggregator); let aggregator = unsafe { @@ -486,9 +507,10 @@ extern "C" fn run_aggregator_final_function { diff --git a/diesel/src/sqlite/connection/row.rs b/diesel/src/sqlite/connection/row.rs index 75cc27369e4d..5ff7787f4fee 100644 --- a/diesel/src/sqlite/connection/row.rs +++ b/diesel/src/sqlite/connection/row.rs @@ -145,7 +145,7 @@ impl<'stmt, 'query> Row<'stmt, Sqlite> for SqliteRow<'stmt, 'query> { let idx = self.idx(idx)?; Some(SqliteField { row: self.inner.borrow(), - col_idx: i32::try_from(idx).ok()?, + col_idx: idx, }) } @@ -178,15 +178,19 @@ impl<'stmt, 'idx, 'query> RowIndex<&'idx str> for SqliteRow<'stmt, 'query> { #[allow(missing_debug_implementations)] pub struct SqliteField<'stmt, 'query> { pub(super) row: Ref<'stmt, PrivateSqliteRow<'stmt, 'query>>, - pub(super) col_idx: i32, + pub(super) col_idx: usize, } impl<'stmt, 'query> Field<'stmt, Sqlite> for SqliteField<'stmt, 'query> { fn field_name(&self) -> Option<&str> { match &*self.row { - PrivateSqliteRow::Direct(stmt) => stmt.field_name(self.col_idx), + PrivateSqliteRow::Direct(stmt) => stmt.field_name( + self.col_idx + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ), PrivateSqliteRow::Duplicated { column_names, .. } => column_names - .get(self.col_idx as usize) + .get(self.col_idx) .and_then(|t| t.as_ref().map(|n| n as &str)), } } @@ -339,6 +343,7 @@ mod tests { #[test] #[cfg(feature = "returning_clauses_for_sqlite_3_35")] + #[allow(clippy::cast_sign_loss)] fn parallel_iter_with_error() { use crate::connection::Connection; use crate::connection::LoadConnection; diff --git a/diesel/src/sqlite/connection/sqlite_value.rs b/diesel/src/sqlite/connection/sqlite_value.rs index 7210d104bb4c..f0d1a4afbda7 100644 --- a/diesel/src/sqlite/connection/sqlite_value.rs +++ b/diesel/src/sqlite/connection/sqlite_value.rs @@ -49,12 +49,16 @@ unsafe impl Send for OwnedSqliteValue {} impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { pub(super) fn new( row: Ref<'row, PrivateSqliteRow<'stmt, 'query>>, - col_idx: i32, + col_idx: usize, ) -> Option> { let value = match &*row { - PrivateSqliteRow::Direct(stmt) => stmt.column_value(col_idx)?, + PrivateSqliteRow::Direct(stmt) => stmt.column_value( + col_idx + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + )?, PrivateSqliteRow::Duplicated { values, .. } => { - values.get(col_idx as usize).and_then(|v| v.as_ref())?.value + values.get(col_idx).and_then(|v| v.as_ref())?.value } }; @@ -71,13 +75,9 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { pub(super) fn from_owned_row( row: &'row OwnedSqliteRow, - col_idx: i32, + col_idx: usize, ) -> Option> { - let value = row - .values - .get(col_idx as usize) - .and_then(|v| v.as_ref())? 
- .value; + let value = row.values.get(col_idx).and_then(|v| v.as_ref())?.value; let ret = Self { _row: None, value }; if ret.value_type().is_none() { None @@ -90,7 +90,11 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { let s = unsafe { let ptr = ffi::sqlite3_value_text(self.value.as_ptr()); let len = ffi::sqlite3_value_bytes(self.value.as_ptr()); - let bytes = slice::from_raw_parts(ptr, len as usize); + let bytes = slice::from_raw_parts( + ptr, + len.try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ); // The string is guaranteed to be utf8 according to // https://www.sqlite.org/c3ref/value_blob.html str::from_utf8_unchecked(bytes) @@ -111,7 +115,11 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { // slices without elements from a pointer &[] } else { - slice::from_raw_parts(ptr as *const u8, len as usize) + slice::from_raw_parts( + ptr as *const u8, + len.try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ) } } } diff --git a/diesel/src/sqlite/connection/statement_iterator.rs b/diesel/src/sqlite/connection/statement_iterator.rs index 393ec9e471c8..39ae81237ffc 100644 --- a/diesel/src/sqlite/connection/statement_iterator.rs +++ b/diesel/src/sqlite/connection/statement_iterator.rs @@ -104,7 +104,10 @@ impl<'stmt, 'query> Iterator for StatementIterator<'stmt, 'query> { Err(e) => Some(Err(e)), Ok(false) => None, Ok(true) => { - let field_count = stmt.column_count() as usize; + let field_count = stmt + .column_count() + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"); self.field_count = field_count; let inner = Rc::new(RefCell::new(PrivateSqliteRow::Direct(stmt))); self.inner = Started(inner.clone()); diff --git a/diesel/src/sqlite/connection/stmt.rs b/diesel/src/sqlite/connection/stmt.rs index 92b12465d772..99bb271918a3 100644 --- a/diesel/src/sqlite/connection/stmt.rs +++ b/diesel/src/sqlite/connection/stmt.rs @@ -27,13 +27,17 @@ impl Statement { ) -> QueryResult { let mut stmt = ptr::null_mut(); let mut unused_portion = ptr::null(); + let n_byte = sql + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // the cast for `ffi::SQLITE_PREPARE_PERSISTENT` is required for old libsqlite3-sys versions #[allow(clippy::unnecessary_cast)] let prepare_result = unsafe { ffi::sqlite3_prepare_v3( raw_connection.internal_connection.as_ptr(), CString::new(sql)?.as_ptr(), - sql.len() as libc::c_int, + n_byte, if matches!(is_cached, PrepareForCache::Yes) { ffi::SQLITE_PREPARE_PERSISTENT as u32 } else { @@ -68,16 +72,23 @@ impl Statement { ffi::sqlite3_bind_null(self.inner_statement.as_ptr(), bind_index) } (SqliteType::Binary, InternalSqliteBindValue::BorrowedBinary(bytes)) => { + let n = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; ffi::sqlite3_bind_blob( self.inner_statement.as_ptr(), bind_index, bytes.as_ptr() as *const libc::c_void, - bytes.len() as libc::c_int, + n, ffi::SQLITE_STATIC(), ) } (SqliteType::Binary, InternalSqliteBindValue::Binary(mut bytes)) => { - let len = bytes.len(); + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // We need a separate pointer here to pass it to sqlite // as the returned pointer is a pointer to a dyn sized **slice** // and not the pointer to the first element of the slice @@ -87,22 +98,29 @@ impl Statement { self.inner_statement.as_ptr(), bind_index, ptr as *const libc::c_void, - len as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } 
(SqliteType::Text, InternalSqliteBindValue::BorrowedString(bytes)) => { + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; ffi::sqlite3_bind_text( self.inner_statement.as_ptr(), bind_index, bytes.as_ptr() as *const libc::c_char, - bytes.len() as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } (SqliteType::Text, InternalSqliteBindValue::String(bytes)) => { let mut bytes = Box::<[u8]>::from(bytes); - let len = bytes.len(); + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // We need a separate pointer here to pass it to sqlite // as the returned pointer is a pointer to a dyn sized **slice** // and not the pointer to the first element of the slice @@ -112,7 +130,7 @@ impl Statement { self.inner_statement.as_ptr(), bind_index, ptr as *const libc::c_char, - len as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } @@ -481,7 +499,10 @@ impl<'stmt, 'query> StatementUse<'stmt, 'query> { pub(super) fn index_for_column_name(&mut self, field_name: &str) -> Option { (0..self.column_count()) .find(|idx| self.field_name(*idx) == Some(field_name)) - .map(|v| v as usize) + .map(|v| { + v.try_into() + .expect("Diesel expects to run at least on a 32 bit platform") + }) } pub(super) fn field_name(&self, idx: i32) -> Option<&str> { @@ -497,7 +518,7 @@ impl<'stmt, 'query> StatementUse<'stmt, 'query> { }); column_names - .get(idx as usize) + .get(usize::try_from(idx).expect("Diesel expects to run at least on a 32 bit platform")) .and_then(|c| unsafe { c.as_ref() }) } diff --git a/diesel/src/sqlite/types/date_and_time/chrono.rs b/diesel/src/sqlite/types/date_and_time/chrono.rs index a8df1a0099e8..5e600e76ecbf 100644 --- a/diesel/src/sqlite/types/date_and_time/chrono.rs +++ b/diesel/src/sqlite/types/date_and_time/chrono.rs @@ -73,7 +73,10 @@ fn parse_julian(julian_days: f64) -> Option { const EPOCH_IN_JULIAN_DAYS: f64 = 2_440_587.5; const SECONDS_IN_DAY: f64 = 86400.0; let timestamp = (julian_days - EPOCH_IN_JULIAN_DAYS) * SECONDS_IN_DAY; - let seconds = timestamp as i64; + #[allow(clippy::cast_possible_truncation)] // we want to truncate + let seconds = timestamp.trunc() as i64; + // that's not true, `fract` is always > 0 + #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)] let nanos = (timestamp.fract() * 1E9) as u32; #[allow(deprecated)] // otherwise we would need to bump our minimal chrono version NaiveDateTime::from_timestamp_opt(seconds, nanos) diff --git a/diesel/src/sqlite/types/date_and_time/mod.rs b/diesel/src/sqlite/types/date_and_time/mod.rs index 09330d0d7c61..0725a3705a07 100644 --- a/diesel/src/sqlite/types/date_and_time/mod.rs +++ b/diesel/src/sqlite/types/date_and_time/mod.rs @@ -94,6 +94,7 @@ impl ToSql for String { } #[cfg(all(test, feature = "chrono", feature = "time"))] +#[allow(clippy::cast_possible_truncation)] // it's a test mod tests { extern crate chrono; extern crate time; diff --git a/diesel/src/sqlite/types/date_and_time/time.rs b/diesel/src/sqlite/types/date_and_time/time.rs index ea97bcd0c1b1..5d88ad93688c 100644 --- a/diesel/src/sqlite/types/date_and_time/time.rs +++ b/diesel/src/sqlite/types/date_and_time/time.rs @@ -131,6 +131,7 @@ fn parse_julian(julian_days: f64) -> Result { const EPOCH_IN_JULIAN_DAYS: f64 = 2_440_587.5; const SECONDS_IN_DAY: f64 = 86400.0; let timestamp = (julian_days - EPOCH_IN_JULIAN_DAYS) * SECONDS_IN_DAY; + #[allow(clippy::cast_possible_truncation)] // we multiply by 1E9 to prevent that OffsetDateTime::from_unix_timestamp_nanos((timestamp * 1E9) as 
i128).map(naive_utc)
 }
diff --git a/diesel/src/sqlite/types/mod.rs b/diesel/src/sqlite/types/mod.rs
index 90db3d3ab2d9..88ef5f8cde26 100644
--- a/diesel/src/sqlite/types/mod.rs
+++ b/diesel/src/sqlite/types/mod.rs
@@ -54,6 +54,7 @@ impl Queryable<sql_types::Binary, Sqlite> for *const [u8] {
 }
 #[cfg(feature = "sqlite")]
+#[allow(clippy::cast_possible_truncation)] // we want to truncate here
 impl FromSql<sql_types::SmallInt, Sqlite> for i16 {
     fn from_sql(value: SqliteValue<'_, '_, '_>) -> deserialize::Result<Self> {
         Ok(value.read_integer() as i16)
     }
 }
@@ -82,6 +83,7 @@ impl FromSql<sql_types::BigInt, Sqlite> for i64 {
 }
 #[cfg(feature = "sqlite")]
+#[allow(clippy::cast_possible_truncation)] // we want to truncate here
 impl FromSql<sql_types::Float, Sqlite> for f32 {
     fn from_sql(value: SqliteValue<'_, '_, '_>) -> deserialize::Result<Self> {
         Ok(value.read_double() as f32)
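
Reviewer note: every hunk in this patch applies the same idea — a lossy `as` cast at an FFI or protocol boundary is replaced either by a checked conversion whose failure is propagated as an error, or by an `expect`/`#[allow]` that documents why the conversion cannot fail. A minimal, self-contained sketch of that pattern follows; it assumes Rust edition 2021 (so `TryFrom` is in the prelude), and the function names and the boxed error type are illustrative only, not part of diesel's API.

/// Lossy conversion, as before this patch: wraps or truncates silently on
/// targets where `usize` is smaller than 64 bit.
fn affected_rows_lossy(raw: u64) -> usize {
    raw as usize
}

/// Checked conversion, as after this patch: the caller gets an error
/// (boxed here, similar in spirit to `Error::DeserializationError(Box::new(e))`)
/// instead of a silently wrong value.
fn affected_rows_checked(raw: u64) -> Result<usize, Box<dyn std::error::Error + Send + Sync>> {
    Ok(usize::try_from(raw)?)
}

fn main() {
    assert_eq!(affected_rows_checked(42).unwrap(), 42);
    assert_eq!(affected_rows_lossy(42), 42);
    // On a 32 bit target u64::MAX does not fit into usize: the checked
    // variant reports that, while the lossy variant would return a wrapped value.
    if cfg!(target_pointer_width = "32") {
        assert!(affected_rows_checked(u64::MAX).is_err());
    }
}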