diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2d120e6158d0..810a41d19358 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,3 +1,5 @@ +# This file was autogenerated by cargo-dist: https://opensource.axo.dev/cargo-dist/ +# # Copyright 2022-2024, axodotdev # SPDX-License-Identifier: MIT or Apache-2.0 # @@ -12,9 +14,8 @@ # with the appropriate title/body, and will be undrafted for you. name: Release - permissions: - contents: write + "contents": "write" # This task will run whenever you push a git tag that looks like a version # like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc. @@ -38,15 +39,15 @@ permissions: # If there's a prerelease-style suffix to the version, then the release(s) # will be marked as a prerelease. on: + pull_request: push: tags: - '**[0-9]+.[0-9]+.[0-9]+*' - pull_request: jobs: # Run 'cargo dist plan' (or host) to determine what tasks we need to do plan: - runs-on: ubuntu-latest + runs-on: "ubuntu-20.04" outputs: val: ${{ steps.plan.outputs.manifest }} tag: ${{ !github.event.pull_request && github.ref_name || '' }} @@ -62,7 +63,12 @@ jobs: # we specify bash to get pipefail; it guards against the `curl` command # failing. otherwise `sh` won't catch that `curl` returned non-0 shell: bash - run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.14.1/cargo-dist-installer.sh | sh" + run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.21.1/cargo-dist-installer.sh | sh" + - name: Cache cargo-dist + uses: actions/upload-artifact@v4 + with: + name: cargo-dist-cache + path: ~/.cargo/bin/cargo-dist # sure would be cool if github gave us proper conditionals... # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible # functionality based on whether this is a pull_request, and whether it's from a fork. 
@@ -111,9 +117,6 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - uses: swatinem/rust-cache@v2 - with: - key: ${{ join(matrix.targets, '-') }} - name: Install cargo-dist run: ${{ matrix.install_dist }} # Get the dist-manifest @@ -165,9 +168,12 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Install cargo-dist - shell: bash - run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.14.1/cargo-dist-installer.sh | sh" + - name: Install cached cargo-dist + uses: actions/download-artifact@v4 + with: + name: cargo-dist-cache + path: ~/.cargo/bin/ + - run: chmod +x ~/.cargo/bin/cargo-dist # Get all the local artifacts for the global tasks to use (for e.g. checksums) - name: Fetch local artifacts uses: actions/download-artifact@v4 @@ -211,8 +217,12 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Install cargo-dist - run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.14.1/cargo-dist-installer.sh | sh" + - name: Install cached cargo-dist + uses: actions/download-artifact@v4 + with: + name: cargo-dist-cache + path: ~/.cargo/bin/ + - run: chmod +x ~/.cargo/bin/cargo-dist # Fetch artifacts from scratch-storage - name: Fetch artifacts uses: actions/download-artifact@v4 @@ -220,7 +230,6 @@ jobs: pattern: artifacts-* path: target/distrib/ merge-multiple: true - # This is a harmless no-op for GitHub Releases, hosting for that happens in "announce" - id: host shell: bash run: | @@ -234,8 +243,27 @@ jobs: # Overwrite the previous copy name: artifacts-dist-manifest path: dist-manifest.json + # Create a GitHub Release while uploading all files to it + - name: "Download GitHub Artifacts" + uses: actions/download-artifact@v4 + with: + pattern: artifacts-* + path: artifacts + merge-multiple: true + - name: Cleanup + run: | + # Remove the granular manifests + rm -f artifacts/*-dist-manifest.json + - name: Create 
GitHub Release + env: + PRERELEASE_FLAG: "${{ fromJson(steps.host.outputs.manifest).announcement_is_prerelease && '--prerelease' || '' }}" + RELEASE_COMMIT: "${{ github.sha }}" + run: | + # If we're editing a release in place, we need to upload things ahead of time + gh release upload "${{ needs.plan.outputs.tag }}" artifacts/* + + gh release edit "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --draft=false - # Create a GitHub Release while uploading all files to it announce: needs: - plan @@ -251,23 +279,3 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: "Download GitHub Artifacts" - uses: actions/download-artifact@v4 - with: - pattern: artifacts-* - path: artifacts - merge-multiple: true - - name: Cleanup - run: | - # Remove the granular manifests - rm -f artifacts/*-dist-manifest.json - - name: Create GitHub Release - uses: ncipollo/release-action@v1 - with: - tag: ${{ needs.plan.outputs.tag }} - allowUpdates: true - updateOnlyUnreleased: true - omitBodyDuringUpdate: true - omitNameDuringUpdate: true - prerelease: ${{ fromJson(needs.host.outputs.val).announcement_is_prerelease }} - artifacts: "artifacts/*" diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b235c1bf1c..364378555acc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,15 @@ Increasing the minimal supported Rust version will always be coupled at least wi ## Unreleased +## [2.2.3] 2024-08-23 + +### Fixed + +* Support for libsqlite3-sys 0.30.0 +* Fixed a possible vulnerability in how Diesel handled protocol level bind parameters. 
+ See the [SQL Injection isn't Dead: Smuggling Queries at Protocol Level](http://web.archive.org/web/20240812130923/https://media.defcon.org/DEF%20CON%2032/DEF%20CON%2032%20presentations/DEF%20CON%2032%20-%20Paul%20Gerste%20-%20SQL%20Injection%20Isn't%20Dead%20Smuggling%20Queries%20at%20the%20Protocol%20Level.pdf) presentation from DEF CON for details +* Fixed an issue with a possibly ambiguous trait resolution in `#[derive(QueryableByName)]` + ## [2.2.2] 2024-07-19 ### Fixed @@ -2127,3 +2136,4 @@ queries or set `PIPES_AS_CONCAT` manually. [2.2.0]: https://github.com/diesel-rs/diesel/compare/v.2.1.0...v2.2.0 [2.2.1]: https://github.com/diesel-rs/diesel/compare/v.2.2.0...v2.2.1 [2.2.2]: https://github.com/diesel-rs/diesel/compare/v.2.2.1...v2.2.2 +[2.2.3]: https://github.com/diesel-rs/diesel/compare/v.2.2.2...v2.2.3 diff --git a/Cargo.toml b/Cargo.toml index 6cfb74ff62c4..45921f03545f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,24 +35,26 @@ rust-version = "1.78.0" include = ["src/**/*.rs", "tests/**/*.rs", "LICENSE-*", "README.md"] [workspace.dependencies] -libsqlite3-sys = "0.29" +libsqlite3-sys = "0.30.1" # Config for 'cargo dist' [workspace.metadata.dist] # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax) -cargo-dist-version = "0.14.1" +cargo-dist-version = "0.21.1" # CI backends to support ci = "github" # The installers to generate for each app installers = ["shell", "powershell"] # Target platforms to build apps for (Rust target-triple syntax) targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"] -# Publish jobs to run in CI +# Which actions to run on pull requests pr-run-mode = "plan" # Whether cargo-dist should create a Github Release or use an existing draft create-release = false # Whether to install an updater program install-updater = false +# Path that installers should place binaries in +install-path = "CARGO_HOME" [workspace.metadata.dist.github-custom-runners]
aarch64-apple-darwin = "macos-14" diff --git a/diesel/Cargo.toml b/diesel/Cargo.toml index d3353a4e5d48..a62c32293b28 100644 --- a/diesel/Cargo.toml +++ b/diesel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "diesel" -version = "2.2.2" +version = "2.2.3" license = "MIT OR Apache-2.0" description = "A safe, extensible ORM and Query Builder for PostgreSQL, SQLite, and MySQL" readme = "README.md" @@ -24,7 +24,7 @@ include = [ byteorder = { version = "1.0", optional = true } chrono = { version = "0.4.20", optional = true, default-features = false, features = ["clock", "std"] } libc = { version = "0.2.0", optional = true } -libsqlite3-sys = { version = ">=0.17.2, <0.30.0", optional = true, features = ["bundled_bindings"] } +libsqlite3-sys = { version = ">=0.17.2, <0.31.0", optional = true, features = ["bundled_bindings"] } mysqlclient-sys = { version = ">=0.2.5, <0.5.0", optional = true } mysqlclient-src = { version = "0.1.0", optional = true } pq-sys = { version = ">=0.4.0, <0.7.0", optional = true } diff --git a/diesel/src/expression/array_comparison.rs b/diesel/src/expression/array_comparison.rs index 3c5002fc7386..bade87beea01 100644 --- a/diesel/src/expression/array_comparison.rs +++ b/diesel/src/expression/array_comparison.rs @@ -157,12 +157,12 @@ impl_selectable_expression!(NotIn); /// Diesel provided several implementations here: /// /// - An implementation for any [`Iterator`] over values -/// that implement [`AsExpression`] for the corresponding -/// sql type ST. The corresponding values clause will contain -/// bind statements for each individual value. +/// that implement [`AsExpression`] for the corresponding +/// sql type ST. The corresponding values clause will contain +/// bind statements for each individual value. /// - An implementation for select statements, that returns -/// a single field. The corresponding values clause will contain -/// the sub query. +/// a single field. The corresponding values clause will contain +/// the sub query. 
/// /// This trait is exposed for custom third party backends so /// that they can restrict the [`QueryFragment`] implementations diff --git a/diesel/src/lib.rs b/diesel/src/lib.rs index 6e0dfd2edd73..b6f9e5f6f171 100644 --- a/diesel/src/lib.rs +++ b/diesel/src/lib.rs @@ -151,64 +151,64 @@ //! //! - `sqlite`: This feature enables the diesel sqlite backend. Enabling this feature requires per default -//! a compatible copy of `libsqlite3` for your target architecture. Alternatively, you can add `libsqlite3-sys` -//! with the `bundled` feature as a dependency to your crate so SQLite will be bundled: -//! ```toml -//! [dependencies] -//! libsqlite3-sys = { version = "0.29", features = ["bundled"] } -//! ``` +//! a compatible copy of `libsqlite3` for your target architecture. Alternatively, you can add `libsqlite3-sys` +//! with the `bundled` feature as a dependency to your crate so SQLite will be bundled: +//! ```toml +//! [dependencies] +//! libsqlite3-sys = { version = "0.29", features = ["bundled"] } +//! ``` //! - `postgres`: This feature enables the diesel postgres backend. Enabling this feature requires a compatible -//! copy of `libpq` for your target architecture. This features implies `postgres_backend` +//! copy of `libpq` for your target architecture. This features implies `postgres_backend` //! - `mysql`: This feature enables the idesel mysql backend. Enabling this feature requires a compatible copy -//! of `libmysqlclient` for your target architecture. This feature implies `mysql_backend` +//! of `libmysqlclient` for your target architecture. This feature implies `mysql_backend` //! - `postgres_backend`: This feature enables those parts of diesels postgres backend, that are not dependent -//! on `libpq`. Diesel does not provide any connection implementation with only this feature enabled. -//! This feature can be used to implement a custom implementation of diesels `Connection` trait for the -//! 
postgres backend outside of diesel itself, while reusing the existing query dsl extensions for the -//! postgres backend +//! on `libpq`. Diesel does not provide any connection implementation with only this feature enabled. +//! This feature can be used to implement a custom implementation of diesels `Connection` trait for the +//! postgres backend outside of diesel itself, while reusing the existing query dsl extensions for the +//! postgres backend //! - `mysql_backend`: This feature enables those parts of diesels mysql backend, that are not dependent -//! on `libmysqlclient`. Diesel does not provide any connection implementation with only this feature enabled. -//! This feature can be used to implement a custom implementation of diesels `Connection` trait for the -//! mysql backend outside of diesel itself, while reusing the existing query dsl extensions for the -//! mysql backend +//! on `libmysqlclient`. Diesel does not provide any connection implementation with only this feature enabled. +//! This feature can be used to implement a custom implementation of diesels `Connection` trait for the +//! mysql backend outside of diesel itself, while reusing the existing query dsl extensions for the +//! mysql backend //! - `returning_clauses_for_sqlite_3_35`: This feature enables support for `RETURNING` clauses in the sqlite backend. -//! Enabling this feature requires sqlite 3.35.0 or newer. +//! Enabling this feature requires sqlite 3.35.0 or newer. //! - `32-column-tables`: This feature enables support for tables with up to 32 columns. -//! This feature is enabled by default. Consider disabling this feature if you write a library crate -//! providing general extensions for diesel or if you do not need to support tables with more than 16 columns -//! and you want to minimize your compile times. +//! This feature is enabled by default. Consider disabling this feature if you write a library crate +//! 
providing general extensions for diesel or if you do not need to support tables with more than 16 columns +//! and you want to minimize your compile times. //! - `64-column-tables`: This feature enables support for tables with up to 64 columns. It implies the -//! `32-column-tables` feature. Enabling this feature will increase your compile times. +//! `32-column-tables` feature. Enabling this feature will increase your compile times. //! - `128-column-tables`: This feature enables support for tables with up to 128 columns. It implies the -//! `64-column-tables` feature. Enabling this feature will increase your compile times significantly. +//! `64-column-tables` feature. Enabling this feature will increase your compile times significantly. //! - `i-implement-a-third-party-backend-and-opt-into-breaking-changes`: This feature opens up some otherwise -//! private API, that can be useful to implement a third party [`Backend`](crate::backend::Backend) -//! or write a custom [`Connection`] implementation. **Do not use this feature for -//! any other usecase**. By enabling this feature you explicitly opt out diesel stability guarantees. We explicitly -//! reserve us the right to break API's exported under this feature flag in any upcoming minor version release. -//! If you publish a crate depending on this feature flag consider to restrict the supported diesel version to the -//! currently released minor version. +//! private API, that can be useful to implement a third party [`Backend`](crate::backend::Backend) +//! or write a custom [`Connection`] implementation. **Do not use this feature for +//! any other usecase**. By enabling this feature you explicitly opt out diesel stability guarantees. We explicitly +//! reserve us the right to break API's exported under this feature flag in any upcoming minor version release. +//! If you publish a crate depending on this feature flag consider to restrict the supported diesel version to the +//! currently released minor version. 
//! - `serde_json`: This feature flag enables support for (de)serializing json values from the database using -//! types provided by `serde_json`. +//! types provided by `serde_json`. //! - `chrono`: This feature flags enables support for (de)serializing date/time values from the database using -//! types provided by `chrono` +//! types provided by `chrono` //! - `uuid`: This feature flag enables support for (de)serializing uuid values from the database using types -//! provided by `uuid` +//! provided by `uuid` //! - `network-address`: This feature flag enables support for (de)serializing -//! IP values from the database using types provided by `ipnetwork`. +//! IP values from the database using types provided by `ipnetwork`. //! - `ipnet-address`: This feature flag enables support for (de)serializing IP -//! values from the database using types provided by `ipnet`. +//! values from the database using types provided by `ipnet`. //! - `numeric`: This feature flag enables support for (de)serializing numeric values from the database using types -//! provided by `bigdecimal` +//! provided by `bigdecimal` //! - `r2d2`: This feature flag enables support for the `r2d2` connection pool implementation. //! - `extras`: This feature enables the feature flagged support for any third party crate. This implies the -//! following feature flags: `serde_json`, `chrono`, `uuid`, `network-address`, `numeric`, `r2d2` +//! following feature flags: `serde_json`, `chrono`, `uuid`, `network-address`, `numeric`, `r2d2` //! - `with-deprecated`: This feature enables items marked as `#[deprecated]`. It is enabled by default. -//! disabling this feature explicitly opts out diesels stability guarantee. +//! disabling this feature explicitly opts out diesels stability guarantee. //! - `without-deprecated`: This feature disables any item marked as `#[deprecated]`. Enabling this feature -//! explicitly opts out the stability guarantee given by diesel. This feature overrides the `with-deprecated`. 
-//! Note that this may also remove items that are not shown as `#[deprecated]` in our documentation, due to -//! various bugs in rustdoc. It can be used to check if you depend on any such hidden `#[deprecated]` item. +//! explicitly opts out the stability guarantee given by diesel. This feature overrides the `with-deprecated`. +//! Note that this may also remove items that are not shown as `#[deprecated]` in our documentation, due to +//! various bugs in rustdoc. It can be used to check if you depend on any such hidden `#[deprecated]` item. //! //! By default the following features are enabled: //! @@ -244,7 +244,10 @@ clippy::enum_glob_use, clippy::if_not_else, clippy::items_after_statements, - clippy::used_underscore_binding + clippy::used_underscore_binding, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation, + clippy::cast_sign_loss )] #![deny(unsafe_code)] #![cfg_attr(test, allow(clippy::map_unwrap_or, clippy::unwrap_used))] diff --git a/diesel/src/mysql/connection/bind.rs b/diesel/src/mysql/connection/bind.rs index 3aed940799bd..0bbceede6045 100644 --- a/diesel/src/mysql/connection/bind.rs +++ b/diesel/src/mysql/connection/bind.rs @@ -179,7 +179,10 @@ impl Clone for BindData { // written. At the time of writing this comment, the `BindData::bind_for_truncated_data` // function is only called by `Binds::populate_dynamic_buffers` which ensures the corresponding // invariant. - std::slice::from_raw_parts(ptr.as_ptr(), self.length as usize) + std::slice::from_raw_parts( + ptr.as_ptr(), + self.length.try_into().expect("usize is at least 32bit"), + ) }; let mut vec = slice.to_owned(); let ptr = NonNull::new(vec.as_mut_ptr()); @@ -416,7 +419,10 @@ impl BindData { // written. At the time of writing this comment, the `BindData::bind_for_truncated_data` // function is only called by `Binds::populate_dynamic_buffers` which ensures the corresponding // invariant. 
- std::slice::from_raw_parts(data.as_ptr(), self.length as usize) + std::slice::from_raw_parts( + data.as_ptr(), + self.length.try_into().expect("Usize is at least 32 bit"), + ) }; Some(MysqlValue::new_internal(slice, tpe)) } @@ -429,7 +435,10 @@ impl BindData { fn update_buffer_length(&mut self) { use std::cmp::min; - let actual_bytes_in_buffer = min(self.capacity, self.length as usize); + let actual_bytes_in_buffer = min( + self.capacity, + self.length.try_into().expect("Usize is at least 32 bit"), + ); self.length = actual_bytes_in_buffer as libc::c_ulong; } @@ -475,7 +484,8 @@ impl BindData { self.bytes = None; let offset = self.capacity; - let truncated_amount = self.length as usize - offset; + let truncated_amount = + usize::try_from(self.length).expect("Usize is at least 32 bit") - offset; debug_assert!( truncated_amount > 0, @@ -505,7 +515,7 @@ impl BindData { // offset is zero here as we don't have a buffer yet // we know the requested length here so we can just request // the correct size - let mut vec = vec![0_u8; self.length as usize]; + let mut vec = vec![0_u8; self.length.try_into().expect("usize is at least 32 bit")]; self.capacity = vec.capacity(); self.bytes = NonNull::new(vec.as_mut_ptr()); mem::forget(vec); diff --git a/diesel/src/mysql/connection/mod.rs b/diesel/src/mysql/connection/mod.rs index 992d92c4f915..56fc2ce920b6 100644 --- a/diesel/src/mysql/connection/mod.rs +++ b/diesel/src/mysql/connection/mod.rs @@ -31,7 +31,7 @@ use crate::RunQueryDsl; /// * `ssl_cert` accepts a path to the client's certificate file /// * `ssl_key` accepts a path to the client's private key file /// * `ssl_mode` expects a value defined for MySQL client command option `--ssl-mode` -/// See +/// See /// /// # Supported loading model implementations /// @@ -154,7 +154,7 @@ impl Connection for MysqlConnection { /// * `ssl_cert` accepts a path to the client's certificate file /// * `ssl_key` accepts a path to the client's private key file /// * `ssl_mode` expects a 
value defined for MySQL client command option `--ssl-mode` - /// See + /// See fn establish(database_url: &str) -> ConnectionResult { let mut instrumentation = crate::connection::instrumentation::get_default_instrumentation(); instrumentation.on_connection_event(InstrumentationEvent::StartEstablishConnection { @@ -187,7 +187,7 @@ impl Connection for MysqlConnection { // we have not called result yet, so calling `execute` is // fine let stmt_use = unsafe { stmt.execute() }?; - Ok(stmt_use.affected_rows()) + stmt_use.affected_rows() }), &mut self.transaction_state, &mut self.instrumentation, diff --git a/diesel/src/mysql/connection/stmt/mod.rs b/diesel/src/mysql/connection/stmt/mod.rs index 8bb527b34645..986a344ed5af 100644 --- a/diesel/src/mysql/connection/stmt/mod.rs +++ b/diesel/src/mysql/connection/stmt/mod.rs @@ -153,9 +153,11 @@ pub(super) struct StatementUse<'a> { } impl<'a> StatementUse<'a> { - pub(in crate::mysql::connection) fn affected_rows(&self) -> usize { + pub(in crate::mysql::connection) fn affected_rows(&self) -> QueryResult { let affected_rows = unsafe { ffi::mysql_stmt_affected_rows(self.inner.stmt.as_ptr()) }; - affected_rows as usize + affected_rows + .try_into() + .map_err(|e| Error::DeserializationError(Box::new(e))) } /// This function should be called after `execute` only @@ -167,14 +169,19 @@ impl<'a> StatementUse<'a> { pub(super) fn populate_row_buffers(&self, binds: &mut OutputBinds) -> QueryResult> { let next_row_result = unsafe { ffi::mysql_stmt_fetch(self.inner.stmt.as_ptr()) }; - match next_row_result as libc::c_uint { - ffi::MYSQL_NO_DATA => Ok(None), - ffi::MYSQL_DATA_TRUNCATED => binds.populate_dynamic_buffers(self).map(Some), - 0 => { - binds.update_buffer_lengths(); - Ok(Some(())) + if next_row_result < 0 { + self.inner.did_an_error_occur().map(Some) + } else { + #[allow(clippy::cast_sign_loss)] // that's how it's supposed to be based on the API + match next_row_result as libc::c_uint { + ffi::MYSQL_NO_DATA => Ok(None), + 
ffi::MYSQL_DATA_TRUNCATED => binds.populate_dynamic_buffers(self).map(Some), + 0 => { + binds.update_buffer_lengths(); + Ok(Some(())) + } + _error => self.inner.did_an_error_occur().map(Some), } - _error => self.inner.did_an_error_occur().map(Some), } } @@ -187,7 +194,8 @@ impl<'a> StatementUse<'a> { ffi::mysql_stmt_fetch_column( self.inner.stmt.as_ptr(), bind, - idx as libc::c_uint, + idx.try_into() + .map_err(|e| Error::DeserializationError(Box::new(e)))?, offset as libc::c_ulong, ); self.inner.did_an_error_occur() diff --git a/diesel/src/mysql/types/date_and_time/chrono.rs b/diesel/src/mysql/types/date_and_time/chrono.rs index 786dcd39b9f3..80f188f79ff6 100644 --- a/diesel/src/mysql/types/date_and_time/chrono.rs +++ b/diesel/src/mysql/types/date_and_time/chrono.rs @@ -26,7 +26,7 @@ impl FromSql for NaiveDateTime { impl ToSql for NaiveDateTime { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: self.hour() as libc::c_uint, @@ -48,16 +48,16 @@ impl FromSql for NaiveDateTime { fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let mysql_time = >::from_sql(bytes)?; - NaiveDate::from_ymd_opt(mysql_time.year as i32, mysql_time.month, mysql_time.day) - .and_then(|v| { - v.and_hms_micro_opt( - mysql_time.hour, - mysql_time.minute, - mysql_time.second, - mysql_time.second_part as u32, - ) - }) - .ok_or_else(|| format!("Cannot parse this date: {mysql_time:?}").into()) + let micro = mysql_time.second_part.try_into()?; + NaiveDate::from_ymd_opt( + mysql_time.year.try_into()?, + mysql_time.month, + mysql_time.day, + ) + .and_then(|v| { + v.and_hms_micro_opt(mysql_time.hour, mysql_time.minute, mysql_time.second, micro) + }) + .ok_or_else(|| format!("Cannot parse this date: {mysql_time:?}").into()) } } @@ -94,7 +94,7 @@ impl FromSql for NaiveTime { impl ToSql for 
NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: 0, @@ -114,8 +114,12 @@ impl ToSql for NaiveDate { impl FromSql for NaiveDate { fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let mysql_time = >::from_sql(bytes)?; - NaiveDate::from_ymd_opt(mysql_time.year as i32, mysql_time.month, mysql_time.day) - .ok_or_else(|| format!("Unable to convert {mysql_time:?} to chrono").into()) + NaiveDate::from_ymd_opt( + mysql_time.year.try_into()?, + mysql_time.month, + mysql_time.day, + ) + .ok_or_else(|| format!("Unable to convert {mysql_time:?} to chrono").into()) } } diff --git a/diesel/src/mysql/types/date_and_time/time.rs b/diesel/src/mysql/types/date_and_time/time.rs index 0877e8734b8c..e4bec4c9a5f7 100644 --- a/diesel/src/mysql/types/date_and_time/time.rs +++ b/diesel/src/mysql/types/date_and_time/time.rs @@ -15,7 +15,7 @@ fn to_time(dt: MysqlTime) -> Result> { ("year", dt.year), ("month", dt.month), ("day", dt.day), - ("offset", dt.time_zone_displacement as u32), + ("offset", dt.time_zone_displacement.try_into()?), ] { if field != 0 { return Err(format!("Unable to convert {dt:?} to time: {name} must be 0").into()); @@ -63,7 +63,7 @@ fn to_primitive_datetime(dt: OffsetDateTime) -> PrimitiveDateTime { impl ToSql for PrimitiveDateTime { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: self.hour() as libc::c_uint, @@ -171,7 +171,7 @@ impl FromSql for NaiveTime { impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { let mysql_time = MysqlTime { - year: self.year() as libc::c_uint, + year: 
self.year().try_into()?, month: self.month() as libc::c_uint, day: self.day() as libc::c_uint, hour: 0, diff --git a/diesel/src/mysql/types/mod.rs b/diesel/src/mysql/types/mod.rs index d52d32afe228..afd475f780cb 100644 --- a/diesel/src/mysql/types/mod.rs +++ b/diesel/src/mysql/types/mod.rs @@ -25,7 +25,7 @@ impl ToSql for i8 { impl FromSql for i8 { fn from_sql(value: MysqlValue<'_>) -> deserialize::Result { let bytes = value.as_bytes(); - Ok(bytes[0] as i8) + Ok(i8::from_be_bytes([bytes[0]])) } } @@ -69,12 +69,14 @@ where #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u8 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i8), &mut out.reborrow()) + out.write_u8(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u8 { + #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i8 = FromSql::::from_sql(bytes)?; Ok(signed as u8) @@ -84,12 +86,18 @@ impl FromSql, Mysql> for u8 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u16 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i16), &mut out.reborrow()) + out.write_u16::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u16 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i32 = FromSql::::from_sql(bytes)?; Ok(signed as u16) @@ -99,12 +107,18 @@ impl FromSql, Mysql> for u16 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u32 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i32), &mut out.reborrow()) + out.write_u32::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl 
FromSql, Mysql> for u32 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i64 = FromSql::::from_sql(bytes)?; Ok(signed as u32) @@ -114,12 +128,18 @@ impl FromSql, Mysql> for u32 { #[cfg(feature = "mysql_backend")] impl ToSql, Mysql> for u64 { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Mysql>) -> serialize::Result { - ToSql::::to_sql(&(*self as i64), &mut out.reborrow()) + out.write_u64::(*self)?; + Ok(IsNull::No) } } #[cfg(feature = "mysql_backend")] impl FromSql, Mysql> for u64 { + #[allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] // that's what we want fn from_sql(bytes: MysqlValue<'_>) -> deserialize::Result { let signed: i64 = FromSql::::from_sql(bytes)?; Ok(signed as u64) diff --git a/diesel/src/mysql/types/primitives.rs b/diesel/src/mysql/types/primitives.rs index 7a680018775f..2de855a4dbd5 100644 --- a/diesel/src/mysql/types/primitives.rs +++ b/diesel/src/mysql/types/primitives.rs @@ -22,6 +22,7 @@ where } } +#[allow(clippy::cast_possible_truncation)] // that's what we want here fn f32_to_i64(f: f32) -> deserialize::Result { if f <= i64::MAX as f32 && f >= i64::MIN as f32 { Ok(f.trunc() as i64) @@ -32,6 +33,7 @@ fn f32_to_i64(f: f32) -> deserialize::Result { } } +#[allow(clippy::cast_possible_truncation)] // that's what we want here fn f64_to_i64(f: f64) -> deserialize::Result { if f <= i64::MAX as f64 && f >= i64::MIN as f64 { Ok(f.trunc() as i64) @@ -128,6 +130,8 @@ impl FromSql for f32 { NumericRepresentation::Medium(x) => Ok(x as Self), NumericRepresentation::Big(x) => Ok(x as Self), NumericRepresentation::Float(x) => Ok(x), + // there is currently no way to do this in a better way + #[allow(clippy::cast_possible_truncation)] NumericRepresentation::Double(x) => Ok(x as Self), NumericRepresentation::Decimal(bytes) => 
Ok(str::from_utf8(bytes)?.parse()?), } diff --git a/diesel/src/mysql/value.rs b/diesel/src/mysql/value.rs index 98550bf8bae7..e72c6a926b74 100644 --- a/diesel/src/mysql/value.rs +++ b/diesel/src/mysql/value.rs @@ -60,7 +60,7 @@ impl<'a> MysqlValue<'a> { pub(crate) fn numeric_value(&self) -> deserialize::Result> { Ok(match self.tpe { MysqlType::UnsignedTiny | MysqlType::Tiny => { - NumericRepresentation::Tiny(self.raw[0] as i8) + NumericRepresentation::Tiny(self.raw[0].try_into()?) } MysqlType::UnsignedShort | MysqlType::Short => { NumericRepresentation::Small(i16::from_ne_bytes((&self.raw[..2]).try_into()?)) diff --git a/diesel/src/pg/connection/copy.rs b/diesel/src/pg/connection/copy.rs index 8c0002fca5a2..d8d14614cf26 100644 --- a/diesel/src/pg/connection/copy.rs +++ b/diesel/src/pg/connection/copy.rs @@ -102,7 +102,10 @@ impl<'conn> BufRead for CopyToBuffer<'conn> { let len = pq_sys::PQgetCopyData(self.conn.internal_connection.as_ptr(), &mut self.ptr, 0); match len { - len if len >= 0 => self.len = len as usize + 1, + len if len >= 0 => { + self.len = 1 + usize::try_from(len) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))? 
+ } -1 => self.len = 0, _ => { let error = self.conn.last_error_message(); diff --git a/diesel/src/pg/connection/raw.rs b/diesel/src/pg/connection/raw.rs index 8ae3e499f226..b62791962204 100644 --- a/diesel/src/pg/connection/raw.rs +++ b/diesel/src/pg/connection/raw.rs @@ -147,7 +147,9 @@ impl RawConnection { pq_sys::PQputCopyData( self.internal_connection.as_ptr(), c.as_ptr() as *const libc::c_char, - c.len() as libc::c_int, + c.len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?, ) }; if res != 1 { diff --git a/diesel/src/pg/connection/result.rs b/diesel/src/pg/connection/result.rs index b4f501669f3d..a5542d0a51ce 100644 --- a/diesel/src/pg/connection/result.rs +++ b/diesel/src/pg/connection/result.rs @@ -16,8 +16,8 @@ use std::cell::OnceCell; #[allow(missing_debug_implementations)] pub struct PgResult { internal_result: RawResult, - column_count: usize, - row_count: usize, + column_count: libc::c_int, + row_count: libc::c_int, // We store field names as pointer // as we cannot put a correct lifetime here // The value is valid as long as we haven't freed `RawResult` @@ -34,8 +34,8 @@ impl PgResult { | ExecStatusType::PGRES_COPY_IN | ExecStatusType::PGRES_COPY_OUT | ExecStatusType::PGRES_TUPLES_OK => { - let column_count = unsafe { PQnfields(internal_result.as_ptr()) as usize }; - let row_count = unsafe { PQntuples(internal_result.as_ptr()) as usize }; + let column_count = unsafe { PQnfields(internal_result.as_ptr()) }; + let row_count = unsafe { PQntuples(internal_result.as_ptr()) }; Ok(PgResult { internal_result, column_count, @@ -108,7 +108,10 @@ impl PgResult { } pub(super) fn num_rows(&self) -> usize { - self.row_count + self.row_count.try_into().expect( + "Diesel expects to run on a >= 32 bit OS \ + (or libpq is giving out negative row count)", + ) } pub(super) fn get_row(self: Rc, idx: usize) -> PgRow { @@ -119,29 +122,38 @@ impl PgResult { if self.is_null(row_idx, col_idx) { None } else { - let row_idx = row_idx as libc::c_int; - 
let col_idx = col_idx as libc::c_int; + let row_idx = row_idx.try_into().ok()?; + let col_idx = col_idx.try_into().ok()?; unsafe { let value_ptr = PQgetvalue(self.internal_result.as_ptr(), row_idx, col_idx) as *const u8; let num_bytes = PQgetlength(self.internal_result.as_ptr(), row_idx, col_idx); - Some(slice::from_raw_parts(value_ptr, num_bytes as usize)) + Some(slice::from_raw_parts( + value_ptr, + num_bytes + .try_into() + .expect("Diesel expects at least a 32 bit operating system"), + )) } } } pub(super) fn is_null(&self, row_idx: usize, col_idx: usize) -> bool { - unsafe { - 0 != PQgetisnull( - self.internal_result.as_ptr(), - row_idx as libc::c_int, - col_idx as libc::c_int, - ) - } + let row_idx = row_idx + .try_into() + .expect("Row indices are expected to fit into 32 bit"); + let col_idx = col_idx + .try_into() + .expect("Column indices are expected to fit into 32 bit"); + + unsafe { 0 != PQgetisnull(self.internal_result.as_ptr(), row_idx, col_idx) } } pub(in crate::pg) fn column_type(&self, col_idx: usize) -> NonZeroU32 { - let type_oid = unsafe { PQftype(self.internal_result.as_ptr(), col_idx as libc::c_int) }; + let col_idx: i32 = col_idx + .try_into() + .expect("Column indices are expected to fit into 32 bit"); + let type_oid = unsafe { PQftype(self.internal_result.as_ptr(), col_idx) }; NonZeroU32::new(type_oid).expect( "Got a zero oid from postgres. 
If you see this error message \ please report it as issue on the diesel github bug tracker.", @@ -180,7 +192,10 @@ impl PgResult { } pub(super) fn column_count(&self) -> usize { - self.column_count + self.column_count.try_into().expect( + "Diesel expects to run on a >= 32 bit OS \ + (or libpq is giving out negative column count)", + ) } } diff --git a/diesel/src/pg/connection/stmt/mod.rs b/diesel/src/pg/connection/stmt/mod.rs index 5e2a5c36ec89..624574a7b973 100644 --- a/diesel/src/pg/connection/stmt/mod.rs +++ b/diesel/src/pg/connection/stmt/mod.rs @@ -33,12 +33,17 @@ impl Statement { .collect::>(); let param_lengths = param_data .iter() - .map(|data| data.as_ref().map(|d| d.len() as libc::c_int).unwrap_or(0)) - .collect::>(); + .map(|data| data.as_ref().map(|d| d.len().try_into()).unwrap_or(Ok(0))) + .collect::, _>>() + .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; + let param_count: libc::c_int = params_pointer + .len() + .try_into() + .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; unsafe { raw_connection.send_query_prepared( self.name.as_ptr(), - params_pointer.len() as libc::c_int, + param_count, params_pointer.as_ptr(), param_lengths.as_ptr(), self.param_formats.as_ptr(), @@ -66,10 +71,14 @@ impl Statement { .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; let internal_result = unsafe { + let param_count: libc::c_int = param_types + .len() + .try_into() + .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; raw_connection.prepare( name.as_ptr(), sql.as_ptr(), - param_types.len() as libc::c_int, + param_count, param_types_to_ptr(Some(¶m_types_vec)), ) }; diff --git a/diesel/src/pg/expression/extensions/interval_dsl.rs b/diesel/src/pg/expression/extensions/interval_dsl.rs index 6cf5df0d24d1..5a5acc6406ba 100644 --- a/diesel/src/pg/expression/extensions/interval_dsl.rs +++ b/diesel/src/pg/expression/extensions/interval_dsl.rs @@ -213,14 +213,19 @@ impl IntervalDsl for i64 { } fn 
days(self) -> PgInterval { - (self as i32).days() + i32::try_from(self) + .expect("Maximal supported day interval size is 32 bit") + .days() } fn months(self) -> PgInterval { - (self as i32).months() + i32::try_from(self) + .expect("Maximal supported month interval size is 32 bit") + .months() } } +#[allow(clippy::cast_possible_truncation)] // we want to truncate impl IntervalDsl for f64 { fn microseconds(self) -> PgInterval { (self.round() as i64).microseconds() diff --git a/diesel/src/pg/query_builder/copy/copy_from.rs b/diesel/src/pg/query_builder/copy/copy_from.rs index a5dfe7c08e15..baa3e19ac03c 100644 --- a/diesel/src/pg/query_builder/copy/copy_from.rs +++ b/diesel/src/pg/query_builder/copy/copy_from.rs @@ -203,6 +203,10 @@ macro_rules! impl_copy_from_insertable_helper_for_values_clause { $($TT: ToSql<$T, Pg>,)* { type Target = ($($ST,)*); + + // statically known to always fit + // as we don't support more than 128 columns + #[allow(clippy::cast_possible_truncation)] const COLUMN_COUNT: i16 = $Tuple as i16; fn write_to_buffer(&self, idx: i16, out: &mut Vec) -> QueryResult { @@ -234,6 +238,10 @@ macro_rules! 
impl_copy_from_insertable_helper_for_values_clause { $($TT: ToSql<$T, Pg>,)* { type Target = ($($ST,)*); + + // statically known to always fit + // as we don't support more than 128 columns + #[allow(clippy::cast_possible_truncation)] const COLUMN_COUNT: i16 = $Tuple as i16; fn write_to_buffer(&self, idx: i16, out: &mut Vec) -> QueryResult { @@ -309,7 +317,9 @@ where if is_null == IsNull::No { // fill in the length afterwards let len_after = buffer.len(); - let diff = (len_after - len_before) as i32; + let diff = (len_after - len_before) + .try_into() + .map_err(|e| crate::result::Error::SerializationError(Box::new(e)))?; let bytes = i32::to_be_bytes(diff); for (b, t) in bytes.into_iter().zip(&mut buffer[len_before - 4..]) { *t = b; diff --git a/diesel/src/pg/query_builder/copy/copy_to.rs b/diesel/src/pg/query_builder/copy/copy_to.rs index 49778514f607..2ffd3f664dc6 100644 --- a/diesel/src/pg/query_builder/copy/copy_to.rs +++ b/diesel/src/pg/query_builder/copy/copy_to.rs @@ -273,10 +273,7 @@ where C::get_buffer(out) } - fn execute<'conn, T>( - &'conn mut self, - command: CopyToCommand, - ) -> QueryResult> + fn execute(&mut self, command: CopyToCommand) -> QueryResult> where T: CopyTarget, { @@ -335,12 +332,13 @@ where format!("Unexpected flag value: {flags_backward_incompatible:x}").into(), )); } - let header_size = i32::from_be_bytes( + let header_size = usize::try_from(i32::from_be_bytes( (&buffer[super::COPY_MAGIC_HEADER.len() + 4..super::COPY_MAGIC_HEADER.len() + 8]) .try_into() .expect("Exactly 4 byte"), - ); - out.consume(super::COPY_MAGIC_HEADER.len() + 8 + header_size as usize); + )) + .map_err(|e| crate::result::Error::DeserializationError(Box::new(e)))?; + out.consume(super::COPY_MAGIC_HEADER.len() + 8 + header_size); let mut len = None; Ok(std::iter::from_fn(move || { if let Some(len) = len { @@ -354,7 +352,13 @@ where let tuple_count = i16::from_be_bytes((&buffer[..2]).try_into().expect("Exactly 2 bytes")); if tuple_count > 0 { - let mut buffers = 
Vec::with_capacity(tuple_count as usize); + let tuple_count = match usize::try_from(tuple_count) { + Ok(o) => o, + Err(e) => { + return Some(Err(crate::result::Error::DeserializationError(Box::new(e)))) + } + }; + let mut buffers = Vec::with_capacity(tuple_count); let mut offset = 2; for _t in 0..tuple_count { let data_size = i32::from_be_bytes( @@ -362,11 +366,21 @@ where .try_into() .expect("Exactly 4 bytes"), ); + if data_size < 0 { buffers.push(None); } else { - buffers.push(Some(&buffer[offset + 4..offset + 4 + data_size as usize])); - offset = offset + 4 + data_size as usize; + match usize::try_from(data_size) { + Ok(data_size) => { + buffers.push(Some(&buffer[offset + 4..offset + 4 + data_size])); + offset = offset + 4 + data_size; + } + Err(e) => { + return Some(Err(crate::result::Error::DeserializationError( + Box::new(e), + ))); + } + } } } diff --git a/diesel/src/pg/types/array.rs b/diesel/src/pg/types/array.rs index 8afcf878151f..354ce1393a3d 100644 --- a/diesel/src/pg/types/array.rs +++ b/diesel/src/pg/types/array.rs @@ -49,7 +49,7 @@ where if has_null && elem_size == -1 { T::from_nullable_sql(None) } else { - let (elem_bytes, new_bytes) = bytes.split_at(elem_size as usize); + let (elem_bytes, new_bytes) = bytes.split_at(elem_size.try_into()?); bytes = new_bytes; T::from_sql(PgValue::new_internal(elem_bytes, &value)) } @@ -101,7 +101,7 @@ where out.write_i32::(flags)?; let element_oid = Pg::metadata(out.metadata_lookup()).oid()?; out.write_u32::(element_oid)?; - out.write_i32::(self.len() as i32)?; + out.write_i32::(self.len().try_into()?)?; let lower_bound = 1; out.write_i32::(lower_bound)?; @@ -116,7 +116,7 @@ where }; if let IsNull::No = is_null { - out.write_i32::(buffer.len() as i32)?; + out.write_i32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } else { diff --git a/diesel/src/pg/types/date_and_time/chrono.rs b/diesel/src/pg/types/date_and_time/chrono.rs index bcbac6ec0543..756e0bf7814f 100644 --- 
a/diesel/src/pg/types/date_and_time/chrono.rs +++ b/diesel/src/pg/types/date_and_time/chrono.rs @@ -116,7 +116,7 @@ fn pg_epoch_date() -> NaiveDate { impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let days_since_epoch = self.signed_duration_since(pg_epoch_date()).num_days(); - ToSql::::to_sql(&PgDate(days_since_epoch as i32), &mut out.reborrow()) + ToSql::::to_sql(&PgDate(days_since_epoch.try_into()?), &mut out.reborrow()) } } diff --git a/diesel/src/pg/types/date_and_time/quickcheck_impls.rs b/diesel/src/pg/types/date_and_time/quickcheck_impls.rs index ae1d7438c8a5..259de285dab9 100644 --- a/diesel/src/pg/types/date_and_time/quickcheck_impls.rs +++ b/diesel/src/pg/types/date_and_time/quickcheck_impls.rs @@ -1,6 +1,10 @@ -extern crate quickcheck; - -use self::quickcheck::{Arbitrary, Gen}; +// it's test code +#![allow( + clippy::cast_possible_wrap, + clippy::cast_sign_loss, + clippy::cast_possible_truncation +)] +use quickcheck::{Arbitrary, Gen}; use super::{PgDate, PgInterval, PgTime, PgTimestamp}; diff --git a/diesel/src/pg/types/date_and_time/std_time.rs b/diesel/src/pg/types/date_and_time/std_time.rs index b019c59b5be7..90910f27e597 100644 --- a/diesel/src/pg/types/date_and_time/std_time.rs +++ b/diesel/src/pg/types/date_and_time/std_time.rs @@ -18,9 +18,9 @@ impl ToSql for SystemTime { Err(time_err) => (true, time_err.duration()), }; let time_since_epoch = if before_epoch { - -(duration_to_usecs(duration) as i64) + -(i64::try_from(duration_to_usecs(duration))?) } else { - duration_to_usecs(duration) as i64 + duration_to_usecs(duration).try_into()? 
}; ToSql::::to_sql(&time_since_epoch, &mut out.reborrow()) } diff --git a/diesel/src/pg/types/date_and_time/time.rs b/diesel/src/pg/types/date_and_time/time.rs index ac0770281ce5..d1d1affab937 100644 --- a/diesel/src/pg/types/date_and_time/time.rs +++ b/diesel/src/pg/types/date_and_time/time.rs @@ -39,8 +39,7 @@ impl ToSql for PrimitiveDateTime { let error_message = format!("{self:?} as microseconds is too large to fit in an i64"); return Err(error_message.into()); } - let micros = micros as i64; - ToSql::::to_sql(&PgTimestamp(micros), &mut out.reborrow()) + ToSql::::to_sql(&PgTimestamp(micros.try_into()?), &mut out.reborrow()) } } @@ -79,9 +78,10 @@ impl ToSql for OffsetDateTime { impl ToSql for NaiveTime { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let duration = *self - NaiveTime::MIDNIGHT; - // microseconds in a day cannot overflow i64 - let micros = duration.whole_microseconds() as i64; - ToSql::::to_sql(&PgTime(micros), &mut out.reborrow()) + ToSql::::to_sql( + &PgTime(duration.whole_microseconds().try_into()?), + &mut out.reborrow(), + ) } } @@ -100,7 +100,7 @@ const PG_EPOCH_DATE: NaiveDate = date!(2000 - 1 - 1); impl ToSql for NaiveDate { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { let days_since_epoch = (*self - PG_EPOCH_DATE).whole_days(); - ToSql::::to_sql(&PgDate(days_since_epoch as i32), &mut out.reborrow()) + ToSql::::to_sql(&PgDate(days_since_epoch.try_into()?), &mut out.reborrow()) } } diff --git a/diesel/src/pg/types/floats/mod.rs b/diesel/src/pg/types/floats/mod.rs index b35ee0467966..da6256937eff 100644 --- a/diesel/src/pg/types/floats/mod.rs +++ b/diesel/src/pg/types/floats/mod.rs @@ -101,7 +101,7 @@ impl ToSql for PgNumeric { PgNumeric::Positive { scale, .. } | PgNumeric::Negative { scale, .. 
} => scale, PgNumeric::NaN => 0, }; - out.write_u16::(digits.len() as u16)?; + out.write_u16::(digits.len().try_into()?)?; out.write_i16::(weight)?; out.write_u16::(sign)?; out.write_u16::(scale)?; diff --git a/diesel/src/pg/types/floats/quickcheck_impls.rs b/diesel/src/pg/types/floats/quickcheck_impls.rs index b32d0ba87383..6211d5b9e9d5 100644 --- a/diesel/src/pg/types/floats/quickcheck_impls.rs +++ b/diesel/src/pg/types/floats/quickcheck_impls.rs @@ -1,6 +1,6 @@ -extern crate quickcheck; +#![allow(clippy::cast_sign_loss)] // test code -use self::quickcheck::{Arbitrary, Gen}; +use quickcheck::{Arbitrary, Gen}; use super::PgNumeric; diff --git a/diesel/src/pg/types/ipnet_address.rs b/diesel/src/pg/types/ipnet_address.rs index 10a5e6b8c84d..177074b9b59b 100644 --- a/diesel/src/pg/types/ipnet_address.rs +++ b/diesel/src/pg/types/ipnet_address.rs @@ -16,6 +16,8 @@ const AF_INET: u8 = 2; // Maybe not used, but defining to follow Rust's libstd/net/sys #[cfg(target_os = "redox")] const AF_INET: u8 = 1; + +#[allow(clippy::cast_possible_truncation)] // it's 2 #[cfg(not(any(windows, target_os = "redox")))] const AF_INET: u8 = libc::AF_INET as u8; diff --git a/diesel/src/pg/types/network_address.rs b/diesel/src/pg/types/network_address.rs index a25e64a70e94..fa215ef59bfd 100644 --- a/diesel/src/pg/types/network_address.rs +++ b/diesel/src/pg/types/network_address.rs @@ -17,6 +17,8 @@ const AF_INET: u8 = 2; // Maybe not used, but defining to follow Rust's libstd/net/sys #[cfg(target_os = "redox")] const AF_INET: u8 = 1; + +#[allow(clippy::cast_possible_truncation)] // it's 2 #[cfg(not(any(windows, target_os = "redox")))] const AF_INET: u8 = libc::AF_INET as u8; diff --git a/diesel/src/pg/types/numeric.rs b/diesel/src/pg/types/numeric.rs index 7530b5917b8a..18a7f9778def 100644 --- a/diesel/src/pg/types/numeric.rs +++ b/diesel/src/pg/types/numeric.rs @@ -58,10 +58,10 @@ mod bigdecimal { }; let mut result = BigUint::default(); - let count = digits.len() as i64; + let count = 
i64::try_from(digits.len())?; for digit in digits { result *= BigUint::from(10_000u64); - result += BigUint::from(*digit as u64); + result += BigUint::from(u64::try_from(*digit)?); } // First digit got factor 10_000^(digits.len() - 1), but should get 10_000^weight let correction_exp = 4 * (i64::from(weight) - count + 1); @@ -80,6 +80,8 @@ mod bigdecimal { } } + // that should likely be a `TryFrom` impl + // TODO: diesel 3.0 #[cfg(all(feature = "postgres_backend", feature = "numeric"))] impl<'a> From<&'a BigDecimal> for PgNumeric { // NOTE(clippy): No `std::ops::MulAssign` impl for `BigInt` @@ -98,7 +100,9 @@ mod bigdecimal { } 0 } else { - scale as u16 + scale + .try_into() + .expect("Scale is expected to be 16bit large") }; integer = integer.abs(); @@ -112,7 +116,11 @@ mod bigdecimal { let mut digits = ToBase10000(Some(integer)).collect::>(); digits.reverse(); let digits_after_decimal = scale / 4 + 1; - let weight = digits.len() as i16 - digits_after_decimal as i16 - 1; + let weight = i16::try_from(digits.len()) + .expect("Max digit number is expected to fit into 16 bit") + - i16::try_from(digits_after_decimal) + .expect("Max digit number is expected to fit into 16 bit") + - 1; let unnecessary_zeroes = digits.iter().rev().take_while(|i| i.is_zero()).count(); diff --git a/diesel/src/pg/types/ranges.rs b/diesel/src/pg/types/ranges.rs index 8d2dca05c70b..33602a17ccb9 100644 --- a/diesel/src/pg/types/ranges.rs +++ b/diesel/src/pg/types/ranges.rs @@ -73,7 +73,7 @@ where if !flags.contains(RangeFlags::LB_INF) { let elem_size = bytes.read_i32::()?; - let (elem_bytes, new_bytes) = bytes.split_at(elem_size as usize); + let (elem_bytes, new_bytes) = bytes.split_at(elem_size.try_into()?); bytes = new_bytes; let value = T::from_sql(PgValue::new_internal(elem_bytes, &value))?; @@ -140,7 +140,7 @@ where Output::new(ByteWrapper(&mut buffer), out.metadata_lookup()); value.to_sql(&mut inner_buffer)?; } - out.write_u32::(buffer.len() as u32)?; + 
out.write_u32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } @@ -154,7 +154,7 @@ where Output::new(ByteWrapper(&mut buffer), out.metadata_lookup()); value.to_sql(&mut inner_buffer)?; } - out.write_u32::(buffer.len() as u32)?; + out.write_u32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; } Bound::Unbounded => {} diff --git a/diesel/src/pg/types/record.rs b/diesel/src/pg/types/record.rs index 9118c686d698..d589f3caaa0c 100644 --- a/diesel/src/pg/types/record.rs +++ b/diesel/src/pg/types/record.rs @@ -52,7 +52,7 @@ macro_rules! tuple_impls { if num_bytes == -1 { $T::from_nullable_sql(None)? } else { - let (elem_bytes, new_bytes) = bytes.split_at(num_bytes as usize); + let (elem_bytes, new_bytes) = bytes.split_at(num_bytes.try_into()?); bytes = new_bytes; $T::from_sql(PgValue::new_internal( elem_bytes, @@ -117,7 +117,7 @@ macro_rules! tuple_impls { }; if let IsNull::No = is_null { - out.write_i32::(buffer.len() as i32)?; + out.write_i32::(buffer.len().try_into()?)?; out.write_all(&buffer)?; buffer.clear(); } else { diff --git a/diesel/src/query_builder/combination_clause.rs b/diesel/src/query_builder/combination_clause.rs index 67b2ebfca804..ba7908e60df2 100644 --- a/diesel/src/query_builder/combination_clause.rs +++ b/diesel/src/query_builder/combination_clause.rs @@ -8,18 +8,6 @@ use crate::query_builder::insert_statement::InsertFromSelect; use crate::query_builder::{AsQuery, AstPass, Query, QueryFragment, QueryId, SelectQuery}; use crate::{CombineDsl, Insertable, QueryResult, RunQueryDsl, Table}; -#[derive(Debug, Clone, Copy, QueryId)] -pub(crate) struct NoCombinationClause; - -impl QueryFragment for NoCombinationClause -where - DB: Backend + DieselReserveSpecialization, -{ - fn walk_ast<'b>(&'b self, _: AstPass<'_, 'b, DB>) -> QueryResult<()> { - Ok(()) - } -} - #[derive(Debug, Copy, Clone, QueryId)] #[must_use = "Queries are only executed when calling `load`, `get_result` or similar."] /// Combine queries using a combinator like 
`UNION`, `INTERSECT` or `EXPECT` diff --git a/diesel/src/sqlite/connection/bind_collector.rs b/diesel/src/sqlite/connection/bind_collector.rs index df4306632755..b95b755d4b4d 100644 --- a/diesel/src/sqlite/connection/bind_collector.rs +++ b/diesel/src/sqlite/connection/bind_collector.rs @@ -125,7 +125,10 @@ impl std::fmt::Display for InternalSqliteBindValue<'_> { impl InternalSqliteBindValue<'_> { #[allow(unsafe_code)] // ffi function calls - pub(in crate::sqlite) fn result_of(self, ctx: &mut libsqlite3_sys::sqlite3_context) { + pub(in crate::sqlite) fn result_of( + self, + ctx: &mut libsqlite3_sys::sqlite3_context, + ) -> Result<(), std::num::TryFromIntError> { use libsqlite3_sys as ffi; use std::os::raw as libc; // This unsafe block assumes the following invariants: @@ -136,25 +139,25 @@ impl InternalSqliteBindValue<'_> { InternalSqliteBindValue::BorrowedString(s) => ffi::sqlite3_result_text( ctx, s.as_ptr() as *const libc::c_char, - s.len() as libc::c_int, + s.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::String(s) => ffi::sqlite3_result_text( ctx, s.as_ptr() as *const libc::c_char, - s.len() as libc::c_int, + s.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::Binary(b) => ffi::sqlite3_result_blob( ctx, b.as_ptr() as *const libc::c_void, - b.len() as libc::c_int, + b.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::BorrowedBinary(b) => ffi::sqlite3_result_blob( ctx, b.as_ptr() as *const libc::c_void, - b.len() as libc::c_int, + b.len().try_into()?, ffi::SQLITE_TRANSIENT(), ), InternalSqliteBindValue::I32(i) => ffi::sqlite3_result_int(ctx, i as libc::c_int), @@ -165,6 +168,7 @@ impl InternalSqliteBindValue<'_> { InternalSqliteBindValue::Null => ffi::sqlite3_result_null(ctx), } } + Ok(()) } } diff --git a/diesel/src/sqlite/connection/functions.rs b/diesel/src/sqlite/connection/functions.rs index db330ee23998..454a6bcd3c57 100644 --- a/diesel/src/sqlite/connection/functions.rs +++ 
b/diesel/src/sqlite/connection/functions.rs @@ -202,10 +202,10 @@ impl<'a> Row<'a, Sqlite> for FunctionRow<'a> { 'a: 'b, Self: crate::row::RowIndex, { - let idx = self.idx(idx)?; + let col_idx = self.idx(idx)?; Some(FunctionArgument { args: self.args.borrow(), - col_idx: idx as i32, + col_idx, }) } @@ -232,7 +232,7 @@ impl<'a, 'b> RowIndex<&'a str> for FunctionRow<'b> { struct FunctionArgument<'a> { args: Ref<'a, ManuallyDrop>>, - col_idx: i32, + col_idx: usize, } impl<'a> Field<'a, Sqlite> for FunctionArgument<'a> { diff --git a/diesel/src/sqlite/connection/mod.rs b/diesel/src/sqlite/connection/mod.rs index fa77c0571a81..12c59ccc960d 100644 --- a/diesel/src/sqlite/connection/mod.rs +++ b/diesel/src/sqlite/connection/mod.rs @@ -185,9 +185,11 @@ impl Connection for SqliteConnection { T: QueryFragment + QueryId, { let statement_use = self.prepared_query(source)?; - statement_use - .run() - .map(|_| self.raw_connection.rows_affected_by_last_query()) + statement_use.run().and_then(|_| { + self.raw_connection + .rows_affected_by_last_query() + .map_err(Error::DeserializationError) + }) } fn transaction_state(&mut self) -> &mut AnsiTransactionManager diff --git a/diesel/src/sqlite/connection/owned_row.rs b/diesel/src/sqlite/connection/owned_row.rs index 43e225d0e2ae..8de0ac9c28bc 100644 --- a/diesel/src/sqlite/connection/owned_row.rs +++ b/diesel/src/sqlite/connection/owned_row.rs @@ -41,7 +41,7 @@ impl<'a> Row<'a, Sqlite> for OwnedSqliteRow { let idx = self.idx(idx)?; Some(OwnedSqliteField { row: self, - col_idx: i32::try_from(idx).ok()?, + col_idx: idx, }) } @@ -71,14 +71,14 @@ impl<'idx> RowIndex<&'idx str> for OwnedSqliteRow { #[allow(missing_debug_implementations)] pub struct OwnedSqliteField<'row> { pub(super) row: &'row OwnedSqliteRow, - pub(super) col_idx: i32, + pub(super) col_idx: usize, } impl<'row> Field<'row, Sqlite> for OwnedSqliteField<'row> { fn field_name(&self) -> Option<&str> { self.row .column_names - .get(self.col_idx as usize) + .get(self.col_idx) 
.and_then(|o| o.as_ref().map(|s| s.as_ref())) } diff --git a/diesel/src/sqlite/connection/raw.rs b/diesel/src/sqlite/connection/raw.rs index bf5e910d3fab..57d6a5fbcf40 100644 --- a/diesel/src/sqlite/connection/raw.rs +++ b/diesel/src/sqlite/connection/raw.rs @@ -80,8 +80,12 @@ impl RawConnection { ensure_sqlite_ok(result, self.internal_connection.as_ptr()) } - pub(super) fn rows_affected_by_last_query(&self) -> usize { - unsafe { ffi::sqlite3_changes(self.internal_connection.as_ptr()) as usize } + pub(super) fn rows_affected_by_last_query( + &self, + ) -> Result> { + let r = unsafe { ffi::sqlite3_changes(self.internal_connection.as_ptr()) }; + + Ok(r.try_into()?) } pub(super) fn register_sql_function( @@ -105,12 +109,15 @@ impl RawConnection { })); let fn_name = Self::get_fn_name(fn_name)?; let flags = Self::get_flags(deterministic); + let num_args = num_args + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; let result = unsafe { ffi::sqlite3_create_function_v2( self.internal_connection.as_ptr(), fn_name.as_ptr(), - num_args as _, + num_args, flags, callback_fn as *mut _, Some(run_custom_function::), @@ -136,12 +143,15 @@ impl RawConnection { { let fn_name = Self::get_fn_name(fn_name)?; let flags = Self::get_flags(false); + let num_args = num_args + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; let result = unsafe { ffi::sqlite3_create_function_v2( self.internal_connection.as_ptr(), fn_name.as_ptr(), - num_args as _, + num_args, flags, ptr::null_mut(), None, @@ -195,11 +205,19 @@ impl RawConnection { &mut size as *mut _, 0, ); - SerializedDatabase::new(data_ptr, size as usize) + SerializedDatabase::new( + data_ptr, + size.try_into() + .expect("Cannot fit the serialized database into memory"), + ) } } pub(super) fn deserialize(&mut self, data: &[u8]) -> QueryResult<()> { + let db_size = data + .len() + .try_into() + .map_err(|e| Error::DeserializationError(Box::new(e)))?; // the cast for `ffi::SQLITE_DESERIALIZE_READONLY` 
is required for old libsqlite3-sys versions #[allow(clippy::unnecessary_cast)] unsafe { @@ -207,8 +225,8 @@ impl RawConnection { self.internal_connection.as_ptr(), std::ptr::null(), data.as_ptr() as *mut u8, - data.len() as i64, - data.len() as i64, + db_size, + db_size, ffi::SQLITE_DESERIALIZE_READONLY as u32, ); @@ -402,6 +420,9 @@ where static NULL_CTX_ERR: &str = "We've written the aggregator to the aggregate context, but it could not be retrieved."; + let n_bytes: i32 = std::mem::size_of::>() + .try_into() + .expect("Aggregate context should be larger than 2^32"); let aggregate_context = unsafe { // This block of unsafe code makes the following assumptions: // @@ -424,7 +445,7 @@ where // the memory will have a correct alignment. // (Note I(weiznich): would assume that it is aligned correctly, but we // we cannot guarantee it, so better be safe than sorry) - ffi::sqlite3_aggregate_context(ctx, std::mem::size_of::>() as i32) + ffi::sqlite3_aggregate_context(ctx, n_bytes) }; let aggregate_context = NonNull::new(aggregate_context as *mut OptionalAggregator); let aggregator = unsafe { @@ -486,9 +507,10 @@ extern "C" fn run_aggregator_final_function { diff --git a/diesel/src/sqlite/connection/row.rs b/diesel/src/sqlite/connection/row.rs index 75cc27369e4d..5ff7787f4fee 100644 --- a/diesel/src/sqlite/connection/row.rs +++ b/diesel/src/sqlite/connection/row.rs @@ -145,7 +145,7 @@ impl<'stmt, 'query> Row<'stmt, Sqlite> for SqliteRow<'stmt, 'query> { let idx = self.idx(idx)?; Some(SqliteField { row: self.inner.borrow(), - col_idx: i32::try_from(idx).ok()?, + col_idx: idx, }) } @@ -178,15 +178,19 @@ impl<'stmt, 'idx, 'query> RowIndex<&'idx str> for SqliteRow<'stmt, 'query> { #[allow(missing_debug_implementations)] pub struct SqliteField<'stmt, 'query> { pub(super) row: Ref<'stmt, PrivateSqliteRow<'stmt, 'query>>, - pub(super) col_idx: i32, + pub(super) col_idx: usize, } impl<'stmt, 'query> Field<'stmt, Sqlite> for SqliteField<'stmt, 'query> { fn field_name(&self) -> 
Option<&str> { match &*self.row { - PrivateSqliteRow::Direct(stmt) => stmt.field_name(self.col_idx), + PrivateSqliteRow::Direct(stmt) => stmt.field_name( + self.col_idx + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ), PrivateSqliteRow::Duplicated { column_names, .. } => column_names - .get(self.col_idx as usize) + .get(self.col_idx) .and_then(|t| t.as_ref().map(|n| n as &str)), } } @@ -339,6 +343,7 @@ mod tests { #[test] #[cfg(feature = "returning_clauses_for_sqlite_3_35")] + #[allow(clippy::cast_sign_loss)] fn parallel_iter_with_error() { use crate::connection::Connection; use crate::connection::LoadConnection; diff --git a/diesel/src/sqlite/connection/sqlite_value.rs b/diesel/src/sqlite/connection/sqlite_value.rs index 7210d104bb4c..f0d1a4afbda7 100644 --- a/diesel/src/sqlite/connection/sqlite_value.rs +++ b/diesel/src/sqlite/connection/sqlite_value.rs @@ -49,12 +49,16 @@ unsafe impl Send for OwnedSqliteValue {} impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { pub(super) fn new( row: Ref<'row, PrivateSqliteRow<'stmt, 'query>>, - col_idx: i32, + col_idx: usize, ) -> Option> { let value = match &*row { - PrivateSqliteRow::Direct(stmt) => stmt.column_value(col_idx)?, + PrivateSqliteRow::Direct(stmt) => stmt.column_value( + col_idx + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + )?, PrivateSqliteRow::Duplicated { values, .. } => { - values.get(col_idx as usize).and_then(|v| v.as_ref())?.value + values.get(col_idx).and_then(|v| v.as_ref())?.value } }; @@ -71,13 +75,9 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { pub(super) fn from_owned_row( row: &'row OwnedSqliteRow, - col_idx: i32, + col_idx: usize, ) -> Option> { - let value = row - .values - .get(col_idx as usize) - .and_then(|v| v.as_ref())? 
- .value; + let value = row.values.get(col_idx).and_then(|v| v.as_ref())?.value; let ret = Self { _row: None, value }; if ret.value_type().is_none() { None @@ -90,7 +90,11 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { let s = unsafe { let ptr = ffi::sqlite3_value_text(self.value.as_ptr()); let len = ffi::sqlite3_value_bytes(self.value.as_ptr()); - let bytes = slice::from_raw_parts(ptr, len as usize); + let bytes = slice::from_raw_parts( + ptr, + len.try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ); // The string is guaranteed to be utf8 according to // https://www.sqlite.org/c3ref/value_blob.html str::from_utf8_unchecked(bytes) @@ -111,7 +115,11 @@ impl<'row, 'stmt, 'query> SqliteValue<'row, 'stmt, 'query> { // slices without elements from a pointer &[] } else { - slice::from_raw_parts(ptr as *const u8, len as usize) + slice::from_raw_parts( + ptr as *const u8, + len.try_into() + .expect("Diesel expects to run at least on a 32 bit platform"), + ) } } } diff --git a/diesel/src/sqlite/connection/statement_iterator.rs b/diesel/src/sqlite/connection/statement_iterator.rs index 393ec9e471c8..39ae81237ffc 100644 --- a/diesel/src/sqlite/connection/statement_iterator.rs +++ b/diesel/src/sqlite/connection/statement_iterator.rs @@ -104,7 +104,10 @@ impl<'stmt, 'query> Iterator for StatementIterator<'stmt, 'query> { Err(e) => Some(Err(e)), Ok(false) => None, Ok(true) => { - let field_count = stmt.column_count() as usize; + let field_count = stmt + .column_count() + .try_into() + .expect("Diesel expects to run at least on a 32 bit platform"); self.field_count = field_count; let inner = Rc::new(RefCell::new(PrivateSqliteRow::Direct(stmt))); self.inner = Started(inner.clone()); diff --git a/diesel/src/sqlite/connection/stmt.rs b/diesel/src/sqlite/connection/stmt.rs index 92b12465d772..99bb271918a3 100644 --- a/diesel/src/sqlite/connection/stmt.rs +++ b/diesel/src/sqlite/connection/stmt.rs @@ -27,13 +27,17 @@ impl Statement { ) -> 
QueryResult { let mut stmt = ptr::null_mut(); let mut unused_portion = ptr::null(); + let n_byte = sql + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // the cast for `ffi::SQLITE_PREPARE_PERSISTENT` is required for old libsqlite3-sys versions #[allow(clippy::unnecessary_cast)] let prepare_result = unsafe { ffi::sqlite3_prepare_v3( raw_connection.internal_connection.as_ptr(), CString::new(sql)?.as_ptr(), - sql.len() as libc::c_int, + n_byte, if matches!(is_cached, PrepareForCache::Yes) { ffi::SQLITE_PREPARE_PERSISTENT as u32 } else { @@ -68,16 +72,23 @@ impl Statement { ffi::sqlite3_bind_null(self.inner_statement.as_ptr(), bind_index) } (SqliteType::Binary, InternalSqliteBindValue::BorrowedBinary(bytes)) => { + let n = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; ffi::sqlite3_bind_blob( self.inner_statement.as_ptr(), bind_index, bytes.as_ptr() as *const libc::c_void, - bytes.len() as libc::c_int, + n, ffi::SQLITE_STATIC(), ) } (SqliteType::Binary, InternalSqliteBindValue::Binary(mut bytes)) => { - let len = bytes.len(); + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // We need a separate pointer here to pass it to sqlite // as the returned pointer is a pointer to a dyn sized **slice** // and not the pointer to the first element of the slice @@ -87,22 +98,29 @@ impl Statement { self.inner_statement.as_ptr(), bind_index, ptr as *const libc::c_void, - len as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } (SqliteType::Text, InternalSqliteBindValue::BorrowedString(bytes)) => { + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; ffi::sqlite3_bind_text( self.inner_statement.as_ptr(), bind_index, bytes.as_ptr() as *const libc::c_char, - bytes.len() as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } (SqliteType::Text, InternalSqliteBindValue::String(bytes)) => { let mut bytes = Box::<[u8]>::from(bytes); - let 
len = bytes.len(); + let len = bytes + .len() + .try_into() + .map_err(|e| Error::SerializationError(Box::new(e)))?; // We need a separate pointer here to pass it to sqlite // as the returned pointer is a pointer to a dyn sized **slice** // and not the pointer to the first element of the slice @@ -112,7 +130,7 @@ impl Statement { self.inner_statement.as_ptr(), bind_index, ptr as *const libc::c_char, - len as libc::c_int, + len, ffi::SQLITE_STATIC(), ) } @@ -481,7 +499,10 @@ impl<'stmt, 'query> StatementUse<'stmt, 'query> { pub(super) fn index_for_column_name(&mut self, field_name: &str) -> Option { (0..self.column_count()) .find(|idx| self.field_name(*idx) == Some(field_name)) - .map(|v| v as usize) + .map(|v| { + v.try_into() + .expect("Diesel expects to run at least on a 32 bit platform") + }) } pub(super) fn field_name(&self, idx: i32) -> Option<&str> { @@ -497,7 +518,7 @@ impl<'stmt, 'query> StatementUse<'stmt, 'query> { }); column_names - .get(idx as usize) + .get(usize::try_from(idx).expect("Diesel expects to run at least on a 32 bit platform")) .and_then(|c| unsafe { c.as_ref() }) } diff --git a/diesel/src/sqlite/types/date_and_time/chrono.rs b/diesel/src/sqlite/types/date_and_time/chrono.rs index a8df1a0099e8..5e600e76ecbf 100644 --- a/diesel/src/sqlite/types/date_and_time/chrono.rs +++ b/diesel/src/sqlite/types/date_and_time/chrono.rs @@ -73,7 +73,10 @@ fn parse_julian(julian_days: f64) -> Option { const EPOCH_IN_JULIAN_DAYS: f64 = 2_440_587.5; const SECONDS_IN_DAY: f64 = 86400.0; let timestamp = (julian_days - EPOCH_IN_JULIAN_DAYS) * SECONDS_IN_DAY; - let seconds = timestamp as i64; + #[allow(clippy::cast_possible_truncation)] // we want to truncate + let seconds = timestamp.trunc() as i64; + // that's not true, `fract` is always > 0 + #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)] let nanos = (timestamp.fract() * 1E9) as u32; #[allow(deprecated)] // otherwise we would need to bump our minimal chrono version 
NaiveDateTime::from_timestamp_opt(seconds, nanos) diff --git a/diesel/src/sqlite/types/date_and_time/mod.rs b/diesel/src/sqlite/types/date_and_time/mod.rs index 09330d0d7c61..0725a3705a07 100644 --- a/diesel/src/sqlite/types/date_and_time/mod.rs +++ b/diesel/src/sqlite/types/date_and_time/mod.rs @@ -94,6 +94,7 @@ impl ToSql for String { } #[cfg(all(test, feature = "chrono", feature = "time"))] +#[allow(clippy::cast_possible_truncation)] // it's a test mod tests { extern crate chrono; extern crate time; diff --git a/diesel/src/sqlite/types/date_and_time/time.rs b/diesel/src/sqlite/types/date_and_time/time.rs index ea97bcd0c1b1..5d88ad93688c 100644 --- a/diesel/src/sqlite/types/date_and_time/time.rs +++ b/diesel/src/sqlite/types/date_and_time/time.rs @@ -131,6 +131,7 @@ fn parse_julian(julian_days: f64) -> Result { const EPOCH_IN_JULIAN_DAYS: f64 = 2_440_587.5; const SECONDS_IN_DAY: f64 = 86400.0; let timestamp = (julian_days - EPOCH_IN_JULIAN_DAYS) * SECONDS_IN_DAY; + #[allow(clippy::cast_possible_truncation)] // we multiply by 1E9 to prevent that OffsetDateTime::from_unix_timestamp_nanos((timestamp * 1E9) as i128).map(naive_utc) } diff --git a/diesel/src/sqlite/types/mod.rs b/diesel/src/sqlite/types/mod.rs index 90db3d3ab2d9..88ef5f8cde26 100644 --- a/diesel/src/sqlite/types/mod.rs +++ b/diesel/src/sqlite/types/mod.rs @@ -54,6 +54,7 @@ impl Queryable for *const [u8] { } #[cfg(feature = "sqlite")] +#[allow(clippy::cast_possible_truncation)] // we want to truncate here impl FromSql for i16 { fn from_sql(value: SqliteValue<'_, '_, '_>) -> deserialize::Result { Ok(value.read_integer() as i16) @@ -82,6 +83,7 @@ impl FromSql for i64 { } #[cfg(feature = "sqlite")] +#[allow(clippy::cast_possible_truncation)] // we want to truncate here impl FromSql for f32 { fn from_sql(value: SqliteValue<'_, '_, '_>) -> deserialize::Result { Ok(value.read_double() as f32) diff --git a/diesel_cli/Cargo.toml b/diesel_cli/Cargo.toml index 00437b7754e9..dd9f940cd000 100644 --- 
a/diesel_cli/Cargo.toml +++ b/diesel_cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "diesel_cli" -version = "2.2.1" +version = "2.2.2" license = "MIT OR Apache-2.0" description = "Provides the CLI for the Diesel crate" readme = "README.md" diff --git a/diesel_cli/src/config.rs b/diesel_cli/src/config.rs index fb3fca7502fe..fa6a8d9ab463 100644 --- a/diesel_cli/src/config.rs +++ b/diesel_cli/src/config.rs @@ -458,6 +458,9 @@ impl PrintSchema { self.import_types.as_deref() } + // it's a false positive + // https://github.com/rust-lang/rust-clippy/issues/12856 + #[allow(clippy::needless_borrows_for_generic_args)] fn set_relative_path_base(&mut self, base: &Path) { if let Some(ref mut file) = self.file { if file.is_relative() { diff --git a/diesel_cli/src/errors.rs b/diesel_cli/src/errors.rs index 6c079e8a3176..88ced1c140fb 100644 --- a/diesel_cli/src/errors.rs +++ b/diesel_cli/src/errors.rs @@ -52,7 +52,7 @@ pub enum Error { FmtError(#[from] std::fmt::Error), #[error("Failed to parse patch file: {0}")] DiffyParseError(#[from] diffy::ParsePatchError), - #[error("Failed to apply path: {0}")] + #[error("Failed to apply patch: {0}")] DiffyApplyError(#[from] diffy::ApplyError), #[error("Column length literal can't be parsed as u64: {0}")] ColumnLiteralParseError(syn::Error), diff --git a/diesel_compile_tests/tests/fail/copy_can_only_use_options_with_raw_variant.stderr b/diesel_compile_tests/tests/fail/copy_can_only_use_options_with_raw_variant.stderr index 3c9f82c63e61..2d2f9623def0 100644 --- a/diesel_compile_tests/tests/fail/copy_can_only_use_options_with_raw_variant.stderr +++ b/diesel_compile_tests/tests/fail/copy_can_only_use_options_with_raw_variant.stderr @@ -1,11 +1,7 @@ error[E0599]: no method named `with_format` found for struct `CopyFromQuery>>` in the current scope --> tests/fail/copy_can_only_use_options_with_raw_variant.rs:49:10 | -47 | diesel::copy_from(users::table) - | ------------------------------- - | | - | _____method `with_format` is available on 
`CopyFromQuery` - | | +47 | / diesel::copy_from(users::table) 48 | | .from_insertable(vec![NewUser { name: "John" }]) 49 | | .with_format(CopyFormat::Csv) | | -^^^^^^^^^^^ method not found in `CopyFromQuery>>` diff --git a/diesel_compile_tests/tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.stderr b/diesel_compile_tests/tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.stderr index 2dd72fafbb74..f46370568073 100644 --- a/diesel_compile_tests/tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.stderr +++ b/diesel_compile_tests/tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.stderr @@ -6,49 +6,6 @@ error: All fields of tuple structs must be annotated with `#[diesel(column_name) | = note: this error originates in the derive macro `QueryableByName` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: cannot deserialize a value of the database type `_` as `i32` - --> tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.rs:4:10 - | -4 | #[derive(QueryableByName)] - | ^^^^^^^^^^^^^^^ the trait `FromSql<_, __DB>` is not implemented for `i32` - | - = note: double check your type mappings via the documentation of `_` - = help: the following other types implement trait `FromSql`: - > - > - > -note: required by a bound in `diesel::row::NamedRow::get` - --> $DIESEL/src/row.rs - | - | fn get(&self, column_name: &str) -> deserialize::Result - | --- required by a bound in this associated function - | where - | T: FromSql; - | ^^^^^^^^^^^^^^^ required by this bound in `NamedRow::get` - = note: this error originates in the derive macro `QueryableByName` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: cannot deserialize a value of the database type `_` as `*const str` - --> tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.rs:4:10 - | -4 | 
#[derive(QueryableByName)] - | ^^^^^^^^^^^^^^^ the trait `FromSql<_, __DB>` is not implemented for `*const str`, which is required by `std::string::String: FromSql<_, __DB>` - | - = note: double check your type mappings via the documentation of `_` - = help: the following other types implement trait `FromSql`: - <*const str as FromSql> - <*const str as FromSql> - <*const str as FromSql> - = note: required for `std::string::String` to implement `FromSql<_, __DB>` -note: required by a bound in `diesel::row::NamedRow::get` - --> $DIESEL/src/row.rs - | - | fn get(&self, column_name: &str) -> deserialize::Result - | --- required by a bound in this associated function - | where - | T: FromSql; - | ^^^^^^^^^^^^^^^ required by this bound in `NamedRow::get` - = note: this error originates in the derive macro `QueryableByName` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0433]: failed to resolve: use of undeclared crate or module `foos` --> tests/fail/derive/queryable_by_name_requires_table_name_or_sql_type_annotation.rs:5:8 | diff --git a/diesel_compile_tests/tests/fail/derive/selectable.stderr b/diesel_compile_tests/tests/fail/derive/selectable.stderr index 0feb1521e54b..faa86fa84608 100644 --- a/diesel_compile_tests/tests/fail/derive/selectable.stderr +++ b/diesel_compile_tests/tests/fail/derive/selectable.stderr @@ -40,3 +40,12 @@ error[E0425]: cannot find value `non_existing` in module `users` | 33 | select_expression = (users::id, users::non_existing), | ^^^^^^^^^^^^ not found in `users` + +error[E0308]: mismatched types + --> tests/fail/derive/selectable.rs:38:29 + | +38 | select_expression = (users::id + 45), + | ^^^^^^^^^^^^^^^^ expected `id`, found `Add>` + | + = note: expected struct `columns::id` + found struct `diesel::expression::ops::numeric::Add>` diff --git a/diesel_compile_tests/tests/fail/derive_deprecated/deprecated_sql_type.stderr b/diesel_compile_tests/tests/fail/derive_deprecated/deprecated_sql_type.stderr index 
5b5592b5bf6e..94c66d44a278 100644 --- a/diesel_compile_tests/tests/fail/derive_deprecated/deprecated_sql_type.stderr +++ b/diesel_compile_tests/tests/fail/derive_deprecated/deprecated_sql_type.stderr @@ -59,24 +59,3 @@ error[E0412]: cannot find type `foo` in this scope | 27 | #[sql_type = "foo"] | ^^^^^ not found in this scope - -error[E0277]: cannot deserialize a value of the database type `_` as `i32` - --> tests/fail/derive_deprecated/deprecated_sql_type.rs:25:10 - | -25 | #[derive(QueryableByName)] - | ^^^^^^^^^^^^^^^ the trait `FromSql<_, __DB>` is not implemented for `i32` - | - = note: double check your type mappings via the documentation of `_` - = help: the following other types implement trait `FromSql`: - > - > - > -note: required by a bound in `diesel::row::NamedRow::get` - --> $DIESEL/src/row.rs - | - | fn get(&self, column_name: &str) -> deserialize::Result - | --- required by a bound in this associated function - | where - | T: FromSql; - | ^^^^^^^^^^^^^^^ required by this bound in `NamedRow::get` - = note: this error originates in the derive macro `QueryableByName` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/diesel_compile_tests/tests/fail/only_only_on_table.stderr b/diesel_compile_tests/tests/fail/only_only_on_table.stderr index 21696e399b5f..9aa70f669dac 100644 --- a/diesel_compile_tests/tests/fail/only_only_on_table.stderr +++ b/diesel_compile_tests/tests/fail/only_only_on_table.stderr @@ -74,4 +74,4 @@ note: the trait `Table` must be implemented = note: the following trait defines an item `only`, perhaps you need to implement it: candidate #1: `diesel::dsl::OnlyDsl` -error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:400:21: relating different kinds: diesel::pg::Pg '?2 +error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:364:21: relating different kinds: diesel::pg::Pg '?2 diff --git a/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_mysql.stderr 
b/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_mysql.stderr index a6d2eb13fc2f..a3db46ffcddb 100644 --- a/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_mysql.stderr +++ b/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_mysql.stderr @@ -1 +1 @@ -error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:400:21: relating different kinds: diesel::pg::Pg '?2 +error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:364:21: relating different kinds: diesel::pg::Pg '?2 diff --git a/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_sqlite.stderr b/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_sqlite.stderr index a6d2eb13fc2f..a3db46ffcddb 100644 --- a/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_sqlite.stderr +++ b/diesel_compile_tests/tests/fail/pg_specific_tablesample_cannot_be_used_on_sqlite.stderr @@ -1 +1 @@ -error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:400:21: relating different kinds: diesel::pg::Pg '?2 +error: internal compiler error: compiler/rustc_infer/src/infer/at.rs:364:21: relating different kinds: diesel::pg::Pg '?2 diff --git a/diesel_derives/Cargo.toml b/diesel_derives/Cargo.toml index d01d52dbca5b..2662ef343b6b 100644 --- a/diesel_derives/Cargo.toml +++ b/diesel_derives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "diesel_derives" -version = "2.2.2" +version = "2.2.3" license = "MIT OR Apache-2.0" description = "You should not use this crate directly, it is internal to Diesel." 
documentation = "https://diesel.rs/guides/" diff --git a/diesel_derives/src/lib.rs b/diesel_derives/src/lib.rs index 2c909941de92..ab505d02582c 100644 --- a/diesel_derives/src/lib.rs +++ b/diesel_derives/src/lib.rs @@ -467,9 +467,9 @@ pub fn derive_query_id(input: TokenStream) -> TokenStream { /// your struct__ matches __all fields in the query__, including the order and /// count. This means that field order is significant if you're using /// `#[derive(Queryable)]`. __Field name has no effect__. If you see errors while -/// loading data into a struct that derives `Queryable`: Consider using [`#[derive(Selectable)]`] -/// + `#[diesel(check_for_backend(YourBackendType))]` to check for mismatching fields at -/// compile-time. +/// loading data into a struct that derives `Queryable`: Consider using +/// [`#[derive(Selectable)]`] + `#[diesel(check_for_backend(YourBackendType))]` +/// to check for mismatching fields at compile-time. /// /// To provide custom deserialization behavior for a field, you can use /// `#[diesel(deserialize_as = SomeType)]`. 
If this attribute is present, Diesel diff --git a/diesel_derives/src/queryable_by_name.rs b/diesel_derives/src/queryable_by_name.rs index 16763059a70b..7848e20ed58c 100644 --- a/diesel_derives/src/queryable_by_name.rs +++ b/diesel_derives/src/queryable_by_name.rs @@ -23,12 +23,13 @@ pub fn derive(item: DeriveInput) -> Result { if f.embed() { Ok(quote!(<#field_ty as QueryableByName<__DB>>::build(row)?)) } else { + let st = sql_type(f, &model)?; let deserialize_ty = f.ty_for_deserialize(); let name = f.column_name()?; let name = LitStr::new(&name.to_string(), name.span()); Ok(quote!( { - let field = diesel::row::NamedRow::get(row, #name)?; + let field = diesel::row::NamedRow::get::<#st, #deserialize_ty>(row, #name)?; <#deserialize_ty as Into<#field_ty>>::into(field) } )) diff --git a/diesel_derives/src/table.rs b/diesel_derives/src/table.rs index 90c7f38bac36..90ee6d161290 100644 --- a/diesel_derives/src/table.rs +++ b/diesel_derives/src/table.rs @@ -211,7 +211,7 @@ pub(crate) fn expand(input: TableDecl) -> TokenStream { quote::quote! { #(#meta)* - #[allow(unused_imports, dead_code, unreachable_pub)] + #[allow(unused_imports, dead_code, unreachable_pub, unused_qualifications)] pub mod #table_name { use ::diesel; pub use self::columns::*; diff --git a/diesel_derives/tests/queryable_by_name.rs b/diesel_derives/tests/queryable_by_name.rs index d865ebba8804..4135935b4b2f 100644 --- a/diesel_derives/tests/queryable_by_name.rs +++ b/diesel_derives/tests/queryable_by_name.rs @@ -12,6 +12,15 @@ table! { } } +#[cfg(feature = "sqlite")] +table! 
{ + multiple_sql_types_for_text { + id -> Integer, + string -> Text, + time -> Timestamp, + } +} + #[test] fn named_struct_definition() { #[derive(Debug, Clone, Copy, PartialEq, Eq, QueryableByName)] @@ -86,8 +95,50 @@ fn struct_with_path_in_name() { ); } -// FIXME: Test usage with renamed columns +#[cfg(feature = "sqlite")] +#[test] +fn struct_with_multiple_sql_types_for_text() { + #[derive(Debug, PartialEq, QueryableByName)] + struct MultipleSqlTypesForText { + #[diesel(sql_type = diesel::sql_types::Text)] + string: String, + #[diesel(sql_type = diesel::sql_types::Timestamp)] + time: String, + } + + let conn = &mut connection(); + let data = sql_query("SELECT 'name' AS string, '2024-07-31T21:09:00' AS time").get_result(conn); + assert_eq!( + Ok(MultipleSqlTypesForText { + string: "name".into(), + time: "2024-07-31T21:09:00".into() + }), + data + ); +} +#[cfg(feature = "sqlite")] +#[test] +fn struct_with_multiple_sql_types_for_text_from_table() { + #[derive(Debug, PartialEq, QueryableByName)] + #[diesel(table_name = multiple_sql_types_for_text)] + struct MultipleSqlTypesForText { + string: String, + time: String, + } + + let conn = &mut connection(); + let data = sql_query("SELECT 'name' AS string, '2024-07-31T21:09:00' AS time").get_result(conn); + assert_eq!( + Ok(MultipleSqlTypesForText { + string: "name".into(), + time: "2024-07-31T21:09:00".into() + }), + data + ); +} + +// FIXME: Test usage with renamed columns #[test] fn struct_with_no_table() { #[derive(Debug, Clone, Copy, PartialEq, Eq, QueryableByName)] diff --git a/examples/mysql/all_about_inserts/src/lib.rs b/examples/mysql/all_about_inserts/src/lib.rs index 39ba19f7cd89..59b7ef05ab34 100644 --- a/examples/mysql/all_about_inserts/src/lib.rs +++ b/examples/mysql/all_about_inserts/src/lib.rs @@ -30,7 +30,7 @@ pub struct UserForm<'a> { } #[derive(Queryable, PartialEq, Debug)] -struct User { +pub struct User { id: i32, name: String, hair_color: Option, diff --git 
a/examples/postgres/all_about_inserts/src/lib.rs b/examples/postgres/all_about_inserts/src/lib.rs index 23eb4e201ecf..664ebde1d7ff 100644 --- a/examples/postgres/all_about_inserts/src/lib.rs +++ b/examples/postgres/all_about_inserts/src/lib.rs @@ -30,7 +30,7 @@ pub struct UserForm<'a> { } #[derive(Queryable, PartialEq, Debug)] -struct User { +pub struct User { id: i32, name: String, hair_color: Option, diff --git a/examples/sqlite/all_about_inserts/src/lib.rs b/examples/sqlite/all_about_inserts/src/lib.rs index 3a29eef3ae68..562cf8bb3092 100644 --- a/examples/sqlite/all_about_inserts/src/lib.rs +++ b/examples/sqlite/all_about_inserts/src/lib.rs @@ -30,7 +30,7 @@ pub struct UserForm<'a> { } #[derive(Queryable, PartialEq, Debug)] -struct User { +pub struct User { id: i32, name: String, hair_color: Option, diff --git a/rust-toolchain b/rust-toolchain index b3a8c61e6a86..aaceec04e040 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.79.0 +1.80.0