From c7c78fed314312a4d9a9f0ae09867510f5c0a9a1 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Tue, 2 Aug 2022 19:54:37 +0900 Subject: [PATCH] Add generic Atomic type --- .github/workflows/ci.yml | 2 + Cargo.toml | 27 +- README.md | 22 +- build.rs | 7 + portable-atomic-derive/Cargo.toml | 26 + portable-atomic-derive/LICENSE-APACHE | 177 +++ portable-atomic-derive/LICENSE-MIT | 23 + portable-atomic-derive/src/attr.rs | 391 ++++++ portable-atomic-derive/src/derive.rs | 426 +++++++ portable-atomic-derive/src/error.rs | 14 + portable-atomic-derive/src/lib.rs | 39 + src/generic/impls.rs | 1135 +++++++++++++++++ src/generic/mod.rs | 751 +++++++++++ src/lib.rs | 80 +- src/tests/generic.rs | 23 + src/tests/mod.rs | 2 + tests/compiletest.rs | 18 + .../expand/struct-named-multiple.expanded.rs | 99 ++ tests/expand/struct-named-multiple.rs | 10 + tests/expand/struct-named-single.expanded.rs | 48 + tests/expand/struct-named-single.rs | 8 + tests/expand/union-multiple.expanded.rs | 56 + tests/expand/union-multiple.rs | 10 + tests/expand/union-single.expanded.rs | 48 + tests/expand/union-single.rs | 8 + tests/expandtest.rs | 43 + tests/test.rs | 41 + tests/ui/enum-invalid.rs | 8 + tests/ui/enum-invalid.stderr | 5 + tests/ui/struct-invalid.rs | 18 + tests/ui/struct-invalid.stderr | 11 + tests/ui/struct-large.rs | 10 + tests/ui/struct-large.stderr | 5 + tests/ui/union-different-size1.rs | 10 + tests/ui/union-different-size1.stderr | 7 + tests/ui/union-different-size2.rs | 10 + tests/ui/union-different-size2.stderr | 7 + tests/ui/union-invalid.rs | 11 + tests/ui/union-invalid.stderr | 5 + tests/ui/union-no-field.rs | 8 + tests/ui/union-no-field.stderr | 11 + tests/ui/union-not-transmutable.rs | 15 + tests/ui/union-not-transmutable.stderr | 21 + tools/build.sh | 4 +- 44 files changed, 3682 insertions(+), 18 deletions(-) create mode 100644 portable-atomic-derive/Cargo.toml create mode 100644 portable-atomic-derive/LICENSE-APACHE create mode 100644 portable-atomic-derive/LICENSE-MIT 
create mode 100644 portable-atomic-derive/src/attr.rs create mode 100644 portable-atomic-derive/src/derive.rs create mode 100644 portable-atomic-derive/src/error.rs create mode 100644 portable-atomic-derive/src/lib.rs create mode 100644 src/generic/impls.rs create mode 100644 src/generic/mod.rs create mode 100644 src/tests/generic.rs create mode 100644 tests/compiletest.rs create mode 100644 tests/expand/struct-named-multiple.expanded.rs create mode 100644 tests/expand/struct-named-multiple.rs create mode 100644 tests/expand/struct-named-single.expanded.rs create mode 100644 tests/expand/struct-named-single.rs create mode 100644 tests/expand/union-multiple.expanded.rs create mode 100644 tests/expand/union-multiple.rs create mode 100644 tests/expand/union-single.expanded.rs create mode 100644 tests/expand/union-single.rs create mode 100644 tests/expandtest.rs create mode 100644 tests/test.rs create mode 100644 tests/ui/enum-invalid.rs create mode 100644 tests/ui/enum-invalid.stderr create mode 100644 tests/ui/struct-invalid.rs create mode 100644 tests/ui/struct-invalid.stderr create mode 100644 tests/ui/struct-large.rs create mode 100644 tests/ui/struct-large.stderr create mode 100644 tests/ui/union-different-size1.rs create mode 100644 tests/ui/union-different-size1.stderr create mode 100644 tests/ui/union-different-size2.rs create mode 100644 tests/ui/union-different-size2.stderr create mode 100644 tests/ui/union-invalid.rs create mode 100644 tests/ui/union-invalid.stderr create mode 100644 tests/ui/union-no-field.rs create mode 100644 tests/ui/union-no-field.stderr create mode 100644 tests/ui/union-not-transmutable.rs create mode 100644 tests/ui/union-not-transmutable.stderr diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b2b8d01f..4c66f8296 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -101,6 +101,8 @@ jobs: components: rust-src - uses: taiki-e/install-action@cargo-hack - uses: 
taiki-e/install-action@cargo-minimal-versions + - uses: dtolnay/install@cargo-expand + if: startsWith(matrix.rust, 'nightly') && matrix.target == '' && matrix.os == '' - uses: taiki-e/setup-cross-toolchain-action@v1 with: target: ${{ matrix.target }} diff --git a/Cargo.toml b/Cargo.toml index f617f9444..731b5a3f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ keywords = ["atomic"] categories = ["concurrency", "data-structures", "embedded", "hardware-support", "no-std"] exclude = ["/.*", "/tools", "/target-specs"] description = """ -Portable atomic types including support for 128-bit atomics, atomic float, etc. +Portable atomic types including support for 128-bit atomics, atomic float, generic atomic type, etc. """ [package.metadata.docs.rs] @@ -20,6 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [workspace] members = [ "bench", + "portable-atomic-derive", "tests/no-std", ] @@ -45,15 +46,28 @@ outline-atomics = [] # Note that most of `fetch_*` operations of atomic floats are implemented using CAS loops, which can be slower than equivalent operations of atomic integers. float = [] -# TODO -# # Provides generic `atomic` type. -# generic = [] +# Provides generic `Atomic` type. +# +# Note: +# - This implicitly enables the `fallback` feature. +generic = ["fallback"] + +# Provide derive(Atomicable). +derive = ["portable-atomic-derive"] + +# Use `alloc`. +alloc = [] # Use `std`. -std = [] +# +# Note: +# - This implicitly enables the `alloc` feature. +std = ["alloc"] # Note: serde is public dependencies. [dependencies] +portable-atomic-derive = { path = "portable-atomic-derive", version = "=0.2.1", optional = true } + # Implements serde::{Serialize,Deserialize} for atomic types. 
# # Note: @@ -63,9 +77,12 @@ serde = { version = "1.0.103", optional = true, default-features = false } [dev-dependencies] crossbeam-utils = "0.8" fastrand = "1" +macrotest = "1.0.9" paste = "1" quickcheck = { default-features = false, git = "https://github.com/taiki-e/quickcheck.git", branch = "dev" } # https://github.com/BurntSushi/quickcheck/pull/304 + https://github.com/BurntSushi/quickcheck/pull/282 + lower MSRV +rustversion = "1" serde = { version = "1", features = ["derive"] } serde_test = "1" sptr = "0.3" static_assertions = "1" +trybuild = "1" diff --git a/README.md b/README.md index 5146bca78..f62659888 100644 --- a/README.md +++ b/README.md @@ -7,12 +7,12 @@ [![build status](https://img.shields.io/github/workflow/status/taiki-e/portable-atomic/CI/main?style=flat-square&logo=github)](https://github.com/taiki-e/portable-atomic/actions) [![build status](https://img.shields.io/cirrus/github/taiki-e/portable-atomic/main?style=flat-square&logo=cirrusci)](https://cirrus-ci.com/github/taiki-e/portable-atomic) -Portable atomic types including support for 128-bit atomics, atomic float, etc. +Portable atomic types including support for 128-bit atomics, atomic float, generic atomic type, etc. - Provide all atomic integer types (`Atomic{I,U}{8,16,32,64}`) for all targets that can use atomic CAS. (i.e., all targets that can use `std`, and most no-std targets) - Provide `AtomicI128` and `AtomicU128`. - Provide `AtomicF32` and `AtomicF64`. (optional) - +- Provide generic `Atomic` type. (optional) - Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr) - Provide atomic CAS for targets where atomic CAS is not available in the standard library. 
(thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg)) - Provide stable equivalents of the standard library atomic types' unstable APIs, such as [`AtomicPtr::fetch_*`](https://github.com/rust-lang/rust/issues/99108), [`AtomicBool::fetch_not`](https://github.com/rust-lang/rust/issues/98485). @@ -26,6 +26,13 @@ On x86_64, when the `outline-atomics` optional feature is not enabled and `cmpxc See [this list](https://github.com/taiki-e/portable-atomic/issues/10#issuecomment-1159368067) for details. +## Generic Atomic\<T> type + +- Support for various operations on `Copy` types and support for swap and store on non-`Copy` types. +- Support for primitives, immutable references, function pointers, `NonNull`, `NonZero`, etc. +- Support for user-defined structs and enums, including those with multiple fields and padding. (via `#[derive(Atomicable)]`) +- Support for user-defined unions. (via `#[derive(Atomicable)]`) + ## Optional features - **`fallback`** *(enabled by default)*
@@ -49,14 +56,21 @@ See [this list](https://github.com/taiki-e/portable-atomic/issues/10#issuecommen Provide `AtomicF{32,64}`. Note that most of `fetch_*` operations of atomic floats are implemented using CAS loops, which can be slower than equivalent operations of atomic integers. - + + Note: + - This implicitly enables the `fallback` feature. + +- **`alloc`**
+ Use `alloc`. - **`std`**
Use `std`. + Note: + - This implicitly enables the `alloc` feature. + - **`serde`**
Implement `serde::{Serialize,Deserialize}` for atomic types. diff --git a/build.rs b/build.rs index 04febd375..2688e7ab6 100644 --- a/build.rs +++ b/build.rs @@ -59,6 +59,9 @@ fn main() { // Note that this is `no_`*, not `has_*`. This allows treating as the latest // stable rustc is used when the build script doesn't run. This is useful // for non-cargo build systems that don't run the build script. + if version.minor < 36 { + println!("cargo:rustc-cfg=portable_atomic_no_alloc"); + } // underscore_const_names stabilized in Rust 1.37 (nightly-2019-06-18): https://github.com/rust-lang/rust/pull/61347 if !version.probe(37, 2019, 6, 17) { println!("cargo:rustc-cfg=portable_atomic_no_underscore_consts"); @@ -79,6 +82,10 @@ fn main() { if !version.probe(59, 2021, 12, 15) { println!("cargo:rustc-cfg=portable_atomic_no_asm"); } + // const_fn_trait_bound stabilized in Rust 1.61 (nightly-2022-03-08): https://github.com/rust-lang/rust/pull/93827 + if !version.probe(61, 2022, 3, 7) { + println!("cargo:rustc-cfg=portable_atomic_no_const_fn_trait_bound"); + } // aarch64_target_feature stabilized in Rust 1.61 (nightly-2022-03-16): https://github.com/rust-lang/rust/pull/90621 if !version.probe(61, 2022, 3, 15) { println!("cargo:rustc-cfg=portable_atomic_no_aarch64_target_feature"); diff --git a/portable-atomic-derive/Cargo.toml b/portable-atomic-derive/Cargo.toml new file mode 100644 index 000000000..eb949c020 --- /dev/null +++ b/portable-atomic-derive/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "portable-atomic-derive" +version = "0.2.1" +edition = "2018" +rust-version = "1.34" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/taiki-e/portable-atomic" +keywords = ["atomic"] +categories = ["concurrency", "data-structures", "embedded", "hardware-support", "no-std"] +description = """ +Implementation detail of the `portable-atomic` crate. 
+""" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1.0" +quote = "1.0" +syn = { version = "1.0.60" } + +[dev-dependencies] +portable-atomic = { path = ".." } diff --git a/portable-atomic-derive/LICENSE-APACHE b/portable-atomic-derive/LICENSE-APACHE new file mode 100644 index 000000000..f433b1a53 --- /dev/null +++ b/portable-atomic-derive/LICENSE-APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/portable-atomic-derive/LICENSE-MIT b/portable-atomic-derive/LICENSE-MIT new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/portable-atomic-derive/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/portable-atomic-derive/src/attr.rs b/portable-atomic-derive/src/attr.rs new file mode 100644 index 000000000..2bd393178 --- /dev/null +++ b/portable-atomic-derive/src/attr.rs @@ -0,0 +1,391 @@ +use std::{cell::RefCell, fmt::Write as _, thread}; + +use proc_macro2::Span; +use syn::{spanned::Spanned, Error, Fields, Meta}; + +const ATTR_NAME: &str = "atomic"; + +static INTEGER_ATTRS: &[&str] = + &["i8", "u8", "i16", "u16", "i32", "u32", "i64", "u64", "i128", "u128", "isize", "usize"]; + +// All #[atomic] attributes. 
+static ATTRS: &[AttrDef] = &[ + // #[atomic(i8)] + AttrDef { + name: "i8", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(u8)] + AttrDef { + name: "u8", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(i16)] + AttrDef { + name: "i16", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(u16)] + AttrDef { + name: "u16", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(i32)] + AttrDef { + name: "i32", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(u32)] + AttrDef { + name: "u32", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(i64)] + AttrDef { + name: "i64", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(u64)] + AttrDef { + name: "u64", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(i128)] + AttrDef { + name: "i128", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(u128)] + AttrDef { + name: "u128", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(isize)] + AttrDef { + name: "isize", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: &[MetaStyle::Ident], + }, + // #[atomic(usize)] + AttrDef { + name: "usize", + conflicts_with: INTEGER_ATTRS, + position: &[Position::Struct, Position::Enum], + style: 
&[MetaStyle::Ident], + }, +]; + +#[derive(Debug)] +pub(crate) struct Attrs { + pub(super) repr: Option<(&'static str, Span)>, +} + +pub(crate) fn parse_attrs(cx: &Context, attrs: &[syn::Attribute], pos: Position) -> Attrs { + let mut repr = None; + + let attrs = filter_attrs(cx, attrs, pos); + for (def, meta) in &attrs { + if def.late_check(cx, &attrs) { + continue; + } + match def.name { + "i8" | "u8" | "i16" | "u16" | "i32" | "u32" | "i64" | "u64" | "i128" | "u128" + | "isize" | "usize" => repr = Some((def.name, meta.span())), + + _ => unreachable!("{}", def.name), + } + } + + Attrs { repr } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum Position { + // TODO: kind: struct, tuple, or unit + Struct, + Enum, + Union, + // TODO: kind of variants: struct, tuple, or unit + #[allow(dead_code)] + Variant, + NamedField, + UnnamedField, + UnionField, +} + +impl Position { + #[allow(clippy::trivially_copy_pass_by_ref)] + fn as_str(&self) -> &'static str { + match self { + Position::Struct => "struct", + Position::Enum => "enum", + Position::Union => "union", + Position::Variant => "variant", + Position::NamedField => "named field", + Position::UnnamedField => "unnamed field", + Position::UnionField => "field", + } + } + + fn is_field(self) -> bool { + self == Position::NamedField || self == Position::UnnamedField + } +} + +impl From<&Fields> for Position { + fn from(meta: &Fields) -> Self { + match meta { + Fields::Named(..) => Position::NamedField, + Fields::Unnamed(..) 
| Fields::Unit => Position::UnnamedField, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum MetaStyle { + // #[atomic()] + Ident, + // #[atomic( = ...)] + NamedValue, + // #[atomic((...))] + List, +} + +impl MetaStyle { + pub(crate) fn format(self, name: &str) -> String { + match self { + MetaStyle::Ident => name.to_owned(), + MetaStyle::List => format!("{}(...)", name), + MetaStyle::NamedValue => format!("{} = \"...\"", name), + } + } +} + +impl From<&Meta> for MetaStyle { + fn from(meta: &Meta) -> Self { + match meta { + Meta::Path(..) => MetaStyle::Ident, + Meta::List(..) => MetaStyle::List, + Meta::NameValue(..) => MetaStyle::NamedValue, + } + } +} + +#[derive(Debug)] +struct AttrDef { + name: &'static str, + conflicts_with: &'static [&'static str], + // allowed positions. + position: &'static [Position], + // allowed styles. + style: &'static [MetaStyle], +} + +impl AttrDef { + /// Performs checks that can be performed without parsing other attributes, + /// and returns `true` if at least one error occurs. 
+ fn early_check(&self, cx: &Context, pos: Position, meta: &Meta) -> bool { + let mut has_error = false; + if let Err(msg) = self.check_position(pos) { + cx.error(format_err!(meta, msg)); + has_error = true; + } + if let Err(msg) = self.check_style(meta) { + cx.error(format_err!(meta, msg)); + has_error = true; + } + has_error + } + + fn check_position(&self, pos: Position) -> Result<(), String> { + if self.position.contains(&pos) { + return Ok(()); + } + let mut msg = format!("#[{}({})] may only be used on ", ATTR_NAME, self.name); + // TODO: simplify if possible + // len == 1: a + // len == 2: a and b + // len > 2: a, b, and c + let position = if self.position.contains(&Position::NamedField) + && self.position.contains(&Position::UnnamedField) + { + let mut position: Vec<_> = + self.position.iter().filter(|p| !p.is_field()).map(Position::as_str).collect(); + position.push("field"); + position + } else { + self.position.iter().map(Position::as_str).collect() + }; + let len = position.len(); + for (i, p) in position.iter().enumerate() { + if i != 0 { + if len == 2 { + msg.push_str(" and "); + } else { + msg.push_str(", "); + if i == len - 1 { + msg.push_str("and "); + } + } + } + msg.push_str(p); + msg.push('s'); + } + Err(msg) + } + + fn check_style(&self, meta: &Meta) -> Result<(), String> { + let meta = MetaStyle::from(meta); + if self.style.contains(&meta) { + return Ok(()); + } + let mut msg = "expected ".to_owned(); + let mut first = true; + for style in self.style { + if first { + first = false; + } else { + msg.push_str(" or "); + } + let _ = write!(msg, "`#[{}({})]`", ATTR_NAME, style.format(self.name)); + } + msg.push_str(", found "); + let _ = write!(msg, "`#[{}({})]`", ATTR_NAME, meta.format(self.name)); + Err(msg) + } + + /// Performs checks that can be performed after parsing all attributes in + /// the same scope and parent scopes, and returns `true` if at least one + /// error occurs. 
+ fn late_check(&self, cx: &Context, attrs: &[(&AttrDef, Meta)]) -> bool { + let mut has_error = false; + for (def, meta) in attrs { + if def.name != self.name && self.conflicts_with.contains(&def.name) { + let msg = format!( + "#[{0}({1})] may not be used together with #[{0}({2})]", + ATTR_NAME, self.name, def.name + ); + cx.error(format_err!(meta.path(), msg)); + has_error = true; + } + } + has_error + } +} + +#[derive(Debug)] +pub(crate) struct Context { + // - `None`: during checking. + // - `Some(None)`: there are no errors. + // - `Some(Some)`: there are errors. + #[allow(clippy::option_option)] + error: RefCell>>, +} + +impl Context { + pub(crate) fn error(&self, e: Error) { + match self.error.borrow_mut().as_mut().unwrap() { + Some(base) => base.combine(e), + error @ None => *error = Some(e), + } + } + + pub(crate) fn check(self) -> Result<(), Error> { + match self.error.borrow_mut().take().unwrap() { + Some(e) => Err(e), + None => Ok(()), + } + } +} + +impl Default for Context { + fn default() -> Self { + Self { error: RefCell::new(Some(None)) } + } +} + +impl Drop for Context { + fn drop(&mut self) { + if !thread::panicking() && self.error.borrow().is_some() { + panic!("context need to be checked"); + } + } +} + +fn filter_attrs<'a>( + cx: &'a Context, + attrs: &'a [syn::Attribute], + pos: Position, +) -> Vec<(&'static AttrDef, Meta)> { + let mut counter = vec![0; ATTRS.len()]; + attrs + .iter() + .filter(|attr| attr.path.is_ident(ATTR_NAME)) + .filter_map(move |attr| match attr.parse_meta() { + Ok(Meta::List(l)) => Some(l.nested), + Ok(m) => { + cx.error(format_err!(m, "expected `#[{}(...)]`", ATTR_NAME)); + None + } + Err(e) => { + cx.error(e); + None + } + }) + .flatten() + .filter_map(move |m| match m { + syn::NestedMeta::Lit(l) => { + cx.error(format_err!(l, "expected identifier, found literal")); + None + } + syn::NestedMeta::Meta(m) => match m.path().get_ident() { + Some(p) => match ATTRS.iter().position(|a| p == a.name) { + Some(pos) => { + 
counter[pos] += 1; + if counter[pos] == 1 { + Some((&ATTRS[pos], m)) + } else { + cx.error(format_err!( + &m, + "duplicate #[{}({})] attribute", + ATTR_NAME, + p + )); + None + } + } + None => { + cx.error(format_err!(p, "unknown {} attribute `{}`", ATTR_NAME, p)); + None + } + }, + None => { + cx.error(format_err!(m, "expected identifier, found path")); + None + } + }, + }) + .filter(|(def, meta)| !def.early_check(cx, pos, meta)) + .collect() +} diff --git a/portable-atomic-derive/src/derive.rs b/portable-atomic-derive/src/derive.rs new file mode 100644 index 000000000..6e3d99151 --- /dev/null +++ b/portable-atomic-derive/src/derive.rs @@ -0,0 +1,426 @@ +use proc_macro2::{Span, TokenStream}; +use quote::{format_ident, quote, ToTokens}; +use syn::{spanned::Spanned, Data, DataStruct, DeriveInput, Field, Ident, Result}; + +use crate::attr::{self, Context}; + +pub(crate) fn derive(input: &mut DeriveInput) -> Result { + let krate = &format_ident!("_portable_atomic"); + let private = "e! { #krate::__private }; + let cx = Context::default(); + let items = match &input.data { + Data::Struct(data) => extend_struct(cx, input, data, krate, private)?, + Data::Enum(data) => extend_enum(cx, input, data, krate, private)?, + Data::Union(data) => extend_union(cx, input, data, krate, private)?, + }; + Ok(quote! 
{ + #[allow( + non_upper_case_globals, + clippy::match_single_binding, + clippy::never_loop, + clippy::single_match, + )] + const _: () = { + extern crate portable_atomic as #krate; + #items + }; + }) +} + +fn extend_struct( + cx: Context, + input: &DeriveInput, + data: &DataStruct, + krate: &Ident, + private: &TokenStream, +) -> Result { + let struct_attrs = &attr::parse_attrs(&cx, &input.attrs, attr::Position::Struct); + let _field_attrs: &Vec<_> = &data + .fields + .iter() + .map(|f| attr::parse_attrs(&cx, &f.attrs, attr::Position::from(&data.fields))) + .collect(); + + if data.fields.is_empty() { + cx.error(format_err!( + input.ident, + "#[derive(Atomicable)] may not be used on struct with no field" + )); + } + if data.fields.len() > 1 && struct_attrs.repr.is_none() { + cx.error(format_err!(input.ident, "repr must be must specified")); + } + + cx.check()?; + + // no user-specified repr & field=1 => transparent + if struct_attrs.repr.is_none() && data.fields.len() == 1 { + let name = &input.ident; + let field = data.fields.iter().next().unwrap(); + let ty = &field.ty; + let index = field_to_access((0, field)); + let msg = panic_msg(name); + + return Ok(quote! { + impl #krate::Atomicable for #name { + type Value = <#ty as #krate::Atomicable>::Value; + #[inline] + fn to_val(self) -> Self::Value { + <#ty as #krate::Atomicable>::to_val(self.#index) + } + fn from_val(_: Self::Value) -> Self { + panic!(#msg); + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + Self { + #index: <#ty as #krate::Atomicable>::from_val_unchecked(val) + } + } + } + }); + } + + let _known_types: Vec<_> = data.fields.iter().map(|f| known_type(&f.ty)).collect(); + + let repr = struct_attrs.repr.unwrap(); + let repr_type = Ident::new(repr.0, repr.1).to_token_stream(); + + let name = &input.ident; + let msg = panic_msg(name); + + let field_types = data.fields.iter().map(|f| &f.ty).collect::>(); + let field_repr_types = field_types + .iter() + .map(|ty| quote! 
{ <#ty as #krate::Atomicable>::Value }) + .collect::>(); + let accesses = data.fields.iter().enumerate().map(field_to_access).collect::>(); + + let increment_offset = field_repr_types + .iter() + .map(|field_repr_type| { + quote! { + __offset += #private::size_of::<#field_repr_type>(); + } + }) + .collect::>(); + let access_offset = field_types + .iter() + .enumerate() + .map(|(i, _)| { + if i == 0 { + quote! {} + } else { + quote! { .add(__offset) } + } + }) + .collect::>(); + + let filled_size = quote! { + #(#private::size_of::<#field_repr_types>())+* + }; + let static_assert_repr_type_size = respan_tokens( + quote! { + #private::size_of::<#repr_type>() - (#filled_size) + }, + repr_type.span(), + ); + let static_asserts = quote! { + // assert repr >= fields + let _ = [(); #static_assert_repr_type_size]; + }; + // let debug_asserts = quote! { + // debug_assert_eq!(__offset, #filled_size); + // }; + + let to_val_writes = field_repr_types.iter().enumerate().map(|(i, field_repr_type)| { + let access = &accesses[i]; + let increment_offset = &increment_offset[i]; + let add_offset = &access_offset[i]; + quote! { + __result_ptr + #add_offset + .cast::<#field_repr_type>() + .write_unaligned(#krate::Atomicable::to_val(self.#access)); + #increment_offset + } + }); + + let mut from_val_reads = vec![]; + let mut from_val_field_vars = vec![]; + for (i, ty) in field_types.iter().enumerate() { + let access = &accesses[i]; + let increment_offset = &increment_offset[i]; + let add_offset = &access_offset[i]; + let field_var = format_ident!("__field{}", i); + from_val_reads.push(quote! { + let #field_var = __val_ptr + #add_offset + .cast::<<#ty as #krate::Atomicable>::Value>().read_unaligned(); + let #field_var = <#ty as #krate::Atomicable>::from_val_unchecked(#field_var); + #increment_offset + }); + from_val_field_vars.push(quote! { #access: #field_var, }); + } + + Ok(quote! 
{ + impl #krate::Atomicable for #name { + type Value = #repr_type; + #[inline] + fn to_val(self) -> Self::Value { + #static_asserts + let mut __result: #repr_type = 0; + let mut __result_ptr: *mut u8 = &mut __result as *mut #repr_type as *mut u8; + let mut __offset: usize = 0; + unsafe { + #(#to_val_writes)* + } + // #debug_asserts + __result + } + fn from_val(_: Self::Value) -> Self { + panic!(#msg); + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + #static_asserts + let mut __val_ptr: *const u8 = &val as *const #repr_type as *const u8; + let mut __offset = 0; + #(#from_val_reads)* + // #debug_asserts + Self { + #(#from_val_field_vars)* + } + } + } + }) +} + +fn extend_enum( + cx: Context, + input: &syn::DeriveInput, + data: &syn::DataEnum, + krate: &Ident, + private: &TokenStream, +) -> Result { + let _enum_attrs = &attr::parse_attrs(&cx, &input.attrs, attr::Position::Enum); + let _variant_attrs: &Vec<_> = &data + .variants + .iter() + .map(|v| { + let variant_attr = attr::parse_attrs(&cx, &input.attrs, attr::Position::Variant); + let field_attrs = v + .fields + .iter() + .map(|f| attr::parse_attrs(&cx, &f.attrs, attr::Position::from(&v.fields))) + .collect::>(); + (variant_attr, field_attrs) + }) + .collect(); + + if data.variants.is_empty() { + cx.error(format_err!( + input.ident, + "#[derive(Atomicable)] may not be used on enum with no variant" + )); + } + + cx.check()?; + + unimplemented!() +} + +fn extend_union( + cx: Context, + input: &syn::DeriveInput, + data: &syn::DataUnion, + krate: &Ident, + private: &TokenStream, +) -> Result { + let _union_attrs = &attr::parse_attrs(&cx, &input.attrs, attr::Position::Union); + data.fields + .named + .iter() + .map(|f| attr::parse_attrs(&cx, &f.attrs, attr::Position::UnionField)) + .for_each(drop); + + if data.fields.named.is_empty() { + cx.error(format_err!( + input.ident, + "#[derive(Atomicable)] may not be used on union with no field" + )); + } + if data.fields.named.len() > 1 && 
!is_repr_c(&input.attrs) { + cx.error(format_err!( + input.ident, + "portable-atomic: union with multiple fields must be #[repr(C)]" + )); + } + + cx.check()?; + + let name = &input.ident; + let msg = panic_msg(name); + + // Case 1: The union has exactly one field. + if data.fields.named.len() == 1 { + let field = data.fields.named.iter().next().unwrap(); + let ty = &field.ty; + let index = field_to_access((0, field)); + + return Ok(quote! { + impl #krate::Atomicable for #name { + type Value = <#ty as #krate::Atomicable>::Value; + #[inline] + fn to_val(self) -> Self::Value { + // SAFETY: the union has exactly one field. + <#ty as #krate::Atomicable>::to_val(unsafe { self.#index }) + } + fn from_val(_: Self::Value) -> Self { + panic!(#msg); + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + Self { + #index: <#ty as #krate::Atomicable>::from_val_unchecked(val) + } + } + } + }); + } + + let first_field = data.fields.named.iter().next().unwrap(); + let first_ty = &first_field.ty; + + let field_types = data.fields.named.iter().map(|f| &f.ty).collect::>(); + // assert that all fields are Transmutable + let static_asserts_transmutable = field_types.iter().map(|t| { + quote! { + #private::is_transmutable::<#t>(); + } + }); + // assert that all fields have the same size + let static_asserts_field_size = field_types.windows(2).map(|t| { + let a = &t[0]; + let b = &t[1]; + quote! { + let [] = [(); #private::size_of::<#a>() - #private::size_of::<#b>()]; + } + }); + let static_asserts = quote! { + #(#static_asserts_transmutable)* + #(#static_asserts_field_size)* + }; + + Ok(quote! { + impl #krate::Atomicable for #name { + type Value = <#first_ty as #krate::Atomicable>::Value; + #[inline] + fn to_val(self) -> Self::Value { + #static_asserts + // SAFETY: the union is #[repr(C)] and all fields are `Transmutable` and have the same size. 
+ unsafe { #private::transmute(self) } + } + fn from_val(_: Self::Value) -> Self { + panic!(#msg); + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + #static_asserts + // SAFETY: the union is #[repr(C)] and all fields are `Transmutable` and have the same size. + unsafe { #private::transmute(val) } + } + } + }) +} + +fn field_to_access((index, field): (usize, &Field)) -> TokenStream { + match &field.ident { + Some(ident) => ident.to_token_stream(), + None => syn::Index::from(index).to_token_stream(), + } +} + +fn known_type(ty: &syn::Type) -> Option { + if let syn::Type::Path(p) = ty { + if let Some(name) = p.path.get_ident() { + let name = name.to_string(); + match &*name { + "i8" | "u8" | "bool" => return Some(KnownType::Scalar { name, size: Some(1) }), + "i16" | "u16" => return Some(KnownType::Scalar { name, size: Some(2) }), + "i32" | "u32" | "f32" | "char" => { + return Some(KnownType::Scalar { name, size: Some(4) }) + } + "i64" | "u64" | "f64" => return Some(KnownType::Scalar { name, size: Some(8) }), + "i128" | "u128" => return Some(KnownType::Scalar { name, size: Some(16) }), + "isize" | "usize" => return Some(KnownType::Scalar { name, size: None }), + _ => {} + } + } + } + // TODO: more types + None +} + +fn panic_msg(name: &Ident) -> String { + format!("there is no way to call Atomicable::from_val for `{}` safely", name) +} + +#[allow(dead_code)] // TODO +enum KnownType { + Scalar { name: String, size: Option }, +} + +// fn is_option(ty: &Type) -> Option<&Type> { +// if let Type::Path(ty) = ty { +// if ty.path.segments.len() == 1 && ty.path.segments[0].ident == "Option" +// || ty.path.segments.len() == 3 +// && (ty.path.segments[0].ident == "std" || ty.path.segments[0].ident == "core") +// && ty.path.segments[1].ident == "option" +// && ty.path.segments[2].ident == "Option" +// { +// if let PathArguments::AngleBracketed(args) = &ty.path.segments.last().unwrap().arguments +// { +// if let GenericArgument::Type(ty) = &args.args[0] { 
+// return Some(ty); +// } +// } +// } +// } +// None +// } + +fn is_repr_c(attrs: &[syn::Attribute]) -> bool { + for meta in attrs.iter().filter_map(|attr| attr.parse_meta().ok()) { + if let syn::Meta::List(list) = meta { + if list.path.is_ident("repr") { + for repr in list.nested.iter() { + match repr { + syn::NestedMeta::Meta(syn::Meta::Path(path)) + | syn::NestedMeta::Meta(syn::Meta::List(syn::MetaList { path, .. })) + | syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue { + path, + .. + })) => { + if path.is_ident("C") { + return true; + } + } + syn::NestedMeta::Lit(..) => {} + } + } + } + } + } + false +} + +fn respan_tokens(tokens: TokenStream, span: Span) -> TokenStream { + tokens + .into_iter() + .map(|mut token| { + token.set_span(span); + token + }) + .collect() +} diff --git a/portable-atomic-derive/src/error.rs b/portable-atomic-derive/src/error.rs new file mode 100644 index 000000000..d9435e760 --- /dev/null +++ b/portable-atomic-derive/src/error.rs @@ -0,0 +1,14 @@ +macro_rules! format_err { + ($span:expr, $msg:expr $(,)?) => { + syn::Error::new_spanned(&$span as &dyn quote::ToTokens, &$msg as &dyn std::fmt::Display) + }; + ($span:expr, $($tt:tt)*) => { + format_err!($span, format!($($tt)*)) + }; +} + +macro_rules! bail { + ($($tt:tt)*) => { + return Err(format_err!($($tt)*)) + }; +} diff --git a/portable-atomic-derive/src/lib.rs b/portable-atomic-derive/src/lib.rs new file mode 100644 index 000000000..d265d7622 --- /dev/null +++ b/portable-atomic-derive/src/lib.rs @@ -0,0 +1,39 @@ +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_variables) + ) +))] +#![warn(rust_2018_idioms, single_use_lifetimes, unreachable_pub)] +#![warn(clippy::pedantic)] +#![allow(clippy::too_many_lines, clippy::single_match_else)] +#![allow(dead_code, unused_variables, unused_macros)] // TODO + +// older compilers require explicit `extern crate`. 
+#[allow(unused_extern_crates)] +extern crate proc_macro; + +#[macro_use] +mod error; + +mod attr; +mod derive; + +use proc_macro::TokenStream; + +/// # Structs +/// +/// # Enums +/// +/// # Unions +/// +/// Unions that meet any of the following are supported: +/// +/// - The union has exactly one field. +/// - The union is `#[repr(C)]`, and all fields are `Transmutable` and have the same size. +#[proc_macro_derive(Atomicable, attributes(atomic))] +pub fn atomicable_derive(input: TokenStream) -> TokenStream { + let mut input = syn::parse_macro_input!(input as syn::DeriveInput); + derive::derive(&mut input).unwrap_or_else(syn::Error::into_compile_error).into() +} diff --git a/src/generic/impls.rs b/src/generic/impls.rs new file mode 100644 index 000000000..f16e0e83e --- /dev/null +++ b/src/generic/impls.rs @@ -0,0 +1,1135 @@ +#[cfg(all(feature = "alloc", not(portable_atomic_no_alloc)))] +extern crate alloc; +#[cfg(all(feature = "std", portable_atomic_no_alloc))] +extern crate std as alloc; + +use super::{ + AllowRelaxed, AtomicArithmetic, AtomicArithmeticPriv, AtomicBitOps, AtomicBitOpsPriv, + AtomicMinMax, AtomicMinMaxPriv, Atomicable, AtomicablePrimitive, AtomicablePrimitivePriv, + Transmutable, +}; +use core::{ + char, mem, + ptr::{self, NonNull}, + slice, + sync::atomic::Ordering, +}; + +// ----------------------------------------------------------------------------- +// integer / float / boolean / character + +macro_rules! 
primitive { + ($(#[$attrs:meta])* $atomic_type:ident, $value_type:ident) => { + $(#[$attrs])* + impl AtomicablePrimitive for $value_type {} + $(#[$attrs])* + impl AtomicablePrimitivePriv for $value_type { + type Atomic = crate::$atomic_type; + const IS_ALWAYS_LOCK_FREE: bool = crate::$atomic_type::is_always_lock_free(); + #[inline] + fn default() -> Self { + <$value_type as Default>::default() + } + #[inline] + fn new(v: Self) -> Self::Atomic { + crate::$atomic_type::new(v) + } + #[inline] + fn is_lock_free() -> bool { + crate::$atomic_type::is_lock_free() + } + #[inline] + fn get_mut(a: &mut Self::Atomic) -> &mut Self { + a.get_mut() + } + #[inline] + fn into_inner(a: Self::Atomic) -> Self { + a.into_inner() + } + #[inline] + fn load(a: &Self::Atomic, order: Ordering) -> Self { + a.load(order) + } + #[inline] + fn store(a: &Self::Atomic, val: Self, order: Ordering) { + a.store(val, order); + } + #[inline] + fn swap(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.swap(val, order) + } + #[inline] + fn compare_exchange( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result { + a.compare_exchange(current, new, success, failure) + } + #[inline] + fn compare_exchange_weak( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result { + a.compare_exchange_weak(current, new, success, failure) + } + } + $(#[$attrs])* + impl Atomicable for $value_type { + type Value = $value_type; + #[inline] + fn to_val(self) -> Self::Value { + self + } + #[inline] + fn from_val(val: Self::Value) -> Self { + val + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + $(#[$attrs])* + unsafe impl Transmutable for $value_type {} + }; +} +macro_rules! 
int { + ($atomic_type:ident, $non_zero_type:ident, $value_type:ident) => { + primitive!($atomic_type, $value_type); + impl AtomicBitOpsPriv for $value_type { + #[inline] + fn fetch_and(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_and(val, order) + } + #[inline] + fn fetch_nand(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_nand(val, order) + } + #[inline] + fn fetch_or(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_or(val, order) + } + #[inline] + fn fetch_xor(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_xor(val, order) + } + } + unsafe impl AtomicBitOps for $value_type {} + impl AtomicArithmeticPriv for $value_type { + #[inline] + fn fetch_add(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_add(val, order) + } + #[inline] + fn fetch_sub(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_sub(val, order) + } + } + unsafe impl AtomicArithmetic for $value_type {} + impl AtomicMinMaxPriv for $value_type { + #[inline] + fn fetch_max(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_max(val, order) + } + #[inline] + fn fetch_min(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_min(val, order) + } + } + impl AtomicMinMax for $value_type {} + impl Atomicable for core::num::$non_zero_type { + type Value = $value_type; + #[inline] + fn to_val(self) -> Self::Value { + self.get() + } + #[inline] + fn from_val(val: Self::Value) -> Self { + Self::new(val).unwrap() + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert_ne!(val, 0); + // SAFETY: we don't provide method to make val zero. 
+ unsafe { Self::new_unchecked(val) } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + // SAFETY: https://github.com/rust-lang/rust/pull/94786 + unsafe impl Transmutable for core::num::$non_zero_type {} + impl Atomicable for Option { + type Value = $value_type; + #[inline] + fn to_val(self) -> Self::Value { + self.map(core::num::$non_zero_type::get).unwrap_or(0) + } + #[inline] + fn from_val(val: Self::Value) -> Self { + core::num::$non_zero_type::new(val) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + // SAFETY: https://doc.rust-lang.org/nightly/core/option/index.html#representation + // SAFETY: https://github.com/rust-lang/rust/pull/51396#issuecomment-395066132 + unsafe impl Transmutable for Option {} + }; +} +macro_rules! fixed_int { + ($atomic_type:ident, $non_zero_type:ident, $value_type:ident, $max_array_len:tt) => { + int!($atomic_type, $non_zero_type, $value_type); + array_transmutable!([$value_type; $max_array_len]); + // SAFETY: https://github.com/rust-lang/rust/pull/94786 + array_transmutable!([core::num::$non_zero_type; $max_array_len]); + // SAFETY: https://doc.rust-lang.org/nightly/core/option/index.html#representation + // SAFETY: https://github.com/rust-lang/rust/pull/51396#issuecomment-395066132 + array_transmutable!([Option; $max_array_len]); + }; +} +macro_rules! 
float { + ($atomic_type:ident, $float_type:ident, $int_type:ident) => { + #[cfg(feature = "float")] + primitive!($atomic_type, $float_type); + #[cfg(not(feature = "float"))] + impl Atomicable for $float_type { + type Value = $int_type; + #[inline] + fn to_val(self) -> Self::Value { + self.to_bits() + } + #[inline] + fn from_val(val: Self::Value) -> Self { + $float_type::from_bits(val) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + #[cfg(not(feature = "float"))] + unsafe impl Transmutable for $float_type {} + #[cfg(feature = "float")] + impl AtomicArithmeticPriv for $float_type { + #[inline] + fn fetch_add(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_add(val, order) + } + #[inline] + fn fetch_sub(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_sub(val, order) + } + } + #[cfg(feature = "float")] + #[cfg_attr(docsrs, doc(cfg(feature = "float")))] + unsafe impl AtomicArithmetic for $float_type {} + #[cfg(feature = "float")] + impl AtomicMinMaxPriv for $float_type { + #[inline] + fn fetch_max(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_max(val, order) + } + #[inline] + fn fetch_min(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_min(val, order) + } + } + #[cfg(feature = "float")] + #[cfg_attr(docsrs, doc(cfg(feature = "float")))] + impl AtomicMinMax for $float_type {} + }; +} +macro_rules! 
array_transmutable { + ([$value_type:ty; 16]) => { + array_transmutable!([$value_type; 2], u16); + array_transmutable!([$value_type; 4], u32); + array_transmutable!([$value_type; 8], u64); + array_transmutable!([$value_type; 16], u128); + }; + ([$value_type:ty; 8]) => { + array_transmutable!([$value_type; 2], u32); + array_transmutable!([$value_type; 4], u64); + array_transmutable!([$value_type; 8], u128); + }; + ([$value_type:ty; 4]) => { + array_transmutable!([$value_type; 2], u64); + array_transmutable!([$value_type; 4], u128); + }; + ([$value_type:ty; 2]) => { + array_transmutable!([$value_type; 2], u128); + }; + ([$value_type:ty; $n:expr], $repr_type:ty) => { + impl Atomicable for [$value_type; $n] { + type Value = $repr_type; + #[inline] + fn to_val(self) -> Self::Value { + unsafe { mem::transmute(self) } + } + #[inline] + fn from_val(val: Self::Value) -> Self { + #[allow(clippy::transmute_num_to_bytes)] + unsafe { + mem::transmute(val) + } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + unsafe impl Transmutable for [$value_type; $n] {} + }; +} + +primitive!(AtomicBool, bool); +array_transmutable!([bool; 16]); +impl AtomicBitOpsPriv for bool { + #[inline] + fn fetch_and(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_and(val, order) + } + #[inline] + fn fetch_nand(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_nand(val, order) + } + #[inline] + fn fetch_or(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_or(val, order) + } + #[inline] + fn fetch_xor(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.fetch_xor(val, order) + } +} +unsafe impl AtomicBitOps for bool {} + +fixed_int!(AtomicI8, NonZeroI8, i8, 16); +fixed_int!(AtomicU8, NonZeroU8, u8, 16); +fixed_int!(AtomicI16, NonZeroI16, i16, 8); +fixed_int!(AtomicU16, NonZeroU16, u16, 8); +fixed_int!(AtomicI32, NonZeroI32, i32, 4); +fixed_int!(AtomicU32, NonZeroU32, u32, 4); 
+fixed_int!(AtomicI64, NonZeroI64, i64, 2); +fixed_int!(AtomicU64, NonZeroU64, u64, 2); +int!(AtomicI128, NonZeroI128, i128); +int!(AtomicU128, NonZeroU128, u128); + +int!(AtomicIsize, NonZeroIsize, isize); +array_transmutable!([isize; 2], DW); +array_transmutable!([core::num::NonZeroIsize; 2], DW); +array_transmutable!([Option; 2], DW); +int!(AtomicUsize, NonZeroUsize, usize); +array_transmutable!([usize; 2], DW); +array_transmutable!([core::num::NonZeroUsize; 2], DW); +array_transmutable!([Option; 2], DW); + +float!(AtomicF32, f32, u32); +array_transmutable!([f32; 4]); +float!(AtomicF64, f64, u64); +array_transmutable!([f64; 2]); + +impl Atomicable for char { + type Value = u32; + #[inline] + fn to_val(self) -> Self::Value { + self as u32 + } + #[inline] + fn from_val(val: Self::Value) -> Self { + char::from_u32(val).unwrap() + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + // SAFETY: we don't provide method to make val invalid. + unsafe { char::from_u32_unchecked(val) } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +unsafe impl Transmutable for char {} + +// TODO: Wrapping +// TODO: ManuallyDrop + +// ----------------------------------------------------------------------------- +// pointer + +impl AtomicablePrimitive for *mut T {} +impl AtomicablePrimitivePriv for *mut T { + type Atomic = crate::AtomicPtr; + const IS_ALWAYS_LOCK_FREE: bool = crate::AtomicPtr::::is_always_lock_free(); + #[inline] + fn default() -> Self { + ptr::null_mut() + } + #[inline] + fn new(v: Self) -> Self::Atomic { + crate::AtomicPtr::::new(v) + } + #[inline] + fn is_lock_free() -> bool { + crate::AtomicPtr::::is_lock_free() + } + #[inline] + fn get_mut(a: &mut Self::Atomic) -> &mut Self { + a.get_mut() + } + #[inline] + fn into_inner(a: Self::Atomic) -> Self { + a.into_inner() + } + #[inline] + fn load(a: &Self::Atomic, order: Ordering) -> Self { + a.load(order) + } + #[inline] + fn store(a: 
&Self::Atomic, val: Self, order: Ordering) { + a.store(val, order); + } + #[inline] + fn swap(a: &Self::Atomic, val: Self, order: Ordering) -> Self { + a.swap(val, order) + } + #[inline] + fn compare_exchange( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result { + a.compare_exchange(current, new, success, failure) + } + #[inline] + fn compare_exchange_weak( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result { + a.compare_exchange_weak(current, new, success, failure) + } +} + +impl Atomicable for *mut T { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + self + } + #[inline] + fn from_val(val: Self::Value) -> Self { + val + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +unsafe impl Transmutable for *mut T {} +impl Atomicable for *const T { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + self as *mut T + } + #[inline] + fn from_val(val: Self::Value) -> Self { + val as *const T + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +unsafe impl Transmutable for *const T {} + +impl Atomicable for NonNull { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + self.as_ptr() + } + #[inline] + fn from_val(val: Self::Value) -> Self { + Self::new(val).unwrap() + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert!(!val.is_null()); + // SAFETY: we don't provide method to make val zero. 
+ unsafe { Self::new_unchecked(val) } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +unsafe impl Transmutable for NonNull {} +impl Atomicable for Option> { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(v) => v.to_val(), + None => ptr::null_mut(), + } + } + #[inline] + fn from_val(val: Self::Value) -> Self { + NonNull::new(val) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +// SAFETY: https://doc.rust-lang.org/nightly/core/option/index.html#representation +unsafe impl Transmutable for Option> {} + +// Note: Since the reference must be valid, Relaxed ordering is not allowed. +impl Atomicable for &T { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + self as *const T as *mut T + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call <&T as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert!(!val.is_null()); + // SAFETY: we don't provide method to make val invalid. + unsafe { &*val } + } +} +unsafe impl Transmutable for &T {} +// Note: Since the reference must be valid, Relaxed ordering is not allowed. +impl Atomicable for Option<&T> { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(v) => v.to_val(), + None => ptr::null_mut(), + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val.is_null() { + None + } else { + // SAFETY: we don't provide method to make val invalid. 
+ Some(unsafe { <&T>::from_val_unchecked(val) }) + } + } +} +// SAFETY: https://doc.rust-lang.org/nightly/core/option/index.html#representation +unsafe impl Transmutable for Option<&T> {} + +// ----------------------------------------------------------------------------- +// fat pointer + +#[cfg(target_pointer_width = "16")] +type DW = u32; +#[cfg(target_pointer_width = "32")] +type DW = u64; +#[cfg(target_pointer_width = "64")] +type DW = u128; + +#[derive(Clone, Copy)] +#[repr(C)] +struct SliceRepr { + // *const () + ptr: usize, + len: usize, +} + +impl SliceRepr { + fn to_val(self) -> DW { + unsafe { mem::transmute(self) } + } + fn from_val(val: DW) -> Self { + unsafe { mem::transmute(val) } + } +} + +// TODO: once https://github.com/rust-lang/rust/issues/71146 stable, we can provide: +// impl Atomicable for *mut [T] +// impl Atomicable for *const [T] +// impl Atomicable for NonNull<[T]> +// impl Atomicable for Option> + +// Note: Since the reference must be valid, Relaxed ordering is not allowed. +impl Atomicable for &[T] { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + SliceRepr { ptr: self.as_ptr() as *const () as usize, len: self.len() }.to_val() + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call <&[T] as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert_ne!(val, 0); + let repr = SliceRepr::from_val(val); + // SAFETY: we don't provide method to make val invalid. + unsafe { slice::from_raw_parts(repr.ptr as *const T, repr.len) } + } +} +// Note: Since the reference must be valid, Relaxed ordering is not allowed. 
+impl Atomicable for Option<&[T]> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(v) => v.to_val(), + None => 0, + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val == 0 { + None + } else { + // SAFETY: we don't provide method to make val invalid. + Some(unsafe { <&[T]>::from_val_unchecked(val) }) + } + } +} + +// TODO: once https://github.com/rust-lang/rust/issues/71146 stable, we can provide: +// impl Atomicable for *mut str +// impl Atomicable for *const str +// impl Atomicable for NonNull +// impl Atomicable for Option> + +// Note: Since the reference must be valid, Relaxed ordering is not allowed. +impl Atomicable for &str { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + self.as_bytes().to_val() + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call <&str as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + // SAFETY: we don't provide method to make val invalid. + unsafe { core::str::from_utf8_unchecked(<&[u8]>::from_val_unchecked(val)) } + } +} +// Note: Since the reference must be valid, Relaxed ordering is not allowed. +impl Atomicable for Option<&str> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(v) => v.to_val(), + None => 0, + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val == 0 { + None + } else { + // SAFETY: we don't provide method to make val invalid. 
+ Some(unsafe { <&str>::from_val_unchecked(val) }) + } + } +} + +// ----------------------------------------------------------------------------- +// {core,std}::time + +// Duration is not repr(C) and contains padding bytes, so it cannot be directly +// converted to an integer. However, it can be converted to/from a pair of +// integers without loss, so it can be represented as u128. +#[derive(Clone, Copy)] +#[repr(C)] +struct DurationRepr { + secs: u64, + nanos: u32, + // Note: Without this, the last 4 bytes will be uninitialized. + padding: u32, +} + +impl DurationRepr { + const NULL: Self = Self { secs: 0, nanos: 0, padding: !0 }; + #[inline] + fn to_int(self) -> u128 { + unsafe { mem::transmute(self) } + } + #[inline] + fn from_int(val: u128) -> Self { + unsafe { mem::transmute(val) } + } +} + +impl Atomicable for core::time::Duration { + type Value = u128; + #[inline] + fn to_val(self) -> Self::Value { + DurationRepr { secs: self.as_secs(), nanos: self.subsec_nanos(), padding: 0 }.to_int() + } + #[inline] + fn from_val(val: Self::Value) -> Self { + let repr = DurationRepr::from_int(val); + core::time::Duration::new(repr.secs, repr.nanos) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +// We can use padding bytes to represent `None`. 
+impl Atomicable for Option { + type Value = u128; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val.to_val(), + None => DurationRepr::NULL.to_int(), + } + } + #[inline] + fn from_val(val: Self::Value) -> Self { + let repr = DurationRepr::from_int(val); + if repr.padding == 0 { + let secs = core::time::Duration::from_secs(repr.secs); + let nanos = core::time::Duration::from_nanos(repr.nanos.into()); + secs.checked_add(nanos) + } else { + None + } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} + +// The internal representation of `SystemTime` is platform-dependent, so it +// cannot be directly converted to an integer. However we can convert it to +// duration since unix epoch, and recover it on conversion to `SystemTime`. +// This is inspired by the way `serde` implements `Serialize`/`Deserialize` for `SystemTime`. +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Atomicable for std::time::SystemTime { + type Value = u128; + #[inline] + fn to_val(self) -> Self::Value { + self.duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime must be later than UNIX_EPOCH") + .to_val() + } + #[inline] + fn from_val(val: Self::Value) -> Self { + let duration_since_epoch = core::time::Duration::from_val(val); + std::time::SystemTime::UNIX_EPOCH.checked_add(duration_since_epoch).unwrap() + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Atomicable for Option { + type Value = u128; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime must be later than UNIX_EPOCH") + .to_val(), + None => DurationRepr::NULL.to_int(), + } + } + #[inline] + fn from_val(val: Self::Value) -> Self { + match Option::::from_val(val) { + 
Some(duration_since_epoch) => { + std::time::SystemTime::UNIX_EPOCH.checked_add(duration_since_epoch) + } + None => None, + } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} + +// ----------------------------------------------------------------------------- +// std::net + +// Ipv4Addr and Ipv6Addr are not repr(C), so they cannot be directly transmuted +// to an integer. However, it can be converted to/from integers without loss. + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Atomicable for std::net::Ipv4Addr { + type Value = u32; + #[inline] + fn to_val(self) -> Self::Value { + u32::from_ne_bytes(self.octets()) + } + #[inline] + fn from_val(val: Self::Value) -> Self { + Self::from(val) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Atomicable for std::net::Ipv6Addr { + type Value = u128; + #[inline] + fn to_val(self) -> Self::Value { + u128::from_ne_bytes(self.octets()) + } + #[inline] + fn from_val(val: Self::Value) -> Self { + Self::from(val) + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } +} + +// ----------------------------------------------------------------------------- +// !Copy types + +// Note: Since the Box must point to a valid value, Relaxed ordering is not allowed. 
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +impl Atomicable for alloc::boxed::Box { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + alloc::boxed::Box::into_raw(self) + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + // SAFETY: we don't provide method to make val invalid. + unsafe { alloc::boxed::Box::from_raw(val) } + } +} +// Note: Since the Box must point to a valid value, Relaxed ordering is not allowed. +#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +impl Atomicable for Option> { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val.to_val(), + None => ptr::null_mut(), + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call > as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val.is_null() { + None + } else { + // SAFETY: we don't provide method to make val invalid. + Some(unsafe { alloc::boxed::Box::from_raw(val) }) + } + } +} + +// Note: Since the Box must point to a valid value, Relaxed ordering is not allowed. 
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +impl Atomicable for alloc::boxed::Box<[T]> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + let len = self.len(); + SliceRepr { ptr: alloc::boxed::Box::into_raw(self) as *const () as usize, len }.to_val() + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert_ne!(val, 0); + let repr = SliceRepr::from_val(val); + // SAFETY: we don't provide method to make val invalid. + unsafe { + let ptr = slice::from_raw_parts_mut(repr.ptr as *mut T, repr.len) as *mut [T]; + alloc::boxed::Box::from_raw(ptr) + } + } +} +// Note: Since the Box must point to a valid value, Relaxed ordering is not allowed. +#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +impl Atomicable for Option> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val.to_val(), + None => 0, + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call > as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val == 0 { + None + } else { + // SAFETY: we don't provide method to make val invalid. + Some(unsafe { >::from_val_unchecked(val) }) + } + } +} + +// Note: Since the Arc must point to a valid value, Relaxed ordering is not allowed. 
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] +#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] +impl Atomicable for alloc::sync::Arc { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + alloc::sync::Arc::into_raw(self) as *mut T + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + // SAFETY: we don't provide method to make val invalid. + unsafe { alloc::sync::Arc::from_raw(val) } + } +} +// Note: Since the Arc must point to a valid value, Relaxed ordering is not allowed. +#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] +#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] +impl Atomicable for Option> { + type Value = *mut T; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val.to_val(), + None => ptr::null_mut(), + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call > as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val.is_null() { + None + } else { + // SAFETY: we don't provide method to make val invalid. + Some(unsafe { alloc::sync::Arc::from_raw(val) }) + } + } +} + +// Note: Since the Arc must point to a valid value, Relaxed ordering is not allowed. 
+#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] +#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] +impl Atomicable for alloc::sync::Arc<[T]> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + let len = self.len(); + SliceRepr { ptr: alloc::sync::Arc::into_raw(self) as *const () as usize, len }.to_val() + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert_ne!(val, 0); + let repr = SliceRepr::from_val(val); + // SAFETY: we don't provide method to make val invalid. + unsafe { + let ptr = slice::from_raw_parts_mut(repr.ptr as *mut T, repr.len) as *mut [T]; + alloc::sync::Arc::from_raw(ptr) + } + } +} +// Note: Since the Arc must point to a valid value, Relaxed ordering is not allowed. +#[cfg(any(all(feature = "alloc", not(portable_atomic_no_alloc)), feature = "std"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "alloc", feature = "std"))))] +#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] +#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] +impl Atomicable for Option> { + type Value = DW; + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(val) => val.to_val(), + None => 0, + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call > as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val == 0 { + None + } else { + // SAFETY: we don't provide method to make val invalid. 
+ Some(unsafe { >::from_val_unchecked(val) }) + } + } +} + +// ----------------------------------------------------------------------------- +// function pointer + +macro_rules! fn_ptr_impls_safety_abi { + ($FnTy:ty, $($Arg:ident),*) => { + impl Atomicable for $FnTy { + type Value = *mut (); + #[inline] + fn to_val(self) -> Self::Value { + self as *mut () + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call ::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + debug_assert!(!val.is_null()); + // SAFETY: we don't provide method to make val invalid. + unsafe { mem::transmute(val) } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + unsafe impl Transmutable for $FnTy {} + impl Atomicable for Option<$FnTy> { + type Value = *mut (); + #[inline] + fn to_val(self) -> Self::Value { + match self { + Some(v) => v.to_val(), + None => ptr::null_mut(), + } + } + #[inline] + fn from_val(_val: Self::Value) -> Self { + panic!("there is no way to call as Atomicable>::from_val safely") + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + if val.is_null() { + None + } else { + // SAFETY: we don't provide method to make val invalid. + Some(unsafe { <_>::from_val_unchecked(val) }) + } + } + #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + unsafe { AllowRelaxed::new(true) } + } + } + // SAFETY: https://doc.rust-lang.org/nightly/core/option/index.html#representation + unsafe impl Transmutable for Option<$FnTy> {} + } +} + +macro_rules! fn_ptr_impls_args { + ($($Arg:ident),+) => { + fn_ptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } + fn_ptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } + fn_ptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + fn_ptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ } + fn_ptr_impls_safety_abi! 
{ unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ } + fn_ptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ } + }; + () => { + // No variadic functions with 0 parameters + fn_ptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } + fn_ptr_impls_safety_abi! { extern "C" fn() -> Ret, } + fn_ptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } + fn_ptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } + }; +} + +fn_ptr_impls_args! {} +fn_ptr_impls_args! { A } +fn_ptr_impls_args! { A, B } +fn_ptr_impls_args! { A, B, C } +fn_ptr_impls_args! { A, B, C, D } +fn_ptr_impls_args! { A, B, C, D, E } +fn_ptr_impls_args! { A, B, C, D, E, F } +fn_ptr_impls_args! { A, B, C, D, E, F, G } +fn_ptr_impls_args! { A, B, C, D, E, F, G, H } +fn_ptr_impls_args! { A, B, C, D, E, F, G, H, I } +fn_ptr_impls_args! { A, B, C, D, E, F, G, H, I, J } +fn_ptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K } +fn_ptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L } diff --git a/src/generic/mod.rs b/src/generic/mod.rs new file mode 100644 index 000000000..bf7e13ae5 --- /dev/null +++ b/src/generic/mod.rs @@ -0,0 +1,751 @@ +#![allow(missing_docs, clippy::undocumented_unsafe_blocks)] // TODO + +mod impls; + +#[cfg(doc)] +use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; +use core::{ + cell::UnsafeCell, + fmt, + marker::PhantomData, + mem::{self, ManuallyDrop}, + ptr, + sync::atomic::Ordering, +}; + +use self::private::{ + AtomicArithmeticPriv, AtomicBitOpsPriv, AtomicMinMaxPriv, AtomicablePrimitivePriv, +}; +use crate::utils::{assert_compare_exchange_ordering, assert_load_ordering, assert_store_ordering}; + +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub trait AtomicablePrimitive: AtomicablePrimitivePriv {} + +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub trait Atomicable: Sized { + type Value: AtomicablePrimitive; + + fn to_val(self) -> Self::Value; + + /// # Panics + /// + /// If the value is not valid, this method 
will panic. + /// + /// This method will also panic if there is no safe way to create `Self` + /// from `Self::Value`. + fn from_val(val: Self::Value) -> Self; + + /// # Safety + /// + /// Behavior is undefined if any of the following conditions are violated: + /// + /// - The value must not be created in any other way than [`Atomicable::to_val`]. + /// - The value must not be modified by operations that are not allowed by the trait. + /// - If the type contains lifetimes, the value must be alive and that the lifetime is [invariant][variance]. + /// - If the type is not `Copy`, the value must be unique. + /// + /// The implementor of this method must guarantee that conversion from a + /// value that meets the above requirements will not result in undefined + /// behavior. + /// + /// [variance]: https://doc.rust-lang.org/nomicon/subtyping.html#variance + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + Self::from_val(val) + } + + // Ideally, this should be a constant, but it is a function because it must + // be marked as unsafe. + // + // Note: conversions that may cause undefined behavior require unsafe code, + // so at first glance it seems that the constant is fine, but users can + // actually override them by combining them with existing Atomicable + // implementations. + /// Returns a token type that indicates whether `Relaxed` memory ordering is allowed. + /// + /// Atomic operations using [`Relaxed`] ordering are fine for plain data types, + /// such as integers, but for types that must point to valid values, such as + /// references, it is unsound because they may point to values that have already + /// been replaced and dropped. + /// + /// # Safety + /// + /// The implementor of this method must guarantee that atomic operations in + /// the [`Relaxed`] ordering will never cause undefined behavior. 
+ #[inline] + unsafe fn allow_relaxed() -> AllowRelaxed { + AllowRelaxed(false) + } +} + +/// A token type that indicates whether `Relaxed` memory ordering is allowed. +/// +/// Atomic operations using [`Relaxed`] ordering are fine for plain data types, +/// such as integers, but for types that must point to valid values, such as +/// references, it is unsound because they may point to values that have already +/// been replaced and dropped. +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +#[derive(Debug, Clone, Copy)] +#[must_use] +pub struct AllowRelaxed(bool); + +impl AllowRelaxed { + /// Creates a new `AllowRelaxed` token. + /// + /// # Safety + /// + /// If `allow` is true, the caller must guarantee that atomic operations in + /// the [`Relaxed`] ordering will never cause undefined behavior. + #[inline] + pub const unsafe fn new(allow: bool) -> Self { + AllowRelaxed(allow) + } + + /// Returns true if `Relaxed` memory ordering is allowed. + #[inline] + #[must_use] + pub fn is_allowed(self) -> bool { + self.0 + } +} + +/// # Safety +/// +/// `T` must be `#[repr(C)]` or `#[repr(transparent)]` and have the same +/// in-memory representation as [`::Value`](Atomicable::Value). +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub unsafe trait Transmutable: Atomicable {} + +/// # Safety +/// +/// The implementer must guarantee that the resulting value of this operation will not be invalid. +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub unsafe trait AtomicBitOps: Atomicable + Copy +where + ::Value: AtomicBitOpsPriv, +{ +} + +/// # Safety +/// +/// The implementer must guarantee that the resulting value of this operation will not be invalid. 
+#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub unsafe trait AtomicArithmetic: Atomicable + Copy +where + ::Value: AtomicArithmeticPriv, +{ +} + +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +pub trait AtomicMinMax: Atomicable + Copy +where + ::Value: AtomicMinMaxPriv, +{ +} + +/// A generic atomic type. +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +#[repr(transparent)] +pub struct Atomic { + repr: <::Value as AtomicablePrimitivePriv>::Atomic, + // make T invariant and propagate auto traits from T + _marker: PhantomData>, +} + +impl Default for Atomic { + #[inline] + fn default() -> Self { + Self::new(T::default()) + } +} + +impl From for Atomic { + #[inline] + fn from(v: T) -> Self { + Self::new(v) + } +} + +impl fmt::Debug for Atomic { + #[allow(clippy::missing_inline_in_public_items)] // fmt is not hot path + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // std atomic types use SeqCst in Debug::fmt: https://github.com/rust-lang/rust/blob/1.58.1/library/core/src/sync/atomic.rs#L1378 + fmt::Debug::fmt(&self.load(Ordering::SeqCst), f) + } +} + +// Send is implicitly implemented. +unsafe impl Sync for Atomic {} + +// UnwindSafe is implicitly implemented. +#[cfg(not(portable_atomic_no_core_unwind_safe))] +impl core::panic::RefUnwindSafe for Atomic {} +#[cfg(all(portable_atomic_no_core_unwind_safe, feature = "std"))] +impl std::panic::RefUnwindSafe for Atomic {} + +impl Atomic { + /// Creates a new atomic value. + #[inline] + #[must_use] + pub fn new(v: T) -> Self { + Self { repr: ::new(v.to_val()), _marker: PhantomData } + } + + /// Creates a new atomic value. + #[cfg(not(portable_atomic_no_const_fn_trait_bound))] + #[inline] + #[must_use] + pub const fn const_new(v: T) -> Self + where + T: Transmutable, + { + // HACK: This is equivalent to transmute_copy by value, but available in const fn. 
+ #[repr(C)] + union ConstHack { + t: ManuallyDrop, + u: ManuallyDrop, + } + // SAFETY: + // - `T: Transmutable` guarantees that the in-memory representations of + // `T` and `T::Value` are the same. + // - `T::Value` implements `AtomicablePrimitivePriv` that guarantees + // that the in-memory representations of `T::Value` and + // `::Atomic` are the same. + // Based on the above, `T` and `::Atomic` + // have the same in-memory representation, so they can be safely transmuted. + unsafe { + Self { + repr: ManuallyDrop::into_inner( + ConstHack::::Atomic> { + t: ManuallyDrop::new(v), + } + .u, + ), + _marker: PhantomData, + } + } + } + + /// Returns `true` if operations on values of this type are lock-free. + /// + /// If the compiler or the platform doesn't support the necessary + /// atomic instructions, global locks for every potentially + /// concurrent atomic operation will be used. + #[inline] + #[must_use] + pub fn is_lock_free() -> bool { + ::is_lock_free() + } + + /// Returns `true` if operations on values of this type are lock-free. + /// + /// If the compiler or the platform doesn't support the necessary + /// atomic instructions, global locks for every potentially + /// concurrent atomic operation will be used. + /// + /// **Note:** If the atomic operation relies on dynamic CPU feature detection, + /// this type may be lock-free even if the function returns false. + #[cfg(not(portable_atomic_no_const_fn_trait_bound))] + #[inline] + #[must_use] + pub const fn is_always_lock_free() -> bool { + ::IS_ALWAYS_LOCK_FREE + } + + /// Returns a mutable reference to the underlying value. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. 
+ #[inline] + pub fn get_mut(&mut self) -> &mut T + where + T: Transmutable, + { + debug_assert!(mem::align_of::() >= mem::align_of::()); + // SAFETY: `T: Transmutable` guarantees that `T` is `#[repr(C)]` + // or `#[repr(transparent)]` and has the same in-memory representation + // as `T::Value`. + // + // Our (and the standard library's) atomic types have the same alignment + // as size, so we don't need to check the alignment when converting a + // reference to atomic type *to* a reference to T. + unsafe { + &mut *(::get_mut(&mut self.repr) as *mut T::Value + as *mut T) + } + } + + /// Consumes the atomic and returns the contained value. + /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. + #[inline] + pub fn into_inner(self) -> T { + let this = ManuallyDrop::new(self); + unsafe { + T::from_val_unchecked(::into_inner(ptr::read( + &this.repr, + ))) + } + } + + /// Stores a value into the atomic integer. + /// + /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation. + /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. + /// + /// # Panics + /// + /// Panics if `order` is [`Acquire`] or [`AcqRel`]. + #[inline] + pub fn store(&self, val: T, order: Ordering) { + assert_store_ordering(order); + if mem::needs_drop::() { + drop(self.swap(val, order)); + } else { + ::store( + &self.repr, + val.to_val(), + store_ordering(order, unsafe { T::allow_relaxed() }), + ); + } + } + + /// Stores a value into the atomic integer, returning the previous value. + /// + /// `swap` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. 
+ #[inline] + pub fn swap(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::swap( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } +} + +impl Atomic { + /// Loads a value from the atomic integer. + /// + /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation. + /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. + /// + /// # Panics + /// + /// Panics if `order` is [`Release`] or [`AcqRel`]. + #[inline] + pub fn load(&self, order: Ordering) -> T { + assert_load_ordering(order); + unsafe { + T::from_val_unchecked(::load( + &self.repr, + load_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Stores a value into the atomic integer if the current value is the same as + /// the `current` value. + /// + /// The return value is a result indicating whether the new value was written and + /// containing the previous value. On success this value is guaranteed to be equal to + /// `current`. + /// + /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory + /// ordering of this operation. `success` describes the required ordering for the + /// read-modify-write operation that takes place if the comparison with `current` succeeds. + /// `failure` describes the required ordering for the load operation that takes place when + /// the comparison fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
+ #[inline] + #[cfg_attr(docsrs, doc(alias = "compare_and_swap"))] + pub fn compare_exchange( + &self, + current: T, + new: T, + success: Ordering, + failure: Ordering, + ) -> Result { + assert_compare_exchange_ordering(success, failure); + match ::compare_exchange( + &self.repr, + current.to_val(), + new.to_val(), + swap_ordering(success, unsafe { T::allow_relaxed() }), + load_ordering(failure, unsafe { T::allow_relaxed() }), + ) { + Ok(v) => Ok(unsafe { T::from_val_unchecked(v) }), + Err(v) => Err(unsafe { T::from_val_unchecked(v) }), + } + } + + /// Stores a value into the atomic integer if the current value is the same as + /// the `current` value. + /// Unlike [`compare_exchange`](Self::compare_exchange) + /// this function is allowed to spuriously fail even + /// when the comparison succeeds, which can result in more efficient code on some + /// platforms. The return value is a result indicating whether the new value was + /// written and containing the previous value. + /// + /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory + /// ordering of this operation. `success` describes the required ordering for the + /// read-modify-write operation that takes place if the comparison with `current` succeeds. + /// `failure` describes the required ordering for the load operation that takes place when + /// the comparison fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
+ #[inline] + #[cfg_attr(docsrs, doc(alias = "compare_and_swap"))] + pub fn compare_exchange_weak( + &self, + current: T, + new: T, + success: Ordering, + failure: Ordering, + ) -> Result { + assert_compare_exchange_ordering(success, failure); + match ::compare_exchange_weak( + &self.repr, + current.to_val(), + new.to_val(), + swap_ordering(success, unsafe { T::allow_relaxed() }), + load_ordering(failure, unsafe { T::allow_relaxed() }), + ) { + Ok(v) => Ok(unsafe { T::from_val_unchecked(v) }), + Err(v) => Err(unsafe { T::from_val_unchecked(v) }), + } + } + + /// Fetches the value, and applies a function to it that returns an optional + /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else + /// `Err(previous_value)`. + /// + /// Note: This may call the function multiple times if the value has been changed from other threads in + /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied + /// only once to the stored value. + /// + /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. + /// The first describes the required ordering for when the operation finally succeeds while the second + /// describes the required ordering for loads. These correspond to the success and failure orderings of + /// [`compare_exchange`](Self::compare_exchange) respectively. + /// + /// Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load + /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
+ #[inline] + pub fn fetch_update( + &self, + set_order: Ordering, + fetch_order: Ordering, + mut f: F, + ) -> Result + where + F: FnMut(T) -> Option, + { + let mut prev = self.load(fetch_order); + while let Some(next) = f(prev) { + match self.compare_exchange_weak(prev, next, set_order, fetch_order) { + x @ Ok(_) => return x, + Err(next_prev) => prev = next_prev, + } + } + Err(prev) + } +} + +impl Atomic +where + T::Value: AtomicArithmeticPriv, +{ + /// Adds to the current value, returning the previous value. + /// + /// This operation wraps around on overflow. + /// + /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_add(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_add( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Subtracts from the current value, returning the previous value. + /// + /// This operation wraps around on overflow. + /// + /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_sub(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_sub( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } +} + +impl Atomic +where + T::Value: AtomicBitOpsPriv, +{ + /// Bitwise "and" with the current value. + /// + /// Performs a bitwise "and" operation on the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. 
+ /// + /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_and(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_and( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Bitwise "nand" with the current value. + /// + /// Performs a bitwise "nand" operation on the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. + /// + /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_nand(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_nand( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Bitwise "or" with the current value. + /// + /// Performs a bitwise "or" operation on the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. + /// + /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_or(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_or( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Bitwise "xor" with the current value. 
+ /// + /// Performs a bitwise "xor" operation on the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. + /// + /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_xor(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_xor( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } +} + +impl Atomic +where + T::Value: AtomicMinMaxPriv, +{ + /// Maximum with the current value. + /// + /// Finds the maximum of the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. + /// + /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + #[inline] + pub fn fetch_max(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_max( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } + + /// Minimum with the current value. + /// + /// Finds the minimum of the current value and the argument `val`, and + /// sets the new value to the result. + /// + /// Returns the previous value. + /// + /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. 
+ #[inline] + pub fn fetch_min(&self, val: T, order: Ordering) -> T { + unsafe { + T::from_val_unchecked(::fetch_min( + &self.repr, + val.to_val(), + swap_ordering(order, T::allow_relaxed()), + )) + } + } +} + +impl Drop for Atomic { + #[inline] + fn drop(&mut self) { + if mem::needs_drop::() { + unsafe { + drop(T::from_val_unchecked(ptr::read( + ::get_mut(&mut self.repr), + ))); + } + } + } +} + +#[inline] +fn load_ordering(order: Ordering, allow_relaxed: AllowRelaxed) -> Ordering { + if allow_relaxed.0 { + order + } else { + match order { + Ordering::Acquire | Ordering::Relaxed => Ordering::Acquire, + Ordering::SeqCst => Ordering::SeqCst, + _ => unreachable!("{:?}", order), + } + } +} +#[inline] +fn store_ordering(order: Ordering, allow_relaxed: AllowRelaxed) -> Ordering { + if allow_relaxed.0 { + order + } else { + match order { + Ordering::Release | Ordering::Relaxed => Ordering::Release, + Ordering::SeqCst => Ordering::SeqCst, + _ => unreachable!("{:?}", order), + } + } +} +#[inline] +fn swap_ordering(order: Ordering, allow_relaxed: AllowRelaxed) -> Ordering { + if allow_relaxed.0 { + order + } else { + match order { + Ordering::Relaxed | Ordering::Acquire | Ordering::Release | Ordering::AcqRel => { + Ordering::AcqRel + } + Ordering::SeqCst => Ordering::SeqCst, + _ => unreachable!("{:?}", order), + } + } +} + +mod private { + use core::sync::atomic::Ordering; + + /// # Safety + /// + /// The in-memory representations of `Self` and `Self::Atomic` must be the same. 
+ pub trait AtomicablePrimitivePriv: Copy { + type Atomic: Sized + Send + Sync; + const IS_ALWAYS_LOCK_FREE: bool; + fn default() -> Self; + fn new(v: Self) -> Self::Atomic; + fn is_lock_free() -> bool; + fn get_mut(a: &mut Self::Atomic) -> &mut Self; + fn into_inner(a: Self::Atomic) -> Self; + fn load(a: &Self::Atomic, order: Ordering) -> Self; + fn store(a: &Self::Atomic, val: Self, order: Ordering); + fn swap(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn compare_exchange( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result; + fn compare_exchange_weak( + a: &Self::Atomic, + current: Self, + new: Self, + success: Ordering, + failure: Ordering, + ) -> Result; + } + + pub trait AtomicBitOpsPriv: AtomicablePrimitivePriv { + fn fetch_and(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn fetch_nand(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn fetch_or(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn fetch_xor(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + } + + pub trait AtomicArithmeticPriv: AtomicablePrimitivePriv { + fn fetch_add(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn fetch_sub(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + } + + pub trait AtomicMinMaxPriv: AtomicablePrimitivePriv { + fn fetch_max(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + fn fetch_min(a: &Self::Atomic, val: Self, order: Ordering) -> Self; + } +} diff --git a/src/lib.rs b/src/lib.rs index 8f4041a2b..0751ff460 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,10 @@ /*! -Portable atomic types including support for 128-bit atomics, atomic float, etc. +Portable atomic types including support for 128-bit atomics, atomic float, generic atomic type, etc. - Provide all atomic integer types (`Atomic{I,U}{8,16,32,64}`) for all targets that can use atomic CAS. 
(i.e., all targets that can use `std`, and most no-std targets) - Provide `AtomicI128` and `AtomicU128`. - Provide `AtomicF32` and `AtomicF64`. (optional) - +- Provide generic `Atomic<T>` type. (optional) - Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr) - Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg)) - Provide stable equivalents of the standard library atomic types' unstable APIs, such as [`AtomicPtr::fetch_*`](https://github.com/rust-lang/rust/issues/99108), [`AtomicBool::fetch_not`](https://github.com/rust-lang/rust/issues/98485). @@ -18,6 +18,13 @@ On x86_64, when the `outline-atomics` optional feature is not enabled and `cmpxc See [this list](https://github.com/taiki-e/portable-atomic/issues/10#issuecomment-1159368067) for details. +## Generic Atomic\<T\> type + +- Support for various operations on `Copy` types and support for swap and store on non-`Copy` types. +- Support for primitives, immutable references, function pointers, `NonNull`, `NonZero`, etc. +- Support for user-defined structs and enums, including those with multiple fields and padding. (via `#[derive(Atomicable)]`) +- Support for user-defined unions. (via `#[derive(Atomicable)]`) + ## Optional features - **`fallback`** *(enabled by default)*
@@ -41,20 +48,24 @@ See [this list](https://github.com/taiki-e/portable-atomic/issues/10#issuecommen Provide `AtomicF{32,64}`. Note that most of `fetch_*` operations of atomic floats are implemented using CAS loops, which can be slower than equivalent operations of atomic integers. - + + Note: + - This implicitly enables the `fallback` feature. + +- **`alloc`**
+ Use `alloc`. - **`std`**
Use `std`. + Note: + - This implicitly enables the `alloc` feature. + - **`serde`**
Implement `serde::{Serialize,Deserialize}` for atomic types. - Note: - - The MSRV when this feature enables depends on the MSRV of [serde]. - ## Optional cfg - **`--cfg portable_atomic_unsafe_assume_single_core`**
@@ -247,6 +258,61 @@ mod tests; #[doc(no_inline)] pub use core::sync::atomic::{compiler_fence, fence, Ordering}; +#[cfg(feature = "generic")] +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +#[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core)) +)] +#[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core)) +)] +pub mod generic; +#[cfg(feature = "generic")] +#[cfg_attr(docsrs, doc(cfg(feature = "generic")))] +#[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core)) +)] +#[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core)) +)] +pub use crate::generic::*; + +#[cfg(feature = "derive")] +#[cfg(feature = "generic")] +#[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core)) +)] +#[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core)) +)] +#[cfg_attr(docsrs, doc(cfg(all(feature = "generic", feature = "derive"))))] +pub use portable_atomic_derive::Atomicable; + +// Not public API. 
+#[doc(hidden)] +#[cfg(feature = "derive")] +#[cfg(feature = "generic")] +#[cfg_attr( + portable_atomic_no_cfg_target_has_atomic, + cfg(any(not(portable_atomic_no_atomic_cas), portable_atomic_unsafe_assume_single_core)) +)] +#[cfg_attr( + not(portable_atomic_no_cfg_target_has_atomic), + cfg(any(target_has_atomic = "ptr", portable_atomic_unsafe_assume_single_core)) +)] +pub mod __private { + pub use core::mem::{size_of, transmute}; + #[doc(hidden)] + #[allow(clippy::missing_inline_in_public_items)] + pub fn is_transmutable() {} +} + mod imp; pub mod hint { diff --git a/src/tests/generic.rs b/src/tests/generic.rs new file mode 100644 index 000000000..1e40936f9 --- /dev/null +++ b/src/tests/generic.rs @@ -0,0 +1,23 @@ +#![allow(dead_code, unused_imports)] + +use crate::generic::*; +use core::sync::atomic::Ordering; + +#[cfg(not(portable_atomic_no_const_fn_trait_bound))] +#[test] +fn const_new() { + static U8X16: Atomic<[u8; 16]> = Atomic::const_new([0; 16]); + let _v = Atomic::const_new(core::ptr::null::<()>()); + let _v = Atomic::const_new(core::ptr::null_mut::<()>()); + let _v = Atomic::const_new(&[0_u8]); + U8X16.store([1; 16], Ordering::Relaxed); + assert_eq!(U8X16.load(Ordering::Relaxed), [1; 16]); +} + +#[test] +fn dw_ptr() { + let x = &[0_u8, 1_u8][..]; + let a = Atomic::new(x); + let y = a.swap(x, Ordering::Relaxed); + assert_eq!(x, y); +} diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 6e366eb23..703506aff 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -3,6 +3,8 @@ #[macro_use] pub(crate) mod helper; +#[cfg(feature = "generic")] +mod generic; #[cfg(feature = "serde")] mod serde; diff --git a/tests/compiletest.rs b/tests/compiletest.rs new file mode 100644 index 000000000..48ac26535 --- /dev/null +++ b/tests/compiletest.rs @@ -0,0 +1,18 @@ +#![cfg(not(miri))] +#![cfg(not(valgrind))] +#![cfg(feature = "generic")] +#![cfg(feature = "derive")] +#![warn(rust_2018_idioms, single_use_lifetimes)] + +use std::env; + +#[rustversion::attr(not(nightly), 
ignore)] +#[test] +fn ui() { + if env::var_os("CI").is_none() { + env::set_var("TRYBUILD", "overwrite"); + } + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/**/*.rs"); +} diff --git a/tests/expand/struct-named-multiple.expanded.rs b/tests/expand/struct-named-multiple.expanded.rs new file mode 100644 index 000000000..5365d2232 --- /dev/null +++ b/tests/expand/struct-named-multiple.expanded.rs @@ -0,0 +1,99 @@ +use portable_atomic::Atomicable; +#[atomic(u16)] +struct A { + f0: u8, + f1: u8, +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for A { + #[inline] + fn clone(&self) -> A { + { + let _: ::core::clone::AssertParamIsClone; + let _: ::core::clone::AssertParamIsClone; + *self + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::marker::Copy for A {} +#[allow( + non_upper_case_globals, + clippy::match_single_binding, + clippy::never_loop, + clippy::single_match +)] +const _: () = { + extern crate portable_atomic as _portable_atomic; + impl _portable_atomic::Atomicable for A { + type Value = u16; + #[inline] + fn to_val(self) -> Self::Value { + let _ = [(); _portable_atomic::__private::size_of::() + - (_portable_atomic::__private::size_of::< + ::Value, + >() + _portable_atomic::__private::size_of::< + ::Value, + >())]; + let mut __result: u16 = 0; + let mut __result_ptr: *mut u8 = &mut __result as *mut u16 as *mut u8; + let mut __offset: usize = 0; + unsafe { + __result_ptr + .cast::<::Value>() + .write_unaligned(_portable_atomic::Atomicable::to_val(self.f0)); + __offset += _portable_atomic::__private::size_of::< + ::Value, + >(); + __result_ptr + .add(__offset) + .cast::<::Value>() + .write_unaligned(_portable_atomic::Atomicable::to_val(self.f1)); + __offset += _portable_atomic::__private::size_of::< + ::Value, + >(); + } + __result + } + fn from_val(_: Self::Value) -> Self { + { + ::std::rt::begin_panic( + "there is no way to call Atomicable::from_val for `A` safely", + ) + 
}; + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + let _ = [(); _portable_atomic::__private::size_of::() + - (_portable_atomic::__private::size_of::< + ::Value, + >() + _portable_atomic::__private::size_of::< + ::Value, + >())]; + let mut __val_ptr: *const u8 = &val as *const u16 as *const u8; + let mut __offset = 0; + let __field0 = __val_ptr + .cast::<::Value>() + .read_unaligned(); + let __field0 = ::from_val_unchecked(__field0); + __offset += _portable_atomic::__private::size_of::< + ::Value, + >(); + let __field1 = __val_ptr + .add(__offset) + .cast::<::Value>() + .read_unaligned(); + let __field1 = ::from_val_unchecked(__field1); + __offset += _portable_atomic::__private::size_of::< + ::Value, + >(); + Self { + f0: __field0, + f1: __field1, + } + } + } +}; +fn main() {} diff --git a/tests/expand/struct-named-multiple.rs b/tests/expand/struct-named-multiple.rs new file mode 100644 index 000000000..2aafe7a7f --- /dev/null +++ b/tests/expand/struct-named-multiple.rs @@ -0,0 +1,10 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[atomic(u16)] +struct A { + f0: u8, + f1: u8, +} + +fn main() {} diff --git a/tests/expand/struct-named-single.expanded.rs b/tests/expand/struct-named-single.expanded.rs new file mode 100644 index 000000000..78e5897a8 --- /dev/null +++ b/tests/expand/struct-named-single.expanded.rs @@ -0,0 +1,48 @@ +use portable_atomic::Atomicable; +struct A { + f0: u8, +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for A { + #[inline] + fn clone(&self) -> A { + { + let _: ::core::clone::AssertParamIsClone; + *self + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::marker::Copy for A {} +#[allow( + non_upper_case_globals, + clippy::match_single_binding, + clippy::never_loop, + clippy::single_match +)] +const _: () = { + extern crate portable_atomic as _portable_atomic; + impl _portable_atomic::Atomicable for A { + type Value 
= ::Value; + #[inline] + fn to_val(self) -> Self::Value { + ::to_val(self.f0) + } + fn from_val(_: Self::Value) -> Self { + { + ::std::rt::begin_panic( + "there is no way to call Atomicable::from_val for `A` safely", + ) + }; + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + Self { + f0: ::from_val_unchecked(val), + } + } + } +}; +fn main() {} diff --git a/tests/expand/struct-named-single.rs b/tests/expand/struct-named-single.rs new file mode 100644 index 000000000..4981fa8a9 --- /dev/null +++ b/tests/expand/struct-named-single.rs @@ -0,0 +1,8 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +struct A { + f0: u8, +} + +fn main() {} diff --git a/tests/expand/union-multiple.expanded.rs b/tests/expand/union-multiple.expanded.rs new file mode 100644 index 000000000..b44d26029 --- /dev/null +++ b/tests/expand/union-multiple.expanded.rs @@ -0,0 +1,56 @@ +use portable_atomic::Atomicable; +#[repr(C)] +union A { + f0: u8, + f1: u8, +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for A { + #[inline] + fn clone(&self) -> A { + { + let _: ::core::clone::AssertParamIsCopy; + *self + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::marker::Copy for A {} +#[allow( + non_upper_case_globals, + clippy::match_single_binding, + clippy::never_loop, + clippy::single_match +)] +const _: () = { + extern crate portable_atomic as _portable_atomic; + impl _portable_atomic::Atomicable for A { + type Value = ::Value; + #[inline] + fn to_val(self) -> Self::Value { + _portable_atomic::__private::is_transmutable::(); + _portable_atomic::__private::is_transmutable::(); + let [] = [(); _portable_atomic::__private::size_of::() + - _portable_atomic::__private::size_of::()]; + unsafe { _portable_atomic::__private::transmute(self) } + } + fn from_val(_: Self::Value) -> Self { + { + ::std::rt::begin_panic( + "there is no way to call Atomicable::from_val for `A` safely", + ) + }; 
+ } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + _portable_atomic::__private::is_transmutable::(); + _portable_atomic::__private::is_transmutable::(); + let [] = [(); _portable_atomic::__private::size_of::() + - _portable_atomic::__private::size_of::()]; + unsafe { _portable_atomic::__private::transmute(val) } + } + } +}; +fn main() {} diff --git a/tests/expand/union-multiple.rs b/tests/expand/union-multiple.rs new file mode 100644 index 000000000..b17040b0f --- /dev/null +++ b/tests/expand/union-multiple.rs @@ -0,0 +1,10 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[repr(C)] +union A { + f0: u8, + f1: u8, +} + +fn main() {} diff --git a/tests/expand/union-single.expanded.rs b/tests/expand/union-single.expanded.rs new file mode 100644 index 000000000..f40ce572d --- /dev/null +++ b/tests/expand/union-single.expanded.rs @@ -0,0 +1,48 @@ +use portable_atomic::Atomicable; +union A { + f0: u8, +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for A { + #[inline] + fn clone(&self) -> A { + { + let _: ::core::clone::AssertParamIsCopy; + *self + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::marker::Copy for A {} +#[allow( + non_upper_case_globals, + clippy::match_single_binding, + clippy::never_loop, + clippy::single_match +)] +const _: () = { + extern crate portable_atomic as _portable_atomic; + impl _portable_atomic::Atomicable for A { + type Value = ::Value; + #[inline] + fn to_val(self) -> Self::Value { + ::to_val(unsafe { self.f0 }) + } + fn from_val(_: Self::Value) -> Self { + { + ::std::rt::begin_panic( + "there is no way to call Atomicable::from_val for `A` safely", + ) + }; + } + #[inline] + unsafe fn from_val_unchecked(val: Self::Value) -> Self { + Self { + f0: ::from_val_unchecked(val), + } + } + } +}; +fn main() {} diff --git a/tests/expand/union-single.rs b/tests/expand/union-single.rs new file mode 100644 index 
000000000..14514d9ea --- /dev/null +++ b/tests/expand/union-single.rs @@ -0,0 +1,8 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +union A { + f0: u8, +} + +fn main() {} diff --git a/tests/expandtest.rs b/tests/expandtest.rs new file mode 100644 index 000000000..1124eac3d --- /dev/null +++ b/tests/expandtest.rs @@ -0,0 +1,43 @@ +#![cfg(not(miri))] +#![cfg(not(valgrind))] +#![cfg(feature = "generic")] +#![cfg(feature = "derive")] +#![warn(rust_2018_idioms, single_use_lifetimes)] + +use std::{ + env, + process::{Command, ExitStatus, Stdio}, +}; + +const PATH: &str = "tests/expand/**/*.rs"; + +#[rustversion::attr(not(nightly), ignore)] +#[test] +fn expandtest() { + let is_ci = env::var_os("CI").is_some(); + let cargo = &*env::var("CARGO").unwrap_or_else(|_| "cargo".into()); + if !has_command(&[cargo, "expand"]) || !has_command(&[cargo, "fmt"]) { + return; + } + + let args = &["--all-features"]; + if is_ci { + macrotest::expand_without_refresh_args(PATH, args); + } else { + env::set_var("MACROTEST", "overwrite"); + macrotest::expand_args(PATH, args); + } +} + +fn has_command(command: &[&str]) -> bool { + Command::new(command[0]) + .args(&command[1..]) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status() + .as_ref() + .map(ExitStatus::success) + .unwrap_or(false) +} diff --git a/tests/test.rs b/tests/test.rs new file mode 100644 index 000000000..1c5858ebd --- /dev/null +++ b/tests/test.rs @@ -0,0 +1,41 @@ +#![warn(rust_2018_idioms, single_use_lifetimes, unsafe_op_in_unsafe_fn)] + +#[cfg(feature = "derive")] +mod derive { + use portable_atomic::*; + + #[derive(Debug, Clone, Copy, PartialEq, Eq, Atomicable)] + struct Transparent1 { + f: u64, + } + #[derive(Debug, Clone, Copy, PartialEq, Eq, Atomicable)] + struct Transparent2(u64); + #[derive(Debug, Clone, Copy, PartialEq, Eq, Atomicable)] + #[atomic(u64)] + struct Transparent3(Transparent1); + #[derive(Debug, Clone, Copy, PartialEq, Eq, 
Atomicable)] + #[atomic(u64)] + struct Transparent4 { + f: Transparent3, + } + + #[derive(Debug, Clone, Copy, PartialEq, Eq, Atomicable)] + #[atomic(u64)] + struct Composite1 { + f1: [u16; 2], + f2: u16, + } + #[derive(Debug, Clone, Copy, PartialEq, Eq, Atomicable)] + #[atomic(u128)] + struct Composite2(Composite1, u16); + + #[test] + fn test() { + let a = Atomic::new(Transparent1 { f: 0 }); + let _b = Atomic::new(Transparent2(0)); + let c = Atomic::new(Transparent3(a.load(Ordering::Acquire))); + let _d = Atomic::new(Transparent4 { f: c.load(Ordering::Acquire) }); + let e = Atomic::new(Composite1 { f1: [1, 3], f2: 2 }); + assert_eq!(e.load(Ordering::Acquire), Composite1 { f1: [1, 3], f2: 2 }); + } +} diff --git a/tests/ui/enum-invalid.rs b/tests/ui/enum-invalid.rs new file mode 100644 index 000000000..9c5223bfc --- /dev/null +++ b/tests/ui/enum-invalid.rs @@ -0,0 +1,8 @@ +pub mod enum_no_variant { + use portable_atomic::Atomicable; + + #[derive(Clone, Copy, Atomicable)] + enum A {} +} + +fn main() {} diff --git a/tests/ui/enum-invalid.stderr b/tests/ui/enum-invalid.stderr new file mode 100644 index 000000000..a7bf56acb --- /dev/null +++ b/tests/ui/enum-invalid.stderr @@ -0,0 +1,5 @@ +error: #[derive(Atomicable)] may not be used on enum with no variant + --> tests/ui/enum-invalid.rs:5:10 + | +5 | enum A {} + | ^ diff --git a/tests/ui/struct-invalid.rs b/tests/ui/struct-invalid.rs new file mode 100644 index 000000000..f155778d9 --- /dev/null +++ b/tests/ui/struct-invalid.rs @@ -0,0 +1,18 @@ +pub mod struct_no_field { + use portable_atomic::Atomicable; + + #[derive(Clone, Copy, Atomicable)] + struct A {} +} + +pub mod struct_no_repr { + use portable_atomic::Atomicable; + + #[derive(Clone, Copy, Atomicable)] + struct A { + f0: u8, + f1: u8, + } +} + +fn main() {} diff --git a/tests/ui/struct-invalid.stderr b/tests/ui/struct-invalid.stderr new file mode 100644 index 000000000..0c1f83631 --- /dev/null +++ b/tests/ui/struct-invalid.stderr @@ -0,0 +1,11 @@ +error: 
#[derive(Atomicable)] may not be used on struct with no field + --> tests/ui/struct-invalid.rs:5:12 + | +5 | struct A {} + | ^ + +error: repr must be must specified + --> tests/ui/struct-invalid.rs:12:12 + | +12 | struct A { + | ^ diff --git a/tests/ui/struct-large.rs b/tests/ui/struct-large.rs new file mode 100644 index 000000000..40032ab7f --- /dev/null +++ b/tests/ui/struct-large.rs @@ -0,0 +1,10 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[atomic(u8)] +struct A { + f0: u8, + f1: u8, +} + +fn main() {} diff --git a/tests/ui/struct-large.stderr b/tests/ui/struct-large.stderr new file mode 100644 index 000000000..8d2e3b7f6 --- /dev/null +++ b/tests/ui/struct-large.stderr @@ -0,0 +1,5 @@ +error[E0080]: evaluation of constant value failed + --> tests/ui/struct-large.rs:4:10 + | +4 | #[atomic(u8)] + | ^^ attempt to compute `1_usize - 2_usize`, which would overflow diff --git a/tests/ui/union-different-size1.rs b/tests/ui/union-different-size1.rs new file mode 100644 index 000000000..b140f656a --- /dev/null +++ b/tests/ui/union-different-size1.rs @@ -0,0 +1,10 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[repr(C)] +union A { + f0: u8, + f1: u16, +} + +fn main() {} diff --git a/tests/ui/union-different-size1.stderr b/tests/ui/union-different-size1.stderr new file mode 100644 index 000000000..0aaad4946 --- /dev/null +++ b/tests/ui/union-different-size1.stderr @@ -0,0 +1,7 @@ +error[E0080]: evaluation of constant value failed + --> tests/ui/union-different-size1.rs:3:23 + | +3 | #[derive(Clone, Copy, Atomicable)] + | ^^^^^^^^^^ attempt to compute `1_usize - 2_usize`, which would overflow + | + = note: this error originates in the derive macro `Atomicable` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/tests/ui/union-different-size2.rs b/tests/ui/union-different-size2.rs new file mode 100644 index 000000000..71d842e59 --- /dev/null +++ b/tests/ui/union-different-size2.rs @@ -0,0 
+1,10 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[repr(C)] +union A { + f0: u16, + f1: u8, +} + +fn main() {} diff --git a/tests/ui/union-different-size2.stderr b/tests/ui/union-different-size2.stderr new file mode 100644 index 000000000..bbb9cc946 --- /dev/null +++ b/tests/ui/union-different-size2.stderr @@ -0,0 +1,7 @@ +error[E0527]: pattern requires 0 elements but array has 1 + --> tests/ui/union-different-size2.rs:3:23 + | +3 | #[derive(Clone, Copy, Atomicable)] + | ^^^^^^^^^^ expected 1 element + | + = note: this error originates in the derive macro `Atomicable` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/tests/ui/union-invalid.rs b/tests/ui/union-invalid.rs new file mode 100644 index 000000000..c0f9f6a10 --- /dev/null +++ b/tests/ui/union-invalid.rs @@ -0,0 +1,11 @@ +pub mod multi_fields_no_repr_c { + use portable_atomic::Atomicable; + + #[derive(Clone, Copy, Atomicable)] + union A { + f0: u8, + f1: u8, + } +} + +fn main() {} diff --git a/tests/ui/union-invalid.stderr b/tests/ui/union-invalid.stderr new file mode 100644 index 000000000..55e73593c --- /dev/null +++ b/tests/ui/union-invalid.stderr @@ -0,0 +1,5 @@ +error: portable-atomic: union with multiple fields must be #[repr(C)] + --> tests/ui/union-invalid.rs:5:11 + | +5 | union A { + | ^ diff --git a/tests/ui/union-no-field.rs b/tests/ui/union-no-field.rs new file mode 100644 index 000000000..f8e101f6e --- /dev/null +++ b/tests/ui/union-no-field.rs @@ -0,0 +1,8 @@ +// This is rustc error. 
+ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +union A {} + +fn main() {} diff --git a/tests/ui/union-no-field.stderr b/tests/ui/union-no-field.stderr new file mode 100644 index 000000000..09705b699 --- /dev/null +++ b/tests/ui/union-no-field.stderr @@ -0,0 +1,11 @@ +error: #[derive(Atomicable)] may not be used on union with no field + --> tests/ui/union-no-field.rs:6:7 + | +6 | union A {} + | ^ + +error: unions cannot have zero fields + --> tests/ui/union-no-field.rs:6:1 + | +6 | union A {} + | ^^^^^^^^^^ diff --git a/tests/ui/union-not-transmutable.rs b/tests/ui/union-not-transmutable.rs new file mode 100644 index 000000000..e55250c97 --- /dev/null +++ b/tests/ui/union-not-transmutable.rs @@ -0,0 +1,15 @@ +use portable_atomic::Atomicable; + +#[derive(Clone, Copy, Atomicable)] +#[repr(C)] +union A { + f0: u8, + f1: B, +} + +#[derive(Clone, Copy, Atomicable)] +struct B { + f0: u8, +} + +fn main() {} diff --git a/tests/ui/union-not-transmutable.stderr b/tests/ui/union-not-transmutable.stderr new file mode 100644 index 000000000..f20ea1f6c --- /dev/null +++ b/tests/ui/union-not-transmutable.stderr @@ -0,0 +1,21 @@ +error[E0277]: the trait bound `B: Transmutable` is not satisfied + --> tests/ui/union-not-transmutable.rs:7:9 + | +7 | f1: B, + | ^ the trait `Transmutable` is not implemented for `B` + | + = help: the following other types implement trait `Transmutable`: + &T + *const T + *mut T + NonNull + NonZeroI128 + NonZeroI16 + NonZeroI32 + NonZeroI64 + and 263 others +note: required by a bound in `is_transmutable` + --> src/lib.rs + | + | pub fn is_transmutable() {} + | ^^^^^^^^^^^^^^^^^^^ required by this bound in `is_transmutable` diff --git a/tools/build.sh b/tools/build.sh index f271742a2..b70174c1a 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -162,8 +162,8 @@ build() { # x cargo "${args[@]}" --manifest-path tests/no-std/Cargo.toml "$@" args+=( - --workspace --ignore-private - --no-dev-deps --feature-powerset --depth 3 
--optional-deps + --workspace --ignore-private --exclude portable-atomic-derive + --no-dev-deps --feature-powerset --depth 3 --optional-deps --exclude-features "portable-atomic-derive" ) case "${target}" in x86_64* | aarch64* | arm64*) ;;