diff --git a/.github/workflows/rust.yml b/.github/workflows/ci.yml similarity index 73% rename from .github/workflows/rust.yml rename to .github/workflows/ci.yml index d54fa2f..141ddbf 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Rust +name: CI on: push: @@ -20,3 +20,6 @@ jobs: run: cargo build --verbose - name: Run tests run: cargo test --verbose + - name: cargo-semver-checks + uses: obi1kenobi/cargo-semver-checks-action@v2.1 + diff --git a/README.md b/README.md index 63b24ce..6ed45b4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# MultiIndexMap [![Tests](https://github.com/lun3x/multi_index_map/actions/workflows/rust.yml/badge.svg?branch=master)](https://github.com/lun3x/multi_index_map/actions/workflows/rust.yml) +# MultiIndexMap [![Tests](https://github.com/lun3x/multi_index_map/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/lun3x/multi_index_map/actions/workflows/ci.yml) [Also available on crates.io.](https://crates.io/crates/multi_index_map) @@ -32,14 +32,15 @@ Current implementation supports: This crate provides a derive macro `MultiIndexMap`, which when applied to the struct representing an element will generate a map to store and access these elements. Annotations are used to specify which fields to index. Currently `hashed_unique`, `hashed_non_unique`, `ordered_unique`, and `ordered_non_unique` are supported. -The element must implement `Clone`. +The types of all indexed fields must implement `Clone`. +If the MultiIndexMap needs to be cloned, `Clone` must be implemented manually, see `examples/main.rs` for an example of how to do this. 
## Example ```rust use multi_index_map::MultiIndexMap; -#[derive(MultiIndexMap, Clone, Debug)] +#[derive(MultiIndexMap, Debug)] struct Order { #[multi_index(hashed_unique)] order_id: u32, @@ -47,6 +48,8 @@ struct Order { timestamp: u64, #[multi_index(hashed_non_unique)] trader_name: String, + filled: bool, + volume: u64, } fn main() { @@ -54,12 +57,16 @@ fn main() { order_id: 1, timestamp: 1656145181, trader_name: "JohnDoe".into(), + filled: false, + volume: 100, }; let order2 = Order { order_id: 2, timestamp: 1656145182, trader_name: "JohnDoe".into(), + filled: false, + volume: 100, }; let mut map = MultiIndexOrderMap::default(); @@ -80,10 +87,20 @@ fn main() { o.order_id = 42; }) .unwrap(); + assert_eq!(order2_ref.timestamp, 1656145183); assert_eq!(order2_ref.order_id, 42); assert_eq!(order2_ref.trader_name, "JohnDoe".to_string()); + let order2_ref = map + .update_by_order_id(&42, |filled: &mut bool, volume: &mut u64| { + *filled = true; + *volume = 0; + }) + .unwrap(); + assert_eq!(order2_ref.filled, true); + assert_eq!(order2_ref.volume, 0); + let orders = map.get_by_trader_name(&"JohnDoe".to_string()); assert_eq!(orders.len(), 2); println!("Found 2 orders for JohnDoe: [{orders:?}]"); @@ -109,10 +126,12 @@ For `hashed_unique` and `hashed_non_unique` a `FxHashMap` is used, for `ordered_ * When retrieving elements for a given key, we lookup the key in the lookup table, then retrieve the item at that index in the backing store. * When removing an element for a given key, we do the same, but we then must also remove keys from all the other lookup tables before returning the element. * When iterating over an index, we use the default iterators for the lookup table, then simply retrieve the element at the given index in the backing store. -* When modifying an element, we lookup the element through the given key, then apply the closure to modify the element in-place. We then return a reference to the modified element. 
-We must then update all the lookup tables to account for any changes to indexed fields. -If we only want to modify an unindexed field then it is much faster to just mutate that field directly. -This is why the unsafe methods are provided. These can be used to modify unindexed fields quickly, but must not be used to modify indexed fields. +* When updating un-indexed fields, we lookup the element(s) through the given key, then apply the closure to modify just the unindexed fields in-place. +We then return a reference to the modified element(s). +If the key doesn't match, the closure won't be applied, and Option::None will be returned. +* When modifying indexed fields of an element, we do the same process, but the closure takes a mutable reference to the whole element. +Any fields, indexed and un-indexed can be modified. +We must then update all the lookup tables to account for any changes to indexed fields, so this is slower than an un-indexed update. ```rust @@ -142,21 +161,21 @@ impl MultiIndexOrderMap { fn is_empty(&self) -> bool; fn clear(&mut self); - fn get_by_order_id(&self) -> Option<&Order>; - fn get_by_timestamp(&self) -> Option<&Order>; - fn get_by_trader_name(&self) -> Vec<&Order>; - - unsafe fn get_mut_by_order_id(&mut self) -> Option<&mut Order>; - unsafe fn get_mut_by_timestamp(&mut self) -> Option<&mut Order>; - unsafe fn get_mut_by_trader_name(&mut self) -> Vec<&mut Order>; + fn get_by_order_id(&self, key: &u32) -> Option<&Order>; + fn get_by_timestamp(&self, key: &u64) -> Option<&Order>; + fn get_by_trader_name(&self, key: &String) -> Vec<&Order>; + + fn update_by_order_id(&mut self, key: &u32, f: impl FnOnce(&mut bool, &mut u64)) -> Option<&Order>; + fn update_by_timestamp(&mut self, key: &u64, f: impl FnOnce(&mut bool, &mut u64)) -> Option<&Order>; + fn update_by_trader_name(&mut self, key: &String, f: impl FnMut(&mut bool, &mut u64)) -> Vec<&Order>; - fn modify_by_order_id(&mut self, f: impl FnOnce(&mut Order)) -> Option<&Order>; - fn 
modify_by_timestamp(&mut self, f: impl FnOnce(&mut Order)) -> Option<&Order>; - fn modify_by_trader_name(&mut self, f: impl Fn(&mut Order)) -> Vec<&Order>; + fn modify_by_order_id(&mut self, key: &u32, f: impl FnOnce(&mut Order)) -> Option<&Order>; + fn modify_by_timestamp(&mut self, key: &u64, f: impl FnOnce(&mut Order)) -> Option<&Order>; + fn modify_by_trader_name(&mut self, key: &String, f: impl FnMut(&mut Order)) -> Vec<&Order>; - fn remove_by_order_id(&mut self) -> Option; - fn remove_by_timestamp(&mut self) -> Option; - fn remove_by_trader_name(&mut self) -> Vec; + fn remove_by_order_id(&mut self, key: &u32) -> Option; + fn remove_by_timestamp(&mut self, key: &u64) -> Option; + fn remove_by_trader_name(&mut self, key: &String) -> Vec; fn iter(&self) -> slab::Iter; unsafe fn iter_mut(&mut self) -> slab::IterMut; diff --git a/RELEASES.md b/RELEASES.md index 32844fc..978b667 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,25 @@ +Version 0.8.1 (2023-08-30) +========================== + +- Allow FnMut closures in `modify_by_` methods. + +Version 0.8.0 (2023-08-30) +========================== + +- Remove `Clone` requirement on elements, now only the indexed fields must implement Clone. This should be helpful when storing non-Clonable types in un-indexed fields. +- If the MultiIndexMap does need to be Cloned, this must be implemented manually, however this should be fairly simple to do next to where the element is defined. See `examples/main.rs`. + +Version 0.7.1 (2023-08-30) +========================== + +- Refactor and cleanup lots of code, also further reduce work done at compile time, by only generating identifiers for each field once. +- Implement work necessary to remove Clone requirement, however this will be fully removed in the next release. + +Version 0.7.0 (2023-08-29) +========================== + +- Add `update_by_` methods and deprecate `get_mut_by_` methods. The new methods are equivalently useful, but safe and equally performant. 
+ Version 0.6.2 (2023-08-15) ========================== diff --git a/multi_index_map/Cargo.toml b/multi_index_map/Cargo.toml index c403399..bdfb076 100644 --- a/multi_index_map/Cargo.toml +++ b/multi_index_map/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "multi_index_map" -version = "0.6.2" +version = "0.8.1" edition = "2021" authors = ["Louis Wyborn "] rust-version = "1.62" @@ -14,7 +14,7 @@ readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -multi_index_map_derive = { version = "0.6.2", path = "../multi_index_map_derive" } +multi_index_map_derive = { version = "0.8.1", path = "../multi_index_map_derive" } # Used as the backing store of all the elements. slab = { version = "0.4" } @@ -31,4 +31,4 @@ name = "performance" harness = false [features] -experimental = ["multi_index_map_derive/experimental"] +trivial_bounds = ["multi_index_map_derive/trivial_bounds"] diff --git a/multi_index_map/benches/performance.rs b/multi_index_map/benches/performance.rs index 7a10d89..8dc15b9 100644 --- a/multi_index_map/benches/performance.rs +++ b/multi_index_map/benches/performance.rs @@ -16,6 +16,18 @@ pub struct TestElementWithOnlyIndexedFields { field_ordered_non_unique: u32, } +impl Clone for MultiIndexTestElementWithOnlyIndexedFieldsMap { + fn clone(&self) -> Self { + Self { + _store: self._store.clone(), + _field_hashed_unique_index: self._field_hashed_unique_index.clone(), + _field_hashed_non_unique_index: self._field_hashed_non_unique_index.clone(), + _field_ordered_unique_index: self._field_ordered_unique_index.clone(), + _field_ordered_non_unique_index: self._field_ordered_non_unique_index.clone(), + } + } +} + const BENCH_SIZES: &[u32] = &[100u32, 1000u32, 10000u32, 100000u32]; fn insert_benchmark(c: &mut Criterion) { diff --git a/multi_index_map/examples/main.rs b/multi_index_map/examples/main.rs index 721509e..84a38af 100644 --- a/multi_index_map/examples/main.rs +++ 
b/multi_index_map/examples/main.rs @@ -13,6 +13,18 @@ mod inner { pub(crate) trader_name: String, pub(crate) note: String, } + + // Manually implement Clone, this can be auto generated correctly by rust-analyzer + impl Clone for MultiIndexOrderMap { + fn clone(&self) -> Self { + Self { + _store: self._store.clone(), + _order_id_index: self._order_id_index.clone(), + _timestamp_index: self._timestamp_index.clone(), + _trader_name_index: self._trader_name_index.clone(), + } + } + } } fn main() { @@ -75,12 +87,12 @@ fn main() { o1_ref.trader_name, o1_ref ); - let o1_mut_ref = unsafe { map.get_mut_by_order_id(&7).unwrap() }; - o1_mut_ref.note = "TestNote".to_string(); - println!( - "Changed note of order {o1_mut_ref:?}, to {:?}", - o1_mut_ref.note, - ); + let o1_ref = map + .update_by_order_id(&7, |note| { + *note = "TestNote".to_string(); + }) + .unwrap(); + println!("Updated note of order {o1_ref:?}, to {:?}", o1_ref.note,); let toms_orders = map.remove_by_trader_name(&"Tom".to_string()); assert_eq!(toms_orders.len(), 2); diff --git a/multi_index_map/tests/capacity_manipulations.rs b/multi_index_map/tests/capacity_manipulations.rs index 5154379..19dc776 100644 --- a/multi_index_map/tests/capacity_manipulations.rs +++ b/multi_index_map/tests/capacity_manipulations.rs @@ -1,6 +1,6 @@ use multi_index_map::MultiIndexMap; -#[derive(MultiIndexMap, Clone, Debug)] +#[derive(MultiIndexMap, Debug)] struct TestElement { #[multi_index(hashed_unique)] field1: i32, diff --git a/multi_index_map/tests/debug.rs b/multi_index_map/tests/debug.rs new file mode 100644 index 0000000..2aeaee2 --- /dev/null +++ b/multi_index_map/tests/debug.rs @@ -0,0 +1,33 @@ +#![cfg_attr(feature = "trivial_bounds", feature(trivial_bounds))] +#![cfg(feature = "trivial_bounds")] +use multi_index_map::MultiIndexMap; + +#[derive(Hash, PartialEq, Eq, Clone, Debug)] +struct TestNonPrimitiveType(u64); + +#[derive(MultiIndexMap, Clone, Debug)] +struct TestElement { + #[multi_index(hashed_unique)] + field1: 
TestNonPrimitiveType, + field2: String, +} + +#[test] +fn should_compile() { + let mut map = MultiIndexTestElementMap::default(); + + // check that formatting produces non empty strings + assert!(!format!("{:?}", map._field1_index).is_empty()); + assert!(!format!("{:?}", map._store).is_empty()); + assert!(!format!("{:?}", map).is_empty()); + + let elem1 = TestElement { + field1: TestNonPrimitiveType(42), + field2: "ElementOne".to_string(), + }; + map.insert(elem1); + + let msg = format!("{:?}", map); + // check if stored field 1 is present in debug output + assert!(msg.contains("42")); +} diff --git a/multi_index_map/tests/get_and_modify_mut.rs b/multi_index_map/tests/get_and_modify_mut.rs index 67352de..7101c17 100644 --- a/multi_index_map/tests/get_and_modify_mut.rs +++ b/multi_index_map/tests/get_and_modify_mut.rs @@ -1,9 +1,11 @@ +#![cfg_attr(feature = "trivial_bounds", feature(trivial_bounds))] + use multi_index_map::MultiIndexMap; #[derive(Hash, PartialEq, Eq, Clone)] struct TestNonPrimitiveType(u64); -#[derive(MultiIndexMap, Clone)] +#[derive(MultiIndexMap)] struct TestElement { #[multi_index(hashed_non_unique)] field1: usize, diff --git a/multi_index_map/tests/hashed_non_unique.rs b/multi_index_map/tests/hashed_non_unique.rs index eea2f16..86f5d66 100644 --- a/multi_index_map/tests/hashed_non_unique.rs +++ b/multi_index_map/tests/hashed_non_unique.rs @@ -3,7 +3,7 @@ use multi_index_map::MultiIndexMap; #[derive(Hash, PartialEq, Eq, Clone, Debug)] struct TestNonPrimitiveType(u64); -#[derive(MultiIndexMap, Clone, Debug)] +#[derive(MultiIndexMap, Debug)] struct TestElement { #[multi_index(hashed_non_unique)] field1: TestNonPrimitiveType, diff --git a/multi_index_map/tests/hashed_unique.rs b/multi_index_map/tests/hashed_unique.rs index 32ee84c..244bdca 100644 --- a/multi_index_map/tests/hashed_unique.rs +++ b/multi_index_map/tests/hashed_unique.rs @@ -1,9 +1,11 @@ +#![cfg_attr(feature = "trivial_bounds", feature(trivial_bounds))] + use 
multi_index_map::MultiIndexMap; #[derive(Hash, PartialEq, Eq, Clone)] struct TestNonPrimitiveType(u64); -#[derive(MultiIndexMap, Clone)] +#[derive(MultiIndexMap)] struct TestElement { #[multi_index(hashed_unique)] field1: TestNonPrimitiveType, diff --git a/multi_index_map/tests/iter_after_modify.rs b/multi_index_map/tests/iter_after_modify.rs index becf513..a331768 100644 --- a/multi_index_map/tests/iter_after_modify.rs +++ b/multi_index_map/tests/iter_after_modify.rs @@ -1,6 +1,6 @@ use multi_index_map::MultiIndexMap; -#[derive(MultiIndexMap, Debug, Clone)] +#[derive(MultiIndexMap, Debug)] pub(crate) struct Order { #[multi_index(hashed_unique)] pub(crate) order_id: u32, @@ -51,10 +51,15 @@ fn iter_after_modify() { assert_eq!(it.next().unwrap().order_id, 1); } + let mut s = "test".to_string(); + map.modify_by_order_id(&1, |o| { - o.timestamp = 0; + s = "p".to_string(); + o.timestamp = 4; }); + assert_eq!(s, "p"); + { let mut it = map.iter_by_timestamp(); assert_eq!(it.next().unwrap().order_id, 1); diff --git a/multi_index_map/tests/mixed_non_unique.rs b/multi_index_map/tests/mixed_non_unique.rs index de911bb..ad381f4 100644 --- a/multi_index_map/tests/mixed_non_unique.rs +++ b/multi_index_map/tests/mixed_non_unique.rs @@ -1,6 +1,8 @@ +#![cfg_attr(feature = "trivial_bounds", feature(trivial_bounds))] + use multi_index_map::MultiIndexMap; -#[derive(MultiIndexMap, Clone)] +#[derive(MultiIndexMap)] struct MultipleOrderedNonUniqueStruct { #[multi_index(ordered_non_unique)] field1: u32, diff --git a/multi_index_map/tests/ordered_non_unique.rs b/multi_index_map/tests/ordered_non_unique.rs index c3564d0..51bac20 100644 --- a/multi_index_map/tests/ordered_non_unique.rs +++ b/multi_index_map/tests/ordered_non_unique.rs @@ -3,7 +3,7 @@ use multi_index_map::MultiIndexMap; #[derive(Hash, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] struct TestNonPrimitiveType(u64); -#[derive(MultiIndexMap, Clone, Debug)] +#[derive(MultiIndexMap, Debug)] struct TestElement { 
#[multi_index(ordered_non_unique)] field1: TestNonPrimitiveType, diff --git a/multi_index_map/tests/ordered_unique.rs b/multi_index_map/tests/ordered_unique.rs index b2e7e71..c243a0c 100644 --- a/multi_index_map/tests/ordered_unique.rs +++ b/multi_index_map/tests/ordered_unique.rs @@ -1,9 +1,11 @@ +#![cfg_attr(feature = "trivial_bounds", feature(trivial_bounds))] + use multi_index_map::MultiIndexMap; #[derive(Hash, PartialEq, Eq, Clone, PartialOrd, Ord)] struct TestNonPrimitiveType(u64); -#[derive(MultiIndexMap, Clone)] +#[derive(MultiIndexMap)] struct TestElement { #[multi_index(ordered_unique)] field1: TestNonPrimitiveType, diff --git a/multi_index_map/tests/reverse_iter.rs b/multi_index_map/tests/reverse_iter.rs index dc249ba..2440379 100644 --- a/multi_index_map/tests/reverse_iter.rs +++ b/multi_index_map/tests/reverse_iter.rs @@ -1,5 +1,5 @@ use multi_index_map::MultiIndexMap; -#[derive(MultiIndexMap, Clone, PartialEq, Debug)] +#[derive(MultiIndexMap, PartialEq, Debug)] struct TestElement { #[multi_index(ordered_non_unique)] field1: usize, diff --git a/multi_index_map/tests/update.rs b/multi_index_map/tests/update.rs new file mode 100644 index 0000000..1f82ea3 --- /dev/null +++ b/multi_index_map/tests/update.rs @@ -0,0 +1,100 @@ +use multi_index_map::MultiIndexMap; + +#[derive(Hash, PartialEq, Eq, Clone)] +struct TestNonPrimitiveType(u64); + +#[derive(MultiIndexMap, PartialEq, Debug)] +struct TestElement { + #[multi_index(hashed_non_unique)] + field1: i32, + field2: f64, + #[multi_index(hashed_unique)] + field3: u32, + field4: String, +} + +#[test] +fn test_non_unique_update() { + let mut map = MultiIndexTestElementMap::default(); + for i in 0..10 { + if i % 2 == 0 { + map.insert(TestElement { + field1: 42, + field2: i as f64, + field3: i, + field4: i.to_string(), + }); + } else { + map.insert(TestElement { + field1: 37, + field2: i as f64, + field3: i, + field4: i.to_string(), + }); + } + } + + let refs = map.update_by_field1(&37, |field2, field4| { + *field2 
= 99.0; + *field4 = "NinetyNine".to_string() + }); + for r in refs.iter() { + assert_eq!(r.field2, 99.0); + assert_eq!(r.field4, "NinetyNine"); + } + + let refs = map.get_by_field1(&42); + for (i, r) in refs.iter().enumerate() { + assert_eq!(r.field2, i as f64 * 2.0); + assert_eq!(r.field4, (i * 2).to_string()); + } +} + +#[test] +fn test_unique_update() { + let mut map = MultiIndexTestElementMap::default(); + for i in 0..10 { + if i % 2 == 0 { + map.insert(TestElement { + field1: 42, + field2: i as f64, + field3: i, + field4: i.to_string(), + }); + } else { + map.insert(TestElement { + field1: 37, + field2: i as f64, + field3: i, + field4: i.to_string(), + }); + } + } + + let elem = map.update_by_field3(&0, |field2, field4| { + *field2 = 99.0; + *field4 = "NinetyNine".to_string() + }); + + assert_eq!( + elem, + Some(&TestElement { + field1: 42, + field2: 99.0, + field3: 0, + field4: "NinetyNine".to_string() + }) + ); + + let elem = map.get_by_field3(&1); + + assert_eq!( + elem, + Some(&TestElement { + field1: 37, + field2: 1.0, + field3: 1, + field4: 1.to_string() + }) + ); +} diff --git a/multi_index_map_derive/Cargo.toml b/multi_index_map_derive/Cargo.toml index 00ce668..7eeeee1 100644 --- a/multi_index_map_derive/Cargo.toml +++ b/multi_index_map_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "multi_index_map_derive" -version = "0.6.2" +version = "0.8.1" edition = "2021" authors = ["Louis Wyborn "] rust-version = "1.62" @@ -33,4 +33,4 @@ convert_case = { version = "0.6" } proc-macro = true [features] -experimental = [] +trivial_bounds = [] diff --git a/multi_index_map_derive/src/generators.rs b/multi_index_map_derive/src/generators.rs index f5ceb0f..f001711 100644 --- a/multi_index_map_derive/src/generators.rs +++ b/multi_index_map_derive/src/generators.rs @@ -1,49 +1,107 @@ -use ::convert_case::Casing; use ::quote::{format_ident, quote}; use ::syn::{Field, Visibility}; +use proc_macro2::Ident; +use proc_macro_error::OptionExt; +use syn::Type; use 
crate::index_attributes::{Ordering, Uniqueness}; +// Struct to store generated identifiers for each field. +// These are set once during the initial pass over the indexed fields, +// then reused in each generator, to reduce work done at compile-time, +// and to ensure each generator uses the same identifiers. +pub(crate) struct FieldIdents { + pub(crate) name: Ident, + pub(crate) index_name: Ident, + pub(crate) cloned_name: Ident, + pub(crate) iter_name: Ident, +} + +struct FieldInfo<'a> { + vis: &'a Visibility, + ty: &'a Type, + str: &'a str, +} + +pub(crate) const EXPECT_NAMED_FIELDS: &str = + "Internal logic broken, all fields should have named identifiers"; + // For each indexed field generate a TokenStream representing the lookup table for that field // Each lookup table maps it's index to a position in the backing storage, // or multiple positions in the backing storage in the non-unique indexes. -pub(crate) fn generate_lookup_tables<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, ordering, uniqueness)| { - let index_name = format_ident!("_{}_index", f.ident.as_ref().unwrap()); +pub(crate) fn generate_lookup_tables( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(f, idents, ordering, uniqueness)| { let ty = &f.ty; + let index_name = &idents.index_name; - match uniqueness { - Uniqueness::Unique => match ordering { - Ordering::Hashed => quote! { - #index_name: ::multi_index_map::rustc_hash::FxHashMap<#ty, usize>, - }, - Ordering::Ordered => quote! { - #index_name: ::std::collections::BTreeMap<#ty, usize>, - }, - }, - Uniqueness::NonUnique => match ordering { - Ordering::Hashed => quote! { - #index_name: ::multi_index_map::rustc_hash::FxHashMap<#ty, ::std::collections::BTreeSet>, - }, - Ordering::Ordered => quote! 
{ - #index_name: ::std::collections::BTreeMap<#ty, ::std::collections::BTreeSet>, - }, - }, + let field_type = index_field_type(ty, ordering, uniqueness); + + quote! { + #index_name: #field_type, } }) } +fn index_field_type( + ty: &Type, + ordering: &Ordering, + uniqueness: &Uniqueness, +) -> ::proc_macro2::TokenStream { + match uniqueness { + Uniqueness::Unique => match ordering { + Ordering::Hashed => quote! { + ::multi_index_map::rustc_hash::FxHashMap<#ty, usize> + }, + Ordering::Ordered => quote! { + ::std::collections::BTreeMap<#ty, usize> + }, + }, + Uniqueness::NonUnique => match ordering { + Ordering::Hashed => quote! { + ::multi_index_map::rustc_hash::FxHashMap<#ty, ::std::collections::BTreeSet> + }, + Ordering::Ordered => quote! { + ::std::collections::BTreeMap<#ty, ::std::collections::BTreeSet> + }, + }, + } +} + +// For each indexed field generate a TokenStream of the Debug bound for the field type and the multi_index_map specific type +#[cfg(feature = "trivial_bounds")] +pub(crate) fn generate_lookup_table_field_types( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields + .iter() + .flat_map(|(f, _idents, ordering, uniqueness)| { + let ty = &f.ty; + + let type_debug = quote! { + #ty: core::fmt::Debug, + }; + + let field_type = index_field_type(ty, ordering, uniqueness); + + let field_debug = quote! { + #field_type: core::fmt::Debug, + }; + + [type_debug, field_debug] + }) +} + // For each indexed field generate a TokenStream representing initializing the lookup table. 
// Used in `with_capacity` initialization // If lookup table data structures support `with_capacity`, change `default()` and `new()` calls to // `with_capacity(n)` -pub(crate) fn generate_lookup_table_init<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, ordering, _uniqueness)| { - let index_name = format_ident!("_{}_index", f.ident.as_ref().unwrap()); +pub(crate) fn generate_lookup_table_init( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, ordering, _uniqueness)| { + let index_name = &idents.index_name; match ordering { Ordering::Hashed => quote! { @@ -60,11 +118,11 @@ pub(crate) fn generate_lookup_table_init<'a>( // Used in `reserve` // Currently `BTreeMap::extend_reserve()` is nightly-only and uses the trait default implementation, which does nothing. // Once this is implemented and stabilized, we will use it here to reserve capacity. -pub(crate) fn generate_lookup_table_reserve<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, ordering, _uniqueness)| { - let index_name = format_ident!("_{}_index", f.ident.as_ref().unwrap()); +pub(crate) fn generate_lookup_table_reserve( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, ordering, _uniqueness)| { + let index_name = &idents.index_name; match ordering { Ordering::Hashed => quote! { @@ -79,11 +137,11 @@ pub(crate) fn generate_lookup_table_reserve<'a>( // Used in `shrink_to_fit` // For consistency, HashMaps are shrunk to the capacity of the backing storage // `BTreeMap` does not support shrinking. 
-pub(crate) fn generate_lookup_table_shrink<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, ordering, _uniqueness)| { - let index_name = format_ident!("_{}_index", f.ident.as_ref().unwrap()); +pub(crate) fn generate_lookup_table_shrink( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, ordering, _uniqueness)| { + let index_name = &idents.index_name; match ordering { Ordering::Hashed => quote! { @@ -94,18 +152,32 @@ pub(crate) fn generate_lookup_table_shrink<'a>( }) } +// For each indexed field generate a TokenStream representing a debug struct field +#[cfg(feature = "trivial_bounds")] +pub(crate) fn generate_lookup_table_debug( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, _ordering, _uniqueness)| { + let index_name = &idents.index_name; + + quote! { + .field(stringify!(#index_name), &self.#index_name) + } + }) +} + // For each indexed field generate a TokenStream representing inserting the position // in the backing storage to that field's lookup table // Unique indexed fields just require a simple insert to the map, // whereas non-unique fields require inserting to the container of positions, // creating a new container if necessary. 
-pub(crate) fn generate_inserts<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, _ordering, uniqueness)| { - let field_name = f.ident.as_ref().unwrap(); - let field_name_string = field_name.to_string(); - let index_name = format_ident!("_{}_index", field_name); +pub(crate) fn generate_inserts( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, _ordering, uniqueness)| { + let field_name = &idents.name; + let field_name_string = stringify!(field_name); + let index_name = &idents.index_name; match uniqueness { Uniqueness::Unique => quote! { @@ -140,14 +212,13 @@ pub(crate) fn generate_inserts<'a>( // + If there are exactly one index in the container, then the index has to be idx, // remove the key from the lookup table pub(crate) fn generate_removes( - fields: &[(&Field, Ordering, Uniqueness)], + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], ) -> Vec<::proc_macro2::TokenStream> { fields .iter() - .map(|(f, _ordering, uniqueness)| { - let field_name = f.ident.as_ref().unwrap(); - let field_name_string = field_name.to_string(); - let index_name = format_ident!("_{}_index", field_name); + .map(|(_f, idents, _ordering, uniqueness)| { + let field_name = &idents.name; + let field_name_string = stringify!(field_name); let error_msg = format!( concat!( "Internal invariants broken, ", @@ -155,6 +226,7 @@ pub(crate) fn generate_removes( ), field_name_string ); + let index_name = &idents.index_name; match uniqueness { Uniqueness::Unique => quote! 
{ @@ -178,25 +250,44 @@ pub(crate) fn generate_removes( .collect() } +// For each indexed field generate a TokenStream representing the clone the original value, +// so that we can compare after the modify is applied and adjust lookup tables as necessary +pub(crate) fn generate_pre_modifies( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> Vec<::proc_macro2::TokenStream> { + fields + .iter() + .map(|(_f, idents, _, _)| { + let field_name = &idents.name; + let orig_ident = &idents.cloned_name; + + quote! { + let #orig_ident = elem.#field_name.clone(); + } + }) + .collect::>() +} + // For each indexed field generate a TokenStream representing the combined remove and insert from that // field's lookup table. // Used in modifier. Run after an element is already modified in the backing storage. -// The element before the change is stored in `elem_orig`. +// The fields of the original element are stored in `orig_#field_name` // The element after change is stored in reference `elem` (inside the backing storage). // The index of `elem` in the backing storage is `idx` -// For each field, only make changes if elem.#field_name and elem_orig.#field_name are not equal +// For each field, only make changes if `elem.#field_name` and `orig_#field_name` are not equal // - When the field is unique, remove the old key and insert idx under the new key // (if new key already exists, panic!) 
// - When the field is non-unique, remove idx from the container associated with the old key
//   + if the container is empty after removal, remove the old key, and insert idx to the new key
//     (create a new container if necessary)
-pub(crate) fn generate_modifies(
-    fields: &[(&Field, Ordering, Uniqueness)],
+pub(crate) fn generate_post_modifies(
+    fields: &[(Field, FieldIdents, Ordering, Uniqueness)],
 ) -> Vec<::proc_macro2::TokenStream> {
-    fields.iter().map(|(f, _ordering, uniqueness)| {
-        let field_name = f.ident.as_ref().unwrap();
-        let field_name_string = field_name.to_string();
-        let index_name = format_ident!("_{}_index", field_name);
+    fields.iter().map(|(_f, idents, _ordering, uniqueness)| {
+        let field_name = &idents.name;
+        let field_name_string = field_name.to_string();
+        let orig_ident = &idents.cloned_name;
+        let index_name = &idents.index_name;
         let error_msg = format!(
             concat!(
                 "Internal invariants broken, ",
@@ -207,8 +298,8 @@ pub(crate) fn generate_modifies(
         match uniqueness {
             Uniqueness::Unique => quote! {
-                if elem.#field_name != elem_orig.#field_name {
-                    let idx = self.#index_name.remove(&elem_orig.#field_name).expect(#error_msg);
+                if elem.#field_name != #orig_ident {
+                    let idx = self.#index_name.remove(&#orig_ident).expect(#error_msg);
                     let orig_elem_idx = self.#index_name.insert(elem.#field_name.clone(), idx);
                     if orig_elem_idx.is_some() {
                         panic!(
@@ -217,17 +308,16 @@ pub(crate) fn generate_modifies(
                         );
                     }
                 }
-
             },
             Uniqueness::NonUnique => quote!
{ - if elem.#field_name != elem_orig.#field_name { - let idxs = self.#index_name.get_mut(&elem_orig.#field_name).expect(#error_msg); + if elem.#field_name != #orig_ident { + let idxs = self.#index_name.get_mut(&#orig_ident).expect(#error_msg); if idxs.len() > 1 { if !(idxs.remove(&idx)) { panic!(#error_msg); } } else { - self.#index_name.remove(&elem_orig.#field_name); + self.#index_name.remove(&#orig_ident); } self.#index_name.entry(elem.#field_name.clone()) .or_insert(::std::collections::BTreeSet::new()) @@ -238,12 +328,11 @@ pub(crate) fn generate_modifies( }).collect() } -pub(crate) fn generate_clears<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], -) -> impl Iterator + 'a { - fields.iter().map(|(f, _ordering, _uniqueness)| { - let field_name = f.ident.as_ref().unwrap(); - let index_name = format_ident!("_{}_index", field_name); +pub(crate) fn generate_clears( + fields: &[(Field, FieldIdents, Ordering, Uniqueness)], +) -> impl Iterator + '_ { + fields.iter().map(|(_f, idents, _ordering, _uniqueness)| { + let index_name = &idents.index_name; quote! { self.#index_name.clear(); @@ -251,245 +340,403 @@ pub(crate) fn generate_clears<'a>( }) } -// For each indexed field generate a TokenStream representing all the accessors -// for the underlying storage via that field's lookup table. 
-pub(crate) fn generate_accessors<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], - map_name: &'a proc_macro2::Ident, - element_name: &'a proc_macro2::Ident, - removes: &'a [proc_macro2::TokenStream], - modifies: &'a [proc_macro2::TokenStream], -) -> impl Iterator + 'a { - fields.iter().map(move |(f, ordering, uniqueness)| { - let field_name = f.ident.as_ref().unwrap(); - let field_name_string = field_name.to_string(); - let field_vis = &f.vis; - let index_name = format_ident!("_{}_index", field_name); - let getter_name = format_ident!("get_by_{}", field_name); - let mut_getter_name = format_ident!("get_mut_by_{}", field_name); - let remover_name = format_ident!("remove_by_{}", field_name); - let modifier_name = format_ident!("modify_by_{}", field_name); - let iter_name = format_ident!( - "{}{}Iter", - map_name, - field_name.to_string().to_case(::convert_case::Case::UpperCamel) - ); - let iter_getter_name = format_ident!("iter_by_{}", field_name); - let ty = &f.ty; - - // TokenStream representing the get_by_ accessor for this field. - // For non-unique indexes we must go through all matching elements and find their positions, - // in order to return a Vec of references to the backing storage. - let getter = match uniqueness { - Uniqueness::Unique => quote! { - #field_vis fn #getter_name(&self, key: &#ty) -> Option<&#element_name> { - Some(&self._store[*self.#index_name.get(key)?]) - } - }, - Uniqueness::NonUnique => quote! { - #field_vis fn #getter_name(&self, key: &#ty) -> Vec<&#element_name> { - if let Some(idxs) = self.#index_name.get(key) { - let mut elem_refs = Vec::with_capacity(idxs.len()); - for idx in idxs { - elem_refs.push(&self._store[*idx]) - } - elem_refs - } else { - Vec::new() - } - } - }, - }; - - // TokenStream representing the get_mut_by_ accessor for this field. - let mut_getter = match uniqueness { - Uniqueness::Unique => quote! 
{ - /// SAFETY: - /// It is safe to mutate the non-indexed fields, - /// however mutating any of the indexed fields will break the internal invariants. - /// If the indexed fields need to be changed, the modify() method must be used. - #field_vis unsafe fn #mut_getter_name(&mut self, key: &#ty) -> Option<&mut #element_name> { - Some(&mut self._store[*self.#index_name.get(key)?]) - } - }, - Uniqueness::NonUnique => quote! { - /// SAFETY: - /// It is safe to mutate the non-indexed fields, - /// however mutating any of the indexed fields will break the internal invariants. - /// If the indexed fields need to be changed, the modify() method must be used. - #field_vis unsafe fn #mut_getter_name(&mut self, key: &#ty) -> Vec<&mut #element_name> { - if let Some(idxs) = self.#index_name.get(key) { - let mut refs = Vec::with_capacity(idxs.len()); - let mut mut_iter = self._store.iter_mut(); - let mut last_idx: usize = 0; - for idx in idxs.iter() { - match mut_iter.nth(*idx - last_idx) { - Some(val) => { - refs.push(val.1) - }, - _ => { - panic!( - "Error getting mutable reference of non-unique field `{}` in getter.", - #field_name_string - ); - } - } - last_idx = *idx + 1; - } - refs - } else { - Vec::new() - } - } - }, - }; - - // TokenStream representing the remove_by_ accessor for this field. - // For non-unique indexes we must go through all matching elements and find their positions, - // in order to return a Vec elements from the backing storage. - // - get the back storage index(s) - // - mark the index(s) as unused in back storage - // - remove the index(s) from all fields - // - return the element(s) - let remover = match uniqueness { - Uniqueness::Unique => quote! { - - #field_vis fn #remover_name(&mut self, key: &#ty) -> Option<#element_name> { - let idx = self.#index_name.remove(key)?; - let elem_orig = self._store.remove(idx); - #(#removes)* - Some(elem_orig) - } - }, - Uniqueness::NonUnique => quote! 
{ - #field_vis fn #remover_name(&mut self, key: &#ty) -> Vec<#element_name> { - if let Some(idxs) = self.#index_name.remove(key) { - let mut elems = Vec::with_capacity(idxs.len()); - for idx in idxs { - let elem_orig = self._store.remove(idx); - #(#removes)* - elems.push(elem_orig) - } - elems - } else { - Vec::new() +// TokenStream representing the get_by_ accessor for this field. +// For non-unique indexes we must go through all matching elements and find their positions, +// in order to return a Vec of references to the backing storage. +fn generate_field_getter( + field_idents: &FieldIdents, + field_info: &FieldInfo, + element_name: &Ident, + uniqueness: &Uniqueness, +) -> proc_macro2::TokenStream { + let getter_name = format_ident!("get_by_{}", &field_idents.name); + let index_name = &field_idents.index_name; + let field_vis = &field_info.vis; + let field_type = &field_info.ty; + + match uniqueness { + Uniqueness::Unique => quote! { + #field_vis fn #getter_name(&self, key: &#field_type) -> Option<&#element_name> { + Some(&self._store[*self.#index_name.get(key)?]) + } + }, + Uniqueness::NonUnique => quote! { + #field_vis fn #getter_name(&self, key: &#field_type) -> Vec<&#element_name> { + if let Some(idxs) = self.#index_name.get(key) { + let mut elem_refs = Vec::with_capacity(idxs.len()); + for idx in idxs { + elem_refs.push(&self._store[*idx]) } + elem_refs + } else { + Vec::new() } - }, - }; + } + }, + } +} - // TokenStream representing the modify_by_ accessor for this field. - // - obtain mutable reference (s) of the element - // - apply changes to the reference(s) - // - for each changed element, update all changed fields - // - return the modified item(s) as references - let modifier = match uniqueness { - Uniqueness::Unique => quote! 
{ - #field_vis fn #modifier_name( - &mut self, - key: &#ty, - f: impl FnOnce(&mut #element_name) - ) -> Option<&#element_name> { - let idx = *self.#index_name.get(key)?; - let elem = &mut self._store[idx]; - let elem_orig = elem.clone(); - f(elem); - #(#modifies)* - Some(elem) - } - }, - Uniqueness::NonUnique => quote! { - #field_vis fn #modifier_name( - &mut self, - key: &#ty, - f: impl Fn(&mut #element_name) - ) -> Vec<&#element_name> { - let idxs = match self.#index_name.get(key) { - Some(container) => container.clone(), - _ => ::std::collections::BTreeSet::::new() - }; +// TokenStream representing the get_mut_by_ accessor for this field. +// Note that these methods are deprecated, and will be removed in a future version. +fn generate_field_mut_getter( + field_idents: &FieldIdents, + field_info: &FieldInfo, + element_name: &Ident, + uniqueness: &Uniqueness, +) -> proc_macro2::TokenStream { + let mut_getter_name = format_ident!("get_mut_by_{}", &field_idents.name); + let index_name = &field_idents.index_name; + let field_vis = &field_info.vis; + let field_type = &field_info.ty; + let field_name_str = &field_info.str; + + match uniqueness { + Uniqueness::Unique => quote! { + /// SAFETY: + /// It is safe to mutate the non-indexed fields, + /// however mutating any of the indexed fields will break the internal invariants. + /// If the indexed fields need to be changed, the modify() method must be used. + #[deprecated(since="0.7.0", note="please use `update_by_` methods to update non-indexed fields instead, these are equally performant but are safe")] + #field_vis unsafe fn #mut_getter_name(&mut self, key: &#field_type) -> Option<&mut #element_name> { + Some(&mut self._store[*self.#index_name.get(key)?]) + } + }, + Uniqueness::NonUnique => quote! { + /// SAFETY: + /// It is safe to mutate the non-indexed fields, + /// however mutating any of the indexed fields will break the internal invariants. 
+ /// If the indexed fields need to be changed, the modify() method must be used. + #[deprecated(since="0.7.0", note="please use `update_by_` methods to update non-indexed fields instead, these are equally performant but are safe")] + #field_vis unsafe fn #mut_getter_name(&mut self, key: &#field_type) -> Vec<&mut #element_name> { + if let Some(idxs) = self.#index_name.get(key) { let mut refs = Vec::with_capacity(idxs.len()); let mut mut_iter = self._store.iter_mut(); let mut last_idx: usize = 0; - for idx in idxs { - match mut_iter.nth(idx - last_idx) { + for idx in idxs.iter() { + match mut_iter.nth(*idx - last_idx) { Some(val) => { - let elem = val.1; - let elem_orig = elem.clone(); - f(elem); - #(#modifies)* - refs.push(&*elem); + refs.push(val.1) }, _ => { panic!( - "Error getting mutable reference of non-unique field `{}` in modifier.", - #field_name_string + "Error getting mutable reference of non-unique field `{}` in getter.", + #field_name_str ); } } - last_idx = idx + 1; + last_idx = *idx + 1; } refs + } else { + Vec::new() } - }, - }; + } + }, + } +} - let iterator_def = match ordering { - Ordering::Hashed => quote! { - #iter_name { - _store_ref: &self._store, - _iter: self.#index_name.iter(), - _inner_iter: None, +// TokenStream representing the remove_by_ accessor for this field. +// For non-unique indexes we must go through all matching elements and find their positions, +// in order to return a Vec elements from the backing storage. 
+// - get the back storage index(s) +// - mark the index(s) as unused in back storage +// - remove the index(s) from all fields +// - return the element(s) +fn generate_field_remover( + field_idents: &FieldIdents, + field_info: &FieldInfo, + element_name: &Ident, + uniqueness: &Uniqueness, + removes: &[proc_macro2::TokenStream], +) -> proc_macro2::TokenStream { + let remover_name = format_ident!("remove_by_{}", &field_idents.name); + let index_name = &field_idents.index_name; + let field_vis = &field_info.vis; + let field_type = &field_info.ty; + + match uniqueness { + Uniqueness::Unique => quote! { + #field_vis fn #remover_name(&mut self, key: &#field_type) -> Option<#element_name> { + let idx = self.#index_name.remove(key)?; + let elem_orig = self._store.remove(idx); + #(#removes)* + Some(elem_orig) + } + }, + Uniqueness::NonUnique => quote! { + #field_vis fn #remover_name(&mut self, key: &#field_type) -> Vec<#element_name> { + if let Some(idxs) = self.#index_name.remove(key) { + let mut elems = Vec::with_capacity(idxs.len()); + for idx in idxs { + let elem_orig = self._store.remove(idx); + #(#removes)* + elems.push(elem_orig) + } + elems + } else { + Vec::new() } - }, - Ordering::Ordered => quote! { - #iter_name { - _store_ref: &self._store, - _iter: self.#index_name.iter(), - _iter_rev: self.#index_name.iter().rev(), - _inner_iter: None, + } + }, + } +} + +fn generate_field_updater( + field_idents: &FieldIdents, + field_info: &FieldInfo, + element_name: &Ident, + uniqueness: &Uniqueness, + unindexed_types: &[&Type], + unindexed_idents: &[&Ident], +) -> proc_macro2::TokenStream { + let updater_name = format_ident!("update_by_{}", &field_idents.name); + let index_name = &field_idents.index_name; + let field_vis = &field_info.vis; + let field_type = &field_info.ty; + let field_name_str = &field_info.str; + + match uniqueness { + Uniqueness::Unique => quote! 
{
+            #field_vis fn #updater_name(
+                &mut self,
+                key: &#field_type,
+                f: impl FnOnce(#(&mut #unindexed_types,)*)
+            ) -> Option<&#element_name> {
+                let idx = *self.#index_name.get(key)?;
+                let elem = &mut self._store[idx];
+                f(#(&mut elem.#unindexed_idents,)*);
+                Some(elem)
+            }
+        },
+        Uniqueness::NonUnique => quote! {
+            #field_vis fn #updater_name(
+                &mut self,
+                key: &#field_type,
+                mut f: impl FnMut(#(&mut #unindexed_types,)*)
+            ) -> Vec<&#element_name> {
+                let empty = ::std::collections::BTreeSet::<usize>::new();
+                let idxs = match self.#index_name.get(key) {
+                    Some(container) => container,
+                    _ => &empty,
+                };
+
+                let mut refs = Vec::with_capacity(idxs.len());
+                let mut mut_iter = self._store.iter_mut();
+                let mut last_idx: usize = 0;
+                for idx in idxs {
+                    match mut_iter.nth(idx - last_idx) {
+                        Some(val) => {
+                            let elem = val.1;
+                            f(#(&mut elem.#unindexed_idents,)*);
+                            refs.push(&*elem);
+                        }
+                        _ => {
+                            panic!(
+                                "Error getting mutable reference of non-unique field `{}` in updater.",
+                                #field_name_str
+                            );
+                        }
+                    }
+                    last_idx = idx + 1;
                 }
+                refs
+            }
+        },
+    }
+}
 
+// TokenStream representing the modify_by_ accessor for this field.
+// - obtain mutable reference (s) of the element +// - apply changes to the reference(s) +// - for each changed element, update all changed fields +// - return the modified item(s) as references +fn generate_field_modifier( + field_idents: &FieldIdents, + field_info: &FieldInfo, + element_name: &Ident, + uniqueness: &Uniqueness, + pre_modifies: &[proc_macro2::TokenStream], + post_modifies: &[proc_macro2::TokenStream], +) -> proc_macro2::TokenStream { + let modifier_name = format_ident!("modify_by_{}", &field_idents.name); + let index_name = &field_idents.index_name; + let field_vis = &field_info.vis; + let field_type = &field_info.ty; + let field_name_str = &field_info.str; + + match uniqueness { + Uniqueness::Unique => quote! { + #field_vis fn #modifier_name( + &mut self, + key: &#field_type, + f: impl FnOnce(&mut #element_name) + ) -> Option<&#element_name> { + let idx = *self.#index_name.get(key)?; + let elem = &mut self._store[idx]; + #(#pre_modifies)* + f(elem); + #(#post_modifies)* + Some(elem) + } + }, + Uniqueness::NonUnique => quote! 
{
+            #field_vis fn #modifier_name(
+                &mut self,
+                key: &#field_type,
+                mut f: impl FnMut(&mut #element_name)
+            ) -> Vec<&#element_name> {
+                let idxs = match self.#index_name.get(key) {
+                    Some(container) => container.clone(),
+                    _ => ::std::collections::BTreeSet::<usize>::new()
+                };
+                let mut refs = Vec::with_capacity(idxs.len());
+                let mut mut_iter = self._store.iter_mut();
+                let mut last_idx: usize = 0;
+                for idx in idxs {
+                    match mut_iter.nth(idx - last_idx) {
+                        Some(val) => {
+                            let elem = val.1;
+                            #(#pre_modifies)*
+                            f(elem);
+                            #(#post_modifies)*
+                            refs.push(&*elem);
+                        },
+                        _ => {
+                            panic!(
+                                "Error getting mutable reference of non-unique field `{}` in modifier.",
+                                #field_name_str
+                            );
+                        }
+                    }
+                    last_idx = idx + 1;
+                }
+                refs
+            }
+        },
+    }
+}
+
+fn generate_field_iter_getter(
+    field_idents: &FieldIdents,
+    field_info: &FieldInfo,
+    ordering: &Ordering,
+) -> proc_macro2::TokenStream {
+    let iter_getter_name = format_ident!("iter_by_{}", &field_idents.name);
+    let iter_name = &field_idents.iter_name;
+    let index_name = &field_idents.index_name;
+    let field_vis = &field_info.vis;
+
+    let iterator_def = match ordering {
+        Ordering::Hashed => quote! {
+            #iter_name {
+                _store_ref: &self._store,
+                _iter: self.#index_name.iter(),
+                _inner_iter: None,
+            }
+        },
+        Ordering::Ordered => quote! {
+            #iter_name {
+                _store_ref: &self._store,
+                _iter: self.#index_name.iter(),
+                _iter_rev: self.#index_name.iter().rev(),
+                _inner_iter: None,
+            }
+        },
+    };
-
+    quote! {
+        #field_vis fn #iter_getter_name(&self) -> #iter_name {
+            #iterator_def
+        }
+    }
+}
-
+// For each indexed field generate a TokenStream representing all the accessors
+// for the underlying storage via that field's lookup table.
+pub(crate) fn generate_accessors<'a>(
+    indexed_fields: &'a [(Field, FieldIdents, Ordering, Uniqueness)],
+    unindexed_fields: &'a [Field],
+    element_name: &'a proc_macro2::Ident,
+    removes: &'a [proc_macro2::TokenStream],
+    pre_modifies: &'a [proc_macro2::TokenStream],
+    post_modifies: &'a [proc_macro2::TokenStream],
+) -> impl Iterator<Item = proc_macro2::TokenStream> + 'a {
+    let unindexed_types = unindexed_fields.iter().map(|f| &f.ty).collect::<Vec<_>>();
+    let unindexed_idents = unindexed_fields
+        .iter()
+        .map(|f| f.ident.as_ref().expect_or_abort(EXPECT_NAMED_FIELDS))
+        .collect::<Vec<_>>();
+
+    indexed_fields
+        .iter()
+        .map(move |(f, idents, ordering, uniqueness)| {
+            let field_info = FieldInfo {
+                vis: &f.vis,
+                ty: &f.ty,
+                str: &idents.name.to_string(),
+            };
+
+            let getter = generate_field_getter(idents, &field_info, element_name, uniqueness);
+
+            let mut_getter =
+                generate_field_mut_getter(idents, &field_info, element_name, uniqueness);
+
+            let remover =
+                generate_field_remover(idents, &field_info, element_name, uniqueness, removes);
+
+            let updater = generate_field_updater(
+                idents,
+                &field_info,
+                element_name,
+                uniqueness,
+                &unindexed_types,
+                &unindexed_idents,
+            );
+
+            let modifier = generate_field_modifier(
+                idents,
+                &field_info,
+                element_name,
+                uniqueness,
+                pre_modifies,
+                post_modifies,
+            );
+
+            let iter_getter = generate_field_iter_getter(idents, &field_info, ordering);
 
-        #field_vis fn #iter_getter_name(&self) -> #iter_name {
-            #iterator_def
+            // Put all these TokenStreams together, and put a TokenStream representing the iter_by_ accessor
+            // on the end.
+            quote! {
+                #getter
+
+                #mut_getter
+
+                #remover
+
+                #modifier
+
+                #updater
+
+                #iter_getter
             }
-        }
-    })
+        })
 }
 
 // For each indexed field generate a TokenStream representing the Iterator over the backing storage
 // via that field,
 // such that the elements are accessed in an order defined by the index rather than the backing storage.
pub(crate) fn generate_iterators<'a>( - fields: &'a [(&Field, Ordering, Uniqueness)], - map_name: &'a proc_macro2::Ident, + fields: &'a [(Field, FieldIdents, Ordering, Uniqueness)], element_name: &'a proc_macro2::Ident, ) -> impl Iterator + 'a { - fields.iter().map(move |(f, ordering, uniqueness)| { - let field_name = f.ident.as_ref().unwrap(); + fields.iter().map(move |(f, idents, ordering, uniqueness)| { + let field_name = &idents.name; let field_vis = &f.vis; let field_name_string = field_name.to_string(); let error_msg = format!( "Internal invariants broken, found empty slice in non_unique index '{field_name_string}'" ); - let iter_name = format_ident!( - "{}{}Iter", - map_name, - field_name.to_string().to_case(::convert_case::Case::UpperCamel) - ); + let iter_name = &idents.iter_name; let ty = &f.ty; // TokenStream representing the actual type of the iterator @@ -614,25 +861,33 @@ pub(crate) fn generate_expanded( lookup_table_fields_init: impl Iterator, lookup_table_fields_shrink: impl Iterator, lookup_table_fields_reserve: impl Iterator, + #[cfg(feature = "trivial_bounds")] lookup_table_fields_debug: impl Iterator< + Item = proc_macro2::TokenStream, + >, + #[cfg(feature = "trivial_bounds")] lookup_table_field_types: impl Iterator< + Item = proc_macro2::TokenStream, + >, ) -> proc_macro2::TokenStream { - let debug_impl = if cfg!(feature = "experimental") { - quote! { - #[allow(trivial_bounds)] - impl ::core::fmt::Debug for #map_name where #element_name: ::core::fmt::Debug { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - f.debug_struct(stringify!(#map_name)) - .field("_store", &self._store) - // #(#lookup_table_fields_debug)* - .finish() - } + #[cfg(not(feature = "trivial_bounds"))] + let debug_impl = quote! {}; + + #[cfg(feature = "trivial_bounds")] + let debug_impl = quote! 
{ + // #[allow(trivial_bounds)] + impl core::fmt::Debug for #map_name where #element_name: core::fmt::Debug, + #(#lookup_table_field_types)* + { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_struct(stringify!(#map_name)) + .field("_store", &self._store) + #(#lookup_table_fields_debug)* + .finish() } } - } else { - quote! {} }; quote! { - #[derive(Default, Clone)] + #[derive(Default)] #element_vis struct #map_name { _store: ::multi_index_map::slab::Slab<#element_name>, #(#lookup_table_fields)* diff --git a/multi_index_map_derive/src/lib.rs b/multi_index_map_derive/src/lib.rs index daa5c42..9df00ac 100644 --- a/multi_index_map_derive/src/lib.rs +++ b/multi_index_map_derive/src/lib.rs @@ -1,6 +1,9 @@ use ::proc_macro_error::{abort_call_site, proc_macro_error}; use ::quote::format_ident; use ::syn::{parse_macro_input, DeriveInput}; +use convert_case::Casing; +use generators::{FieldIdents, EXPECT_NAMED_FIELDS}; +use proc_macro_error::OptionExt; mod generators; mod index_attributes; @@ -19,7 +22,7 @@ pub fn multi_index_map(input: proc_macro::TokenStream) -> proc_macro::TokenStrea }; // Verify the struct fields are named fields, - // otherwise throw an error as we do not support Unnamed of Unit structs. + // otherwise throw an error as we do not support Unnamed or Unit structs. let named_fields = match fields { syn::Fields::Named(f) => f, _ => abort_call_site!( @@ -29,44 +32,83 @@ pub fn multi_index_map(input: proc_macro::TokenStream) -> proc_macro::TokenStrea // Filter out all the fields that do not have a multi_index attribute, // so we can ignore the non-indexed fields. 
-    let fields_to_index = named_fields
+    let (indexed_fields, unindexed_fields): (Vec<_>, Vec<_>) = named_fields
         .named
-        .iter()
-        .filter_map(|f| {
-            let (ordering, uniqueness) = index_attributes::get_index_kind(f)?;
-            Some((f, ordering, uniqueness))
+        .into_iter()
+        .map(|f| {
+            let index_kind = index_attributes::get_index_kind(&f);
+            (f, index_kind)
+        })
+        .partition(|(_, index_kind)| index_kind.is_some());
+
+    let element_name = &input.ident;
+
+    let map_name = format_ident!("MultiIndex{}Map", element_name);
+
+    // Massage the two partitioned Vecs into the correct types
+    let indexed_fields = indexed_fields
+        .into_iter()
+        .map(|(field, kind)| {
+            let (ordering, uniqueness) = kind
+                .expect_or_abort("Internal logic broken, all indexed fields should have a kind");
+
+            let field_ident = field.ident.as_ref().expect_or_abort(EXPECT_NAMED_FIELDS);
+
+            let idents = FieldIdents {
+                name: field_ident.clone(),
+                index_name: format_ident!("_{field_ident}_index",),
+                cloned_name: format_ident!("{field_ident}_orig",),
+                iter_name: format_ident!(
+                    "{map_name}{}Iter",
+                    field_ident
+                        .to_string()
+                        .to_case(::convert_case::Case::UpperCamel),
+                ),
+            };
+
+            (field, idents, ordering, uniqueness)
         })
         .collect::<Vec<_>>();
 
-    let lookup_table_fields = generators::generate_lookup_tables(&fields_to_index);
+    let unindexed_fields = unindexed_fields
+        .into_iter()
+        .map(|(field, _)| field)
+        .collect::<Vec<_>>();
 
-    let lookup_table_fields_init = generators::generate_lookup_table_init(&fields_to_index);
+    let lookup_table_fields = generators::generate_lookup_tables(&indexed_fields);
 
-    let lookup_table_fields_reserve = generators::generate_lookup_table_reserve(&fields_to_index);
+    let lookup_table_fields_init = generators::generate_lookup_table_init(&indexed_fields);
 
-    let lookup_table_fields_shrink = generators::generate_lookup_table_shrink(&fields_to_index);
+    let lookup_table_fields_reserve = generators::generate_lookup_table_reserve(&indexed_fields);
 
-    let inserts =
generators::generate_inserts(&fields_to_index); + #[cfg(feature = "trivial_bounds")] + let lookup_table_fields_debug = generators::generate_lookup_table_debug(&indexed_fields); - let removes = generators::generate_removes(&fields_to_index); + let lookup_table_fields_shrink = generators::generate_lookup_table_shrink(&indexed_fields); - let modifies = generators::generate_modifies(&fields_to_index); + #[cfg(feature = "trivial_bounds")] + let lookup_table_field_types = generators::generate_lookup_table_field_types(&indexed_fields); - let clears = generators::generate_clears(&fields_to_index); + let inserts = generators::generate_inserts(&indexed_fields); - let element_name = &input.ident; + let removes = generators::generate_removes(&indexed_fields); - let map_name = format_ident!("MultiIndex{}Map", element_name); + let pre_modifies = generators::generate_pre_modifies(&indexed_fields); + + let post_modifies = generators::generate_post_modifies(&indexed_fields); + + let clears = generators::generate_clears(&indexed_fields); let accessors = generators::generate_accessors( - &fields_to_index, - &map_name, + &indexed_fields, + &unindexed_fields, element_name, &removes, - &modifies, + &pre_modifies, + &post_modifies, ); - let iterators = generators::generate_iterators(&fields_to_index, &map_name, element_name); + let iterators = generators::generate_iterators(&indexed_fields, element_name); let element_vis = input.vis; @@ -82,6 +124,10 @@ pub fn multi_index_map(input: proc_macro::TokenStream) -> proc_macro::TokenStrea lookup_table_fields_init, lookup_table_fields_shrink, lookup_table_fields_reserve, + #[cfg(feature = "trivial_bounds")] + lookup_table_fields_debug, + #[cfg(feature = "trivial_bounds")] + lookup_table_field_types, ); // Hand the output tokens back to the compiler.