diff --git a/README.md b/README.md
index e46957e..53add42 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,8 @@ Current implementation supports:
 * Iteration over the backing store is the same as Slab, so contiguous memory but with potentially vacant slots.
 * Insertion, removal, and modification complexity grows as the number of indexed fields grow. All indexes must be updated during these operations so these are slower.
 * Modification of unindexed fields through unsafe mut methods is the same as regular retrieval time.
-* Insertion or modification such that uniqueness is violated, will result in a `panic`.
+* Modification such that uniqueness is violated will result in a `panic`.
+* Insertion such that uniqueness would be violated does not mutate the map; instead, the element is returned to the user wrapped in an `Err` variant.

 ## Non-Unique Indexes
 * Hashed index retrievals are still constant-time with the total number of elements, but linear-time with the number of matching elements. (FxHashMap + (Slab * num_matches)).
@@ -73,7 +74,7 @@ fn main() {

     let mut map = MultiIndexOrderMap::default();

-    map.insert(order1);
+    map.try_insert(order1).unwrap();
     map.insert(order2);

     let orders = map.get_by_trader_name(&"JohnDoe".to_string());
@@ -159,7 +160,8 @@ struct MultiIndexOrderMapTraderNameIter<'a> {
 }

 impl MultiIndexOrderMap {
-    fn insert(&mut self, elem: Order);
+    fn try_insert(&mut self, elem: Order) -> Result<&Order, UniquenessError<Order>>;
+    fn insert(&mut self, elem: Order) -> &Order;
     fn len(&self) -> usize;
     fn is_empty(&self) -> bool;

diff --git a/RELEASES.md b/RELEASES.md
index 92d29da..d2132e2 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -1,3 +1,9 @@
+Version 0.11.0 (2023-11-02)
+==========================
+
+- Add `try_insert` method to allow fallible insertion and return a shared reference to the newly inserted element.
+- Change `insert` method to return a shared reference to the newly inserted element.
+
 Version 0.10.0 (2023-11-02)
 ==========================

diff --git a/multi_index_map/Cargo.toml b/multi_index_map/Cargo.toml
index 529af81..b5f4ae2 100644
--- a/multi_index_map/Cargo.toml
+++ b/multi_index_map/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "multi_index_map"
-version = "0.10.0"
+version = "0.11.0"
 edition = "2021"
 authors = ["Louis Wyborn "]
 rust-version = "1.62"
@@ -14,7 +14,7 @@ readme = "README.md"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-multi_index_map_derive = { version = "0.10.0", path = "../multi_index_map_derive" }
+multi_index_map_derive = { version = "0.11.0", path = "../multi_index_map_derive" }

 # Used as the backing store of all the elements.
 slab = { version = "0.4" }
diff --git a/multi_index_map/examples/main.rs b/multi_index_map/examples/main.rs
index 5cfc7ca..0efeef4 100644
--- a/multi_index_map/examples/main.rs
+++ b/multi_index_map/examples/main.rs
@@ -33,8 +33,8 @@ fn main() {

     let mut map = MultiIndexOrderMap::default();

-    map.insert(o1);
-    map.insert(o2);
+    let _o1_ref: &Order = map.insert(o1);
+    let _o2_ref: &Order = map.try_insert(o2).unwrap();

     // Set non-mutable, non mutating iter methods still work.
     let map = map;
diff --git a/multi_index_map/src/lib.rs b/multi_index_map/src/lib.rs
index 7603a20..a0d7074 100644
--- a/multi_index_map/src/lib.rs
+++ b/multi_index_map/src/lib.rs
@@ -1,5 +1,23 @@
 pub use multi_index_map_derive::MultiIndexMap;

+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct UniquenessError<T>(pub T);
+
+impl<T> core::fmt::Display for UniquenessError<T> {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(
+            f,
+            "Unable to insert element, uniqueness constraint violated"
+        )
+    }
+}
+
+impl<T> core::fmt::Debug for UniquenessError<T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_tuple("UniquenessViolated").finish()
+    }
+}
+
 #[doc(hidden)]
 pub use rustc_hash;
 #[doc(hidden)]
diff --git a/multi_index_map/tests/hashed_non_unique.rs b/multi_index_map/tests/hashed_non_unique.rs
index 8efbeb3..09e3a0f 100644
--- a/multi_index_map/tests/hashed_non_unique.rs
+++ b/multi_index_map/tests/hashed_non_unique.rs
@@ -27,6 +27,12 @@ fn test_insert_and_get_by_field1() {
     map.insert(elem2);
     map.insert(elem1);

+    let elem2 = TestElement {
+        field1: TestNonPrimitiveType(42),
+        field3: 1,
+    };
+    map.try_insert(elem2).unwrap_err();
+
     let elems = map.get_by_field1(&TestNonPrimitiveType(42));
     assert_eq!(elems.len(), 2);
     assert_eq!(map.len(), 2);
diff --git a/multi_index_map/tests/hashed_unique.rs b/multi_index_map/tests/hashed_unique.rs
index 6ef2f41..c9ec794 100644
--- a/multi_index_map/tests/hashed_unique.rs
+++ b/multi_index_map/tests/hashed_unique.rs
@@ -1,4 +1,4 @@
-use multi_index_map::MultiIndexMap;
+use multi_index_map::{MultiIndexMap, UniquenessError};

 #[derive(Hash, PartialEq, Eq, Clone)]
 struct TestNonPrimitiveType(u64);
@@ -19,6 +19,12 @@ fn test_insert_and_get() {
     };
     map.insert(elem1);

+    let elem1 = TestElement {
+        field1: TestNonPrimitiveType(42),
+        field2: "ElementOne".to_string(),
+    };
+    assert!(map.try_insert(elem1).is_err());
+
     let elem1_ref = map.get_by_field1(&TestNonPrimitiveType(42)).unwrap();
     assert_eq!(elem1_ref.field1.0, 42);
     assert_eq!(elem1_ref.field2, "ElementOne");
diff --git a/multi_index_map/tests/ordered_non_unique.rs b/multi_index_map/tests/ordered_non_unique.rs
index 3408d53..c456c63 100644
--- a/multi_index_map/tests/ordered_non_unique.rs
+++ b/multi_index_map/tests/ordered_non_unique.rs
@@ -27,6 +27,12 @@ fn test_insert_and_get_by_field1() {
     map.insert(elem2);
     map.insert(elem1);

+    let elem2 = TestElement {
+        field1: TestNonPrimitiveType(42),
+        field3: 1,
+    };
+    map.try_insert(elem2).unwrap_err();
+
     let elems = map.get_by_field1(&TestNonPrimitiveType(42));
     assert_eq!(elems.len(), 2);
     assert_eq!(map.len(), 2);
diff --git a/multi_index_map/tests/ordered_unique.rs b/multi_index_map/tests/ordered_unique.rs
index d9ed5db..e50082d 100644
--- a/multi_index_map/tests/ordered_unique.rs
+++ b/multi_index_map/tests/ordered_unique.rs
@@ -19,6 +19,12 @@ fn test_insert_and_get() {
     };
     map.insert(elem1);

+    let elem1 = TestElement {
+        field1: TestNonPrimitiveType(42),
+        field2: "ElementOne".to_string(),
+    };
+    assert!(map.try_insert(elem1).is_err());
+
     let elem1_ref = map.get_by_field1(&TestNonPrimitiveType(42)).unwrap();
     assert_eq!(elem1_ref.field1.0, 42);
     assert_eq!(elem1_ref.field2, "ElementOne");
diff --git a/multi_index_map_derive/Cargo.toml b/multi_index_map_derive/Cargo.toml
index 761be4f..31d5eeb 100644
--- a/multi_index_map_derive/Cargo.toml
+++ b/multi_index_map_derive/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "multi_index_map_derive"
-version = "0.10.0"
+version = "0.11.0"
 edition = "2021"
 authors = ["Louis Wyborn "]
 rust-version = "1.62"
diff --git a/multi_index_map_derive/src/generators.rs b/multi_index_map_derive/src/generators.rs
index 36779d7..04d0ee8 100644
--- a/multi_index_map_derive/src/generators.rs
+++ b/multi_index_map_derive/src/generators.rs
@@ -128,28 +128,53 @@ pub(crate) fn generate_lookup_table_shrink(
     })
 }

-// For each indexed field generate a TokenStream representing inserting the position
-// in the backing storage to that field's lookup table
+// For each indexed field generate a TokenStream representing getting the Entry for that field's lookup table
+pub(crate) fn generate_entries_for_insert(
+    fields: &[(Field, FieldIdents, Ordering, Uniqueness)],
+) -> impl Iterator<Item = proc_macro2::TokenStream> + '_ {
+    fields.iter().map(|(_f, idents, ordering, uniqueness)| {
+        let field_name = &idents.name;
+        let index_name = &idents.index_name;
+        let entry_name = format_ident!("{field_name}_entry");
+
+        match uniqueness {
+            Uniqueness::Unique => match ordering {
+                Ordering::Hashed => {
+                    quote! {
+                        let #entry_name = match self.#index_name.entry(elem.#field_name.clone()) {
+                            ::std::collections::hash_map::Entry::Occupied(_) => return Err(::multi_index_map::UniquenessError(elem)),
+                            ::std::collections::hash_map::Entry::Vacant(e) => e,
+                        };
+                    }
+                }
+                Ordering::Ordered => quote! {
+                    let #entry_name = match self.#index_name.entry(elem.#field_name.clone()) {
+                        ::std::collections::btree_map::Entry::Occupied(_) => return Err(::multi_index_map::UniquenessError(elem)),
+                        ::std::collections::btree_map::Entry::Vacant(e) => e,
+                    };
+                },
+            },
+            Uniqueness::NonUnique => quote! {},
+        }
+    })
+}
+
+// For each indexed field generate a TokenStream representing inserting the position in the backing storage
+// to that field's lookup table via the entry generated previously
 // Unique indexed fields just require a simple insert to the map,
 // whereas non-unique fields require inserting to the container of positions,
 // creating a new container if necessary.
-pub(crate) fn generate_inserts(
+pub(crate) fn generate_inserts_for_entries(
     fields: &[(Field, FieldIdents, Ordering, Uniqueness)],
 ) -> impl Iterator<Item = proc_macro2::TokenStream> + '_ {
     fields.iter().map(|(_f, idents, _ordering, uniqueness)| {
         let field_name = &idents.name;
-        let field_name_string = stringify!(field_name);
         let index_name = &idents.index_name;
+        let entry_name = format_ident!("{field_name}_entry");

         match uniqueness {
             Uniqueness::Unique => quote! {
-                let orig_elem_idx = self.#index_name.insert(elem.#field_name.clone(), idx);
-                if orig_elem_idx.is_some() {
-                    panic!(
-                        "Unable to insert element, uniqueness constraint violated on field '{}'",
-                        #field_name_string
-                    );
-                }
+                #entry_name.insert(idx);
             },
             Uniqueness::NonUnique => quote! {
                self.#index_name.entry(elem.#field_name.clone())
@@ -816,7 +841,8 @@ pub(crate) fn generate_expanded(
     map_name: &proc_macro2::Ident,
     element_name: &proc_macro2::Ident,
     element_vis: &Visibility,
-    inserts: impl Iterator<Item = proc_macro2::TokenStream>,
+    entries_for_insert: impl Iterator<Item = proc_macro2::TokenStream>,
+    inserts_for_entries: impl Iterator<Item = proc_macro2::TokenStream>,
     accessors: impl Iterator<Item = proc_macro2::TokenStream>,
     iterators: impl Iterator<Item = proc_macro2::TokenStream>,
     clears: impl Iterator<Item = proc_macro2::TokenStream>,
@@ -867,11 +893,20 @@
             #(#lookup_table_fields_shrink)*
         }

-        #element_vis fn insert(&mut self, elem: #element_name) {
-            let idx = self._store.insert(elem);
-            let elem = &self._store[idx];
+        #element_vis fn try_insert(&mut self, elem: #element_name) -> Result<&#element_name, ::multi_index_map::UniquenessError<#element_name>> {
+            let store_entry = self._store.vacant_entry();
+            let idx = store_entry.key();
+
+            #(#entries_for_insert)*
+            #(#inserts_for_entries)*
+
+            let elem = store_entry.insert(elem);
+
+            Ok(elem)
+        }

-            #(#inserts)*
+        #element_vis fn insert(&mut self, elem: #element_name) -> &#element_name {
+            self.try_insert(elem).expect("Unable to insert element")
         }

         #element_vis fn clear(&mut self) {
diff --git a/multi_index_map_derive/src/lib.rs b/multi_index_map_derive/src/lib.rs
index d62c927..9f91960 100644
--- a/multi_index_map_derive/src/lib.rs
+++ b/multi_index_map_derive/src/lib.rs
@@ -85,7 +85,9 @@ pub fn multi_index_map(input: proc_macro::TokenStream) -> proc_macro::TokenStrea

     let lookup_table_fields_shrink = generators::generate_lookup_table_shrink(&indexed_fields);

-    let inserts = generators::generate_inserts(&indexed_fields);
+    let entries_for_insert = generators::generate_entries_for_insert(&indexed_fields);
+
+    let inserts_for_entries = generators::generate_inserts_for_entries(&indexed_fields);

     let removes = generators::generate_removes(&indexed_fields);

@@ -113,7 +115,8 @@
         &map_name,
         element_name,
         &element_vis,
-        inserts,
+        entries_for_insert,
+        inserts_for_entries,
         accessors,
         iterators,
         clears,
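
For reviewers, here is a minimal caller-side sketch of the new fallible API documented in the README hunk above. The `Order` struct, its field names, and the `hashed_unique` attribute are illustrative stand-ins based on the README example rather than code from this patch:

```rust
use multi_index_map::MultiIndexMap;

// Illustrative element type; `order_id` carries the unique index.
#[derive(MultiIndexMap, Clone, Debug)]
struct Order {
    #[multi_index(hashed_unique)]
    order_id: u32,
    trader_name: String,
}

fn main() {
    let mut map = MultiIndexOrderMap::default();

    let first = Order { order_id: 1, trader_name: "JohnDoe".to_string() };
    let duplicate = Order { order_id: 1, trader_name: "JaneDoe".to_string() };

    // `insert` keeps its panicking behaviour on a uniqueness violation,
    // but now returns a shared reference to the stored element on success.
    let stored: &Order = map.insert(first);
    assert_eq!(stored.order_id, 1);

    // `try_insert` reports the violation instead of panicking; the rejected
    // element comes back untouched inside `UniquenessError`.
    if let Err(multi_index_map::UniquenessError(rejected)) = map.try_insert(duplicate) {
        println!("rejected duplicate order for trader {}", rejected.trader_name);
    }

    // The failed insert left the map unmodified.
    assert_eq!(map.len(), 1);
}
```

The key behavioural difference is that `try_insert` hands the rejected element back inside `UniquenessError` without touching the map, while `insert` simply delegates to `try_insert` and panics on failure.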
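To make the generator change easier to review, the following hand-written sketch mirrors the two-phase insert the macro now emits: reserve a slab slot and claim vacant index entries first, and only commit once every uniqueness check has passed. `TinyMap`, `by_key`, and the `String` element type are invented for illustration; only the `slab` crate, which the library already depends on, is assumed:

```rust
use std::collections::hash_map::{Entry, HashMap};

// Mirrors the error type added to multi_index_map/src/lib.rs.
struct UniquenessError<T>(pub T);

struct TinyMap {
    store: slab::Slab<String>,      // backing storage, as in the generated map
    by_key: HashMap<String, usize>, // unique index: key -> slab position
}

impl TinyMap {
    fn try_insert(&mut self, elem: String) -> Result<&String, UniquenessError<String>> {
        // Phase 1: reserve a slab slot and claim a vacant entry in the unique
        // index. Nothing has been written yet, so returning early here leaves
        // the map exactly as it was.
        let store_entry = self.store.vacant_entry();
        let idx = store_entry.key();
        let key_entry = match self.by_key.entry(elem.clone()) {
            Entry::Occupied(_) => return Err(UniquenessError(elem)),
            Entry::Vacant(e) => e,
        };

        // Phase 2: all uniqueness checks passed, commit to the index and the
        // backing store.
        key_entry.insert(idx);
        Ok(store_entry.insert(elem))
    }
}

fn main() {
    let mut map = TinyMap { store: slab::Slab::new(), by_key: HashMap::new() };
    assert!(map.try_insert("a".to_string()).is_ok());
    if let Err(UniquenessError(rejected)) = map.try_insert("a".to_string()) {
        println!("rejected duplicate key {rejected:?}");
    }
    assert_eq!(map.store.len(), 1); // the failed insert left nothing behind
}
```

Because nothing is written to the backing store or any lookup table until all `Occupied` checks have passed, an early `return Err(...)` cannot leave partial state behind, which is what lets `try_insert` hand the element back unchanged instead of panicking.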