Skip to content

Commit

Permalink
Fix tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Markus Westerlind committed Oct 6, 2020
1 parent 2b7a513 commit 9ab2ee0
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 9 deletions.
5 changes: 5 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,11 @@ pub mod raw {
pub use inner::*;

#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with this module directly unless you
/// need to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::raw::*;
}
Expand Down
16 changes: 7 additions & 9 deletions src/raw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -730,7 +730,7 @@ impl<T> RawTable<T> {
// - there are no DELETED entries.
// - we know there is enough space in the table.
// - all elements are unique.
let index = new_table.prepare_insert_slot(hash);
let (index, _) = new_table.prepare_insert_slot(hash);
new_table.bucket(index).copy_from_nonoverlapping(&item);
}

Expand Down Expand Up @@ -787,12 +787,11 @@ impl<T> RawTable<T> {
#[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
unsafe {
let index = self.table.prepare_insert_slot(hash);
let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
let bucket = self.table.bucket(index);

// If we are replacing a DELETED entry then we don't need to update
// the load counter.
let old_ctrl = *self.table.ctrl(index);
self.table.growth_left -= special_is_empty(old_ctrl) as usize;

bucket.write(value);
Expand Down Expand Up @@ -1041,10 +1040,11 @@ impl RawTableInner {
///
/// There must be at least 1 empty bucket in the table.
#[inline]
unsafe fn prepare_insert_slot(&self, hash: u64) -> usize {
unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
let index = self.find_insert_slot(hash);
let old_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
index
(index, old_ctrl)
}

/// Searches for an empty or deleted bucket which is suitable for inserting
Expand Down Expand Up @@ -1501,7 +1501,7 @@ impl<T: Clone> RawTable<T> {
// - there are no DELETED entries.
// - we know there is enough space in the table.
// - all elements are unique.
let index = guard_self.table.prepare_insert_slot(hash);
let (index, _) = guard_self.table.prepare_insert_slot(hash);
guard_self.bucket(index).write(item);
}
}
Expand Down Expand Up @@ -1707,9 +1707,7 @@ impl<T> Iterator for RawIterRange<T> {
// than the group size where the trailing control bytes are all
// EMPTY. On larger tables self.end is guaranteed to be aligned
// to the group size (since tables are power-of-two sized).
if let None = self.inner.next_group() {
return None;
}
self.inner.next_group()?;
self.data = self.data.next_n(Group::WIDTH);
}
}
Expand Down

0 comments on commit 9ab2ee0

Please sign in to comment.