diff --git a/src/map.rs b/src/map.rs index e4c1d89d..5958005d 100644 --- a/src/map.rs +++ b/src/map.rs @@ -387,10 +387,8 @@ where // // 3. if table is set by a Moved node (below) through help_transfer, it will _either_ // keep using `table` (which is fine by 1. and 2.), or use the `next_table` raw - // pointer from inside the Moved. how do we know that that is safe? - // - // we must demonstrate that if a Moved(t) is _read_, then t must still be valid. - // FIXME + // pointer from inside the Moved. to see that if a Moved(t) is _read_, then t must + // still be valid, see the safety comment on BinEntry::Moved. let t = unsafe { table.deref() }; let bini = t.bini(h); @@ -1098,10 +1096,8 @@ where // only queues a drop for the next epoch after removing the table. // // 2. if table is set by a Moved node (below) through help_transfer, it will use the - // `next_table` raw pointer from inside the Moved. how do we know that that is safe? - // - // we must demonstrate that if a Moved(t) is _read_, then t must still be valid. - // FIXME cf. put + // `next_table` raw pointer from inside the Moved. to see that if a Moved(t) is + // _read_, then t must still be valid, see the safety comment on BinEntry::Moved. let t = unsafe { table.deref() }; let n = t.len() as u64; if n == 0 { diff --git a/src/node.rs b/src/node.rs index 81b73357..a2634ea7 100644 --- a/src/node.rs +++ b/src/node.rs @@ -10,6 +10,36 @@ use std::sync::atomic::Ordering; #[derive(Debug)] pub(crate) enum BinEntry { Node(Node), + // safety: the pointer t to the next table inside Moved(t) is a valid pointer if the Moved(t) + // entry was read after loading `map::HashMap.table` while the guard used to load that table is + // still alive: + // + // When loading the current table of the HashMap with a guard g, the current epoch will be + // pinned by g. 
This happens _before_ the resize which put the Moved entry into the current + // table finishes, as otherwise a different table would have been loaded (see + // `map::HashMap::transfer`). + // + // Hence, for the Moved(t) read from the loaded table: + // + // - When trying to access t during the current resize, t points to map::HashMap.next_table + // and is thus valid. + // + // - After the current resize and before another resize, `t == map::HashMap.table` as the + // "next table" t pointed to during the resize has become the current table. Thus t is + // still valid. + // + // - The above is true until a subsequent resize ends, at which point `map::HashMap.table` is + // set to another new table != t and t is `epoch::Guard::defer_destroy`ed (again, see + // `map::HashMap::transfer`). At this point, t is not referenced by the map anymore. + // However, the guard g used to load the table is still pinning the epoch at the time of + // the call to `defer_destroy`. Thus, t remains valid for at least the lifetime of g. + // + // - After releasing g, either the current resize is finished and operations on the map + // cannot access t anymore as a more recent table will be loaded as the current table + // (see once again `map::HashMap::transfer`), or the argument is as above. + // + // Since finishing a resize is the only time a table is `defer_destroy`ed, the above covers + // all cases. Moved(*const Table), }