diff --git a/backend/Cargo.toml b/backend/Cargo.toml
index 0db999c5..ae06eb4d 100644
--- a/backend/Cargo.toml
+++ b/backend/Cargo.toml
@@ -26,6 +26,10 @@ harness = false
 name = "grover"
 harness = false
 
+[[bench]]
+name = "qubit_management"
+harness = false
+
 [lib]
 crate-type = ["staticlib","rlib"]
 bench = false
diff --git a/backend/benches/qubit_management.rs b/backend/benches/qubit_management.rs
new file mode 100644
index 00000000..5a9c8120
--- /dev/null
+++ b/backend/benches/qubit_management.rs
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use qir_backend::*;
+
+/// Benchmarks a large number of qubit allocations and releases.
+pub fn allocate_release(c: &mut Criterion) {
+    c.bench_function("Allocate-Release 2k qubits", |b| {
+        b.iter(|| {
+            // Allocate a 2,000-qubit array and immediately release it.
+            let qs = __quantum__rt__qubit_allocate_array(2_000);
+            unsafe {
+                __quantum__rt__qubit_release_array(qs);
+            }
+        })
+    });
+}
+
+criterion_group!(benches, allocate_release);
+criterion_main!(benches);
diff --git a/sparsesim/src/exp.rs b/sparsesim/src/exp.rs
index cfd63074..2bddfe66 100644
--- a/sparsesim/src/exp.rs
+++ b/sparsesim/src/exp.rs
@@ -40,7 +40,7 @@ impl QuantumSim {
             .map(|c| {
                 *self
                     .id_map
-                    .get(c)
+                    .get(*c)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {c}"))
                     as u64
             })
@@ -51,7 +51,7 @@ impl QuantumSim {
             .map(|c| {
                 *self
                     .id_map
-                    .get(c)
+                    .get(*c)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {c}"))
                     as u64
             })
diff --git a/sparsesim/src/index_map.rs b/sparsesim/src/index_map.rs
new file mode 100644
index 00000000..7a52ded7
--- /dev/null
+++ b/sparsesim/src/index_map.rs
@@ -0,0 +1,341 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use std::{
+    fmt::{self, Debug, Formatter},
+    iter::Enumerate,
+    marker::PhantomData,
+    ops::{Index, IndexMut},
+    option::Option,
+    slice, vec,
+};
+
+pub struct IndexMap<K, V> {
+    _keys: PhantomData<K>,
+    values: Vec<Option<V>>,
+}
+
+impl<K, V> IndexMap<K, V>
+where
+    K: Into<usize>,
+    V: Default,
+{
+    pub fn get_mut_or_default(&mut self, key: K) -> &mut V {
+        let index: usize = key.into();
+        if index >= self.values.len() {
+            self.values.resize_with(index + 1, Option::default);
+        }
+        self.values
+            .get_mut(index)
+            .expect("IndexMap::get_mut_or_default: index out of bounds")
+            .get_or_insert_with(Default::default)
+    }
+}
+
+impl<K, V> IndexMap<K, V> {
+    #[must_use]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    #[must_use]
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self {
+            _keys: PhantomData,
+            values: Vec::with_capacity(capacity),
+        }
+    }
+
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.values.is_empty()
+    }
+
+    // `Iter` does implement `Iterator`, but it has an additional bound on `K`.
+    #[allow(clippy::iter_not_returning_iterator)]
+    #[must_use]
+    pub fn iter(&self) -> Iter<K, V> {
+        Iter {
+            _keys: PhantomData,
+            base: self.values.iter().enumerate(),
+        }
+    }
+
+    // `IterMut` does implement `Iterator`, but it has an additional bound on `K`.
+    #[allow(clippy::iter_not_returning_iterator)]
+    pub fn iter_mut(&mut self) -> IterMut<K, V> {
+        IterMut {
+            _keys: PhantomData,
+            base: self.values.iter_mut().enumerate(),
+        }
+    }
+
+    pub fn drain(&mut self) -> Drain<K, V> {
+        Drain {
+            _keys: PhantomData,
+            base: self.values.drain(..).enumerate(),
+        }
+    }
+
+    #[must_use]
+    pub fn values(&self) -> Values<V> {
+        Values {
+            base: self.values.iter(),
+        }
+    }
+
+    pub fn values_mut(&mut self) -> ValuesMut<V> {
+        ValuesMut {
+            base: self.values.iter_mut(),
+        }
+    }
+
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(K, &V) -> bool,
+        K: From<usize>,
+    {
+        for (k, v) in self.values.iter_mut().enumerate() {
+            let remove = if let Some(value) = v {
+                !f(K::from(k), value)
+            } else {
+                false
+            };
+            if remove {
+                *v = None;
+            }
+        }
+    }
+
+    pub fn clear(&mut self) {
+        self.values.clear();
+    }
+}
+
+impl<K: Into<usize>, V> IndexMap<K, V> {
+    pub fn insert(&mut self, key: K, value: V) {
+        let index = key.into();
+        if index >= self.values.len() {
+            self.values.resize_with(index + 1, || None);
+        }
+        self.values[index] = Some(value);
+    }
+
+    pub fn contains_key(&self, key: K) -> bool {
+        let index: usize = key.into();
+        self.values.get(index).is_some_and(Option::is_some)
+    }
+
+    pub fn get(&self, key: K) -> Option<&V> {
+        let index: usize = key.into();
+        self.values.get(index).and_then(Option::as_ref)
+    }
+
+    pub fn get_mut(&mut self, key: K) -> Option<&mut V> {
+        let index: usize = key.into();
+        self.values.get_mut(index).and_then(Option::as_mut)
+    }
+
+    pub fn remove(&mut self, key: K) {
+        let index: usize = key.into();
+        if index < self.values.len() {
+            self.values[index] = None;
+        }
+    }
+}
+
+impl<K, V: Clone> Clone for IndexMap<K, V> {
+    fn clone(&self) -> Self {
+        Self {
+            _keys: PhantomData,
+            values: self.values.clone(),
+        }
+    }
+}
+
+impl<K, V: Debug> Debug for IndexMap<K, V> {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        f.debug_struct("IndexMap")
+            .field(
+                "values",
+                &self
+                    .values
+                    .iter()
+                    .enumerate()
+                    .filter_map(|(k, v)| v.as_ref().map(|val| format!("{k:?}: {val:?}")))
+                    .collect::<Vec<_>>(),
+            )
+            .finish()
+    }
+}
+
+impl<K, V> Default for IndexMap<K, V> {
+    fn default() -> Self {
+        Self {
+            _keys: PhantomData,
+            values: Vec::default(),
+        }
+    }
+}
+
+impl<K: From<usize>, V> IntoIterator for IndexMap<K, V> {
+    type Item = (K, V);
+
+    type IntoIter = IntoIter<K, V>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter {
+            _keys: PhantomData,
+            base: self.values.into_iter().enumerate(),
+        }
+    }
+}
+
+impl<'a, K: From<usize>, V> IntoIterator for &'a IndexMap<K, V> {
+    type Item = (K, &'a V);
+
+    type IntoIter = Iter<'a, K, V>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<K: Into<usize>, V> FromIterator<(K, V)> for IndexMap<K, V> {
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        let iter = iter.into_iter();
+        let mut map = Self::new();
+        let (lo, hi) = iter.size_hint();
+        map.values.reserve(hi.unwrap_or(lo));
+        for (key, value) in iter {
+            map.insert(key, value);
+        }
+        map
+    }
+}
+
+pub struct Iter<'a, K, V> {
+    _keys: PhantomData<K>,
+    base: Enumerate<slice::Iter<'a, Option<V>>>,
+}
+
+impl<'a, K: From<usize>, V> Iterator for Iter<'a, K, V> {
+    type Item = (K, &'a V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let (index, Some(value)) = self.base.next()? {
+                break Some((index.into(), value));
+            }
+        }
+    }
+}
+
+pub struct IterMut<'a, K, V> {
+    _keys: PhantomData<K>,
+    base: Enumerate<slice::IterMut<'a, Option<V>>>,
+}
+
+impl<K: From<usize>, V> DoubleEndedIterator for Iter<'_, K, V> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        loop {
+            if let (index, Some(value)) = self.base.next_back()? {
+                break Some((index.into(), value));
+            }
+        }
+    }
+}
+
+impl<'a, K: From<usize>, V> Iterator for IterMut<'a, K, V> {
+    type Item = (K, &'a mut V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let (index, Some(value)) = self.base.next()? {
+                break Some((index.into(), value));
+            }
+        }
+    }
+}
+
+pub struct IntoIter<K, V> {
+    _keys: PhantomData<K>,
+    base: Enumerate<vec::IntoIter<Option<V>>>,
+}
+
+impl<K: From<usize>, V> Iterator for IntoIter<K, V> {
+    type Item = (K, V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let (index, Some(value)) = self.base.next()? {
+                break Some((index.into(), value));
+            }
+        }
+    }
+}
+
+pub struct Drain<'a, K, V> {
+    _keys: PhantomData<K>,
+    base: Enumerate<vec::Drain<'a, Option<V>>>,
+}
+
+impl<K: From<usize>, V> Iterator for Drain<'_, K, V> {
+    type Item = (K, V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let (index, Some(value)) = self.base.next()? {
+                break Some((index.into(), value));
+            }
+        }
+    }
+}
+
+pub struct Values<'a, V> {
+    base: slice::Iter<'a, Option<V>>,
+}
+
+impl<'a, V> Iterator for Values<'a, V> {
+    type Item = &'a V;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let Some(value) = self.base.next()? {
+                break Some(value);
+            }
+        }
+    }
+}
+
+pub struct ValuesMut<'a, V> {
+    base: slice::IterMut<'a, Option<V>>,
+}
+
+impl<'a, V> Iterator for ValuesMut<'a, V> {
+    type Item = &'a mut V;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            if let Some(value) = self.base.next()? {
+                break Some(value);
+            }
+        }
+    }
+}
+
+impl Index<usize> for IndexMap<usize, usize> {
+    type Output = usize;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        self.get(index)
+            .expect("IndexMap::index: index out of bounds")
+    }
+}
+
+impl IndexMut<usize> for IndexMap<usize, usize> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        self.get_mut(index)
+            .expect("IndexMap::index_mut: index out of bounds")
+    }
+}
diff --git a/sparsesim/src/lib.rs b/sparsesim/src/lib.rs
index b726b619..d4dc1b2e 100644
--- a/sparsesim/src/lib.rs
+++ b/sparsesim/src/lib.rs
@@ -8,6 +8,7 @@
 //! Leveraging state sparsity for more efficient quantum simulations.
 
 pub mod exp;
+mod index_map;
 mod nearly_zero;
 
 // Additional test infrastructure is available in matrix_testing that allows comparing the transformations
@@ -16,6 +17,7 @@ mod nearly_zero;
 mod matrix_testing;
 
 use crate::nearly_zero::NearlyZero;
+use index_map::IndexMap;
 use ndarray::{s, Array2};
 use num_bigint::BigUint;
 use num_complex::Complex64;
@@ -35,7 +37,7 @@ pub struct QuantumSim {
     pub(crate) state: SparseState,
 
     /// The mapping from qubit identifiers to internal state locations.
-    pub(crate) id_map: FxHashMap<usize, usize>,
+    pub(crate) id_map: IndexMap<usize, usize>,
 
     /// The random number generator used for probabilistic operations.
     rng: RefCell<StdRng>,
@@ -44,10 +46,10 @@ pub struct QuantumSim {
     h_flag: BigUint,
 
     /// The map for tracking queued Pauli-X rotations by a given angle for a given qubit.
-    rx_queue: FxHashMap<usize, f64>,
+    rx_queue: IndexMap<usize, f64>,
 
     /// The map for tracking queued Pauli-Y rotations by a given angle for a given qubit.
-    ry_queue: FxHashMap<usize, f64>,
+    ry_queue: IndexMap<usize, f64>,
 
     /// The list of queued gate operations.
     op_queue: Vec<(Vec<usize>, usize, OpCode)>,
@@ -98,16 +100,17 @@ impl QuantumSim {
     /// Creates a new sparse state quantum simulator object with empty initial state (no qubits allocated, no operations buffered).
     #[must_use]
     pub fn new(rng: Option<StdRng>) -> Self {
+        let default_initial_size = 50;
         let mut initial_state = SparseState::default();
         initial_state.insert(BigUint::zero(), Complex64::one());
 
         QuantumSim {
             state: initial_state,
-            id_map: FxHashMap::default(),
+            id_map: IndexMap::with_capacity(default_initial_size),
             rng: RefCell::new(rng.unwrap_or_else(StdRng::from_entropy)),
             h_flag: BigUint::zero(),
-            rx_queue: FxHashMap::default(),
-            ry_queue: FxHashMap::default(),
+            rx_queue: IndexMap::with_capacity(default_initial_size),
+            ry_queue: IndexMap::with_capacity(default_initial_size),
             op_queue: Vec::new(),
         }
     }
@@ -128,24 +131,23 @@ impl QuantumSim {
     pub fn get_state(&mut self) -> (Vec<(BigUint, Complex64)>, usize) {
         // Swap all the entries in the state to be ordered by qubit identifier. This makes
         // interpreting the state easier for external consumers that don't have access to the id map.
-        let mut sorted_keys: Vec<usize> = self.id_map.keys().copied().collect();
+        let sorted_keys: Vec<usize> = self.id_map.iter().map(|(k, _)| k).collect();
         self.flush_queue(&sorted_keys, FlushLevel::HRxRy);
 
-        sorted_keys.sort_unstable();
         sorted_keys.iter().enumerate().for_each(|(index, &key)| {
-            if index != self.id_map[&key] {
-                self.swap_qubit_state(self.id_map[&key], index);
-                if let Some((&swapped_key, _)) =
+            if index != self.id_map[key] {
+                self.swap_qubit_state(self.id_map[key], index);
+                if let Some((swapped_key, _)) =
                     self.id_map.iter().find(|(_, &value)| value == index)
                 {
                     *(self
                         .id_map
-                        .get_mut(&swapped_key)
-                        .expect("key should be present in map")) = self.id_map[&key];
+                        .get_mut(swapped_key)
+                        .expect("key should be present in map")) = self.id_map[key];
                 }
                 *(self
                     .id_map
-                    .get_mut(&key)
+                    .get_mut(key)
                     .expect("key should be present in map")) = index;
             }
         });
@@ -162,16 +164,15 @@ impl QuantumSim {
     pub fn allocate(&mut self) -> usize {
         // Add the new entry into the FxHashMap at the first available sequential ID and first available
        // sequential location.
-        let mut sorted_keys: Vec<&usize> = self.id_map.keys().collect();
-        sorted_keys.sort();
+        let sorted_keys: Vec<usize> = self.id_map.iter().map(|(k, _)| k).collect();
         let mut sorted_vals: Vec<&usize> = self.id_map.values().collect();
-        sorted_vals.sort();
+        sorted_vals.sort_unstable();
         let new_key = sorted_keys
             .iter()
             .enumerate()
-            .take_while(|(index, key)| index == **key)
+            .take_while(|(index, key)| index == *key)
             .last()
-            .map_or(0_usize, |(_, &&key)| key + 1);
+            .map_or(0_usize, |(_, &key)| key + 1);
         let new_val = sorted_vals
             .iter()
             .enumerate()
@@ -192,10 +193,8 @@ impl QuantumSim {
     pub fn release(&mut self, id: usize) {
         self.flush_queue(&[id], FlushLevel::HRxRy);
 
-        let loc = self
-            .id_map
-            .remove(&id)
-            .unwrap_or_else(|| panic!("Unable to find qubit with id {id}."));
+        let loc = self.id_map[id];
+        self.id_map.remove(id);
 
         if self.id_map.is_empty() {
             // When no qubits are allocated, we can reset the sparse state to a clean ground, so
@@ -229,24 +228,24 @@ impl QuantumSim {
     pub fn dump(&mut self) -> String {
         // Swap all the entries in the state to be ordered by qubit identifier. This makes
        // interpreting the state easier for external consumers that don't have access to the id map.
-        let mut sorted_keys: Vec<usize> = self.id_map.keys().copied().collect();
+        let mut sorted_keys: Vec<usize> = self.id_map.iter().map(|(k, _)| k).collect();
         self.flush_queue(&sorted_keys, FlushLevel::HRxRy);
 
         sorted_keys.sort_unstable();
         sorted_keys.iter().enumerate().for_each(|(index, &key)| {
-            if index != self.id_map[&key] {
-                self.swap_qubit_state(self.id_map[&key], index);
-                if let Some((&swapped_key, _)) =
+            if index != self.id_map[key] {
+                self.swap_qubit_state(self.id_map[key], index);
+                if let Some((swapped_key, _)) =
                     self.id_map.iter().find(|(_, &value)| value == index)
                 {
                     *(self
                         .id_map
-                        .get_mut(&swapped_key)
-                        .expect("key should be present in map")) = self.id_map[&key];
+                        .get_mut(swapped_key)
+                        .expect("key should be present in map")) = self.id_map[key];
                 }
                 *(self
                     .id_map
-                    .get_mut(&key)
+                    .get_mut(key)
                     .expect("key should be present in map")) = index;
             }
         });
@@ -306,7 +305,7 @@ impl QuantumSim {
             .map(|id| {
                 *self
                     .id_map
-                    .get(id)
+                    .get(*id)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {id}"))
             })
            .collect();
@@ -330,7 +329,7 @@ impl QuantumSim {
         self.measure_impl(
             *self
                 .id_map
-                .get(&id)
+                .get(id)
                 .unwrap_or_else(|| panic!("Unable to find qubit with id {id}")),
         )
     }
@@ -360,7 +359,7 @@ impl QuantumSim {
             .map(|id| {
                 *self
                     .id_map
-                    .get(id)
+                    .get(*id)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {id}"))
             })
            .collect();
@@ -435,42 +434,42 @@ impl QuantumSim {
         self.h_flag.set_bit(qubit1 as u64, h_val2);
         self.h_flag.set_bit(qubit2 as u64, h_val1);
 
-        let x_angle1 = self.rx_queue.get(&qubit1).copied();
-        let x_angle2 = self.rx_queue.get(&qubit2).copied();
+        let x_angle1 = self.rx_queue.get(qubit1).copied();
+        let x_angle2 = self.rx_queue.get(qubit2).copied();
         if let Some(angle) = x_angle1 {
             self.rx_queue.insert(qubit2, angle);
         } else {
-            self.rx_queue.remove(&qubit2);
+            self.rx_queue.remove(qubit2);
         }
         if let Some(angle) = x_angle2 {
             self.rx_queue.insert(qubit1, angle);
         } else {
-            self.rx_queue.remove(&qubit1);
+            self.rx_queue.remove(qubit1);
         }
 
-        let y_angle1 = self.ry_queue.get(&qubit1).copied();
-        let y_angle2 = self.ry_queue.get(&qubit2).copied();
+        let y_angle1 = self.ry_queue.get(qubit1).copied();
+        let y_angle2 = self.ry_queue.get(qubit2).copied();
         if let Some(ry_val) = y_angle1 {
             self.ry_queue.insert(qubit2, ry_val);
         } else {
-            self.ry_queue.remove(&qubit2);
+            self.ry_queue.remove(qubit2);
         }
         if let Some(ry_val) = y_angle2 {
             self.ry_queue.insert(qubit1, ry_val);
         } else {
-            self.ry_queue.remove(&qubit1);
+            self.ry_queue.remove(qubit1);
         }
 
         let qubit1_mapped = *self
             .id_map
-            .get(&qubit1)
+            .get(qubit1)
             .unwrap_or_else(|| panic!("Unable to find qubit with id {qubit1}"));
         let qubit2_mapped = *self
             .id_map
-            .get(&qubit2)
+            .get(qubit2)
             .unwrap_or_else(|| panic!("Unable to find qubit with id {qubit2}"));
-        *self.id_map.get_mut(&qubit1).unwrap() = qubit2_mapped;
-        *self.id_map.get_mut(&qubit2).unwrap() = qubit1_mapped;
+        *self.id_map.get_mut(qubit1).unwrap() = qubit2_mapped;
+        *self.id_map.get_mut(qubit2).unwrap() = qubit1_mapped;
     }
 
     /// Swaps the states of two qubits throughout the sparse state map.
@@ -519,7 +518,7 @@ impl QuantumSim {
 
         let target = *self
             .id_map
-            .get(&target)
+            .get(target)
             .unwrap_or_else(|| panic!("Unable to find qubit with id {target}"))
             as u64;
 
@@ -528,7 +527,7 @@ impl QuantumSim {
             .map(|c| {
                 *self
                     .id_map
-                    .get(c)
+                    .get(*c)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {c}"))
                     as u64
             })
@@ -546,8 +545,8 @@ impl QuantumSim {
 
     fn has_queued_hrxy(&self, target: usize) -> bool {
         self.h_flag.bit(target as u64)
-            || self.rx_queue.contains_key(&target)
-            || self.ry_queue.contains_key(&target)
+            || self.rx_queue.contains_key(target)
+            || self.ry_queue.contains_key(target)
     }
 
     fn maybe_flush_queue(&mut self, qubits: &[usize], level: FlushLevel) {
@@ -608,16 +607,16 @@ impl QuantumSim {
     }
 
     fn flush_rx(&mut self, target: usize) {
-        if let Some(theta) = self.rx_queue.get(&target) {
+        if let Some(theta) = self.rx_queue.get(target) {
             self.mcrotation(&[], *theta, target, false);
-            self.rx_queue.remove(&target);
+            self.rx_queue.remove(target);
         }
     }
 
     fn flush_ry(&mut self, target: usize) {
-        if let Some(theta) = self.ry_queue.get(&target) {
+        if let Some(theta) = self.ry_queue.get(target) {
             self.mcrotation(&[], *theta, target, true);
-            self.ry_queue.remove(&target);
+            self.ry_queue.remove(target);
         }
     }
 
@@ -656,7 +655,7 @@ impl QuantumSim {
 
     /// Single qubit X gate.
     pub fn x(&mut self, target: usize) {
-        if let Some(entry) = self.ry_queue.get_mut(&target) {
+        if let Some(entry) = self.ry_queue.get_mut(target) {
             // XY = -YX, so switch the sign on any queued Ry rotations.
             *entry *= -1.0;
         }
@@ -677,14 +676,14 @@ impl QuantumSim {
 
         if ctls.len() > 1 {
             self.maybe_flush_queue(ctls, FlushLevel::HRxRy);
-        } else if self.ry_queue.contains_key(&ctls[0])
-            || self.rx_queue.contains_key(&ctls[0])
+        } else if self.ry_queue.contains_key(ctls[0])
+            || self.rx_queue.contains_key(ctls[0])
             || (self.h_flag.bit(ctls[0] as u64) && !self.h_flag.bit(target as u64))
         {
             self.flush_queue(ctls, FlushLevel::HRxRy);
         }
 
-        if self.ry_queue.contains_key(&target) {
+        if self.ry_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRxRy);
         }
 
@@ -718,7 +717,7 @@ impl QuantumSim {
 
     /// Single qubit Y gate.
     pub fn y(&mut self, target: usize) {
-        if let Some(entry) = self.rx_queue.get_mut(&target) {
+        if let Some(entry) = self.rx_queue.get_mut(target) {
             // XY = -YX, so flip the sign on any queued Rx rotation.
             *entry *= -1.0;
         }
@@ -736,7 +735,7 @@ impl QuantumSim {
 
         self.maybe_flush_queue(ctls, FlushLevel::HRxRy);
 
-        if self.rx_queue.contains_key(&target) {
+        if self.rx_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRx);
         }
 
@@ -782,12 +781,12 @@ impl QuantumSim {
 
     /// Single qubit Z gate.
     pub fn z(&mut self, target: usize) {
-        if let Some(entry) = self.ry_queue.get_mut(&target) {
+        if let Some(entry) = self.ry_queue.get_mut(target) {
             // ZY = -YZ, so flip the sign on any queued Ry rotations.
             *entry *= -1.0;
         }
 
-        if let Some(entry) = self.rx_queue.get_mut(&target) {
+        if let Some(entry) = self.rx_queue.get_mut(target) {
             // ZX = -XZ, so flip the sign on any queued Rx rotations.
             *entry *= -1.0;
         }
@@ -811,13 +810,13 @@ impl QuantumSim {
         let count = ctls.iter().fold(0, |accum, c| {
             accum
                 + i32::from(self.h_flag.bit(*c as u64))
-                + if self.rx_queue.contains_key(c) || self.ry_queue.contains_key(c) {
+                + if self.rx_queue.contains_key(*c) || self.ry_queue.contains_key(*c) {
                     2
                 } else {
                     0
                 }
         }) + i32::from(self.h_flag.bit(target as u64))
-            + if self.rx_queue.contains_key(&target) || self.ry_queue.contains_key(&target) {
+            + if self.rx_queue.contains_key(target) || self.ry_queue.contains_key(target) {
                 2
             } else {
                 0
@@ -962,12 +961,12 @@ impl QuantumSim {
 
     /// Single qubit H gate.
     pub fn h(&mut self, target: usize) {
-        if let Some(entry) = self.ry_queue.get_mut(&target) {
+        if let Some(entry) = self.ry_queue.get_mut(target) {
             // YH = -HY, so flip the sign on any queued Ry rotations.
             *entry *= -1.0;
         }
 
-        if self.rx_queue.contains_key(&target) {
+        if self.rx_queue.contains_key(target) {
             // Can't commute well with queued Rx, so flush those ops.
             self.flush_queue(&[target], FlushLevel::HRx);
         }
@@ -979,7 +978,7 @@ impl QuantumSim {
     /// Multi-controlled H gate.
     pub fn mch(&mut self, ctls: &[usize], target: usize) {
         self.flush_queue(ctls, FlushLevel::HRxRy);
-        if self.ry_queue.contains_key(&target) || self.rx_queue.contains_key(&target) {
+        if self.ry_queue.contains_key(target) || self.rx_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRxRy);
         }
 
@@ -1150,13 +1149,13 @@ impl QuantumSim {
     /// Single qubit Rx gate.
     pub fn rx(&mut self, theta: f64, target: usize) {
-        if self.h_flag.bit(target as u64) || self.ry_queue.contains_key(&target) {
+        if self.h_flag.bit(target as u64) || self.ry_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRxRy);
         }
 
-        if let Some(entry) = self.rx_queue.get_mut(&target) {
+        if let Some(entry) = self.rx_queue.get_mut(target) {
             *entry += theta;
             if entry.is_nearly_zero() {
-                self.rx_queue.remove(&target);
+                self.rx_queue.remove(target);
             }
         } else {
             self.rx_queue.insert(target, theta);
@@ -1167,7 +1166,7 @@ impl QuantumSim {
     pub fn mcrx(&mut self, ctls: &[usize], theta: f64, target: usize) {
         self.flush_queue(ctls, FlushLevel::HRxRy);
 
-        if self.ry_queue.contains_key(&target) {
+        if self.ry_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRxRy);
         } else if self.h_flag.bit(target as u64) {
             self.flush_queue(&[target], FlushLevel::H);
@@ -1178,10 +1177,10 @@ impl QuantumSim {
 
     /// Single qubit Ry gate.
     pub fn ry(&mut self, theta: f64, target: usize) {
-        if let Some(entry) = self.ry_queue.get_mut(&target) {
+        if let Some(entry) = self.ry_queue.get_mut(target) {
             *entry += theta;
             if entry.is_nearly_zero() {
-                self.ry_queue.remove(&target);
+                self.ry_queue.remove(target);
             }
         } else {
             self.ry_queue.insert(target, theta);
@@ -1192,7 +1191,7 @@ impl QuantumSim {
     pub fn mcry(&mut self, ctls: &[usize], theta: f64, target: usize) {
         self.flush_queue(ctls, FlushLevel::HRxRy);
 
-        if self.rx_queue.contains_key(&target) {
+        if self.rx_queue.contains_key(target) {
             self.flush_queue(&[target], FlushLevel::HRx);
         } else if self.h_flag.bit(target as u64) {
             self.flush_queue(&[target], FlushLevel::H);
@@ -1251,17 +1250,17 @@ impl QuantumSim {
             .for_each(|(target_loc, target)| {
                 let loc = *self
                     .id_map
-                    .get(target)
+                    .get(*target)
                     .unwrap_or_else(|| panic!("Unable to find qubit with id {target}"));
-                let swap_id = *self
+                let swap_id = self
                     .id_map
                     .iter()
                     .find(|(_, &value)| value == target_loc)
                     .unwrap()
                     .0;
                 self.swap_qubit_state(loc, target_loc);
-                *(self.id_map.get_mut(&swap_id).unwrap()) = loc;
-                *(self.id_map.get_mut(target).unwrap()) = target_loc;
+                *(self.id_map.get_mut(swap_id).unwrap()) = loc;
+                *(self.id_map.get_mut(*target).unwrap()) = target_loc;
             });
 
         let op_size = unitary.nrows();
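Note on the data-structure swap above: `IndexMap<K, V>` stores the value for key `k` at slot `k` of a dense `Vec<Option<V>>`, so inserts, lookups, and removals are a bounds check plus a direct index (no hashing), and iteration yields keys in ascending order, which is why `get_state` and `allocate` no longer sort the key list. Below is a minimal sketch of that behavior, written as a hypothetical `#[cfg(test)]` module that could sit at the bottom of `sparsesim/src/index_map.rs`; it is not part of the diff above.

```rust
// Hypothetical test module (illustration only, not included in the change above).
#[cfg(test)]
mod tests {
    use super::IndexMap;

    #[test]
    fn keys_iterate_in_order_without_hashing() {
        let mut map: IndexMap<usize, usize> = IndexMap::with_capacity(4);
        map.insert(2, 20);
        map.insert(0, 10);

        // Lookups index directly into the backing Vec<Option<V>>.
        assert_eq!(map.get(0), Some(&10));
        assert!(map.get(1).is_none());
        assert!(map.contains_key(2));

        // Iteration skips unset slots and yields keys in ascending order,
        // which is why callers such as `get_state` no longer sort the keys.
        let keys: Vec<usize> = map.iter().map(|(k, _)| k).collect();
        assert_eq!(keys, vec![0, 2]);

        map.remove(2);
        assert!(!map.contains_key(2));
    }
}
```

The new allocation benchmark can be run on its own with `cargo bench --bench qubit_management` from the `backend` directory.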