diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..ec05e4b
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,9 @@
+repos:
+  - repo: local
+    hooks:
+      - id: rustfmt
+        name: rustfmt
+        description: Check if all files follow the rustfmt style
+        entry: cargo fmt --all -- --color always
+        language: system
+        pass_filenames: false
\ No newline at end of file
diff --git a/src/array.rs b/src/array.rs
index 18f1ce3..13ad292 100644
--- a/src/array.rs
+++ b/src/array.rs
@@ -1,20 +1,20 @@
+use crate::utils::{cartesian_product, update_bytes_flen, update_bytes_flen_with_indexer};
+use dlpark::prelude::*;
 use numpy::ndarray::{Array, ArrayBase, ArrayViewD};
 use numpy::{PyArray, PyArray2, PyArrayDyn, PyArrayMethods};
 use pyo3::exceptions::{PyIndexError, PyTypeError, PyValueError};
 use pyo3::prelude::*;
+use pyo3::types::{PyInt, PyList, PySlice, PyTuple};
+use rayon::iter::{IntoParallelIterator, ParallelBridge, ParallelIterator};
+use rayon::prelude::*;
 use rayon_iter_concurrent_limit::iter_concurrent_limit;
+use std::ffi::c_void;
+use std::ops::Range;
 use zarrs::array::codec::CodecOptionsBuilder;
 use zarrs::array::{Array as RustArray, ArrayCodecTraits, RecommendedConcurrency, UnsafeCellSlice};
 use zarrs::array_subset::ArraySubset;
 use zarrs::config::global_config;
 use zarrs::storage::ReadableStorageTraits;
-use pyo3::types::{PyInt, PyList, PySlice, PyTuple};
-use std::ops::Range;
-use dlpark::prelude::*;
-use std::ffi::c_void;
-use rayon::iter::{IntoParallelIterator, ParallelBridge, ParallelIterator};
-use rayon::prelude::*;
-use crate::utils::{cartesian_product, update_bytes_flen, update_bytes_flen_with_indexer};
 
 struct Chunk<'a> {
     index: &'a Vec<u64>,
@@ -30,140 +30,241 @@ struct NdArrayChunk<'a> {
 
 #[pyclass]
 pub struct ZarrsPythonArray {
-    pub arr: RustArray<dyn ReadableStorageTraits>
+    pub arr: RustArray<dyn ReadableStorageTraits>,
 }
 
 impl ZarrsPythonArray {
-
     fn maybe_convert_u64(&self, ind: i32, axis: usize) -> PyResult<u64> {
         let mut ind_u64: u64 = ind as u64;
         if ind < 0 {
             if self.arr.shape()[axis] as i32 + ind < 0 {
-                return Err(PyIndexError::new_err(format!("{0} out of bounds", ind)))
+                return Err(PyIndexError::new_err(format!("{0} out of bounds", ind)));
             }
-            ind_u64 = u64::try_from(ind).map_err(|_| PyIndexError::new_err("Failed to extract start"))?;
+            ind_u64 =
+                u64::try_from(ind).map_err(|_| PyIndexError::new_err("Failed to extract start"))?;
         }
         return Ok(ind_u64);
     }
 
     fn bound_slice(&self, slice: &Bound<'_, PySlice>, axis: usize) -> PyResult<Range<u64>> {
         let start: i32 = slice.getattr("start")?.extract().map_or(0, |x| x);
-        let stop: i32 = slice.getattr("stop")?.extract().map_or(self.arr.shape()[axis] as i32, |x| x);
+        let stop: i32 = slice
+            .getattr("stop")?
+            .extract()
+            .map_or(self.arr.shape()[axis] as i32, |x| x);
         let start_u64 = self.maybe_convert_u64(start, 0)?;
         let stop_u64 = self.maybe_convert_u64(stop, 0)?;
         // let _step: u64 = slice.getattr("step")?.extract().map_or(1, |x| x); // there is no way to use step it seems with zarrs?
         let selection = start_u64..stop_u64;
-        return Ok(selection)
+        return Ok(selection);
     }
 
     pub fn fill_from_slices(&self, slices: Vec<Range<u64>>) -> PyResult<Vec<Range<u64>>> {
-        Ok(self.arr.shape().iter().enumerate().map(|(index, &value)| { if index < slices.len() { slices[index].clone() } else { 0..value } }).collect())
+        Ok(self
+            .arr
+            .shape()
+            .iter()
+            .enumerate()
+            .map(|(index, &value)| {
+                if index < slices.len() {
+                    slices[index].clone()
+                } else {
+                    0..value
+                }
+            })
+            .collect())
     }
 
-    fn extract_coords(&self, chunk_coords_and_selections: &Bound<'_, PyList>) -> PyResult<Vec<Vec<u64>>> {
-        chunk_coords_and_selections.into_iter().map(|chunk_coord_and_selection| {
-            if let Ok(chunk_coord_and_selection_tuple) = chunk_coord_and_selection.downcast::<PyTuple>() {
-                let coord = chunk_coord_and_selection_tuple.get_item(0)?;
-                let coord_extracted: Vec<u64>;
-                if let Ok(coord_downcast) = coord.downcast::() {
-                    coord_extracted = coord_downcast.extract()?;
-                    return Ok(coord_extracted);
-                } else if let Ok(nd_array) = coord.downcast::>() {
-                    let nd_array_extracted: Vec<u64> = nd_array.to_vec()?;
-                    return Ok(nd_array_extracted);
-                } else {
-                    return Err(PyValueError::new_err(format!("Cannot take {0}, must be int, ndarray, or slice", coord.to_string())));
+    fn extract_coords(
+        &self,
+        chunk_coords_and_selections: &Bound<'_, PyList>,
+    ) -> PyResult<Vec<Vec<u64>>> {
+        chunk_coords_and_selections
+            .into_iter()
+            .map(|chunk_coord_and_selection| {
+                if let Ok(chunk_coord_and_selection_tuple) =
+                    chunk_coord_and_selection.downcast::<PyTuple>()
+                {
+                    let coord = chunk_coord_and_selection_tuple.get_item(0)?;
+                    let coord_extracted: Vec<u64>;
+                    if let Ok(coord_downcast) = coord.downcast::() {
+                        coord_extracted = coord_downcast.extract()?;
+                        return Ok(coord_extracted);
+                    } else if let Ok(nd_array) = coord.downcast::>() {
+                        let nd_array_extracted: Vec<u64> = nd_array.to_vec()?;
+                        return Ok(nd_array_extracted);
+                    } else {
+                        return Err(PyValueError::new_err(format!(
+                            "Cannot take {0}, must be int, ndarray, or slice",
+                            coord.to_string()
+                        )));
+                    }
                 }
-            }
-            return Err(PyTypeError::new_err(format!("Unsupported type: {0}", chunk_coord_and_selection)));
-        }).collect::<PyResult<Vec<Vec<u64>>>>()
+                return Err(PyTypeError::new_err(format!(
+                    "Unsupported type: {0}",
+                    chunk_coord_and_selection
+                )));
+            })
+            .collect::<PyResult<Vec<Vec<u64>>>>()
     }
 
-    fn extract_selection_to_array_subset(&self, chunk_coords_and_selections: &Bound<'_, PyList>, index: usize) -> PyResult<Vec<ArraySubset>> {
-        chunk_coords_and_selections.into_iter().map(|chunk_coord_and_selection| {
-            if let Ok(chunk_coord_and_selection_tuple) = chunk_coord_and_selection.downcast::<PyTuple>() {
-                let selection = chunk_coord_and_selection_tuple.get_item(index)?;
-                if let Ok(slice) = selection.downcast::<PySlice>() {
-                    return Ok(ArraySubset::new_with_ranges(&self.fill_from_slices(vec![self.bound_slice(slice, 0)?])?));
-                } else if let Ok(tuple) = selection.downcast::<PyTuple>(){
-                    let ranges: Vec<Range<u64>> = tuple.into_iter().enumerate().map(|(index, val)| {
-                        if let Ok(int) = val.downcast::<PyInt>() {
-                            let end = self.maybe_convert_u64(int.extract()?, index)?;
-                            Ok(end..(end + 1))
-                        } else if let Ok(slice) = val.downcast::<PySlice>() {
-                            Ok(self.bound_slice(slice, index)?)
-                        } else {
-                            return Err(PyValueError::new_err(format!("Cannot take {0}, must be int or slice", val.to_string())));
-                        }
-                    }).collect::<Result<Vec<Range<u64>>, _>>()?;
-                    return Ok(ArraySubset::new_with_ranges(&self.fill_from_slices(ranges)?));
-                } else {
-                    return Err(PyTypeError::new_err(format!("Unsupported type: {0}", selection)));
+    fn extract_selection_to_array_subset(
+        &self,
+        chunk_coords_and_selections: &Bound<'_, PyList>,
+        index: usize,
+    ) -> PyResult<Vec<ArraySubset>> {
+        chunk_coords_and_selections
+            .into_iter()
+            .map(|chunk_coord_and_selection| {
+                if let Ok(chunk_coord_and_selection_tuple) =
+                    chunk_coord_and_selection.downcast::<PyTuple>()
+                {
+                    let selection = chunk_coord_and_selection_tuple.get_item(index)?;
+                    if let Ok(slice) = selection.downcast::<PySlice>() {
+                        return Ok(ArraySubset::new_with_ranges(
+                            &self.fill_from_slices(vec![self.bound_slice(slice, 0)?])?,
+                        ));
+                    } else if let Ok(tuple) = selection.downcast::<PyTuple>() {
+                        let ranges: Vec<Range<u64>> = tuple
+                            .into_iter()
+                            .enumerate()
+                            .map(|(index, val)| {
+                                if let Ok(int) = val.downcast::<PyInt>() {
+                                    let end = self.maybe_convert_u64(int.extract()?, index)?;
+                                    Ok(end..(end + 1))
+                                } else if let Ok(slice) = val.downcast::<PySlice>() {
+                                    Ok(self.bound_slice(slice, index)?)
+                                } else {
+                                    return Err(PyValueError::new_err(format!(
+                                        "Cannot take {0}, must be int or slice",
+                                        val.to_string()
+                                    )));
+                                }
+                            })
+                            .collect::<Result<Vec<Range<u64>>, _>>()?;
+                        return Ok(ArraySubset::new_with_ranges(
+                            &self.fill_from_slices(ranges)?,
+                        ));
+                    } else {
+                        return Err(PyTypeError::new_err(format!(
+                            "Unsupported type: {0}",
+                            selection
+                        )));
+                    }
                 }
-            }
-            return Err(PyTypeError::new_err(format!("Unsupported type: {0}", chunk_coord_and_selection)));
-        }).collect::<PyResult<Vec<ArraySubset>>>()
+                return Err(PyTypeError::new_err(format!(
+                    "Unsupported type: {0}",
+                    chunk_coord_and_selection
+                )));
+            })
+            .collect::<PyResult<Vec<ArraySubset>>>()
     }
 
-    fn extract_selection_to_vec_indices(&self, chunk_coords_and_selections: &Bound<'_, PyList>, index: usize) -> PyResult<Vec<Vec<Vec<u64>>>> {
-        chunk_coords_and_selections.into_iter().map(|chunk_coord_and_selection| {
-            if let Ok(chunk_coord_and_selection_tuple) = chunk_coord_and_selection.downcast::<PyTuple>() {
-                let selection = chunk_coord_and_selection_tuple.get_item(index)?;
-                if let Ok(tuple) = selection.downcast::<PyTuple>(){
-                    let res = tuple.into_iter().map(|(val)| {
-                        if let Ok(nd_array) = val.downcast::>() {
-                            let res = nd_array.to_vec()?;
-                            Ok(res)
-                        } else {
-                            Err(PyTypeError::new_err(format!("Unsupported type: {0}", tuple)))
-                        }
-                    }).collect::<PyResult<Vec<Vec<u64>>>>()?;
-                    return Ok(res);
-                } else {
-                    return Err(PyTypeError::new_err(format!("Unsupported type: {0}", selection)));
+    fn extract_selection_to_vec_indices(
+        &self,
+        chunk_coords_and_selections: &Bound<'_, PyList>,
+        index: usize,
+    ) -> PyResult<Vec<Vec<Vec<u64>>>> {
+        chunk_coords_and_selections
+            .into_iter()
+            .map(|chunk_coord_and_selection| {
+                if let Ok(chunk_coord_and_selection_tuple) =
+                    chunk_coord_and_selection.downcast::<PyTuple>()
+                {
+                    let selection = chunk_coord_and_selection_tuple.get_item(index)?;
+                    if let Ok(tuple) = selection.downcast::<PyTuple>() {
+                        let res = tuple
+                            .into_iter()
+                            .map(|(val)| {
+                                if let Ok(nd_array) = val.downcast::>() {
+                                    let res = nd_array.to_vec()?;
+                                    Ok(res)
+                                } else {
+                                    Err(PyTypeError::new_err(format!(
+                                        "Unsupported type: {0}",
+                                        tuple
+                                    )))
+                                }
+                            })
+                            .collect::<PyResult<Vec<Vec<u64>>>>()?;
+                        return Ok(res);
+                    } else {
+                        return Err(PyTypeError::new_err(format!(
+                            "Unsupported type: {0}",
+                            selection
+                        )));
+                    }
                 }
-            }
-            return Err(PyTypeError::new_err(format!("Unsupported type: {0}", chunk_coord_and_selection)));
-        }).collect::<PyResult<Vec<Vec<Vec<u64>>>>>()
+                return Err(PyTypeError::new_err(format!(
+                    "Unsupported type: {0}",
+                    chunk_coord_and_selection
+                )));
+            })
+            .collect::<PyResult<Vec<Vec<Vec<u64>>>>>()
     }
 
-    fn is_selection_numpy_array(&self, chunk_coords_and_selections: &Bound<'_, PyList>, index: usize) -> bool {
-        let results = chunk_coords_and_selections.into_iter().map(|chunk_coord_and_selection| {
-            if let Ok(chunk_coord_and_selection_tuple) = chunk_coord_and_selection.downcast::<PyTuple>() {
-                let selection = chunk_coord_and_selection_tuple.get_item(index);
-                if let Ok(selection_unwrapped) = selection {
-                    if let Ok(tuple) = selection_unwrapped.downcast::<PyTuple>(){
-                        let res: Vec<bool> = tuple.into_iter().map(|(val)| -> bool {
-                            let nd_array = val.downcast::>();
-                            let res = match nd_array {
-                                Ok(_) => true,
-                                Err(_) => false
-                            };
+    fn is_selection_numpy_array(
+        &self,
+        chunk_coords_and_selections: &Bound<'_, PyList>,
+        index: usize,
+    ) -> bool {
+        let results = chunk_coords_and_selections
+            .into_iter()
+            .map(|chunk_coord_and_selection| {
+                if let Ok(chunk_coord_and_selection_tuple) =
+                    chunk_coord_and_selection.downcast::<PyTuple>()
+                {
+                    let selection = chunk_coord_and_selection_tuple.get_item(index);
+                    if let Ok(selection_unwrapped) = selection {
+                        if let Ok(tuple) = selection_unwrapped.downcast::<PyTuple>() {
+                            let res: Vec<bool> = tuple
+                                .into_iter()
+                                .map(|(val)| -> bool {
+                                    let nd_array = val.downcast::>();
+                                    let res = match nd_array {
+                                        Ok(_) => true,
+                                        Err(_) => false,
+                                    };
+                                    return res;
+                                })
+                                .collect();
                             return res;
-                        }).collect();
-                        return res;
+                        }
+                        return vec![false];
                     }
                     return vec![false];
                 }
-                return vec![false]
-            }
-            return vec![false];
-        }).flatten().collect::<Vec<bool>>();
-        results.iter().any(|x: &bool| *x )
+                return vec![false];
+            })
+            .flatten()
+            .collect::<Vec<bool>>();
+        results.iter().any(|x: &bool| *x)
     }
 }
 
 #[pymethods]
 impl ZarrsPythonArray {
-
-    pub fn retrieve_chunk_subset(&self, out_shape: &Bound<'_, PyTuple>, chunk_coords_and_selections: &Bound<'_, PyList>) -> PyResult<ManagerCtx<PyZarrArr>> {
-        if let Ok(chunk_coords_and_selection_list) = chunk_coords_and_selections.downcast::<PyList>() {
+    pub fn retrieve_chunk_subset(
+        &self,
+        out_shape: &Bound<'_, PyTuple>,
+        chunk_coords_and_selections: &Bound<'_, PyList>,
+    ) -> PyResult<ManagerCtx<PyZarrArr>> {
+        if let Ok(chunk_coords_and_selection_list) =
+            chunk_coords_and_selections.downcast::<PyList>()
+        {
            // Need to scale up everything because zarr's chunks don't match zarrs' chunks
-            let chunk_representation = self.arr.chunk_array_representation(&vec![0; self.arr.chunk_grid().dimensionality()]).map_err(|x| PyErr::new::(x.to_string()))?;
+            let chunk_representation = self
+                .arr
+                .chunk_array_representation(&vec![0; self.arr.chunk_grid().dimensionality()])
+                .map_err(|x| PyErr::new::(x.to_string()))?;
             let data_type_size = chunk_representation.data_type().size();
-            let out_shape_extracted = out_shape.into_iter().map(|x| x.extract::<u64>()).collect::<PyResult<Vec<u64>>>()?;
+            let out_shape_extracted = out_shape
+                .into_iter()
+                .map(|x| x.extract::<u64>())
+                .collect::<PyResult<Vec<u64>>>()?;
             let coords_extracted = &self.extract_coords(chunk_coords_and_selection_list)?;
-            let out_selections_extracted = &self.extract_selection_to_array_subset(chunk_coords_and_selections, 2)?;
+            let out_selections_extracted =
+                &self.extract_selection_to_array_subset(chunk_coords_and_selections, 2)?;
             let chunks = ArraySubset::new_with_shape(self.arr.chunk_grid_shape().unwrap());
             let concurrent_target = std::thread::available_parallelism().unwrap().get();
             let (chunks_concurrent_limit, codec_concurrent_target) =
@@ -176,23 +277,45 @@ impl ZarrsPythonArray {
                         );
                         RecommendedConcurrency::new_minimum(concurrent_chunks)
                     },
-                &self.arr
+                &self
+                    .arr
                     .codecs()
-                    .recommended_concurrency(&chunk_representation).map_err(|x| PyErr::new::(x.to_string()))?,
+                    .recommended_concurrency(&chunk_representation)
+                    .map_err(|x| PyErr::new::(x.to_string()))?,
                 );
 
-            let codec_options = CodecOptionsBuilder::new().concurrent_target(codec_concurrent_target).build();
+            let codec_options = CodecOptionsBuilder::new()
+                .concurrent_target(codec_concurrent_target)
+                .build();
             let size_output = out_shape_extracted.iter().product::<u64>() as usize;
             let mut output = Vec::with_capacity(size_output * data_type_size);
             if self.is_selection_numpy_array(chunk_coords_and_selections, 1) {
-                let selections_extracted = self.extract_selection_to_vec_indices(chunk_coords_and_selections, 1)?;
+                let selections_extracted =
+                    self.extract_selection_to_vec_indices(chunk_coords_and_selections, 1)?;
                 let borrowed_selections = &selections_extracted;
                 {
-                    let output =
-                        UnsafeCellSlice::new_from_vec_with_spare_capacity(&mut output);
+                    let output = UnsafeCellSlice::new_from_vec_with_spare_capacity(&mut output);
                     let retrieve_chunk = |chunk: NdArrayChunk| {
-                        let indices: Vec<u64> = cartesian_product(chunk.selection).iter().map(|x| x.iter().enumerate().fold(0, |acc, (ind, x)| {acc + (*x as u64) * if (ind + 1 == chunk.selection.len()) { 1 } else { self.arr.chunk_shape(&chunk.index).unwrap()[(ind + 1)..].iter().map(|x| x.get() as u64).product::<u64>()}})).collect();
-                        let chunk_subset_bytes = self.arr.retrieve_chunk(&chunk.index).map_err(|x| PyErr::new::(x.to_string()))?;
+                        let indices: Vec<u64> = cartesian_product(chunk.selection)
+                            .iter()
+                            .map(|x| {
+                                x.iter().enumerate().fold(0, |acc, (ind, x)| {
+                                    acc + (*x as u64)
+                                        * if (ind + 1 == chunk.selection.len()) {
+                                            1
+                                        } else {
+                                            self.arr.chunk_shape(&chunk.index).unwrap()[(ind + 1)..]
+                                                .iter()
+                                                .map(|x| x.get() as u64)
+                                                .product::<u64>()
+                                        }
+                                })
+                            })
+                            .collect();
+                        let chunk_subset_bytes = self
+                            .arr
+                            .retrieve_chunk(&chunk.index)
+                            .map_err(|x| PyErr::new::(x.to_string()))?;
                         update_bytes_flen_with_indexer(
                             unsafe { output.get() },
                             &out_shape_extracted,
@@ -203,7 +326,15 @@ impl ZarrsPythonArray {
                         );
                         Ok::<_, PyErr>(())
                     };
-                    let zipped_iterator = coords_extracted.into_iter().zip(borrowed_selections.into_iter()).zip(out_selections_extracted.into_iter()).map(|((index, selection), out_selection)| NdArrayChunk { index, selection, out_selection });
+                    let zipped_iterator = coords_extracted
+                        .into_iter()
+                        .zip(borrowed_selections.into_iter())
+                        .zip(out_selections_extracted.into_iter())
+                        .map(|((index, selection), out_selection)| NdArrayChunk {
+                            index,
+                            selection,
+                            out_selection,
+                        });
                     iter_concurrent_limit!(
                         chunks_concurrent_limit,
                         zipped_iterator.collect::>(),
@@ -212,16 +343,24 @@ impl ZarrsPythonArray {
                    )?;
                }
                unsafe { output.set_len(size_output) };
-                return Ok(ManagerCtx::new(PyZarrArr{ shape: out_shape_extracted, arr: output, dtype: chunk_representation.data_type().clone() }));
+                return Ok(ManagerCtx::new(PyZarrArr {
+                    shape: out_shape_extracted,
+                    arr: output,
+                    dtype: chunk_representation.data_type().clone(),
+                }));
            }
-            let selections_extracted = self.extract_selection_to_array_subset(chunk_coords_and_selections, 1)?;
-            let out_selections_extracted = &self.extract_selection_to_array_subset(chunk_coords_and_selections, 2)?;
+            let selections_extracted =
+                self.extract_selection_to_array_subset(chunk_coords_and_selections, 1)?;
+            let out_selections_extracted =
+                &self.extract_selection_to_array_subset(chunk_coords_and_selections, 2)?;
            let borrowed_selections = &selections_extracted;
            {
-                let output =
-                    UnsafeCellSlice::new_from_vec_with_spare_capacity(&mut output);
+                let output = UnsafeCellSlice::new_from_vec_with_spare_capacity(&mut output);
                let retrieve_chunk = |chunk: Chunk| {
-                    let chunk_subset_bytes = self.arr.retrieve_chunk_subset_opt(&chunk.index, &chunk.selection, &codec_options).map_err(|x| PyErr::new::(x.to_string()))?;
+                    let chunk_subset_bytes = self
+                        .arr
+                        .retrieve_chunk_subset_opt(&chunk.index, &chunk.selection, &codec_options)
+                        .map_err(|x| PyErr::new::(x.to_string()))?;
                     update_bytes_flen(
                         unsafe { output.get() },
                         &out_shape_extracted,
@@ -231,7 +370,15 @@ impl ZarrsPythonArray {
                     );
                     Ok::<_, PyErr>(())
                };
-                let zipped_iterator = coords_extracted.into_iter().zip(borrowed_selections.into_iter()).zip(out_selections_extracted.into_iter()).map(|((index, selection), out_selection)| Chunk { index, selection, out_selection });
+                let zipped_iterator = coords_extracted
+                    .into_iter()
+                    .zip(borrowed_selections.into_iter())
+                    .zip(out_selections_extracted.into_iter())
+                    .map(|((index, selection), out_selection)| Chunk {
+                        index,
+                        selection,
+                        out_selection,
+                    });
                iter_concurrent_limit!(
                    chunks_concurrent_limit,
                    zipped_iterator.collect::>(),
@@ -240,27 +387,37 @@ impl ZarrsPythonArray {
                )?;
            }
            unsafe { output.set_len(size_output) };
-            Ok(ManagerCtx::new(PyZarrArr{ shape: out_shape_extracted, arr: output, dtype: chunk_representation.data_type().clone() }))
+            Ok(ManagerCtx::new(PyZarrArr {
+                shape: out_shape_extracted,
+                arr: output,
+                dtype: chunk_representation.data_type().clone(),
+            }))
         } else {
-            return Err(PyTypeError::new_err(format!("Unsupported type: {0}", chunk_coords_and_selections)));
+            return Err(PyTypeError::new_err(format!(
+                "Unsupported type: {0}",
+                chunk_coords_and_selections
+            )));
         }
     }
 }
 
-
 pub struct PyZarrArr {
     arr: Vec<u8>,
     shape: Vec<u64>,
-    dtype: zarrs::array::DataType
+    dtype: zarrs::array::DataType,
 }
 
-impl ToTensor for PyZarrArr {
+impl ToTensor for PyZarrArr {
     fn data_ptr(&self) -> *mut std::ffi::c_void {
         self.arr.as_ptr() as *const c_void as *mut c_void
     }
 
     fn shape_and_strides(&self) -> ShapeAndStrides {
         ShapeAndStrides::new_contiguous_with_strides(
-            self.shape.iter().map(|x| *x as i64).collect::<Vec<i64>>().iter(),
+            self.shape
+                .iter()
+                .map(|x| *x as i64)
+                .collect::<Vec<i64>>()
+                .iter(),
         )
     }
 
@@ -268,13 +425,12 @@ impl ToTensor for PyZarrArr {
         0
     }
 
-
     fn device(&self) -> Device {
         Device::CPU
     }
 
     fn dtype(&self) -> DataType {
-        match self.dtype {
+        match self.dtype {
             zarrs::array::DataType::Int16 => DataType::I16,
             zarrs::array::DataType::Int32 => DataType::I32,
             zarrs::array::DataType::Int64 => DataType::I64,
@@ -286,7 +442,7 @@ impl ToTensor for PyZarrArr {
             zarrs::array::DataType::Float32 => DataType::F32,
             zarrs::array::DataType::Float64 => DataType::F64,
             zarrs::array::DataType::Bool => DataType::BOOL,
-            _ => panic!("Unsupported data type")
+            _ => panic!("Unsupported data type"),
         }
     }
- }
\ No newline at end of file
+}
diff --git a/src/lib.rs b/src/lib.rs
index 0151624..1652a28 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,7 +1,7 @@
 use pyo3::{exceptions::PyTypeError, prelude::*};
 use std::sync::Arc;
-use zarrs::storage::{ReadableStorage, store};
 use zarrs::array::Array as RustArray;
+use zarrs::storage::{store, ReadableStorage};
 
 mod array;
 mod utils;
@@ -14,8 +14,8 @@ fn open_array(path: &str) -> PyResult<array::ZarrsPythonArray> {
     } else {
         s = Arc::new(store::FilesystemStore::new(path).or_else(|x| utils::err(x.to_string()))?);
     }
-    let arr = RustArray::new(s, &"/").or_else(|x| utils::err(x.to_string()))?;
-    Ok(array::ZarrsPythonArray{ arr })
+    let arr = RustArray::new(s, &"/").or_else(|x| utils::err(x.to_string()))?;
+    Ok(array::ZarrsPythonArray { arr })
 }
 
 /// A Python module implemented in Rust.
diff --git a/src/utils.rs b/src/utils.rs
index a9f52f7..a105e7c 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,7 +1,9 @@
 use pyo3::{exceptions::PyTypeError, PyErr};
 use zarrs::array_subset::ArraySubset;
 
-pub fn err<T>(msg: String) -> Result<T, PyErr> { Err(PyErr::new::<PyTypeError, _>(msg)) }
+pub fn err<T>(msg: String) -> Result<T, PyErr> {
+    Err(PyErr::new::<PyTypeError, _>(msg))
+}
 
 pub fn update_bytes_flen_with_indexer(
     output_bytes: &mut [u8],
@@ -11,13 +13,13 @@ pub fn update_bytes_flen_with_indexer(
     indexer: &Vec<u64>,
     data_type_size: usize,
 ) {
-
     let contiguous_indices = unsafe { subset.contiguous_linearised_indices_unchecked(output_shape) };
     // TODO: Par iteration?
     let mut indexer_index = 0;
     for (array_subset_element_index, _num_elements) in contiguous_indices.iter() {
-        let mut output_offset = usize::try_from(array_subset_element_index).unwrap() * data_type_size;
+        let mut output_offset =
+            usize::try_from(array_subset_element_index).unwrap() * data_type_size;
         for _num_elem in 0.._num_elements {
             let decoded_offset = (indexer[indexer_index] as usize) * data_type_size;
             debug_assert!((output_offset + data_type_size) <= output_bytes.len());
@@ -40,7 +42,10 @@ pub fn update_bytes_flen(
     debug_assert_eq!(
         output_bytes.len(),
         usize::try_from(output_shape.iter().product::<u64>()).unwrap() * data_type_size,
-        "Failed out check: output_bytes.len(): {:?}, output_shape: {:?}, data_type_size: {:?}", output_bytes.len(), output_shape, data_type_size,
+        "Failed out check: output_bytes.len(): {:?}, output_shape: {:?}, data_type_size: {:?}",
+        output_bytes.len(),
+        output_shape,
+        data_type_size,
     );
     debug_assert_eq!(
         subset_bytes.len(),
@@ -67,12 +72,14 @@ pub fn cartesian_product(vecs: &Vec<Vec<u64>>) -> Vec<Vec<u64>> {
     vecs.into_iter().fold(vec![vec![]], |acc, vec| {
         acc.into_iter()
             .flat_map(|prefix| {
-                vec.iter().map(move |elem| {
-                    let mut new_prefix = prefix.clone();
-                    new_prefix.push(elem.clone());
-                    new_prefix
-                }).collect::<Vec<_>>()
+                vec.iter()
+                    .map(move |elem| {
+                        let mut new_prefix = prefix.clone();
+                        new_prefix.push(elem.clone());
+                        new_prefix
+                    })
+                    .collect::<Vec<_>>()
             })
             .collect()
     })
-}
\ No newline at end of file
+}