diff --git a/turbopack/crates/turbo-persistence-tools/src/main.rs b/turbopack/crates/turbo-persistence-tools/src/main.rs
index 6a384bae92ab4e..a1b2bb15a1f09f 100644
--- a/turbopack/crates/turbo-persistence-tools/src/main.rs
+++ b/turbopack/crates/turbo-persistence-tools/src/main.rs
@@ -35,7 +35,6 @@ fn main() -> Result<()> {
         amqf_entries,
         sst_size,
         key_compression_dictionary_size,
-        value_compression_dictionary_size,
         block_count,
     } in meta_file.entries
     {
@@ -45,15 +44,11 @@ fn main() -> Result<()> {
         );
         println!("  AMQF {amqf_entries} entries = {} KiB", amqf_size / 1024);
         println!(
-            "  {} KiB = {} kiB key compression dict + {} KiB value compression dict + \
-             {block_count} blocks (avg {} bytes/block)",
+            "  {} KiB = {} KiB key compression dict + {block_count} blocks (avg {} \
+             bytes/block)",
            sst_size / 1024,
            key_compression_dictionary_size / 1024,
-            value_compression_dictionary_size / 1024,
-            (sst_size
-                - key_compression_dictionary_size as u64
-                - value_compression_dictionary_size as u64)
-                / block_count as u64
+            (sst_size - key_compression_dictionary_size as u64) / block_count as u64
        );
     }
     if !meta_file.obsolete_sst_files.is_empty() {
diff --git a/turbopack/crates/turbo-persistence/README.md b/turbopack/crates/turbo-persistence/README.md
index 93d97ab052ad68..51baa4c5a457c6 100644
--- a/turbopack/crates/turbo-persistence/README.md
+++ b/turbopack/crates/turbo-persistence/README.md
@@ -45,7 +45,6 @@ A meta file can contain metadata about multiple SST files. The metadata is store
 - foreach described SST file
   - 4 bytes sequence number of the SST file
   - 2 bytes key Compression Dictionary length
-  - 2 bytes value Compression Dictionary length
   - 2 bytes block count
   - 8 bytes min hash
   - 8 bytes max hash
@@ -59,7 +58,6 @@ A meta file can contain metadata about multiple SST files. The metadata is store
 The SST file contains only data without any header.
 
 - serialized key Compression Dictionary
-- serialized value Compression Dictionary
 - foreach block
   - 4 bytes uncompressed block length
   - compressed data
diff --git a/turbopack/crates/turbo-persistence/src/collector.rs b/turbopack/crates/turbo-persistence/src/collector.rs
index 6637ea2c13e3ce..b955d6102bec14 100644
--- a/turbopack/crates/turbo-persistence/src/collector.rs
+++ b/turbopack/crates/turbo-persistence/src/collector.rs
@@ -92,11 +92,11 @@ impl<K: KeyBase, const SIZE_SHIFT: usize> Collector<K, SIZE_SHIFT> {
         self.entries.push(entry);
     }
 
-    /// Sorts the entries and returns them along with the total key and value sizes. This doesn't
+    /// Sorts the entries and returns them along with the total key size. This doesn't
     /// clear the entries.
-    pub fn sorted(&mut self) -> (&[CollectorEntry<K>], usize, usize) {
+    pub fn sorted(&mut self) -> (&[CollectorEntry<K>], usize) {
         self.entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
-        (&self.entries, self.total_key_size, self.total_value_size)
+        (&self.entries, self.total_key_size)
     }
 
     /// Clears the collector.
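Note: per the README hunk above, each described SST file now contributes a fixed 24-byte prefix (4 + 2 + 2 + 8 + 8 bytes, big-endian) with the value-dictionary length gone. A minimal, self-contained sketch of parsing that prefix follows; the struct and function names are hypothetical, and the crate's actual reader in meta_file.rs uses the `byteorder` crate instead:

```rust
use std::io::{self, Cursor, Read};

/// Hypothetical mirror of one per-SST entry prefix in the meta file
/// after this change (no 2-byte value dictionary length anymore).
#[derive(Debug, PartialEq)]
struct SstEntryPrefix {
    sequence_number: u32,
    key_compression_dictionary_length: u16,
    block_count: u16,
    min_hash: u64,
    max_hash: u64,
}

fn read_prefix(r: &mut impl Read) -> io::Result<SstEntryPrefix> {
    // 4 + 2 + 2 + 8 + 8 = 24 bytes, all big-endian like the rest of the format.
    let mut buf = [0u8; 24];
    r.read_exact(&mut buf)?;
    Ok(SstEntryPrefix {
        sequence_number: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
        key_compression_dictionary_length: u16::from_be_bytes(buf[4..6].try_into().unwrap()),
        block_count: u16::from_be_bytes(buf[6..8].try_into().unwrap()),
        min_hash: u64::from_be_bytes(buf[8..16].try_into().unwrap()),
        max_hash: u64::from_be_bytes(buf[16..24].try_into().unwrap()),
    })
}

fn main() -> io::Result<()> {
    // Round-trip a hand-built entry to show the layout.
    let mut bytes = Vec::new();
    bytes.extend_from_slice(&42u32.to_be_bytes()); // sequence number
    bytes.extend_from_slice(&512u16.to_be_bytes()); // key dict length
    bytes.extend_from_slice(&7u16.to_be_bytes()); // block count
    bytes.extend_from_slice(&u64::MIN.to_be_bytes()); // min hash
    bytes.extend_from_slice(&u64::MAX.to_be_bytes()); // max hash
    let entry = read_prefix(&mut Cursor::new(bytes))?;
    assert_eq!(entry.sequence_number, 42);
    assert_eq!(entry.block_count, 7);
    Ok(())
}
```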
diff --git a/turbopack/crates/turbo-persistence/src/db.rs b/turbopack/crates/turbo-persistence/src/db.rs
index 95cadfc14b37ed..644019a4b64f96 100644
--- a/turbopack/crates/turbo-persistence/src/db.rs
+++ b/turbopack/crates/turbo-persistence/src/db.rs
@@ -898,8 +898,6 @@ impl TurboPersistence {
                             amqf,
                             key_compression_dictionary_length: entry
                                 .key_compression_dictionary_length(),
-                            value_compression_dictionary_length: entry
-                                .value_compression_dictionary_length(),
                             block_count: entry.block_count(),
                             size: entry.size(),
                             entries: 0,
@@ -914,7 +912,6 @@
     parallel_scheduler: &S,
     entries: &[LookupEntry<'l>],
     total_key_size: usize,
-    total_value_size: usize,
     path: &Path,
     seq: u32,
 ) -> Result<(u32, File, StaticSortedFileBuilderMeta<'static>)>
@@ -924,7 +921,6 @@
         write_static_stored_file(
             entries,
             total_key_size,
-            total_value_size,
             &path.join(format!("{seq:08}.sst")),
         )
     })?;
@@ -959,7 +955,7 @@
                     let mut current: Option<LookupEntry<'_>> = None;
                     let mut entries = Vec::new();
                     let mut last_entries = Vec::new();
-                    let mut last_entries_total_sizes = (0, 0);
+                    let mut last_entries_total_key_size = 0;
                     for entry in iter {
                         let entry = entry?;
@@ -975,15 +971,10 @@
                                     > DATA_THRESHOLD_PER_COMPACTED_FILE
                                     || entries.len() >= MAX_ENTRIES_PER_COMPACTED_FILE
                                 {
-                                    let (
-                                        selected_total_key_size,
-                                        selected_total_value_size,
-                                    ) = last_entries_total_sizes;
+                                    let selected_total_key_size =
+                                        last_entries_total_key_size;
                                     swap(&mut entries, &mut last_entries);
-                                    last_entries_total_sizes = (
-                                        total_key_size - key_size,
-                                        total_value_size - value_size,
-                                    );
+                                    last_entries_total_key_size = total_key_size - key_size;
                                     total_key_size = key_size;
                                     total_value_size = value_size;
@@ -997,7 +988,6 @@
                                         &self.parallel_scheduler,
                                         &entries,
                                         selected_total_key_size,
-                                        selected_total_value_size,
                                         path,
                                         seq,
                                     )?);
@@ -1015,7 +1005,8 @@
                     }
                     if let Some(entry) = current {
                         total_key_size += entry.key.len();
-                        total_value_size += entry.value.uncompressed_size_in_sst();
+                        // Obsolete as we no longer need total_value_size
+                        // total_value_size += entry.value.uncompressed_size_in_sst();
                         entries.push(entry);
                     }
 
@@ -1028,7 +1019,6 @@
                             &self.parallel_scheduler,
                             &entries,
                             total_key_size,
-                            total_value_size,
                             path,
                             seq,
                         )?);
@@ -1039,8 +1029,7 @@
 
                     if !last_entries.is_empty() {
                         last_entries.append(&mut entries);
-                        last_entries_total_sizes.0 += total_key_size;
-                        last_entries_total_sizes.1 += total_value_size;
+                        last_entries_total_key_size += total_key_size;
 
                         let (part1, part2) = last_entries.split_at(last_entries.len() / 2);
 
@@ -1052,8 +1041,7 @@
                             &self.parallel_scheduler,
                             part1,
                             // We don't know the exact sizes so we estimate them
-                            last_entries_total_sizes.0 / 2,
-                            last_entries_total_sizes.1 / 2,
+                            last_entries_total_key_size / 2,
                             path,
                             seq1,
                         )?);
@@ -1062,8 +1050,7 @@
                         new_sst_files.push(create_sst_file(
                             &self.parallel_scheduler,
                             part2,
-                            last_entries_total_sizes.0 / 2,
-                            last_entries_total_sizes.1 / 2,
+                            last_entries_total_key_size / 2,
                             path,
                             seq2,
                         )?);
@@ -1263,8 +1250,6 @@ impl TurboPersistence {
                         amqf_entries: amqf.len(),
                         key_compression_dictionary_size: entry
                             .key_compression_dictionary_length(),
-                        value_compression_dictionary_size: entry
-                            .value_compression_dictionary_length(),
                         block_count: entry.block_count(),
                     }
                 })
@@ -1302,6 +1287,5 @@ pub struct MetaFileEntryInfo {
     pub amqf_entries: usize,
     pub sst_size: u64,
     pub key_compression_dictionary_size: u16,
-    pub value_compression_dictionary_size: u16,
     pub block_count: u16,
 }
diff --git a/turbopack/crates/turbo-persistence/src/lookup_entry.rs b/turbopack/crates/turbo-persistence/src/lookup_entry.rs
index 1c5048998a01f0..c55adca31eaeae 100644
--- a/turbopack/crates/turbo-persistence/src/lookup_entry.rs
+++ b/turbopack/crates/turbo-persistence/src/lookup_entry.rs
@@ -22,7 +22,6 @@ pub enum LazyLookupValue<'l> {
     Medium {
         uncompressed_size: u32,
         block: &'l [u8],
-        dictionary: &'l [u8],
     },
 }
 
@@ -79,11 +78,9 @@ impl Entry for LookupEntry<'_> {
             LazyLookupValue::Medium {
                 uncompressed_size,
                 block,
-                dictionary,
             } => EntryValue::MediumCompressed {
                 uncompressed_size: *uncompressed_size,
                 block,
-                dictionary,
             },
         }
     }
diff --git a/turbopack/crates/turbo-persistence/src/meta_file.rs b/turbopack/crates/turbo-persistence/src/meta_file.rs
index 871cccd3cd5125..3c0f1b3aa755e9 100644
--- a/turbopack/crates/turbo-persistence/src/meta_file.rs
+++ b/turbopack/crates/turbo-persistence/src/meta_file.rs
@@ -144,10 +144,6 @@ impl MetaEntry {
         self.sst_data.key_compression_dictionary_length
     }
 
-    pub fn value_compression_dictionary_length(&self) -> u16 {
-        self.sst_data.value_compression_dictionary_length
-    }
-
     pub fn block_count(&self) -> u16 {
         self.sst_data.block_count
     }
@@ -222,7 +218,6 @@ impl MetaFile {
             sst_data: StaticSortedFileMetaData {
                 sequence_number: file.read_u32::<BE>()?,
                 key_compression_dictionary_length: file.read_u16::<BE>()?,
-                value_compression_dictionary_length: file.read_u16::<BE>()?,
                 block_count: file.read_u16::<BE>()?,
             },
             family,
diff --git a/turbopack/crates/turbo-persistence/src/meta_file_builder.rs b/turbopack/crates/turbo-persistence/src/meta_file_builder.rs
index afa402ac684735..67831753683717 100644
--- a/turbopack/crates/turbo-persistence/src/meta_file_builder.rs
+++ b/turbopack/crates/turbo-persistence/src/meta_file_builder.rs
@@ -58,7 +58,6 @@ impl<'a> MetaFileBuilder<'a> {
         for (sequence_number, sst) in &self.entries {
             file.write_u32::<BE>(*sequence_number)?;
             file.write_u16::<BE>(sst.key_compression_dictionary_length)?;
-            file.write_u16::<BE>(sst.value_compression_dictionary_length)?;
             file.write_u16::<BE>(sst.block_count)?;
             file.write_u64::<BE>(sst.min_hash)?;
             file.write_u64::<BE>(sst.max_hash)?;
diff --git a/turbopack/crates/turbo-persistence/src/static_sorted_file.rs b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs
index 55765d12861075..eac0b9b33d97aa 100644
--- a/turbopack/crates/turbo-persistence/src/static_sorted_file.rs
+++ b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs
@@ -65,8 +65,6 @@ pub struct StaticSortedFileMetaData {
     pub sequence_number: u32,
     /// The length of the key compression dictionary.
     pub key_compression_dictionary_length: u16,
-    /// The length of the value compression dictionary.
-    pub value_compression_dictionary_length: u16,
     /// The number of blocks in the SST file.
     pub block_count: u16,
 }
@@ -79,8 +77,7 @@ impl StaticSortedFileMetaData {
     pub fn blocks_start(&self) -> usize {
         let k: usize = self.key_compression_dictionary_length.into();
-        let v: usize = self.value_compression_dictionary_length.into();
-        k + v
+        k
     }
 
     pub fn key_compression_dictionary_range(&self) -> Range<usize> {
         let start: usize = 0;
@@ -87,12 +84,6 @@ impl StaticSortedFileMetaData {
         let end: usize = self.key_compression_dictionary_length.into();
         start..end
     }
-
-    pub fn value_compression_dictionary_range(&self) -> Range<usize> {
-        let start = self.key_compression_dictionary_length as usize;
-        let end = start + self.value_compression_dictionary_length as usize;
-        start..end
-    }
 }
 
 /// A memory mapped SST file.
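Note: the offset arithmetic above simplifies because the block region now starts directly after the key dictionary instead of after key plus value dictionaries. A standalone illustration of that layout math (the `Layout` type is a hypothetical stand-in for `StaticSortedFileMetaData`, not the crate's type):

```rust
use std::ops::Range;

/// Illustrative stand-in for the SST file layout metadata.
struct Layout {
    key_compression_dictionary_length: u16,
}

impl Layout {
    /// The key dictionary still sits at the very start of the file.
    fn key_compression_dictionary_range(&self) -> Range<usize> {
        0..usize::from(self.key_compression_dictionary_length)
    }

    /// Blocks begin immediately after the key dictionary; before this
    /// change the start was key length + value-dictionary length.
    fn blocks_start(&self) -> usize {
        usize::from(self.key_compression_dictionary_length)
    }
}

fn main() {
    let layout = Layout {
        key_compression_dictionary_length: 512,
    };
    assert_eq!(layout.key_compression_dictionary_range(), 0..512);
    assert_eq!(layout.blocks_start(), 512);
}
```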
@@ -310,7 +301,7 @@ impl StaticSortedFile {
         match value_block_cache.get_value_or_guard(&(self.meta.sequence_number, block), None) {
             GuardResult::Value(block) => block,
             GuardResult::Guard(guard) => {
-                let block = self.read_value_block(block)?;
+                let block = self.read_small_value_block(block)?;
                 let _ = guard.insert(block.clone());
                 block
             }
@@ -323,25 +314,26 @@ impl StaticSortedFile {
     fn read_key_block(&self, block_index: u16) -> Result<ArcSlice<u8>> {
         self.read_block(
             block_index,
-            &self.mmap[self.meta.key_compression_dictionary_range()],
+            Some(&self.mmap[self.meta.key_compression_dictionary_range()]),
             false,
         )
     }
 
+    /// Reads a small value block from the file.
+    fn read_small_value_block(&self, block_index: u16) -> Result<ArcSlice<u8>> {
+        self.read_block(block_index, None, false)
+    }
+
     /// Reads a value block from the file.
     fn read_value_block(&self, block_index: u16) -> Result<ArcSlice<u8>> {
-        self.read_block(
-            block_index,
-            &self.mmap[self.meta.value_compression_dictionary_range()],
-            false,
-        )
+        self.read_block(block_index, None, true)
     }
 
     /// Reads a block from the file.
     fn read_block(
         &self,
         block_index: u16,
-        compression_dictionary: &[u8],
+        compression_dictionary: Option<&[u8]>,
         long_term: bool,
     ) -> Result<ArcSlice<u8>> {
         let (uncompressed_length, block) = self.get_compressed_block(block_index)?;
@@ -349,7 +341,7 @@ impl StaticSortedFile {
         let buffer = decompress_into_arc(
             uncompressed_length,
             block,
-            Some(compression_dictionary),
+            compression_dictionary,
             long_term,
         )?;
         Ok(ArcSlice::from(buffer))
@@ -496,8 +488,6 @@ impl<'l> StaticSortedFileIter<'l> {
                     LazyLookupValue::Medium {
                         uncompressed_size,
                         block,
-                        dictionary: &self.this.mmap
-                            [self.this.meta.value_compression_dictionary_range()],
                     }
                 } else {
                     let value = self
diff --git a/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs b/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs
index 0f06e9f6f8eca3..66d0d043ce25f5 100644
--- a/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs
+++ b/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs
@@ -10,7 +10,7 @@ use anyhow::{Context, Result};
 use byteorder::{BE, ByteOrder, WriteBytesExt};
 
 use crate::{
-    compression::{compress_into_buffer, decompress_into_arc},
+    compression::compress_into_buffer,
     static_sorted_file::{
         BLOCK_TYPE_INDEX, BLOCK_TYPE_KEY, KEY_BLOCK_ENTRY_TYPE_BLOB, KEY_BLOCK_ENTRY_TYPE_DELETED,
         KEY_BLOCK_ENTRY_TYPE_MEDIUM, KEY_BLOCK_ENTRY_TYPE_SMALL,
@@ -31,23 +31,16 @@ const MAX_SMALL_VALUE_BLOCK_SIZE: usize = 64 * 1024;
 /// The aimed false positive rate for the AMQF
 const AMQF_FALSE_POSITIVE_RATE: f64 = 0.01;
 
-/// The maximum compression dictionary size for value blocks
-const VALUE_COMPRESSION_DICTIONARY_SIZE: usize = 64 * 1024 - 1;
 /// The maximum compression dictionary size for key and index blocks
 const KEY_COMPRESSION_DICTIONARY_SIZE: usize = 64 * 1024 - 1;
-/// The maximum bytes that should be selected as value samples to create a compression dictionary
-const VALUE_COMPRESSION_SAMPLES_SIZE: usize = 256 * 1024;
 /// The maximum bytes that should be selected as key samples to create a compression dictionary
 const KEY_COMPRESSION_SAMPLES_SIZE: usize = 256 * 1024;
-/// The minimum bytes that should be selected as value samples. Below that no compression dictionary
-/// is used.
-const MIN_VALUE_COMPRESSION_SAMPLES_SIZE: usize = 1024;
 /// The minimum bytes that should be selected as key samples. Below that no compression dictionary
 /// is used.
 const MIN_KEY_COMPRESSION_SAMPLES_SIZE: usize = 1024;
-/// The bytes that are used per key/value entry for a sample.
+/// The bytes that are used per key entry for a sample.
 const COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY: usize = 100;
-/// The minimum bytes that are used per key/value entry for a sample.
+/// The minimum bytes that are used per key entry for a sample.
 const MIN_COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY: usize = 16;
 
 /// Trait for entries from that SST files can be created
@@ -74,7 +67,6 @@ pub enum EntryValue<'l> {
     MediumCompressed {
         uncompressed_size: u32,
         block: &'l [u8],
-        dictionary: &'l [u8],
     },
     /// Large-sized value. They are stored in a blob file.
     Large { blob: u32 },
@@ -92,8 +84,6 @@ pub struct StaticSortedFileBuilderMeta<'a> {
     pub amqf: Cow<'a, [u8]>,
     /// The key compression dictionary
     pub key_compression_dictionary_length: u16,
-    /// The value compression dictionary
-    pub value_compression_dictionary_length: u16,
     /// The number of blocks in the SST file
     pub block_count: u16,
     /// The file size of the SST file
@@ -105,21 +95,18 @@ pub fn write_static_stored_file<E: Entry>(
     entries: &[E],
     total_key_size: usize,
-    total_value_size: usize,
     file: &Path,
 ) -> Result<(StaticSortedFileBuilderMeta<'static>, File)> {
     debug_assert!(entries.iter().map(|e| e.key_hash()).is_sorted());
 
     let mut file = BufWriter::new(File::create(file)?);
 
-    let capacity = get_compression_buffer_capacity(total_key_size, total_value_size);
+    let capacity = get_compression_buffer_capacity(total_key_size);
     // We use a shared buffer for all operations to avoid excessive allocations
     let mut buffer = Vec::with_capacity(capacity);
 
     let key_dict = compute_key_compression_dictionary(entries, total_key_size, &mut buffer)?;
-    let value_dict = compute_value_compression_dictionary(entries, total_value_size, &mut buffer)?;
 
     file.write_all(&key_dict)?;
-    file.write_all(&value_dict)?;
 
     let mut block_writer = BlockWriter::new(&mut file, &mut buffer);
@@ -129,7 +116,7 @@ pub fn write_static_stored_file<E: Entry>(
     let mut buffer = Vec::new();
 
     let min_hash = entries.first().map_or(u64::MAX, |e| e.key_hash());
-    let value_locations = write_value_blocks(entries, &value_dict, &mut block_writer, &mut buffer)
+    let value_locations = write_value_blocks(entries, &mut block_writer, &mut buffer)
         .context("Failed to write value blocks")?;
     let amqf = write_key_blocks_and_compute_amqf(
         entries,
@@ -152,7 +139,6 @@ pub fn write_static_stored_file<E: Entry>(
         max_hash,
         amqf: Cow::Owned(amqf),
         key_compression_dictionary_length: key_dict.len().try_into().unwrap(),
-        value_compression_dictionary_length: value_dict.len().try_into().unwrap(),
         block_count,
         size: file.stream_position()?,
         entries: entries.len() as u64,
@@ -160,17 +146,12 @@ pub fn write_static_stored_file<E: Entry>(
     Ok((meta, file.into_inner()?))
 }
 
-fn get_compression_buffer_capacity(total_key_size: usize, total_value_size: usize) -> usize {
+fn get_compression_buffer_capacity(total_key_size: usize) -> usize {
     let mut size = 0;
     if total_key_size >= MIN_KEY_COMPRESSION_SAMPLES_SIZE {
         let key_compression_samples_size = min(KEY_COMPRESSION_SAMPLES_SIZE, total_key_size / 16);
         size = key_compression_samples_size;
     }
-    if total_value_size >= MIN_VALUE_COMPRESSION_SAMPLES_SIZE {
-        let value_compression_samples_size =
-            min(VALUE_COMPRESSION_SAMPLES_SIZE, total_value_size / 16);
-        size = size.max(value_compression_samples_size);
-    }
     size
 }
 
@@ -223,53 +204,6 @@ fn compute_key_compression_dictionary<E: Entry>(
     Ok(result)
 }
 
-/// Computes compression dictionaries from values of all entries
-#[tracing::instrument(level = "trace", skip(entries))]
-fn compute_value_compression_dictionary<E: Entry>(
-    entries: &[E],
-    total_value_size: usize,
-    buffer: &mut Vec<u8>,
-) -> Result<Vec<u8>> {
-    if total_value_size < MIN_VALUE_COMPRESSION_SAMPLES_SIZE {
-        return Ok(Vec::new());
-    }
-    let value_compression_samples_size = min(VALUE_COMPRESSION_SAMPLES_SIZE, total_value_size / 16);
-    let mut sample_sizes = Vec::new();
-
-    // Limit the number of iterations to avoid infinite loops
-    let max_iterations = total_value_size / COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY * 2;
-    for i in 0..max_iterations {
-        let entry = &entries[i % entries.len()];
-        let remaining = value_compression_samples_size - buffer.len();
-        if remaining < MIN_COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY {
-            break;
-        }
-        if let EntryValue::Small { value } | EntryValue::Medium { value } = entry.value() {
-            let len = value.len();
-            if len >= MIN_COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY {
-                let used_len = min(remaining, COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY);
-                if len <= used_len {
-                    sample_sizes.push(len);
-                    buffer.extend_from_slice(value);
-                } else {
-                    sample_sizes.push(used_len);
-                    let p = buffer.len() % (len - used_len);
-                    buffer.extend_from_slice(&value[p..p + used_len]);
-                };
-            }
-        }
-    }
-    debug_assert!(buffer.len() == sample_sizes.iter().sum::<usize>());
-    let result = if buffer.len() > MIN_VALUE_COMPRESSION_SAMPLES_SIZE && sample_sizes.len() > 5 {
-        zstd::dict::from_continuous(buffer, &sample_sizes, VALUE_COMPRESSION_DICTIONARY_SIZE)
-            .context("Value dictionary creation failed")?
-    } else {
-        Vec::new()
-    };
-    buffer.clear();
-    Ok(result)
-}
-
 struct BlockWriter<'l> {
     buffer: &'l mut Vec<u8>,
     block_offsets: Vec<u32>,
@@ -301,23 +235,29 @@ impl<'l> BlockWriter<'l> {
     #[tracing::instrument(level = "trace", skip_all)]
     fn write_key_block(&mut self, block: &[u8], dict: &[u8]) -> Result<()> {
-        self.write_block(block, dict, false)
+        self.write_block(block, Some(dict), false)
             .context("Failed to write key block")
     }
 
     #[tracing::instrument(level = "trace", skip_all)]
     fn write_index_block(&mut self, block: &[u8], dict: &[u8]) -> Result<()> {
-        self.write_block(block, dict, false)
+        self.write_block(block, Some(dict), false)
             .context("Failed to write index block")
     }
 
     #[tracing::instrument(level = "trace", skip_all)]
-    fn write_value_block(&mut self, block: &[u8], dict: &[u8]) -> Result<()> {
-        self.write_block(block, dict, false)
+    fn write_small_value_block(&mut self, block: &[u8]) -> Result<()> {
+        self.write_block(block, None, false)
+            .context("Failed to write small value block")
+    }
+
+    #[tracing::instrument(level = "trace", skip_all)]
+    fn write_value_block(&mut self, block: &[u8]) -> Result<()> {
+        self.write_block(block, None, true)
             .context("Failed to write value block")
     }
 
-    fn write_block(&mut self, block: &[u8], dict: &[u8], long_term: bool) -> Result<()> {
+    fn write_block(&mut self, block: &[u8], dict: Option<&[u8]>, long_term: bool) -> Result<()> {
         let uncompressed_size = block.len().try_into().unwrap();
         self.compress_block_into_buffer(block, dict, long_term)?;
         let len = (self.buffer.len() + 4).try_into().unwrap();
@@ -340,14 +280,34 @@ impl<'l> BlockWriter<'l> {
         Ok(())
     }
 
+    fn write_compressed_block(&mut self, uncompressed_size: u32, block: &[u8]) -> Result<()> {
+        let len = (block.len() + 4).try_into().unwrap();
+        let offset = self
+            .block_offsets
+            .last()
+            .copied()
+            .unwrap_or_default()
+            .checked_add(len)
+            .expect("Block offset overflow");
+        self.block_offsets.push(offset);
+
+        self.writer
+            .write_u32::<BE>(uncompressed_size)
+            .context("Failed to write uncompressed size")?;
+        self.writer
+            .write_all(block)
+            .context("Failed to write compressed block")?;
+        Ok(())
+    }
+
     /// Compresses a block with a compression dictionary.
     fn compress_block_into_buffer(
         &mut self,
         block: &[u8],
-        dict: &[u8],
+        dict: Option<&[u8]>,
         long_term: bool,
     ) -> Result<()> {
-        compress_into_buffer(block, Some(dict), long_term, self.buffer)
+        compress_into_buffer(block, dict, long_term, self.buffer)
     }
 }
 
@@ -355,7 +315,6 @@ impl<'l> BlockWriter<'l> {
 #[tracing::instrument(level = "trace", skip_all)]
 fn write_value_blocks(
     entries: &[impl Entry],
-    value_compression_dictionary: &[u8],
     writer: &mut BlockWriter<'_>,
     buffer: &mut Vec<u8>,
 ) -> Result<Vec<(u16, u32)>> {
@@ -378,7 +337,7 @@ fn write_value_blocks(
                         value_locations[j].0 = block_index;
                     }
                 }
-                writer.write_value_block(buffer, value_compression_dictionary)?;
+                writer.write_small_value_block(buffer)?;
                 buffer.clear();
                 current_block_start = i;
                 current_block_size = 0;
@@ -391,19 +350,15 @@ fn write_value_blocks(
             EntryValue::Medium { value } => {
                 let block_index = writer.next_block_index();
                 value_locations.push((block_index, 0));
-                writer.write_value_block(value, value_compression_dictionary)?;
+                writer.write_value_block(value)?;
             }
             EntryValue::MediumCompressed {
                 uncompressed_size,
                 block,
-                dictionary,
             } => {
                 let block_index = writer.next_block_index();
                 value_locations.push((block_index, 0));
-                // Recompress block with a different dictionary
-                let decompressed =
-                    decompress_into_arc(uncompressed_size, block, Some(dictionary), false)?;
-                writer.write_value_block(&decompressed, value_compression_dictionary)?;
+                writer.write_compressed_block(uncompressed_size, block)?;
             }
             EntryValue::Deleted | EntryValue::Large { .. } => {
                 value_locations.push((0, 0));
@@ -419,7 +374,7 @@ fn write_value_blocks(
                 value_locations[j].0 = block_index;
             }
         }
-        writer.write_value_block(buffer, value_compression_dictionary)?;
+        writer.write_small_value_block(buffer)?;
         buffer.clear();
     }
diff --git a/turbopack/crates/turbo-persistence/src/write_batch.rs b/turbopack/crates/turbo-persistence/src/write_batch.rs
index 9696abc9a9a57d..4426f8795af441 100644
--- a/turbopack/crates/turbo-persistence/src/write_batch.rs
+++ b/turbopack/crates/turbo-persistence/src/write_batch.rs
@@ -407,17 +405,15 @@ impl<K: StoreKey + Send + Sync, const FAMILIES: usize> WriteBatch<K, FAMILIES>
     fn create_sst_file(
         &self,
         family: u32,
-        collector_data: (&[CollectorEntry<K>], usize, usize),
+        collector_data: (&[CollectorEntry<K>], usize),
     ) -> Result<(u32, File)> {
-        let (entries, total_key_size, total_value_size) = collector_data;
+        let (entries, total_key_size) = collector_data;
         let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
 
         let path = self.db_path.join(format!("{seq:08}.sst"));
         let (meta, file) = self
             .parallel_scheduler
-            .block_in_place(|| {
-                write_static_stored_file(entries, total_key_size, total_value_size, &path)
-            })
+            .block_in_place(|| write_static_stored_file(entries, total_key_size, &path))
             .with_context(|| format!("Unable to write SST file {seq:08}.sst"))?;
 
         #[cfg(feature = "verify_sst_content")]
@@ -440,7 +438,6 @@ impl<K: StoreKey + Send + Sync, const FAMILIES: usize> WriteBatch<K, FAMILIES>
             StaticSortedFileMetaData {
                 sequence_number: seq,
                 key_compression_dictionary_length: meta.key_compression_dictionary_length,
-                value_compression_dictionary_length: meta.value_compression_dictionary_length,
                 block_count: meta.block_count,
             },
         )?;