diff --git a/linker-diff/src/asm_diff.rs b/linker-diff/src/asm_diff.rs
index 111533b..8d7eb0d 100644
--- a/linker-diff/src/asm_diff.rs
+++ b/linker-diff/src/asm_diff.rs
@@ -1332,7 +1332,7 @@ impl<'data> AddressIndex<'data> {
         let phoff = header.e_phoff.get(e);
         let phnum = header.e_phnum.get(e);
         let file_header_size =
-            core::mem::size_of::>() as u64;
+            size_of::>() as u64;
         for raw_seg in elf_file.elf_program_headers() {
             if raw_seg.p_type(e) != object::elf::PT_LOAD {
                 continue;
@@ -1347,7 +1347,7 @@ impl<'data> AddressIndex<'data> {
             if file_range.contains(&phoff) {
                 let mem_start = phoff - file_offset + seg_address;
                 let byte_len = u64::from(phnum)
-                    * core::mem::size_of::>() as u64;
+                    * size_of::>() as u64;
                 self.program_header_addresses = mem_start..(mem_start + byte_len);
             }
         }
@@ -1364,7 +1364,7 @@ impl<'data> AddressIndex<'data> {
             return Ok(());
         };
         let data = got.data()?;
-        let entry_size = core::mem::size_of::<u64>();
+        let entry_size = size_of::<u64>();
         let entries: &[u64] = object::slice_from_bytes(data, data.len() / entry_size)
             .unwrap()
             .0;
diff --git a/linker-diff/src/eh_frame_diff.rs b/linker-diff/src/eh_frame_diff.rs
index 3726d62..10eb2cc 100644
--- a/linker-diff/src/eh_frame_diff.rs
+++ b/linker-diff/src/eh_frame_diff.rs
@@ -15,6 +15,7 @@ use object::ObjectSymbol;
 use object::SymbolKind;
 use std::collections::HashMap;
 use std::collections::HashSet;
+use std::mem::offset_of;

 pub(crate) fn report_diffs(report: &mut crate::Report, objects: &[crate::Object]) {
     report.add_diffs(crate::header_diff::diff_fields(
@@ -45,9 +46,8 @@ fn read_eh_frame_hdr_fields(object: &crate::Object) -> Result {
     }

     let data = section.data()?;
-    let header: &EhFrameHdr = bytemuck::from_bytes(&data[..core::mem::size_of::<EhFrameHdr>()]);
-    let header_entries: &[EhFrameHdrEntry] =
-        bytemuck::cast_slice(&data[core::mem::size_of::<EhFrameHdr>()..]);
+    let header: &EhFrameHdr = bytemuck::from_bytes(&data[..size_of::<EhFrameHdr>()]);
+    let header_entries: &[EhFrameHdrEntry] = bytemuck::cast_slice(&data[size_of::<EhFrameHdr>()..]);

     values.insert("version", header.version, Converter::None, object);
     values.insert(
@@ -71,7 +71,7 @@ fn read_eh_frame_hdr_fields(object: &crate::Object) -> Result {
     values.insert(
         "frame_pointer",
         (address1 as i64 + i64::from(header.frame_pointer)) as u64
-            + core::mem::offset_of!(EhFrameHdr, frame_pointer) as u64,
+            + offset_of!(EhFrameHdr, frame_pointer) as u64,
         Converter::SectionAddress,
         object,
     );
@@ -117,7 +117,7 @@ fn verify_frames(
     let eh_frame_base = eh_frame_section.address();
     let eh_frame_data = eh_frame_section.data()?;
     let mut offset = 0;
-    const PREFIX_LEN: usize = core::mem::size_of::<EhFrameEntryPrefix>();
+    const PREFIX_LEN: usize = size_of::<EhFrameEntryPrefix>();
     while offset + PREFIX_LEN <= eh_frame_data.len() {
         let prefix: EhFrameEntryPrefix =
             bytemuck::pod_read_unaligned(&eh_frame_data[offset..offset + PREFIX_LEN]);
@@ -139,7 +139,7 @@ fn verify_frames(
                 );
             }
         }
-        offset += core::mem::size_of_val(&prefix.length) + prefix.length as usize;
+        offset += size_of_val(&prefix.length) + prefix.length as usize;
     }

     // TODO: Enable this or clean it it up.
diff --git a/linker-diff/src/gnu_hash.rs b/linker-diff/src/gnu_hash.rs
index 99a605b..8292978 100644
--- a/linker-diff/src/gnu_hash.rs
+++ b/linker-diff/src/gnu_hash.rs
@@ -123,7 +123,7 @@ fn lookup_symbol(
     let e = LittleEndian;
     let symbol_base = header.symbol_base.get(e) as usize;
     let hash = object::elf::gnu_hash(sym_name);
-    let elf_class_bits = core::mem::size_of::() as u32 * 8;
+    let elf_class_bits = size_of::() as u32 * 8;
     let bloom_shift = header.bloom_shift.get(e);
     let bloom_count = bloom_values.len() as u32;
     let bucket_count = buckets.len() as u32;
diff --git a/linker-diff/src/lib.rs b/linker-diff/src/lib.rs
index 50b8cee..d86ec3d 100644
--- a/linker-diff/src/lib.rs
+++ b/linker-diff/src/lib.rs
@@ -662,7 +662,7 @@ impl<'data> NameIndex<'data> {
 }

 fn slice_from_all_bytes(data: &[u8]) -> &[T] {
-    object::slice_from_bytes(data, data.len() / core::mem::size_of::<T>())
+    object::slice_from_bytes(data, data.len() / size_of::<T>())
         .unwrap()
         .0
 }
diff --git a/wild_lib/src/archive.rs b/wild_lib/src/archive.rs
index 7186dc5..2afc042 100644
--- a/wild_lib/src/archive.rs
+++ b/wild_lib/src/archive.rs
@@ -69,10 +69,10 @@ struct EntryHeader {
 }

 const _ASSERTS: () = {
-    assert!(core::mem::size_of::<EntryHeader>() == 60);
+    assert!(size_of::<EntryHeader>() == 60);
 };

-const HEADER_SIZE: usize = core::mem::size_of::<EntryHeader>();
+const HEADER_SIZE: usize = size_of::<EntryHeader>();

 impl<'data> ArchiveIterator<'data> {
     /// Create an iterator from the bytes of the whole archive. The supplied bytes should start with
diff --git a/wild_lib/src/elf.rs b/wild_lib/src/elf.rs
index ecd29ec..1472744 100644
--- a/wild_lib/src/elf.rs
+++ b/wild_lib/src/elf.rs
@@ -221,7 +221,7 @@ impl<'data> File<'data> {
             return get_entries(
                 self.data,
                 header.p_offset(e) as usize,
-                header.p_filesz(e) as usize / core::mem::size_of::(),
+                header.p_filesz(e) as usize / size_of::(),
             )
             .context("Failed to read dynamic table");
         }
@@ -261,7 +261,7 @@ pub(crate) fn get_entries(
     offset: usize,
     entry_count: usize,
 ) -> Result<&[T]> {
-    debug_assert_eq!(core::mem::align_of::<T>(), 1);
+    debug_assert_eq!(align_of::<T>(), 1);
     if offset >= data.len() {
         bail!("Invalid offset 0x{offset}");
     }
@@ -270,7 +270,7 @@ pub(crate) fn get_entries(
             anyhow!(
                 "Tried to extract 0x{:x} entries of size 0x{:x} from 0x{:x}",
                 entry_count,
-                core::mem::size_of::<T>(),
+                size_of::<T>(),
                 data.len(),
             )
         })?
@@ -351,14 +351,14 @@
 pub(crate) const FILE_HEADER_SIZE: u16 = 0x40;
 pub(crate) const PROGRAM_HEADER_SIZE: u16 = 0x38;
 pub(crate) const SECTION_HEADER_SIZE: u16 = 0x40;
 pub(crate) const COMPRESSION_HEADER_SIZE: usize =
-    core::mem::size_of::>();
+    size_of::>();

 pub(crate) const GOT_ENTRY_SIZE: u64 = 0x8;
 pub(crate) const PLT_ENTRY_SIZE: u64 = PLT_ENTRY_TEMPLATE.len() as u64;
 pub(crate) const RELA_ENTRY_SIZE: u64 = 0x18;
-pub(crate) const SYMTAB_ENTRY_SIZE: u64 = core::mem::size_of::() as u64;
-pub(crate) const GNU_VERSION_ENTRY_SIZE: u64 = core::mem::size_of::() as u64;
+pub(crate) const SYMTAB_ENTRY_SIZE: u64 = size_of::() as u64;
+pub(crate) const GNU_VERSION_ENTRY_SIZE: u64 = size_of::() as u64;

 pub(crate) const PLT_ENTRY_TEMPLATE: &[u8] = &[
     0xf3, 0x0f, 0x1e, 0xfa, // endbr64
@@ -473,7 +473,7 @@ impl RelocationKindInfo {
 }

 pub(crate) fn slice_from_all_bytes_mut(data: &mut [u8]) -> &mut [T] {
-    object::slice_from_bytes_mut(data, data.len() / core::mem::size_of::<T>())
+    object::slice_from_bytes_mut(data, data.len() / size_of::<T>())
         .unwrap()
         .0
 }
diff --git a/wild_lib/src/elf_writer.rs b/wild_lib/src/elf_writer.rs
index 7d333fb..b84b467 100644
--- a/wild_lib/src/elf_writer.rs
+++ b/wild_lib/src/elf_writer.rs
@@ -389,7 +389,7 @@ fn split_output_into_sections<'out, S: StorageModel>(

 #[tracing::instrument(skip_all, name = "Sort .eh_frame_hdr")]
 fn sort_eh_frame_hdr_entries(eh_frame_hdr: &mut [u8]) {
-    let entry_bytes = &mut eh_frame_hdr[core::mem::size_of::()..];
+    let entry_bytes = &mut eh_frame_hdr[size_of::()..];
     let entries: &mut [elf::EhFrameHdrEntry] = bytemuck::cast_slice_mut(entry_bytes);
     entries.sort_by_key(|e| e.frame_ptr);
 }
@@ -535,7 +535,7 @@ impl<'out> VersionWriter<'out> {
     }

     fn take_verneed(&mut self) -> Result<&'out mut Verneed> {
-        let bytes = self.take_bytes(core::mem::size_of::<Verneed>())?;
+        let bytes = self.take_bytes(size_of::<Verneed>())?;
         Ok(object::from_bytes_mut(bytes)
             .map_err(|_| anyhow!("Incorrect .gnu.version_r alignment"))?
             .0)
@@ -543,7 +543,7 @@ impl<'out> VersionWriter<'out> {

     fn take_auxes(&mut self, version_count: u16) -> Result<&'out mut [Vernaux]> {
         let bytes =
-            self.take_bytes(core::mem::size_of::<Vernaux>() * usize::from(version_count))?;
+            self.take_bytes(size_of::<Vernaux>() * usize::from(version_count))?;
         object::slice_from_all_bytes_mut::<Vernaux>(bytes)
             .map_err(|_| anyhow!("Invalid .gnu.version_r allocation"))
     }
@@ -938,7 +938,7 @@ impl<'data, 'layout, 'out> TableWriter<'data, 'layout, 'out> {
     fn take_eh_frame_hdr(&mut self) -> &'out mut EhFrameHdr {
         let entry_bytes = crate::slice::slice_take_prefix_mut(
             &mut self.eh_frame_hdr,
-            core::mem::size_of::<EhFrameHdr>(),
+            size_of::<EhFrameHdr>(),
         );
         bytemuck::from_bytes_mut(entry_bytes)
     }
@@ -949,7 +949,7 @@ impl<'data, 'layout, 'out> TableWriter<'data, 'layout, 'out> {
         }
         let entry_bytes = crate::slice::slice_take_prefix_mut(
             &mut self.eh_frame_hdr,
-            core::mem::size_of::(),
+            size_of::(),
         );
         Some(bytemuck::from_bytes_mut(entry_bytes))
     }
@@ -1420,7 +1420,7 @@ impl<'data> ObjectLayout<'data> {
     ) -> Result {
         let eh_frame_section = self.object.section(eh_frame_section_index)?;
         let data = self.object.raw_section_data(eh_frame_section)?;
-        const PREFIX_LEN: usize = core::mem::size_of::<elf::EhFrameEntryPrefix>();
+        const PREFIX_LEN: usize = size_of::<elf::EhFrameEntryPrefix>();
         let e = LittleEndian;
         let section_flags = SectionFlags::from_header(eh_frame_section);
         let mut relocations = self
@@ -1439,7 +1439,7 @@ impl<'data> ObjectLayout<'data> {
         while input_pos + PREFIX_LEN <= data.len() {
             let prefix: elf::EhFrameEntryPrefix =
                 bytemuck::pod_read_unaligned(&data[input_pos..input_pos + PREFIX_LEN]);
-            let size = core::mem::size_of_val(&prefix.length) + prefix.length as usize;
+            let size = size_of_val(&prefix.length) + prefix.length as usize;
             let next_input_pos = input_pos + size;
             let next_output_pos = output_pos + size;
             if next_input_pos > data.len() {
@@ -2075,7 +2075,7 @@ fn write_gnu_hash_tables(

     let mut sym_defs = epilogue.dynamic_symbol_definitions.iter().peekable();

-    let elf_class_bits = core::mem::size_of::() as u32 * 8;
+    let elf_class_bits = size_of::() as u32 * 8;
     let mut start_of_chain = true;

     for (i, chain_out) in chains.iter_mut().enumerate() {
@@ -2309,8 +2309,8 @@ fn write_eh_frame_hdr(
 fn eh_frame_hdr_entry_count(layout: &Layout) -> Result {
     let hdr_sec = layout.section_layouts.get(output_section_id::EH_FRAME_HDR);
     u32::try_from(
-        (hdr_sec.mem_size - core::mem::size_of::() as u64)
-            / core::mem::size_of::() as u64,
+        (hdr_sec.mem_size - size_of::() as u64)
+            / size_of::() as u64,
     )
     .context(".eh_frame_hdr entries overflowed 32 bits")
 }
@@ -2371,7 +2371,7 @@ const EPILOGUE_DYNAMIC_ENTRY_WRITERS: &[DynamicEntryWriter] = &[
         inputs.vma_of_section(output_section_id::DYNSYM)
     }),
     DynamicEntryWriter::new(object::elf::DT_SYMENT, |_inputs| {
-        core::mem::size_of::() as u64
+        size_of::() as u64
     }),
     DynamicEntryWriter::optional(
         object::elf::DT_VERNEED,
@@ -2449,7 +2449,7 @@ const EPILOGUE_DYNAMIC_ENTRY_WRITERS: &[DynamicEntryWriter] = &[
             .section_part_layouts
             .get(part_id::RELA_DYN_RELATIVE)
             .mem_size
-            / core::mem::size_of::() as u64
+            / size_of::() as u64
     }),
     DynamicEntryWriter::new(object::elf::DT_GNU_HASH, |inputs| {
         inputs.vma_of_section(output_section_id::GNU_HASH)
     }),
@@ -2731,15 +2731,15 @@ impl<'data> DynamicLayout<'data> {
         let next_verneed_offset = if self.is_last_verneed {
             0
         } else {
-            (core::mem::size_of::()
-                + core::mem::size_of::() * verdef_info.version_count as usize)
+            (size_of::()
+                + size_of::() * verdef_info.version_count as usize)
                 as u32
         };
         ver_need.vn_version.set(e, 1);
         ver_need.vn_cnt.set(e, verdef_info.version_count);
         ver_need
             .vn_aux
-            .set(e, core::mem::size_of::() as u32);
+            .set(e, size_of::() as u32);
         ver_need.vn_next.set(e, next_verneed_offset);

         let auxes = table_writer
@@ -2778,7 +2778,7 @@ impl<'data> DynamicLayout<'data> {
             let vna_next = if is_last_aux {
                 0
             } else {
-                core::mem::size_of::() as u32
+                size_of::() as u32
             };
             aux_out.vna_next.set(e, vna_next);
             aux_out.vna_other.set(e, output_version);
@@ -2884,7 +2884,7 @@ pub(crate) fn verify_resolution_allocation(
         total_bytes_allocated = alignment.align_up(total_bytes_allocated) + size;
     });
     total_bytes_allocated = crate::alignment::USIZE.align_up(total_bytes_allocated);
-    let mut all_mem = vec![0_u64; total_bytes_allocated as usize / core::mem::size_of::<u64>()];
+    let mut all_mem = vec![0_u64; total_bytes_allocated as usize / size_of::<u64>()];
     let mut all_mem: &mut [u8] = bytemuck::cast_slice_mut(all_mem.as_mut_slice());
     let mut offset = 0;
     let mut buffers = mem_sizes.output_order_map(output_sections, |_part_id, alignment, &size| {
diff --git a/wild_lib/src/grouping.rs b/wild_lib/src/grouping.rs
index edf8611..0d5f5d4 100644
--- a/wild_lib/src/grouping.rs
+++ b/wild_lib/src/grouping.rs
@@ -3,6 +3,7 @@ use crate::input_data::FileId;
 use crate::parsing::ParsedInput;
 use crate::sharding::ShardKey as _;
 use crate::symbol_db::SymbolId;
+use std::mem::replace;

 pub(crate) struct Group<'data> {
     pub(crate) files: Vec>,
@@ -39,7 +40,7 @@ pub(crate) fn group_files<'data>(files: Vec>, args: &Args) ->
             || (!group.files.is_empty() && num_symbols_with_file > symbols_per_group)
         {
             // Start a new group.
-            groups.push(core::mem::replace(&mut group, Group::empty()));
+            groups.push(replace(&mut group, Group::empty()));
             num_symbols_with_file = file.symbol_id_range().len();
         }
         num_symbols = num_symbols_with_file;
diff --git a/wild_lib/src/layout.rs b/wild_lib/src/layout.rs
index c53a154..8f2b1d6 100644
--- a/wild_lib/src/layout.rs
+++ b/wild_lib/src/layout.rs
@@ -77,7 +77,10 @@ use object::SectionIndex;
 use smallvec::SmallVec;
 use std::ffi::CString;
 use std::fmt::Display;
+use std::mem::replace;
 use std::mem::size_of;
+use std::mem::swap;
+use std::mem::take;
 use std::num::NonZeroU32;
 use std::num::NonZeroU64;
 use std::sync::atomic;
@@ -814,8 +817,7 @@ impl CommonGroupState<'_> {
         if *self.mem_sizes.get(part_id::GNU_VERSION) > 0 {
             let num_dynamic_symbols =
                 self.mem_sizes.get(part_id::DYNSYM) / crate::elf::SYMTAB_ENTRY_SIZE;
-            let num_versym =
-                self.mem_sizes.get(part_id::GNU_VERSION) / core::mem::size_of::() as u64;
+            let num_versym = self.mem_sizes.get(part_id::GNU_VERSION) / size_of::() as u64;
             if num_versym != num_dynamic_symbols {
                 bail!(
                     "Object has {num_dynamic_symbols} dynamic symbols, but \
@@ -1683,7 +1685,7 @@ fn find_required_sections<'data, S: StorageModel, A: Arch>(
             });
         }
     });
-    let mut errors: Vec = core::mem::take(resources.errors.lock().unwrap().as_mut());
+    let mut errors: Vec = take(resources.errors.lock().unwrap().as_mut());
     // TODO: Figure out good way to report more than one error.
     if let Some(error) = errors.pop() {
         return Err(error);
     }
@@ -1762,7 +1764,7 @@ impl<'data> GroupState<'data> {
                 slot.worker = Some(self);
                 return;
             }
-            core::mem::swap(&mut slot.work, &mut self.queue.local_work);
+            swap(&mut slot.work, &mut self.queue.local_work);
         };
     }
 }
@@ -2463,10 +2465,7 @@ impl PreludeLayoutState {
         }

         if symbol_db.args.should_write_eh_frame_hdr {
-            common.allocate(
-                part_id::EH_FRAME_HDR,
-                core::mem::size_of::() as u64,
-            );
+            common.allocate(part_id::EH_FRAME_HDR, size_of::() as u64);
         }

         Ok(())
     }
@@ -2741,7 +2740,7 @@ impl<'data> EpilogueLayoutState<'data> {
         }

         if symbol_db.args.needs_dynamic() {
-            let dynamic_entry_size = core::mem::size_of::();
+            let dynamic_entry_size = size_of::();
             common.allocate(
                 part_id::DYNAMIC,
                 (elf_writer::NUM_EPILOGUE_DYNAMIC_ENTRIES * dynamic_entry_size) as u64,
@@ -2794,10 +2793,10 @@ impl<'data> EpilogueLayoutState<'data> {
             let num_blume = 1;
             common.allocate(
                 part_id::GNU_HASH,
-                (core::mem::size_of::()
-                    + core::mem::size_of::() * num_blume
-                    + core::mem::size_of::() * gnu_hash_layout.bucket_count as usize
-                    + core::mem::size_of::() * num_defs) as u64,
+                (size_of::()
+                    + size_of::() * num_blume
+                    + size_of::() * gnu_hash_layout.bucket_count as usize
+                    + size_of::() * num_defs) as u64,
             );
             self.gnu_hash_layout = Some(gnu_hash_layout);
         }
@@ -3050,7 +3049,7 @@ impl<'data> ObjectLayoutState<'data> {
         if resources.symbol_db.args.should_write_eh_frame_hdr {
             common.allocate(
                 part_id::EH_FRAME_HDR,
-                core::mem::size_of::() as u64 * num_frames,
+                size_of::() as u64 * num_frames,
             );
         }

@@ -3378,7 +3377,7 @@ fn process_eh_frame_data(
 ) -> Result {
     let eh_frame_section = object.object.section(eh_frame_section_index)?;
     let data = object.object.raw_section_data(eh_frame_section)?;
-    const PREFIX_LEN: usize = core::mem::size_of::<elf::EhFrameEntryPrefix>();
+    const PREFIX_LEN: usize = size_of::<elf::EhFrameEntryPrefix>();
     let e = LittleEndian;
     let relocations = object.object.relocations(eh_frame_section_index)?;
     let mut rel_iter = relocations.iter().enumerate().peekable();
@@ -3391,7 +3390,7 @@ fn process_eh_frame_data(
             // See https://www.airs.com/blog/archives/170
             let prefix: elf::EhFrameEntryPrefix =
                 bytemuck::pod_read_unaligned(&data[offset..offset + PREFIX_LEN]);
-            let size = core::mem::size_of_val(&prefix.length) + prefix.length as usize;
+            let size = size_of_val(&prefix.length) + prefix.length as usize;
             let next_offset = offset + size;
             if next_offset > data.len() {
                 bail!("Invalid .eh_frame data");
@@ -3467,7 +3466,7 @@ fn process_eh_frame_data(
             // Update our unloaded section to point to our new frame. Our frame will then in
             // turn point to whatever the section pointed to before.
             let previous_frame_for_section =
-                core::mem::replace(&mut unloaded.last_frame_index, Some(frame_index));
+                replace(&mut unloaded.last_frame_index, Some(frame_index));

             object.exception_frames.push(ExceptionFrame {
                 relocations: &relocations[rel_start_index..rel_end_index],
@@ -3616,9 +3615,9 @@ impl<'data> resolution::ResolvedFile<'data> {
                 FileLayoutState::Prelude(PreludeLayoutState::new(s))
             }
             resolution::ResolvedFile::NotLoaded(s) => FileLayoutState::NotLoaded(s),
-            resolution::ResolvedFile::Epilogue(s) => FileLayoutState::Epilogue(
-                EpilogueLayoutState::new(s, core::mem::take(custom_start_stop_defs)),
-            ),
+            resolution::ResolvedFile::Epilogue(s) => {
+                FileLayoutState::Epilogue(EpilogueLayoutState::new(s, take(custom_start_stop_defs)))
+            }
         }
     }
 }
@@ -3785,7 +3784,7 @@ impl<'data> DynamicLayoutState<'data> {
         }
         common.allocate(
             part_id::DYNAMIC,
-            core::mem::size_of::() as u64,
+            size_of::() as u64,
         );
         common.allocate(part_id::DYNSTR, self.lib_name.len() as u64 + 1);
         self.request_all_undefined_symbols(common, resources, queue)
@@ -3863,9 +3862,8 @@ impl<'data> DynamicLayoutState<'data> {
         common.allocate(part_id::DYNSTR, base_size);
         common.allocate(
             part_id::GNU_VERSION_R,
-            core::mem::size_of::() as u64
-                + u64::from(version_count)
-                    * core::mem::size_of::() as u64,
+            size_of::() as u64
+                + u64::from(version_count) * size_of::() as u64,
         );

         self.verdef_info = Some(VerdefInfo {
@@ -3947,9 +3945,8 @@ impl<'data> DynamicLayoutState<'data> {
         if let Some(v) = self.verdef_info.as_ref() {
             memory_offsets.increment(
                 part_id::GNU_VERSION_R,
-                core::mem::size_of::() as u64
-                    + u64::from(v.version_count)
-                        * core::mem::size_of::() as u64,
+                size_of::() as u64
+                    + u64::from(v.version_count) * size_of::() as u64,
             );
         }

diff --git a/wild_lib/src/output_section_id.rs b/wild_lib/src/output_section_id.rs
index 65edd76..19f6fcf 100644
--- a/wild_lib/src/output_section_id.rs
+++ b/wild_lib/src/output_section_id.rs
@@ -35,7 +35,6 @@ use crate::program_segments::ProgramSegmentId;
 use crate::resolution::SectionSlot;
 use ahash::AHashMap;
 use anyhow::anyhow;
-use core::mem::size_of;
 use linker_utils::elf::shf;
 use linker_utils::elf::sht;
 use linker_utils::elf::SectionFlags;
@@ -318,7 +317,7 @@ const SECTION_DEFINITIONS: [BuiltInSectionDetails; NUM_BUILT_IN_SECTIONS] = [
         name: SectionName(b".dynamic"),
         ty: sht::DYNAMIC,
         section_flags: shf::ALLOC.with(shf::WRITE),
-        element_size: core::mem::size_of::() as u64,
+        element_size: size_of::() as u64,
         link: &[DYNSTR],
         min_alignment: alignment::USIZE,
         start_symbol_name: Some("_DYNAMIC"),
@@ -359,7 +358,7 @@ const SECTION_DEFINITIONS: [BuiltInSectionDetails; NUM_BUILT_IN_SECTIONS] = [
         name: SectionName(b".gnu.version"),
         ty: sht::GNU_VERSYM,
         section_flags: shf::ALLOC,
-        element_size: core::mem::size_of::() as u64,
+        element_size: size_of::() as u64,
         min_alignment: alignment::VERSYM,
         link: &[DYNSYM],
         ..DEFAULT_DEFS
@@ -403,7 +402,7 @@ const SECTION_DEFINITIONS: [BuiltInSectionDetails; NUM_BUILT_IN_SECTIONS] = [
         name: SectionName(b".init_array"),
         ty: sht::INIT_ARRAY,
         section_flags: shf::ALLOC.with(shf::WRITE).with(shf::GNU_RETAIN),
-        element_size: core::mem::size_of::() as u64,
+        element_size: size_of::() as u64,
         start_symbol_name: Some("__init_array_start"),
         end_symbol_name: Some("__init_array_end"),
         ..DEFAULT_DEFS
@@ -412,7 +411,7 @@ const SECTION_DEFINITIONS: [BuiltInSectionDetails; NUM_BUILT_IN_SECTIONS] = [
         name: SectionName(b".fini_array"),
         ty: sht::FINI_ARRAY,
         section_flags: shf::ALLOC.with(shf::WRITE).with(shf::GNU_RETAIN),
-        element_size: core::mem::size_of::() as u64,
+        element_size: size_of::() as u64,
         start_symbol_name: Some("__fini_array_start"),
         end_symbol_name: Some("__fini_array_end"),
         ..DEFAULT_DEFS
diff --git a/wild_lib/src/output_section_part_map.rs b/wild_lib/src/output_section_part_map.rs
index 91f03d3..d82e99c 100644
--- a/wild_lib/src/output_section_part_map.rs
+++ b/wild_lib/src/output_section_part_map.rs
@@ -5,6 +5,7 @@ use crate::output_section_id::OutputSectionId;
 use crate::output_section_id::OutputSections;
 use crate::output_section_map::OutputSectionMap;
 use crate::part_id::PartId;
+use std::mem::take;
 use std::ops::AddAssign;

 /// A map from each part of each output section to some value. Different sections are split into
@@ -44,7 +45,7 @@
 impl OutputSectionPartMap {
     pub(crate) fn take(&mut self, part_id: PartId) -> T {
-        core::mem::take(self.get_mut(part_id))
+        take(self.get_mut(part_id))
     }
 }

diff --git a/wild_lib/src/output_trace.rs b/wild_lib/src/output_trace.rs
index 4c32d86..7ec37a2 100644
--- a/wild_lib/src/output_trace.rs
+++ b/wild_lib/src/output_trace.rs
@@ -5,6 +5,7 @@ use crate::args::Args;
 use crate::error::Result;
 use linker_trace::AddressTrace;
 use std::fmt::Write as _;
+use std::mem::take;
 use std::ops::DerefMut;
 use std::path::PathBuf;
 use std::sync::Mutex;
@@ -69,7 +70,7 @@ where
         let Some(address) = data.address else { return };
         let trace = AddressTrace {
             address,
-            messages: core::mem::take(data.messages.lock().unwrap().deref_mut()),
+            messages: take(data.messages.lock().unwrap().deref_mut()),
         };
         self.data.lock().unwrap().traces.push(trace);
     }
@@ -123,7 +124,7 @@ impl tracing::field::Visit for MessageFormatter {
 impl OutputTraceLayer {
     fn flush(&self) -> Result {
         let mut file = std::io::BufWriter::new(std::fs::File::create(&self.trace_path)?);
-        let data = core::mem::take(self.data.lock().unwrap().deref_mut());
+        let data = take(self.data.lock().unwrap().deref_mut());
         data.write(&mut file)?;
         Ok(())
     }
diff --git a/wild_lib/src/resolution.rs b/wild_lib/src/resolution.rs
index 8204618..66ba847 100644
--- a/wild_lib/src/resolution.rs
+++ b/wild_lib/src/resolution.rs
@@ -50,6 +50,7 @@ use object::read::elf::Sym as _;
 use object::LittleEndian;
 use rayon::iter::IntoParallelRefMutIterator;
 use rayon::iter::ParallelIterator;
+use std::mem::take;
 use std::num::NonZeroU32;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::AtomicUsize;
@@ -354,8 +355,8 @@ fn resolve_alternative_symbol_definitions<'data, S: StorageModel>(
     // For now, we do this from a single thread since we don't expect a lot of symbols will have
     // multiple definitions. If it turns out that there are cases where it's actually taking
     // significant time, then we could parallelise this without too much work.
-    let previous_definitions = core::mem::take(&mut symbol_db.alternative_definitions);
-    let symbols_with_alternatives = core::mem::take(&mut symbol_db.symbols_with_alternatives);
+    let previous_definitions = take(&mut symbol_db.alternative_definitions);
+    let symbols_with_alternatives = take(&mut symbol_db.symbols_with_alternatives);
     let mut alternatives = Vec::new();
     for first in symbols_with_alternatives {
         alternatives.clear();
diff --git a/wild_lib/src/slice.rs b/wild_lib/src/slice.rs
index 9e3591c..f692c49 100644
--- a/wild_lib/src/slice.rs
+++ b/wild_lib/src/slice.rs
@@ -1,3 +1,5 @@
+use std::mem::take;
+
 /// Removes `prefix` elements from `data` and returns them. Once `take_mut` on core::slice is
 /// stable, we can use that instead.
 #[track_caller]
@@ -7,7 +9,7 @@ pub(crate) fn slice_take_prefix_mut<'t, T>(data: &mut &'t mut [T], prefix: usize
         prefix <= len,
         "Attempted to slice {prefix} elements when only {len} available"
     );
-    let owned_data = core::mem::take(data);
+    let owned_data = take(data);
     let (prefix, rest) = owned_data.split_at_mut(prefix);
     *data = rest;
     prefix
@@ -21,7 +23,7 @@ pub(crate) fn try_slice_take_prefix_mut<'t, T>(
     if prefix > len {
         return None;
     }
-    let owned_data = core::mem::take(data);
+    let owned_data = take(data);
     let (prefix, rest) = owned_data.split_at_mut(prefix);
     *data = rest;
     Some(prefix)
@@ -31,7 +33,7 @@ pub(crate) fn take_first_mut<'t, T>(data: &mut &'t mut [T]) -> Option<&'t mut T>
     if data.is_empty() {
         None
     } else {
-        let owned_data = core::mem::take(data);
+        let owned_data = take(data);
         let (prefix, rest) = owned_data.split_at_mut(1);
         *data = rest;
         Some(&mut prefix[0])
diff --git a/wild_lib/src/symbol_db.rs b/wild_lib/src/symbol_db.rs
index 7490e40..7af243f 100644
--- a/wild_lib/src/symbol_db.rs
+++ b/wild_lib/src/symbol_db.rs
@@ -26,6 +26,8 @@ use itertools::Itertools;
 use object::read::elf::Sym as _;
 use object::LittleEndian;
 use std::collections::hash_map;
+use std::mem::replace;
+use std::mem::take;

 pub struct SymbolDb<'data, S: StorageModel> {
     pub(crate) args: &'data Args,
@@ -299,7 +301,7 @@ impl<'data, S: StorageModel> SymbolDb<'data, S> {
                 let first_symbol_id = *entry.get();
                 // Update the entry at `first_symbol_id` to point to the new last symbol (the
                 // pending symbol).
-                let previous_last = core::mem::replace(
+                let previous_last = replace(
                     &mut self.alternative_definitions[first_symbol_id.as_usize()],
                     pending.symbol_id,
                 );
@@ -380,7 +382,7 @@ impl<'data, S: StorageModel> SymbolDb<'data, S> {
     /// restored later by calling `restore_definitions`. While the definitions are taken, any method
     /// that requires definitions will fail.
     pub(crate) fn take_definitions(&mut self) -> Vec {
-        core::mem::take(&mut self.symbol_definitions)
+        take(&mut self.symbol_definitions)
     }

     pub(crate) fn restore_definitions(&mut self, definitions: Vec) {
diff --git a/wild_lib/src/validation.rs b/wild_lib/src/validation.rs
index 460d0ca..5df1d72 100644
--- a/wild_lib/src/validation.rs
+++ b/wild_lib/src/validation.rs
@@ -82,7 +82,7 @@ fn validate_resolution(
     };
     if let Some(got_address) = resolution.got_address {
         let start_offset = (got_address.get() - got.sh_addr(LittleEndian)) as usize;
-        let end_offset = start_offset + core::mem::size_of::();
+        let end_offset = start_offset + size_of::();
         if end_offset > got_data.len() {
             bail!("GOT offset beyond end of GOT 0x{end_offset}");
         }