From dc97188c77bab6948f23fe69bfd154144119d653 Mon Sep 17 00:00:00 2001 From: Tom Dohrmann Date: Sat, 25 Dec 2021 15:02:11 +0100 Subject: [PATCH 1/5] add aslr support --- Cargo.lock | 47 ++++++++++++++++- Cargo.toml | 8 ++- build.rs | 4 ++ src/binary/entropy.rs | 87 +++++++++++++++++++++++++++++++ src/binary/level_4_entries.rs | 69 +++++++++++++++++++----- src/binary/load_kernel.rs | 22 ++++++-- src/binary/mod.rs | 46 +++++++++++----- src/config.rs | 6 +++ tests/test_kernels/pie/Cargo.toml | 3 ++ 9 files changed, 257 insertions(+), 35 deletions(-) create mode 100644 src/binary/entropy.rs diff --git a/Cargo.lock b/Cargo.lock index d43857b2..2936c930 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -72,6 +72,9 @@ dependencies = [ "noto-sans-mono-bitmap", "proc-macro2", "quote", + "rand", + "rand_chacha", + "raw-cpuid", "rsdp", "serde", "spinning_top", @@ -272,6 +275,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "ppv-lite86" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" + [[package]] name = "proc-macro2" version = "1.0.27" @@ -290,6 +299,40 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" + +[[package]] +name = "raw-cpuid" +version = "10.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "929f54e29691d4e6a9cc558479de70db7aa3d98cd6fe7ab86d7507aa2886b9d2" +dependencies = [ + "bitflags", +] + [[package]] name = "rsdp" version = "1.1.0" @@ -544,9 +587,9 @@ dependencies = [ [[package]] name = "xmas-elf" -version = "0.6.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58" +checksum = "8d29b4d8e7beaceb4e77447ba941a7600d23d0319ab52da0461abea214832d5a" dependencies = [ "zero", ] diff --git a/Cargo.toml b/Cargo.toml index 27ba872c..6161f789 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ name = "uefi" required-features = ["uefi_bin"] [dependencies] -xmas-elf = { version = "0.6.2", optional = true } +xmas-elf = { version = "0.8.0", optional = true } x86_64 = { version = "0.14.7", optional = true, default-features = false, features = ["instructions", "inline_asm"] } usize_conversions = { version = "0.2.0", optional = true } bit_field = { version = "0.10.0", optional = true } @@ -51,6 +51,9 @@ json = { version = "0.12.4", optional = true } rsdp = { version = "1.0.0", optional = true } fatfs = { version = "0.3.4", optional = true } gpt = { version = "2.0.0", optional = true } +raw-cpuid = { version = "10.2.0", optional = true } +rand = { version = "0.8.4", optional = true, default-features = false } +rand_chacha = { version = "0.3.1", optional = true, default-features = false } [dependencies.noto-sans-mono-bitmap] version = "0.1.2" @@ -72,7 +75,8 @@ bios_bin = ["binary", 
"rsdp"] uefi_bin = ["binary", "uefi"] binary = [ "llvm-tools-build", "x86_64", "toml", "xmas-elf", "usize_conversions", "log", "conquer-once", - "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2", + "spinning_top", "serde", "noto-sans-mono-bitmap", "quote", "proc-macro2", "raw-cpuid", "rand", + "rand_chacha" ] [profile.dev] diff --git a/build.rs b/build.rs index c86cb670..2c4c46e7 100644 --- a/build.rs +++ b/build.rs @@ -356,6 +356,8 @@ mod binary { pub map_page_table_recursively: bool, #[serde(default = "val_true")] pub map_framebuffer: bool, + #[serde(default)] + pub aslr: bool, pub kernel_stack_size: Option, pub physical_memory_offset: Option, pub recursive_index: Option, @@ -376,6 +378,7 @@ mod binary { let map_physical_memory = self.map_physical_memory; let map_page_table_recursively = self.map_page_table_recursively; let map_framebuffer = self.map_framebuffer; + let aslr = self.aslr; let kernel_stack_size = optional(self.kernel_stack_size); let physical_memory_offset = optional(self.physical_memory_offset); let recursive_index = optional(self.recursive_index); @@ -389,6 +392,7 @@ mod binary { map_physical_memory: #map_physical_memory, map_page_table_recursively: #map_page_table_recursively, map_framebuffer: #map_framebuffer, + aslr: #aslr, kernel_stack_size: #kernel_stack_size, physical_memory_offset: #physical_memory_offset, recursive_index: #recursive_index, diff --git a/src/binary/entropy.rs b/src/binary/entropy.rs new file mode 100644 index 00000000..e88654f6 --- /dev/null +++ b/src/binary/entropy.rs @@ -0,0 +1,87 @@ +use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; +use raw_cpuid::CpuId; +use x86_64::instructions::{port::Port, random::RdRand}; + +/// Gather entropy from various sources to seed a RNG. +pub fn build_rng() -> ChaCha20Rng { + const ENTROPY_SOURCES: [fn() -> [u8; 32]; 3] = [rd_rand_entropy, tsc_entropy, pit_entropy]; + + // Collect entropy from different sources and xor them all together. + let mut seed = [0; 32]; + for entropy_source in ENTROPY_SOURCES { + let entropy = entropy_source(); + + for (seed, entropy) in seed.iter_mut().zip(entropy) { + *seed ^= entropy; + } + } + + // Construct the RNG. + ChaCha20Rng::from_seed(seed) +} + +/// Gather entropy by requesting random numbers with `rdrand` instruction if it's available. +/// +/// This function provides excellent entropy (unless you don't trust the CPU vendors). +fn rd_rand_entropy() -> [u8; 32] { + let mut entropy = [0; 32]; + + // Check if the CPU supports `RDRAND`. + if let Some(rd_rand) = RdRand::new() { + for i in 0..4 { + let value = loop { + if let Some(value) = rd_rand.get_u64() { + break value; + } + }; + entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes()); + } + } + + entropy +} + +/// Gather entropy by reading the current time with the `rdtsc` instruction if it's available. +/// +/// This function doesn't provide particulary good entropy, but it's better than nothing. +fn tsc_entropy() -> [u8; 32] { + let mut entropy = [0; 32]; + + // Check if the CPU supports `RDTSC`. + let cpu_id = CpuId::new(); + if let Some(feature_info) = cpu_id.get_feature_info() { + if !feature_info.has_tsc() { + for i in 0..4 { + let value = unsafe { + // SAFETY: We checked that the cpu supports `RDTSC` and we run in ring 0. + core::arch::x86_64::_rdtsc() + }; + entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes()); + } + } + } + + entropy +} + +/// Gather entropy by reading the current count of PIT channel 1-3. 
+///
+/// This function doesn't provide particularly good entropy, but it's always available.
+fn pit_entropy() -> [u8; 32] {
+    let mut entropy = [0; 32];
+
+    for (i, entropy_byte) in entropy.iter_mut().enumerate() {
+        // Cycle through channels 0-2.
+        let channel = i % 3;
+
+        let mut port = Port::<u8>::new(0x40 + channel as u16);
+        let value = unsafe {
+            // SAFETY: It's safe to read from ports 0x40-0x42.
+            port.read()
+        };
+
+        *entropy_byte = value;
+    }
+
+    entropy
+}
diff --git a/src/binary/level_4_entries.rs b/src/binary/level_4_entries.rs
index f30e5a45..d7859960 100644
--- a/src/binary/level_4_entries.rs
+++ b/src/binary/level_4_entries.rs
@@ -1,4 +1,6 @@
 use core::{alloc::Layout, convert::TryInto};
+use rand::distributions::{Distribution, Uniform};
+use rand_chacha::ChaCha20Rng;
 use usize_conversions::IntoUsize;
 use x86_64::{
     structures::paging::{Page, PageTableIndex, Size4KiB},
@@ -7,7 +9,7 @@ use x86_64::{
 use xmas_elf::program::ProgramHeader;

 use crate::{
-    binary::{MemoryRegion, CONFIG},
+    binary::{entropy, MemoryRegion, CONFIG},
     BootInfo,
 };

@@ -15,7 +17,11 @@ use crate::{
 ///
 /// Useful for determining a free virtual memory block, e.g. for mapping additional data.
 pub struct UsedLevel4Entries {
-    entry_state: [bool; 512], // whether an entry is in use by the kernel
+    /// Whether an entry is in use by the kernel.
+    entry_state: [bool; 512],
+    /// A random number generator that should be used to generate random addresses or
+    /// `None` if ASLR is disabled.
+    rng: Option<ChaCha20Rng>,
 }

 impl UsedLevel4Entries {
@@ -25,6 +31,7 @@ impl UsedLevel4Entries {
     pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
         let mut used = UsedLevel4Entries {
             entry_state: [false; 512],
+            rng: CONFIG.aslr.then(entropy::build_rng),
         };

         used.entry_state[0] = true; // TODO: Can we do this dynamically?
@@ -104,23 +111,61 @@ impl UsedLevel4Entries {
     /// Since this method marks each returned index as used, it can be used multiple times
     /// to determine multiple unused virtual memory regions.
     pub fn get_free_entry(&mut self) -> PageTableIndex {
-        let (idx, entry) = self
+        // Create an iterator over all available p4 indices.
+        let mut free_entries = self
             .entry_state
-            .iter_mut()
+            .iter()
+            .copied()
             .enumerate()
-            .find(|(_, &mut entry)| entry == false)
-            .expect("no usable level 4 entries found");
+            .filter(|(_, used)| !used)
+            .map(|(idx, _)| idx);
+
+        // Choose the free entry index.
+        let idx = if let Some(rng) = self.rng.as_mut() {
+            // Count the entries and randomly choose an index in `[0..count)`.
+            let count = free_entries.clone().count();
+            if count == 0 {
+                panic!("no usable level 4 entries found")
+            }
+            let distribution = Uniform::from(0..count);
+            let idx = distribution.sample(rng);
+
+            // Get the index of the free entry.
+            free_entries.nth(idx).unwrap()
+        } else {
+            // Choose the first index.
+            free_entries
+                .next()
+                .expect("no usable level 4 entries found")
+        };
+
+        // Mark the entry as used.
+        self.entry_state[idx] = true;

-        *entry = true;
         PageTableIndex::new(idx.try_into().unwrap())
     }

-    /// Returns the virtual start address of an unused level 4 entry and marks it as used.
+    /// Returns a virtual address in an unused level 4 entry and marks it as used.
     ///
-    /// This is a convenience method around [`get_free_entry`], so all of its docs applies here
+    /// This function calls [`get_free_entry`] internally, so all of its docs apply here
     /// too.
-    pub fn get_free_address(&mut self) -> VirtAddr {
-        Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
-            .start_address()
+    pub fn get_free_address(&mut self, size: u64, alignment: u64) -> VirtAddr {
+        assert!(alignment.is_power_of_two());
+
+        let base =
+            Page::from_page_table_indices_1gib(self.get_free_entry(), PageTableIndex::new(0))
+                .start_address();
+
+        let offset = if let Some(rng) = self.rng.as_mut() {
+            // Choose a random offset.
+            const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;
+            let end = LEVEL_4_SIZE - size;
+            let uniform_range = Uniform::from(0..end / alignment);
+            uniform_range.sample(rng) * alignment
+        } else {
+            0
+        };
+
+        base + offset
     }
 }
diff --git a/src/binary/load_kernel.rs b/src/binary/load_kernel.rs
index e07eb114..626c9f95 100644
--- a/src/binary/load_kernel.rs
+++ b/src/binary/load_kernel.rs
@@ -52,12 +52,28 @@ where
         }

         let elf_file = ElfFile::new(bytes)?;
+        for program_header in elf_file.program_iter() {
+            program::sanity_check(program_header, &elf_file)?;
+        }

         let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
             header::Type::None => unimplemented!(),
             header::Type::Relocatable => unimplemented!(),
             header::Type::Executable => 0,
-            header::Type::SharedObject => used_entries.get_free_address().as_u64(),
+            header::Type::SharedObject => {
+                // Find the highest virtual memory address and the biggest alignment.
+                let load_program_headers = elf_file
+                    .program_iter()
+                    .filter(|h| matches!(h.get_type(), Ok(Type::Load)));
+                let size = load_program_headers
+                    .clone()
+                    .map(|h| h.virtual_addr() + h.mem_size())
+                    .max()
+                    .unwrap_or(0);
+                let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);
+
+                used_entries.get_free_address(size, align).as_u64()
+            }
             header::Type::Core => unimplemented!(),
             header::Type::ProcessorSpecific(_) => unimplemented!(),
         };
@@ -79,10 +95,6 @@ where
     }

     fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
-        for program_header in self.elf_file.program_iter() {
-            program::sanity_check(program_header, &self.elf_file)?;
-        }
-
         // Load the segments into virtual memory.
         let mut tls_template = None;
         for program_header in self.elf_file.program_iter() {
diff --git a/src/binary/mod.rs b/src/binary/mod.rs
index ef0c6cd3..a4f6aec2 100644
--- a/src/binary/mod.rs
+++ b/src/binary/mod.rs
@@ -8,8 +8,8 @@ use parsed_config::CONFIG;
 use usize_conversions::FromUsize;
 use x86_64::{
     structures::paging::{
-        FrameAllocator, Mapper, OffsetPageTable, Page, PageTableFlags, PageTableIndex, PhysFrame,
-        Size2MiB,
+        FrameAllocator, Mapper, OffsetPageTable, Page, PageSize, PageTableFlags, PageTableIndex,
+        PhysFrame, Size2MiB, Size4KiB,
     },
     PhysAddr, VirtAddr,
 };
@@ -21,6 +21,8 @@ pub mod bios;
 #[cfg(feature = "uefi_bin")]
 mod uefi;

+/// Provides a function to gather entropy and build an RNG.
+mod entropy;
 mod gdt;
 /// Provides a frame allocator based on a BIOS or UEFI memory map.
pub mod legacy_memory_region; @@ -147,7 +149,7 @@ where let stack_start_addr = kernel_stack_start_location(&mut used_entries); let stack_start: Page = Page::containing_address(stack_start_addr); let stack_end = { - let end_addr = stack_start_addr + CONFIG.kernel_stack_size.unwrap_or(20 * PAGE_SIZE); + let end_addr = stack_start_addr + CONFIG.kernel_stack_size(); Page::containing_address(end_addr - 1u64) }; for page in Page::range_inclusive(stack_start, stack_end) { @@ -197,7 +199,9 @@ where let framebuffer_start_frame: PhysFrame = PhysFrame::containing_address(framebuffer_addr); let framebuffer_end_frame = PhysFrame::containing_address(framebuffer_addr + framebuffer_size - 1u64); - let start_page = Page::containing_address(frame_buffer_location(&mut used_entries)); + let start_page = + Page::from_start_address(frame_buffer_location(&mut used_entries, framebuffer_size)) + .expect("the framebuffer address must be page aligned"); for (i, frame) in PhysFrame::range_inclusive(framebuffer_start_frame, framebuffer_end_frame).enumerate() { @@ -219,14 +223,18 @@ where let physical_memory_offset = if CONFIG.map_physical_memory { log::info!("Map physical memory"); - let offset = CONFIG - .physical_memory_offset - .map(VirtAddr::new) - .unwrap_or_else(|| used_entries.get_free_address()); let start_frame = PhysFrame::containing_address(PhysAddr::new(0)); let max_phys = frame_allocator.max_phys_addr(); let end_frame: PhysFrame = PhysFrame::containing_address(max_phys - 1u64); + + let size = max_phys.as_u64(); + let alignment = Size2MiB::SIZE; + let offset = CONFIG + .physical_memory_offset + .map(VirtAddr::new) + .unwrap_or_else(|| used_entries.get_free_address(size, alignment)); + for frame in PhysFrame::range_inclusive(start_frame, end_frame) { let page = Page::containing_address(offset + frame.start_address().as_u64()); let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; @@ -322,7 +330,7 @@ where let (combined, memory_regions_offset) = boot_info_layout.extend(memory_regions_layout).unwrap(); - let boot_info_addr = boot_info_location(&mut mappings.used_entries); + let boot_info_addr = boot_info_location(&mut mappings.used_entries, combined); assert!( boot_info_addr.is_aligned(u64::from_usize(combined.align())), "boot info addr is not properly aligned" @@ -458,25 +466,35 @@ struct Addresses { boot_info: &'static mut crate::boot_info::BootInfo, } -fn boot_info_location(used_entries: &mut UsedLevel4Entries) -> VirtAddr { +fn boot_info_location(used_entries: &mut UsedLevel4Entries, layout: Layout) -> VirtAddr { CONFIG .boot_info_address .map(VirtAddr::new) - .unwrap_or_else(|| used_entries.get_free_address()) + .unwrap_or_else(|| { + used_entries.get_free_address( + u64::from_usize(layout.size()), + u64::from_usize(layout.align()), + ) + }) } -fn frame_buffer_location(used_entries: &mut UsedLevel4Entries) -> VirtAddr { +fn frame_buffer_location( + used_entries: &mut UsedLevel4Entries, + framebuffer_size: usize, +) -> VirtAddr { CONFIG .framebuffer_address .map(VirtAddr::new) - .unwrap_or_else(|| used_entries.get_free_address()) + .unwrap_or_else(|| { + used_entries.get_free_address(u64::from_usize(framebuffer_size), Size4KiB::SIZE) + }) } fn kernel_stack_start_location(used_entries: &mut UsedLevel4Entries) -> VirtAddr { CONFIG .kernel_stack_address .map(VirtAddr::new) - .unwrap_or_else(|| used_entries.get_free_address()) + .unwrap_or_else(|| used_entries.get_free_address(CONFIG.kernel_stack_size(), 16)) } fn enable_nxe_bit() { diff --git a/src/config.rs b/src/config.rs index c07a4c4c..de5b7cd2 
100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -50,6 +50,12 @@ pub struct Config {
     ///
     /// Defaults to `false`.
     pub map_page_table_recursively: bool,
+    /// Whether to randomize non-statically configured addresses.
+    /// The kernel base address will be randomized when it's compiled as
+    /// a position independent executable.
+    ///
+    /// Defaults to `false`.
+    pub aslr: bool,
     /// Create the recursive mapping in at the given entry of the level 4 page table.
     ///
     /// If not given, the bootloader searches for a free level 4 entry dynamically.
diff --git a/tests/test_kernels/pie/Cargo.toml b/tests/test_kernels/pie/Cargo.toml
index dfbb0308..520f8192 100644
--- a/tests/test_kernels/pie/Cargo.toml
+++ b/tests/test_kernels/pie/Cargo.toml
@@ -8,3 +8,6 @@ edition = "2018"
 bootloader = { path = "../../.." }
 x86_64 = { version = "0.14.7", default-features = false, features = ["instructions", "inline_asm"] }
 uart_16550 = "0.2.10"
+
+[package.metadata.bootloader]
+aslr = true

From 4efc182fa60a6a61758b2eba3911268e794ed0fa Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Fri, 4 Feb 2022 12:05:59 +0100
Subject: [PATCH 2/5] use consistent casing

---
 src/binary/entropy.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/binary/entropy.rs b/src/binary/entropy.rs
index e88654f6..32e31b34 100644
--- a/src/binary/entropy.rs
+++ b/src/binary/entropy.rs
@@ -20,7 +20,7 @@ pub fn build_rng() -> ChaCha20Rng {
     ChaCha20Rng::from_seed(seed)
 }

-/// Gather entropy by requesting random numbers with the `rdrand` instruction if it's available.
+/// Gather entropy by requesting random numbers with the `RDRAND` instruction if it's available.
 ///
 /// This function provides excellent entropy (unless you don't trust the CPU vendors).
 fn rd_rand_entropy() -> [u8; 32] {
@@ -41,7 +41,7 @@ fn rd_rand_entropy() -> [u8; 32] {
     entropy
 }

-/// Gather entropy by reading the current time with the `rdtsc` instruction if it's available.
+/// Gather entropy by reading the current time with the `RDTSC` instruction if it's available.
 ///
 /// This function doesn't provide particularly good entropy, but it's better than nothing.
 fn tsc_entropy() -> [u8; 32] {

From 4f86dc0143582813fd0a9827795e4fe7e3c816d6 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Fri, 4 Feb 2022 12:06:27 +0100
Subject: [PATCH 3/5] add retry limit to `rd_rand_entropy`

---
 src/binary/entropy.rs | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/src/binary/entropy.rs b/src/binary/entropy.rs
index 32e31b34..65ef3821 100644
--- a/src/binary/entropy.rs
+++ b/src/binary/entropy.rs
@@ -29,18 +29,28 @@ fn rd_rand_entropy() -> [u8; 32] {
     // Check if the CPU supports `RDRAND`.
     if let Some(rd_rand) = RdRand::new() {
         for i in 0..4 {
-            let value = loop {
-                if let Some(value) = rd_rand.get_u64() {
-                    break value;
-                }
-            };
-            entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
+            if let Some(value) = get_random_64(rd_rand) {
+                entropy[i * 8..(i + 1) * 8].copy_from_slice(&value.to_ne_bytes());
+            }
         }
     }

     entropy
 }

+/// Try to fetch a 64-bit random value with a retry count limit of 10.
+///
+/// This function is a port of the C implementation provided in Intel's Software Developer's Manual, Volume 1, 7.3.17.1.
+fn get_random_64(rd_rand: RdRand) -> Option<u64> {
+    const RETRY_LIMIT: u32 = 10;
+    for _ in 0..RETRY_LIMIT {
+        if let Some(value) = rd_rand.get_u64() {
+            return Some(value);
+        }
+    }
+    None
+}
+
 /// Gather entropy by reading the current time with the `RDTSC` instruction if it's available.
 ///
 /// This function doesn't provide particularly good entropy, but it's better than nothing.

From 719f1d270604cb98b9250cead2bff89f831ae844 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sun, 6 Feb 2022 15:51:28 +0100
Subject: [PATCH 4/5] use `IteratorRandom::choose`

---
 src/binary/level_4_entries.rs | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/src/binary/level_4_entries.rs b/src/binary/level_4_entries.rs
index d7859960..d94360dc 100644
--- a/src/binary/level_4_entries.rs
+++ b/src/binary/level_4_entries.rs
@@ -1,5 +1,8 @@
 use core::{alloc::Layout, convert::TryInto};
-use rand::distributions::{Distribution, Uniform};
+use rand::{
+    distributions::{Distribution, Uniform},
+    seq::IteratorRandom,
+};
 use rand_chacha::ChaCha20Rng;
 use usize_conversions::IntoUsize;
 use x86_64::{
@@ -121,23 +124,14 @@ impl UsedLevel4Entries {
             .map(|(idx, _)| idx);

         // Choose the free entry index.
-        let idx = if let Some(rng) = self.rng.as_mut() {
-            // Count the entries and randomly choose an index in `[0..count)`.
-            let count = free_entries.clone().count();
-            if count == 0 {
-                panic!("no usable level 4 entries found")
-            }
-            let distribution = Uniform::from(0..count);
-            let idx = distribution.sample(rng);
-
-            // Get the index of the free entry.
-            free_entries.nth(idx).unwrap()
+        let idx_opt = if let Some(rng) = self.rng.as_mut() {
+            // Randomly choose an index.
+            free_entries.choose(rng)
         } else {
             // Choose the first index.
-            free_entries
-                .next()
-                .expect("no usable level 4 entries found")
+            free_entries.next()
         };
+        let idx = idx_opt.expect("no usable level 4 entry found");

         // Mark the entry as used.
         self.entry_state[idx] = true;

From bae5fb855a7277eccfc8d6d712d556c2861c2136 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sun, 6 Feb 2022 15:59:44 +0100
Subject: [PATCH 5/5] add comment explaining `CONFIG.aslr` involvement

---
 src/binary/level_4_entries.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/binary/level_4_entries.rs b/src/binary/level_4_entries.rs
index d94360dc..b62f2872 100644
--- a/src/binary/level_4_entries.rs
+++ b/src/binary/level_4_entries.rs
@@ -109,7 +109,8 @@ impl UsedLevel4Entries {
     }

-    /// Returns a unused level 4 entry and marks it as used.
+    /// Returns an unused level 4 entry and marks it as used. If `CONFIG.aslr` is
+    /// enabled, this will return a random available entry.
     ///
     /// Since this method marks each returned index as used, it can be used multiple times
     /// to determine multiple unused virtual memory regions.
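
To opt into the new behavior, a kernel sets the flag in its own Cargo.toml, as the new `pie` test kernel in this series does:

    [package.metadata.bootloader]
    aslr = true

The sketch below mirrors, on the host side, what the series does at boot time: it XORs several 32-byte entropy buffers into one ChaCha20 seed (like `entropy::build_rng`), picks a random unused level 4 entry (like `get_free_entry` after patch 4), and then picks an aligned offset inside the 512 GiB region covered by that entry (like `get_free_address`). It is an illustration only, not bootloader code: it assumes `rand = "0.8"` and `rand_chacha = "0.3"` as regular dependencies, and the entropy buffers are made-up placeholder values instead of real `rdrand`/`rdtsc`/PIT readings.

    // Host-side sketch (not bootloader code). Assumes rand = "0.8" and
    // rand_chacha = "0.3"; the entropy buffers below are placeholder values.
    use rand::{
        distributions::{Distribution, Uniform},
        seq::IteratorRandom,
        SeedableRng,
    };
    use rand_chacha::ChaCha20Rng;

    /// XOR all entropy buffers together, like `entropy::build_rng`.
    fn build_rng(sources: &[[u8; 32]]) -> ChaCha20Rng {
        let mut seed = [0u8; 32];
        for entropy in sources {
            for (seed_byte, entropy_byte) in seed.iter_mut().zip(entropy) {
                *seed_byte ^= entropy_byte;
            }
        }
        ChaCha20Rng::from_seed(seed)
    }

    fn main() {
        // Placeholder stand-ins for the rdrand, rdtsc, and PIT readings.
        let sources = [[0xAA; 32], [0x17; 32], [0x42; 32]];
        let mut rng = build_rng(&sources);

        // Pick a random unused level 4 entry, like `get_free_entry` after patch 4.
        let entry_state = [true, false, false, true, false];
        let idx = entry_state
            .iter()
            .copied()
            .enumerate()
            .filter(|(_, used)| !used)
            .map(|(idx, _)| idx)
            .choose(&mut rng)
            .expect("no usable level 4 entry found");

        // Pick an aligned offset inside the 512 GiB covered by that entry,
        // like `get_free_address`.
        const LEVEL_4_SIZE: u64 = 4096 * 512 * 512 * 512;
        let size = 2 * 1024 * 1024u64; // mapping size (placeholder)
        let alignment = 4096u64; // must be a power of two
        let offset = Uniform::from(0..(LEVEL_4_SIZE - size) / alignment).sample(&mut rng) * alignment;

        println!("level 4 entry {}, offset {:#x}", idx, offset);
    }

Because the seed is the XOR of all sources, a weak source (such as the PIT counters) does not degrade a strong one: as long as the sources are independent, the seed is at least as unpredictable as the best available source.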