diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs
index 083ad5a3e4..cbf29acb78 100644
--- a/src/util/heap/layout/byte_map_mmapper.rs
+++ b/src/util/heap/layout/byte_map_mmapper.rs
@@ -83,7 +83,7 @@ impl Mmapper for ByteMapMmapper {
         let start_chunk = Self::address_to_mmap_chunks_down(start);
         let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
         trace!(
-            "Calling quanrantine_address_range with start={:?} and {} pages, {}-{}",
+            "Calling quarantine_address_range with start={:?} and {} pages, {}-{}",
             start,
             pages,
             Self::mmap_chunks_to_address(start_chunk),
diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs
index ce84f3784c..898f3a167b 100644
--- a/src/util/heap/layout/mmapper.rs
+++ b/src/util/heap/layout/mmapper.rs
@@ -121,11 +121,11 @@ impl MapState {
             MapState::Unmapped => mmap_noreserve(mmap_start, MMAP_CHUNK_BYTES, strategy),
             MapState::Quarantined => Ok(()),
             MapState::Mapped => {
-                // If a chunk is mapped by us and we try to quanrantine it, we simply don't do anything.
+                // If a chunk is mapped by us and we try to quarantine it, we simply don't do anything.
                 // We allow this as it is possible to have a situation like this:
-                // we have global side metdata S, and space A and B. We quanrantine memory X for S for A, then map
-                // X for A, and then we quanrantine memory Y for S for B. It is possible that X and Y is the same chunk,
-                // so the chunk is already mapped for A, and we try quanrantine it for B. We simply allow this transition.
+                // we have global side metadata S, and spaces A and B. We quarantine memory X for S for A, then map
+                // X for A, and then we quarantine memory Y for S for B. It is possible that X and Y are the same chunk,
+                // so the chunk is already mapped for A, and we try to quarantine it for B. We simply allow this transition.
                 return Ok(());
             }
             MapState::Protected => panic!("Cannot quarantine protected memory"),
diff --git a/src/util/memory.rs b/src/util/memory.rs
index 09d8d7d719..35950ea4f7 100644
--- a/src/util/memory.rs
+++ b/src/util/memory.rs
@@ -147,6 +147,7 @@ pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
     match error.kind() {
         // From Rust nightly 2021-05-12, we started to see Rust added this ErrorKind.
         ErrorKind::OutOfMemory => {
+            eprintln!("{}", get_process_memory_maps());
             // Signal `MmapOutOfMemory`. Expect the VM to abort immediately.
             trace!("Signal MmapOutOfMemory!");
             VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
@@ -159,6 +160,7 @@ pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
             if let Some(os_errno) = error.raw_os_error() {
                 // If it is OOM, we invoke out_of_memory() through the VM interface.
                 if os_errno == libc::ENOMEM {
+                    eprintln!("{}", get_process_memory_maps());
                     // Signal `MmapOutOfMemory`. Expect the VM to abort immediately.
                     trace!("Signal MmapOutOfMemory!");
                     VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
@@ -166,7 +168,10 @@ pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
                 }
             }
         }
-        ErrorKind::AlreadyExists => panic!("Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?"),
+        ErrorKind::AlreadyExists => {
+            eprintln!("{}", get_process_memory_maps());
+            panic!("Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?");
+        }
         _ => {}
     }
     panic!("Unexpected mmap failure: {:?}", error)
@@ -228,9 +233,7 @@ fn wrap_libc_call<T: PartialEq>(f: &dyn Fn() -> T, expect: T) -> Result<()> {
 
 /// Get the memory maps for the process. The returned string is a multi-line string.
 /// This is only meant to be used for debugging. For example, log process memory maps after detecting a clash.
-/// If we would need to parsable memory maps, I would suggest using a library instead which saves us the trouble to deal with portability.
-#[cfg(debug_assertions)]
-#[cfg(target_os = "linux")]
+#[cfg(any(target_os = "linux", target_os = "android"))]
 pub fn get_process_memory_maps() -> String {
     // print map
     use std::fs::File;
@@ -241,6 +244,13 @@ pub fn get_process_memory_maps() -> String {
     data
 }
 
+/// Get the memory maps for the process. The returned string is a multi-line string.
+/// This is only meant to be used for debugging. For example, log process memory maps after detecting a clash.
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+pub fn get_process_memory_maps() -> String {
+    "(process map unavailable)".to_string()
+}
+
 /// Returns the total physical memory for the system in bytes.
 pub(crate) fn get_system_total_memory() -> u64 {
     // TODO: Note that if we want to get system info somewhere else in the future, we should
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index 4b6ea61a1c..8fc54764bd 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1124,7 +1124,7 @@ impl SideMetadataContext {
     /// # Arguments
     /// * `start` - The starting address of the source data.
     /// * `size` - The size of the source data (in bytes).
-    /// * `no_reserve` - whether to invoke mmap with a noreserve flag (we use this flag to quanrantine address range)
+    /// * `no_reserve` - whether to invoke mmap with a noreserve flag (we use this flag to quarantine an address range)
     fn map_metadata_internal(&self, start: Address, size: usize, no_reserve: bool) -> Result<()> {
         for spec in self.global.iter() {
             match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) {
@@ -1137,15 +1137,17 @@ impl SideMetadataContext {
         let mut lsize: usize = 0;
 
         for spec in self.local.iter() {
-            // For local side metadata, we always have to reserve address space for all
-            // local metadata required by all policies in MMTk to be able to calculate a constant offset for each local metadata at compile-time
-            // (it's like assigning an ID to each policy).
-            // As the plan is chosen at run-time, we will never know which subset of policies will be used during run-time.
-            // We can't afford this much address space in 32-bits.
+            // For local side metadata, we always have to reserve address space for all local
+            // metadata required by all policies in MMTk to be able to calculate a constant offset
+            // for each local metadata at compile-time (it's like assigning an ID to each policy).
+            //
+            // As the plan is chosen at run-time, we will never know which subset of policies will
+            // be used during run-time. We can't afford this much address space in 32-bits.
             // So, we switch to the chunk-based approach for this specific case.
             //
-            // The global metadata is different in that for each plan, we can calculate its constant base addresses at compile-time.
-            // Using the chunk-based approach will need the same address space size as the current not-chunked approach.
+            // The global metadata is different in that for each plan, we can calculate its constant
+            // base addresses at compile-time. Using the chunk-based approach will need the same
+            // address space size as the current not-chunked approach.
             #[cfg(target_pointer_width = "64")]
             {
                 match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) {
@@ -1191,13 +1193,13 @@ impl SideMetadataContext {
         debug_assert!(size % BYTES_IN_PAGE == 0);
 
         for spec in self.global.iter() {
-            ensure_munmap_contiguos_metadata_space(start, size, spec);
+            ensure_munmap_contiguous_metadata_space(start, size, spec);
         }
 
         for spec in self.local.iter() {
             #[cfg(target_pointer_width = "64")]
             {
-                ensure_munmap_contiguos_metadata_space(start, size, spec);
+                ensure_munmap_contiguous_metadata_space(start, size, spec);
             }
             #[cfg(target_pointer_width = "32")]
             {
diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs
index 5c7aa19a73..3fc9762a95 100644
--- a/src/util/metadata/side_metadata/helpers.rs
+++ b/src/util/metadata/side_metadata/helpers.rs
@@ -37,7 +37,7 @@ pub(crate) fn ensure_munmap_metadata(start: Address, size: usize) {
 /// Unmaps a metadata space (`spec`) for the specified data address range (`start` and `size`)
 /// Returns the size in bytes that get munmapped.
 #[cfg(test)]
-pub(crate) fn ensure_munmap_contiguos_metadata_space(
+pub(crate) fn ensure_munmap_contiguous_metadata_space(
     start: Address,
     size: usize,
     spec: &SideMetadataSpec,
diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs
index 7d07f7f8e4..fa9400e451 100644
--- a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs
+++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs
@@ -29,7 +29,7 @@ pub fn test_handle_mmap_conflict() {
             assert!(panic_res.is_err());
             let err = panic_res.err().unwrap();
             assert!(err.is::<&str>());
-            assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?");
+            assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?");
         },
         no_cleanup,
     )
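
The `MapState::Mapped` arm in the mmapper.rs hunk above encodes the invariant that quarantining an already-mapped chunk is a deliberate no-op. The following is a minimal, self-contained sketch of that transition table, for illustration only: `MapState` here is a simplified stand-in for MMTk's real type (which also performs the actual mmap calls), and `quarantine_transition` is a hypothetical helper name, not part of this patch.

// Sketch of the quarantine transitions described in the mmapper.rs comment,
// assuming a simplified MapState with no real memory mapping behind it.
#[derive(Debug, PartialEq, Clone, Copy)]
enum MapState {
    Unmapped,
    Quarantined,
    Mapped,
    Protected,
}

fn quarantine_transition(state: MapState) -> MapState {
    match state {
        // The real code reserves the chunk with a noreserve mmap here; elided in this sketch.
        MapState::Unmapped => MapState::Quarantined,
        // Quarantining an already-quarantined chunk is a no-op.
        MapState::Quarantined => MapState::Quarantined,
        // Already mapped by us: allowed, and the chunk stays mapped. This is the
        // spaces A and B scenario from the comment: chunk X is quarantined for side
        // metadata S, then mapped for A; quarantining the same chunk for B must not fail.
        MapState::Mapped => MapState::Mapped,
        MapState::Protected => panic!("Cannot quarantine protected memory"),
    }
}

fn main() {
    // Space A quarantines chunk X for side metadata S, then maps it for its own use.
    let mut x = MapState::Unmapped;
    x = quarantine_transition(x);
    assert_eq!(x, MapState::Quarantined);
    x = MapState::Mapped;

    // Space B quarantines chunk Y, which happens to be the same chunk as X.
    x = quarantine_transition(x);
    assert_eq!(x, MapState::Mapped); // allowed, and the chunk stays mapped
}

This is also the invariant the mock test relies on: only a clash with memory MMTk did not map itself (ErrorKind::AlreadyExists) reaches the panic, now preceded by a dump of the process memory maps.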