From 282840b33761990b30b9edaec16f2d9735f323b6 Mon Sep 17 00:00:00 2001
From: Josh Soref <2119212+jsoref@users.noreply.github.com>
Date: Thu, 13 Apr 2023 18:34:50 -0400
Subject: [PATCH] Fix spelling

* additional
* addresses
* aggregates
* always
* around
* beginning
* behaviours
* borrows
* called
* canary
* deallocated
* determine
* division
* documentation
* empty
* endianness
* ensures
* existing
* github
* hygiene
* individual
* initialize
* instantiate
* library
* location
* miscellaneous
* mitigates
* needs
* nonexistent
* occurred
* occurring
* overridden
* parameter
* performable
* previous
* referential
* requires
* resolved
* scenarios
* semantics
* spurious
* structure
* subtracting
* suppress
* synchronization
* this
* timestamp
* to
* transferring
* unknown
* variable
* windows

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
---
 README.md | 2 +-
 cargo-miri/src/main.rs | 2 +-
 src/bin/miri.rs | 2 +-
 src/borrow_tracker/mod.rs | 2 +-
 .../stacked_borrows/diagnostics.rs | 2 +-
 src/borrow_tracker/stacked_borrows/mod.rs | 4 ++--
 src/borrow_tracker/stacked_borrows/stack.rs | 2 +-
 src/borrow_tracker/tree_borrows/mod.rs | 2 +-
 src/borrow_tracker/tree_borrows/perms.rs | 2 +-
 src/borrow_tracker/tree_borrows/tree.rs | 2 +-
 src/concurrency/data_race.rs | 2 +-
 src/concurrency/init_once.rs | 2 +-
 src/concurrency/range_object_map.rs | 6 +++---
 src/concurrency/sync.rs | 4 ++--
 src/concurrency/thread.rs | 4 ++--
 src/concurrency/weak_memory.rs | 14 +++++++-------
 src/eval.rs | 4 ++--
 src/helpers.rs | 2 +-
 src/intptrcast.rs | 2 +-
 src/machine.rs | 4 ++--
 src/shims/intrinsics/simd.rs | 4 ++--
 src/shims/os_str.rs | 2 +-
 src/shims/time.rs | 2 +-
 src/shims/tls.rs | 4 ++--
 src/shims/unix/fs.rs | 6 +++---
 src/shims/unix/linux/foreign_items.rs | 4 ++--
 src/shims/unix/sync.rs | 2 +-
 src/shims/windows/foreign_items.rs | 2 +-
 tests/fail/intrinsics/exact_div1.rs | 2 +-
 tests/fail/intrinsics/exact_div2.rs | 2 +-
 tests/fail/intrinsics/exact_div3.rs | 2 +-
 tests/fail/intrinsics/exact_div4.rs | 2 +-
 tests/fail/stacked_borrows/illegal_read3.rs | 2 +-
 .../intptrcast_alignment_check.rs | 2 +-
 .../concurrency/tls_pthread_drop_order.rs | 16 ++++++++--------
 tests/pass-dep/shims/libc-fs.rs | 2 +-
 tests/pass-dep/shims/libc-misc.rs | 2 +-
 tests/pass/0weak_memory_consistency.rs | 2 +-
 tests/pass/dyn-arbitrary-self.rs | 2 +-
 tests/pass/global_allocator.rs | 2 +-
 tests/pass/global_allocator.stdout | 2 +-
 tests/pass/issues/issue-29746.rs | 2 +-
 tests/pass/packed_struct.rs | 2 +-
 tests/pass/ptr_offset.rs | 2 +-
 tests/pass/rfc1623.rs | 2 +-
 tests/pass/shims/fs.rs | 2 +-
 tests/pass/stacked-borrows/stacked-borrows.rs | 6 +++---
 tests/pass/strings.rs | 2 +-
 tests/pass/weak_memory/extra_cpp.rs | 2 +-
 tests/pass/weak_memory/extra_cpp_unsafe.rs | 2 +-
 50 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/README.md b/README.md
index 4c73518798..129fec8cc8 100644
--- a/README.md
+++ b/README.md
@@ -403,7 +403,7 @@ to Miri failing to detect cases of undefined behavior in a program.
 * `-Zmiri-retag-fields=` controls when Stacked Borrows retagging recurses into
   fields. `all` means it always recurses (like `-Zmiri-retag-fields`), `none` means it never
   recurses, `scalar` (the default) means it only recurses for types where we would also emit
-  `noalias` annotations in the generated LLVM IR (types passed as indivudal scalars or pairs of
+  `noalias` annotations in the generated LLVM IR (types passed as individual scalars or pairs of
   scalars). Setting this to `none` is **unsound**.
 * `-Zmiri-tag-gc=` configures how often the pointer tag garbage collector runs. The default
   is to search for and remove unreachable tags once every `10000` basic blocks. Setting this to
diff --git a/cargo-miri/src/main.rs b/cargo-miri/src/main.rs
index 9b5fa7ae87..85c9cdad7d 100644
--- a/cargo-miri/src/main.rs
+++ b/cargo-miri/src/main.rs
@@ -81,7 +81,7 @@ fn main() {
         "miri" => phase_cargo_miri(args),
         "runner" => phase_runner(args, RunnerPhase::Cargo),
         arg if arg == env::var("RUSTC").unwrap() => {
-            // If the first arg is equal to the RUSTC env ariable (which should be set at this
+            // If the first arg is equal to the RUSTC env variable (which should be set at this
             // point), then we need to behave as rustc. This is the somewhat counter-intuitive
             // behavior of having both RUSTC and RUSTC_WRAPPER set
             // (see https://github.com/rust-lang/cargo/issues/10886).
diff --git a/src/bin/miri.rs b/src/bin/miri.rs
index 26a7ead240..cdf06b6a10 100644
--- a/src/bin/miri.rs
+++ b/src/bin/miri.rs
@@ -120,7 +120,7 @@ impl rustc_driver::Callbacks for MiriBeRustCompilerCalls {
     #[allow(rustc::potential_query_instability)] // rustc_codegen_ssa (where this code is copied from) also allows this lint
     fn config(&mut self, config: &mut Config) {
         if config.opts.prints.is_empty() && self.target_crate {
-            // Queries overriden here affect the data stored in `rmeta` files of dependencies,
+            // Queries overridden here affect the data stored in `rmeta` files of dependencies,
             // which will be used later in non-`MIRI_BE_RUSTC` mode.
             config.override_queries = Some(|_, local_providers, _| {
                 // `exported_symbols` and `reachable_non_generics` provided by rustc always returns
diff --git a/src/borrow_tracker/mod.rs b/src/borrow_tracker/mod.rs
index ed958329f9..827dd2b621 100644
--- a/src/borrow_tracker/mod.rs
+++ b/src/borrow_tracker/mod.rs
@@ -238,7 +238,7 @@ pub enum BorrowTrackerMethod {
 }
 
 impl BorrowTrackerMethod {
-    pub fn instanciate_global_state(self, config: &MiriConfig) -> GlobalState {
+    pub fn instantiate_global_state(self, config: &MiriConfig) -> GlobalState {
         RefCell::new(GlobalStateInner::new(
             self,
             config.tracked_pointer_tags.clone(),
diff --git a/src/borrow_tracker/stacked_borrows/diagnostics.rs b/src/borrow_tracker/stacked_borrows/diagnostics.rs
index 2cc8f03546..c9674e0a2f 100644
--- a/src/borrow_tracker/stacked_borrows/diagnostics.rs
+++ b/src/borrow_tracker/stacked_borrows/diagnostics.rs
@@ -292,7 +292,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
             .rev()
             .find_map(|event| {
                 // First, look for a Creation event where the tag and the offset matches. This
-                // ensrues that we pick the right Creation event when a retag isn't uniform due to
+                // ensures that we pick the right Creation event when a retag isn't uniform due to
                 // Freeze.
                 let range = event.retag.range;
                 if event.retag.new_tag == tag
diff --git a/src/borrow_tracker/stacked_borrows/mod.rs b/src/borrow_tracker/stacked_borrows/mod.rs
index b766916402..4d7bbb643b 100644
--- a/src/borrow_tracker/stacked_borrows/mod.rs
+++ b/src/borrow_tracker/stacked_borrows/mod.rs
@@ -433,7 +433,7 @@ impl<'tcx> Stack {
         let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
             // The parent is a wildcard pointer or matched the unknown bottom.
             // This is approximate. Nobody knows what happened, so forget everything.
-            // The new thing is SRW anyway, so we cannot push it "on top of the unkown part"
+            // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
             // (for all we know, it might join an SRW group inside the unknown).
             trace!("reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown");
             self.set_unknown_bottom(global.next_ptr_tag);
@@ -825,7 +825,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
         Ok(Some(alloc_id))
     }
 
-    /// Retags an indidual pointer, returning the retagged version.
+    /// Retags an individual pointer, returning the retagged version.
     /// `kind` indicates what kind of reference is being created.
     fn sb_retag_reference(
         &mut self,
diff --git a/src/borrow_tracker/stacked_borrows/stack.rs b/src/borrow_tracker/stacked_borrows/stack.rs
index 1d5cfec350..064dbe025a 100644
--- a/src/borrow_tracker/stacked_borrows/stack.rs
+++ b/src/borrow_tracker/stacked_borrows/stack.rs
@@ -51,7 +51,7 @@ impl Stack {
         // Note that the algorithm below is based on considering the tag at read_idx - 1,
         // so precisely considering the tag at index 0 for removal when we have an unknown
         // bottom would complicate the implementation. The simplification of not considering
-        // it does not have a significant impact on the degree to which the GC mititages
+        // it does not have a significant impact on the degree to which the GC mitigates
         // memory growth.
         let mut read_idx = 1;
         let mut write_idx = read_idx;
diff --git a/src/borrow_tracker/tree_borrows/mod.rs b/src/borrow_tracker/tree_borrows/mod.rs
index 2297ceb125..f73b2554ad 100644
--- a/src/borrow_tracker/tree_borrows/mod.rs
+++ b/src/borrow_tracker/tree_borrows/mod.rs
@@ -283,7 +283,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
         Ok(Some((alloc_id, new_tag)))
     }
 
-    /// Retags an indidual pointer, returning the retagged version.
+    /// Retags an individual pointer, returning the retagged version.
     fn tb_retag_reference(
         &mut self,
         val: &ImmTy<'tcx, Provenance>,
diff --git a/src/borrow_tracker/tree_borrows/perms.rs b/src/borrow_tracker/tree_borrows/perms.rs
index 04b8e1df57..3b4fcfd190 100644
--- a/src/borrow_tracker/tree_borrows/perms.rs
+++ b/src/borrow_tracker/tree_borrows/perms.rs
@@ -113,7 +113,7 @@ mod transition {
     }
 
     impl PermissionPriv {
-        /// Determines whether a transition that occured is compatible with the presence
+        /// Determines whether a transition that occurred is compatible with the presence
         /// of a Protector. This is not included in the `transition` functions because
        /// it would distract from the few places where the transition is modified
         /// because of a protector, but not forbidden.
diff --git a/src/borrow_tracker/tree_borrows/tree.rs b/src/borrow_tracker/tree_borrows/tree.rs
index 86416a0eb1..4477ce0191 100644
--- a/src/borrow_tracker/tree_borrows/tree.rs
+++ b/src/borrow_tracker/tree_borrows/tree.rs
@@ -34,7 +34,7 @@ pub(super) struct LocationState {
     /// Before initialization we still apply some preemptive transitions on
     /// `permission` to know what to do in case it ever gets initialized,
     /// but these can never cause any immediate UB. There can however be UB
-    /// the moment we attempt to initalize (i.e. child-access) because some
+    /// the moment we attempt to initialize (i.e. child-access) because some
     /// foreign access done between the creation and the initialization is
     /// incompatible with child accesses.
     initialized: bool,
diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index 9646327966..0080d0a9f6 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -1199,7 +1199,7 @@ pub struct GlobalState {
 
     /// A flag to mark we are currently performing
     /// a data race free action (such as atomic access)
-    /// to supress the race detector
+    /// to suppress the race detector
     ongoing_action_data_race_free: Cell,
 
     /// Mapping of a vector index to a known set of thread
diff --git a/src/concurrency/init_once.rs b/src/concurrency/init_once.rs
index 867683d355..47ebf1b38e 100644
--- a/src/concurrency/init_once.rs
+++ b/src/concurrency/init_once.rs
@@ -151,7 +151,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         assert_eq!(
             init_once.status,
             InitOnceStatus::Uninitialized,
-            "begining already begun or complete init once"
+            "beginning already begun or complete init once"
         );
         init_once.status = InitOnceStatus::Begun;
     }
diff --git a/src/concurrency/range_object_map.rs b/src/concurrency/range_object_map.rs
index dfe2e9f05d..89c009933b 100644
--- a/src/concurrency/range_object_map.rs
+++ b/src/concurrency/range_object_map.rs
@@ -25,9 +25,9 @@ pub struct RangeObjectMap {
 
 #[derive(Clone, Debug, PartialEq)]
 pub enum AccessType {
-    /// The access perfectly overlaps (same offset and range) with the exsiting allocation
+    /// The access perfectly overlaps (same offset and range) with the existing allocation
     PerfectlyOverlapping(Position),
-    /// The access does not touch any exising allocation
+    /// The access does not touch any existing allocation
     Empty(Position),
     /// The access overlaps with one or more existing allocations
     ImperfectlyOverlapping(Range),
@@ -115,7 +115,7 @@ impl RangeObjectMap {
     // want to repeat the binary search on each time, so we ask the caller to supply Position
     pub fn insert_at_pos(&mut self, pos: Position, range: AllocRange, data: T) {
         self.v.insert(pos, Elem { range, data });
-        // If we aren't the first element, then our start must be greater than the preivous element's end
+        // If we aren't the first element, then our start must be greater than the previous element's end
         if pos > 0 {
             assert!(self.v[pos - 1].range.end() <= range.start);
         }
diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index b962052397..08b13b956e 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -143,7 +143,7 @@ struct Condvar {
     waiters: VecDeque,
     /// Tracks the happens-before relationship
     /// between a cond-var signal and a cond-var
-    /// wait during a non-suprious signal event.
+    /// wait during a non-spurious signal event.
     /// Contains the clock of the last thread to
     /// perform a futex-signal.
     data_race: VClock,
@@ -373,7 +373,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             .expect("invariant violation: lock_count == 0 iff the thread is unlocked");
         if mutex.lock_count == 0 {
             mutex.owner = None;
-            // The mutex is completely unlocked. Try transfering ownership
+            // The mutex is completely unlocked. Try transferring ownership
             // to another thread.
             if let Some(data_race) = &this.machine.data_race {
                 data_race.validate_lock_release(
diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs
index 9173eb3c4e..0d8d941c19 100644
--- a/src/concurrency/thread.rs
+++ b/src/concurrency/thread.rs
@@ -821,7 +821,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
 
         // Write the current thread-id, switch to the next thread later
-        // to treat this write operation as occuring on the current thread.
+        // to treat this write operation as occurring on the current thread.
         if let Some(thread_info_place) = thread {
             this.write_scalar(
                 Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
@@ -830,7 +830,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
 
         // Finally switch to new thread so that we can push the first stackframe.
-        // After this all accesses will be treated as occuring in the new thread.
+        // After this all accesses will be treated as occurring in the new thread.
         let old_thread_id = this.set_active_thread(new_thread_id);
 
         // Perform the function pointer load in the new thread frame.
diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs
index 2a48c9e6cd..c1395468fe 100644
--- a/src/concurrency/weak_memory.rs
+++ b/src/concurrency/weak_memory.rs
@@ -24,7 +24,7 @@
 //! However, this model lacks SC accesses and is therefore unusable by Miri (SC accesses are everywhere in library code).
 //!
 //! If you find anything that proposes a relaxed memory model that is C++20-consistent, supports all orderings Rust's atomic accesses
-//! and fences accept, and is implementable (with operational semanitcs), please open a GitHub issue!
+//! and fences accept, and is implementable (with operational semantics), please open a GitHub issue!
 //!
 //! One characteristic of this implementation, in contrast to some other notable operational models such as ones proposed in
 //! Taming Release-Acquire Consistency by Ori Lahav et al. () or Promising Semantics noted above,
@@ -32,8 +32,8 @@
 //! and shared across all threads. This is more memory efficient but does require store elements (representing writes to a location) to record
 //! information about reads, whereas in the other two models it is the other way round: reads points to the write it got its value from.
 //! Additionally, writes in our implementation do not have globally unique timestamps attached. In the other two models this timestamp is
-//! used to make sure a value in a thread's view is not overwritten by a write that occured earlier than the one in the existing view.
-//! In our implementation, this is detected using read information attached to store elements, as there is no data strucutre representing reads.
+//! used to make sure a value in a thread's view is not overwritten by a write that occurred earlier than the one in the existing view.
+//! In our implementation, this is detected using read information attached to store elements, as there is no data structure representing reads.
 //!
 //! The C++ memory model is built around the notion of an 'atomic object', so it would be natural
 //! to attach store buffers to atomic objects. However, Rust follows LLVM in that it only has
 //!
@@ -48,7 +48,7 @@
 //! One consequence of this difference is that safe/sound Rust allows for more operations on atomic locations
 //! than the C++20 atomic API was intended to allow, such as non-atomically accessing
 //! a previously atomically accessed location, or accessing previously atomically accessed locations with a differently sized operation
-//! (such as accessing the top 16 bits of an AtomicU32). These senarios are generally undiscussed in formalisations of C++ memory model.
+//! (such as accessing the top 16 bits of an AtomicU32). These scenarios are generally undiscussed in formalisations of C++ memory model.
 //! In Rust, these operations can only be done through a `&mut AtomicFoo` reference or one derived from it, therefore these operations
 //! can only happen after all previous accesses on the same locations. This implementation is adapted to allow these operations.
 //! A mixed atomicity read that races with writes, or a write that races with reads or writes will still cause UBs to be thrown.
@@ -61,7 +61,7 @@
 //
 // 2. In the operational semantics, each store element keeps the timestamp of a thread when it loads from the store.
 // If the same thread loads from the same store element multiple times, then the timestamps at all loads are saved in a list of load elements.
-// This is not necessary as later loads by the same thread will always have greater timetstamp values, so we only need to record the timestamp of the first
+// This is not necessary as later loads by the same thread will always have greater timestamp values, so we only need to record the timestamp of the first
 // load by each thread. This optimisation is done in tsan11
 // (https://github.com/ChrisLidbury/tsan11/blob/ecbd6b81e9b9454e01cba78eb9d88684168132c7/lib/tsan/rtl/tsan_relaxed.h#L35-L37)
 // and here.
@@ -193,7 +193,7 @@ impl StoreBufferAlloc {
                 buffers.remove_pos_range(pos_range);
             }
             AccessType::Empty(_) => {
-                // The range had no weak behaivours attached, do nothing
+                // The range had no weak behaviours attached, do nothing
             }
         }
     }
@@ -336,7 +336,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         let mut found_sc = false;
         // FIXME: we want an inclusive take_while (stops after a false predicate, but
        // includes the element that gave the false), but such function doesn't yet
-        // exist in the standard libary https://github.com/rust-lang/rust/issues/62208
+        // exist in the standard library https://github.com/rust-lang/rust/issues/62208
         // so we have to hack around it with keep_searching
         let mut keep_searching = true;
         let candidates = self
diff --git a/src/eval.rs b/src/eval.rs
index a32b18595b..430229c132 100644
--- a/src/eval.rs
+++ b/src/eval.rs
@@ -372,7 +372,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
 
     // Inlining of `DEFAULT` from
     // https://github.com/rust-lang/rust/blob/master/compiler/rustc_session/src/config/sigpipe.rs.
-    // Alaways using DEFAULT is okay since we don't support signals in Miri anyway.
+    // Always using DEFAULT is okay since we don't support signals in Miri anyway.
     let sigpipe = 2;
 
     ecx.call_function(
@@ -456,7 +456,7 @@ pub fn eval_entry<'tcx>(
         return None;
     }
     // Check for memory leaks.
-    info!("Additonal static roots: {:?}", ecx.machine.static_roots);
+    info!("Additional static roots: {:?}", ecx.machine.static_roots);
     let leaks = ecx.leak_report(&ecx.machine.static_roots);
     if leaks != 0 {
         tcx.sess.err("the evaluated program leaked memory");
diff --git a/src/helpers.rs b/src/helpers.rs
index 8f6ae72949..a2b49e6f21 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -524,7 +524,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             }
         }
 
-        // Make sure we visit aggregrates in increasing offset order.
+        // Make sure we visit aggregates in increasing offset order.
         fn visit_aggregate(
             &mut self,
             place: &MPlaceTy<'tcx, Provenance>,
diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index 2ba1829312..4fd0af3530 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -77,7 +77,7 @@ impl<'mir, 'tcx> GlobalStateInner {
             Ok(pos) => Some(global_state.int_to_ptr_map[pos].1),
             Err(0) => None,
             Err(pos) => {
-                // This is the largest of the adresses smaller than `int`,
+                // This is the largest of the addresses smaller than `int`,
                 // i.e. the greatest lower bound (glb)
                 let (glb, alloc_id) = global_state.int_to_ptr_map[pos - 1];
                 // This never overflows because `addr >= glb`
diff --git a/src/machine.rs b/src/machine.rs
index 7d972dc514..176e3758a6 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -491,9 +491,9 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
             measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
         });
         let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
-        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instanciate_global_state(config));
+        let borrow_tracker = config.borrow_tracker.map(|bt| bt.instantiate_global_state(config));
         let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
-        // Determinine page size, stack address, and stack size.
+        // Determine page size, stack address, and stack size.
         // These values are mostly meaningless, but the stack address is also where we start
         // allocating physical integer addresses for all allocations.
         let page_size = if let Some(page_size) = config.page_size {
diff --git a/src/shims/intrinsics/simd.rs b/src/shims/intrinsics/simd.rs
index f2e1652129..d101f8d311 100644
--- a/src/shims/intrinsics/simd.rs
+++ b/src/shims/intrinsics/simd.rs
@@ -585,9 +585,9 @@ fn simd_element_to_bool(elem: ImmTy<'_, Provenance>) -> InterpResult<'_, bool> {
     })
 }
 
-fn simd_bitmask_index(idx: u32, vec_len: u32, endianess: Endian) -> u32 {
+fn simd_bitmask_index(idx: u32, vec_len: u32, endianness: Endian) -> u32 {
     assert!(idx < vec_len);
-    match endianess {
+    match endianness {
         Endian::Little => idx,
         #[allow(clippy::integer_arithmetic)] // idx < vec_len
         Endian::Big => vec_len - 1 - idx, // reverse order of bits
diff --git a/src/shims/os_str.rs b/src/shims/os_str.rs
index f010d4251f..6bc5b8f39d 100644
--- a/src/shims/os_str.rs
+++ b/src/shims/os_str.rs
@@ -329,7 +329,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         match direction {
             PathConversion::HostToTarget => {
                 // If this start withs a `\`, we add `\\?` so it starts with `\\?\` which is
-                // some magic path on Windos that *is* considered absolute.
+                // some magic path on Windows that *is* considered absolute.
                 if converted.get(0).copied() == Some(b'\\') {
                     converted.splice(0..0, b"\\\\?".iter().copied());
                 }
diff --git a/src/shims/time.rs b/src/shims/time.rs
index ef411eb8aa..2f24c00ce1 100644
--- a/src/shims/time.rs
+++ b/src/shims/time.rs
@@ -40,7 +40,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             this.eval_libc_i32("CLOCK_REALTIME_COARSE"),
         ];
         // The second kind is MONOTONIC clocks for which 0 is an arbitrary time point, but they are
-        // never allowed to go backwards. We don't need to do any additonal monotonicity
+        // never allowed to go backwards. We don't need to do any additional monotonicity
         // enforcement because std::time::Instant already guarantees that it is monotonic.
         relative_clocks = vec![
             this.eval_libc_i32("CLOCK_MONOTONIC"),
diff --git a/src/shims/tls.rs b/src/shims/tls.rs
index e9119f9e1e..685feeaf89 100644
--- a/src/shims/tls.rs
+++ b/src/shims/tls.rs
@@ -79,7 +79,7 @@ impl<'tcx> TlsData<'tcx> {
                 trace!("TLS key {} removed", key);
                 Ok(())
             }
-            None => throw_ub_format!("removing a non-existig TLS key: {}", key),
+            None => throw_ub_format!("removing a nonexistent TLS key: {}", key),
         }
     }
 
@@ -175,7 +175,7 @@ impl<'tcx> TlsData<'tcx> {
             Some(key) => Excluded(key),
             None => Unbounded,
         };
-        // We interpret the documentaion above (taken from POSIX) as saying that we need to iterate
+        // We interpret the documentation above (taken from POSIX) as saying that we need to iterate
         // over all keys and run each destructor at least once before running any destructor a 2nd
         // time. That's why we have `key` to indicate how far we got in the current iteration. If we
         // return `None`, `schedule_next_pthread_tls_dtor` will re-try with `ket` set to `None` to
diff --git a/src/shims/unix/fs.rs b/src/shims/unix/fs.rs
index de27154821..385fa366b2 100644
--- a/src/shims/unix/fs.rs
+++ b/src/shims/unix/fs.rs
@@ -1015,8 +1015,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let path = this.read_path_from_c_str(pathname_ptr)?.into_owned();
 
         // See for a discussion of argument sizes.
-        let at_ampty_path = this.eval_libc_i32("AT_EMPTY_PATH");
-        let empty_path_flag = flags & at_ampty_path == at_ampty_path;
+        let at_empty_path = this.eval_libc_i32("AT_EMPTY_PATH");
+        let empty_path_flag = flags & at_empty_path == at_empty_path;
         // We only support:
         // * interpreting `path` as an absolute directory,
         // * interpreting `path` as a path relative to `dirfd` when the latter is `AT_FDCWD`, or
@@ -1053,7 +1053,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             return Ok(-1);
         }
 
-        // the `_mask_op` paramter specifies the file information that the caller requested.
+        // the `_mask_op` parameter specifies the file information that the caller requested.
         // However `statx` is allowed to return information that was not requested or to not
         // return information that was requested. This `mask` represents the information we can
         // actually provide for any target.
diff --git a/src/shims/unix/linux/foreign_items.rs b/src/shims/unix/linux/foreign_items.rs
index c11e2220e6..4cb7ee8efc 100644
--- a/src/shims/unix/linux/foreign_items.rs
+++ b/src/shims/unix/linux/foreign_items.rs
@@ -169,7 +169,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                         this.linux_statx(&args[1], &args[2], &args[3], &args[4], &args[5])?;
                     this.write_scalar(Scalar::from_target_isize(result.into(), this), dest)?;
                 }
-                // `futex` is used by some synchonization primitives.
+                // `futex` is used by some synchronization primitives.
                 id if id == sys_futex => {
                     futex(this, &args[1..], dest)?;
                 }
@@ -180,7 +180,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 }
             }
 
-            // Miscelanneous
+            // Miscellaneous
             "getrandom" => {
                 let [ptr, len, flags] =
                     this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs
index b3c474dd3c..05feeac45b 100644
--- a/src/shims/unix/sync.rs
+++ b/src/shims/unix/sync.rs
@@ -242,7 +242,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         //
         // To distinguish these two cases in already constructed mutexes, we
         // use the same trick as glibc: for the case when
-        // `pthread_mutexattr_settype` is caled explicitly, we set the
+        // `pthread_mutexattr_settype` is called explicitly, we set the
         // `PTHREAD_MUTEX_NORMAL_FLAG` flag.
         let normal_kind = kind | PTHREAD_MUTEX_NORMAL_FLAG;
         // Check that after setting the flag, the kind is distinguishable
diff --git a/src/shims/windows/foreign_items.rs b/src/shims/windows/foreign_items.rs
index 665c7ed438..f72ba5cca7 100644
--- a/src/shims/windows/foreign_items.rs
+++ b/src/shims/windows/foreign_items.rs
@@ -96,7 +96,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
                 if byte_offset != 0 {
                     throw_unsup_format!(
-                        "`NtWriteFile` `ByteOffset` paremeter is non-null, which is unsupported"
+                        "`NtWriteFile` `ByteOffset` parameter is non-null, which is unsupported"
                     );
                 }
 
diff --git a/tests/fail/intrinsics/exact_div1.rs b/tests/fail/intrinsics/exact_div1.rs
index 3dda9d1090..e11d8937fe 100644
--- a/tests/fail/intrinsics/exact_div1.rs
+++ b/tests/fail/intrinsics/exact_div1.rs
@@ -1,5 +1,5 @@
 #![feature(core_intrinsics)]
 fn main() {
-    // divison by 0
+    // division by 0
     unsafe { std::intrinsics::exact_div(2, 0) }; //~ ERROR: divisor of zero
 }
diff --git a/tests/fail/intrinsics/exact_div2.rs b/tests/fail/intrinsics/exact_div2.rs
index 00064fa0b9..7914de403a 100644
--- a/tests/fail/intrinsics/exact_div2.rs
+++ b/tests/fail/intrinsics/exact_div2.rs
@@ -1,5 +1,5 @@
 #![feature(core_intrinsics)]
 fn main() {
-    // divison with a remainder
+    // division with a remainder
     unsafe { std::intrinsics::exact_div(2u16, 3) }; //~ ERROR: 2_u16 cannot be divided by 3_u16 without remainder
 }
diff --git a/tests/fail/intrinsics/exact_div3.rs b/tests/fail/intrinsics/exact_div3.rs
index a61abcd137..50ee538646 100644
--- a/tests/fail/intrinsics/exact_div3.rs
+++ b/tests/fail/intrinsics/exact_div3.rs
@@ -1,5 +1,5 @@
 #![feature(core_intrinsics)]
 fn main() {
-    // signed divison with a remainder
+    // signed division with a remainder
     unsafe { std::intrinsics::exact_div(-19i8, 2) }; //~ ERROR: -19_i8 cannot be divided by 2_i8 without remainder
 }
diff --git a/tests/fail/intrinsics/exact_div4.rs b/tests/fail/intrinsics/exact_div4.rs
index b5b60190b4..48c5520823 100644
--- a/tests/fail/intrinsics/exact_div4.rs
+++ b/tests/fail/intrinsics/exact_div4.rs
@@ -1,5 +1,5 @@
 #![feature(core_intrinsics)]
 fn main() {
-    // divison of MIN by -1
+    // division of MIN by -1
     unsafe { std::intrinsics::exact_div(i64::MIN, -1) }; //~ ERROR: overflow in signed remainder (dividing MIN by -1)
 }
diff --git a/tests/fail/stacked_borrows/illegal_read3.rs b/tests/fail/stacked_borrows/illegal_read3.rs
index 43ea0a0e84..9c0c974223 100644
--- a/tests/fail/stacked_borrows/illegal_read3.rs
+++ b/tests/fail/stacked_borrows/illegal_read3.rs
@@ -1,5 +1,5 @@
 // A callee may not read the destination of our `&mut` without us noticing.
-// Thise code got carefully checked to not introduce any reborrows
+// This code got carefully checked to not introduce any reborrows
 // that are not explicit in the source. Let's hope the compiler does not break this later!
 
 use std::mem;
diff --git a/tests/fail/unaligned_pointers/intptrcast_alignment_check.rs b/tests/fail/unaligned_pointers/intptrcast_alignment_check.rs
index c1041ee32a..ed43e55250 100644
--- a/tests/fail/unaligned_pointers/intptrcast_alignment_check.rs
+++ b/tests/fail/unaligned_pointers/intptrcast_alignment_check.rs
@@ -1,7 +1,7 @@
 //@compile-flags: -Zmiri-symbolic-alignment-check -Zmiri-permissive-provenance -Cdebug-assertions=no
 // With the symbolic alignment check, even with intptrcast and without
 // validation, we want to be *sure* to catch bugs that arise from pointers being
-// insufficiently aligned. The only way to achieve that is not not let programs
+// insufficiently aligned. The only way to achieve that is not to let programs
 // exploit integer information for alignment, so here we test that this is
 // indeed the case.
 //
diff --git a/tests/pass-dep/concurrency/tls_pthread_drop_order.rs b/tests/pass-dep/concurrency/tls_pthread_drop_order.rs
index 6516396ac5..ae874740f2 100644
--- a/tests/pass-dep/concurrency/tls_pthread_drop_order.rs
+++ b/tests/pass-dep/concurrency/tls_pthread_drop_order.rs
@@ -14,7 +14,7 @@
 static mut RECORD: usize = 0;
 static mut KEYS: [Key; 2] = [0; 2];
 static mut GLOBALS: [u64; 2] = [1, 0];
-static mut CANNARY: *mut u64 = ptr::null_mut(); // this serves as a cannary: if TLS dtors are not run properly, this will not get deallocated, making the test fail.
+static mut CANARY: *mut u64 = ptr::null_mut(); // this serves as a canary: if TLS dtors are not run properly, this will not get deallocated, making the test fail.
 
 pub unsafe fn create(dtor: Option) -> Key {
     let mut key = 0;
@@ -33,7 +33,7 @@ pub fn record(r: usize) {
 }
 
 unsafe extern "C" fn dtor(ptr: *mut u64) {
-    assert!(CANNARY != ptr::null_mut()); // make sure we do not get run too often
+    assert!(CANARY != ptr::null_mut()); // make sure we do not get run too often
     let val = *ptr;
 
     let which_key =
@@ -45,15 +45,15 @@ unsafe extern "C" fn dtor(ptr: *mut u64) {
         set(KEYS[which_key], ptr as *mut _);
     }
 
-    // Check if the records matches what we expect. If yes, clear the cannary.
-    // If the record is wrong, the cannary will never get cleared, leading to a leak -> test fails.
+    // Check if the records matches what we expect. If yes, clear the canary.
+    // If the record is wrong, the canary will never get cleared, leading to a leak -> test fails.
     // If the record is incomplete (i.e., more dtor calls happen), the check at the beginning of this function will fail -> test fails.
     // The correct sequence is: First key 0, then key 1, then key 0.
     // Note that this relies on dtor order, which is not specified by POSIX, but seems to be
     // consistent between Miri and Linux currently (as of Aug 2022).
     if RECORD == 0_1_0 {
-        drop(Box::from_raw(CANNARY));
-        CANNARY = ptr::null_mut();
+        drop(Box::from_raw(CANARY));
+        CANARY = ptr::null_mut();
     }
 }
@@ -67,7 +67,7 @@ fn main() {
             set(*key, global as *mut _ as *mut u8);
         }
 
-        // Initialize cannary
-        CANNARY = Box::into_raw(Box::new(0u64));
+        // Initialize canary
+        CANARY = Box::into_raw(Box::new(0u64));
     }
 }
diff --git a/tests/pass-dep/shims/libc-fs.rs b/tests/pass-dep/shims/libc-fs.rs
index cd071a7f32..fbdf27688a 100644
--- a/tests/pass-dep/shims/libc-fs.rs
+++ b/tests/pass-dep/shims/libc-fs.rs
@@ -130,7 +130,7 @@ fn test_readlink() {
     let mut large_buf = vec![0xFF; expected_path.len() + 1];
     let res =
         unsafe { libc::readlink(symlink_c_ptr, large_buf.as_mut_ptr().cast(), large_buf.len()) };
-    // Check that the resovled path was properly written into the buf.
+    // Check that the resolved path was properly written into the buf.
     assert_eq!(&large_buf[..(large_buf.len() - 1)], expected_path);
     assert_eq!(large_buf.last(), Some(&0xFF));
     assert_eq!(res, large_buf.len() as isize - 1);
diff --git a/tests/pass-dep/shims/libc-misc.rs b/tests/pass-dep/shims/libc-misc.rs
index 8be9cd983a..82ef59427a 100644
--- a/tests/pass-dep/shims/libc-misc.rs
+++ b/tests/pass-dep/shims/libc-misc.rs
@@ -90,7 +90,7 @@ fn test_posix_realpath_errors() {
     use std::ffi::CString;
     use std::io::ErrorKind;
 
-    // Test non-existent path returns an error.
+    // Test nonexistent path returns an error.
     let c_path = CString::new("./nothing_to_see_here").expect("CString::new failed");
     let r = unsafe { libc::realpath(c_path.as_ptr(), std::ptr::null_mut()) };
     assert!(r.is_null());
diff --git a/tests/pass/0weak_memory_consistency.rs b/tests/pass/0weak_memory_consistency.rs
index f3820bd660..3a531eede6 100644
--- a/tests/pass/0weak_memory_consistency.rs
+++ b/tests/pass/0weak_memory_consistency.rs
@@ -10,7 +10,7 @@
 // the RNG and never observed in our tests.
 //
 // To mitigate this, each test is ran enough times such that the chance
-// of spurious success is very low. These tests never supriously fail.
+// of spurious success is very low. These tests never spuriously fail.
 
 // Test cases and their consistent outcomes are from
 // http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/
diff --git a/tests/pass/dyn-arbitrary-self.rs b/tests/pass/dyn-arbitrary-self.rs
index fc58775a19..6be13b155f 100644
--- a/tests/pass/dyn-arbitrary-self.rs
+++ b/tests/pass/dyn-arbitrary-self.rs
@@ -93,7 +93,7 @@ fn pointers_and_wrappers() {
 
     trait Trait {
         // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
-        // without unsized_locals), but wrappers arond `Self` currently are not.
+        // without unsized_locals), but wrappers around `Self` currently are not.
         // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
         // fn wrapper(self: Wrapper) -> i32;
         fn ptr_wrapper(self: Ptr>) -> i32;
diff --git a/tests/pass/global_allocator.rs b/tests/pass/global_allocator.rs
index 24a56c663f..9a40c322b3 100644
--- a/tests/pass/global_allocator.rs
+++ b/tests/pass/global_allocator.rs
@@ -19,7 +19,7 @@ unsafe impl GlobalAlloc for Allocator {
 
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         if layout.size() == 123 {
-            println!("Dellocated!")
+            println!("Deallocated!")
         }
 
         System.dealloc(ptr, layout)
diff --git a/tests/pass/global_allocator.stdout b/tests/pass/global_allocator.stdout
index 411a4cdd14..30c6194640 100644
--- a/tests/pass/global_allocator.stdout
+++ b/tests/pass/global_allocator.stdout
@@ -1,2 +1,2 @@
 Allocated!
-Dellocated!
+Deallocated!
diff --git a/tests/pass/issues/issue-29746.rs b/tests/pass/issues/issue-29746.rs
index 43bed4464b..6b971c5217 100644
--- a/tests/pass/issues/issue-29746.rs
+++ b/tests/pass/issues/issue-29746.rs
@@ -7,7 +7,7 @@ macro_rules! zip {
     };
 
     // Intermediate steps to build the zipped expression, the match pattern, and
-    // and the output tuple of the closure, using macro hygene to repeatedly
+    // and the output tuple of the closure, using macro hygiene to repeatedly
     // introduce new variables named 'x'.
     ([$a:expr, $($rest:expr),*], $zip:expr, $pat:pat, [$($flat:expr),*]) => {
         zip!([$($rest),*], $zip.zip($a), ($pat,x), [$($flat),*, x])
diff --git a/tests/pass/packed_struct.rs b/tests/pass/packed_struct.rs
index 85acab858a..0b06167aec 100644
--- a/tests/pass/packed_struct.rs
+++ b/tests/pass/packed_struct.rs
@@ -36,7 +36,7 @@ fn test_basic() {
     let b = x.b;
     assert_eq!(a, 42);
     assert_eq!(b, 99);
-    assert_eq!(&x.fill, &0); // `fill` just requirs 1-byte-align, so this is fine
+    assert_eq!(&x.fill, &0); // `fill` just requires 1-byte-align, so this is fine
     // can't do `assert_eq!(x.a, 42)`, because `assert_eq!` takes a reference
     assert_eq!({ x.a }, 42);
     assert_eq!({ x.b }, 99);
diff --git a/tests/pass/ptr_offset.rs b/tests/pass/ptr_offset.rs
index 95eac8522f..92b275b003 100644
--- a/tests/pass/ptr_offset.rs
+++ b/tests/pass/ptr_offset.rs
@@ -63,7 +63,7 @@ fn ptr_arith_offset_overflow() {
     let v = [1i16, 2];
     let x = &mut ptr::null(); // going through memory as there are more sanity checks along that path
     *x = v.as_ptr().wrapping_offset(1); // ptr to the 2nd element
-    // Adding 2*isize::max and then 1 is like substracting 1
+    // Adding 2*isize::max and then 1 is like subtracting 1
     *x = x.wrapping_offset(isize::MAX);
     *x = x.wrapping_offset(isize::MAX);
     *x = x.wrapping_offset(1);
diff --git a/tests/pass/rfc1623.rs b/tests/pass/rfc1623.rs
index 76e2c01e74..8f1ef1b75c 100644
--- a/tests/pass/rfc1623.rs
+++ b/tests/pass/rfc1623.rs
@@ -58,7 +58,7 @@ fn main() {
 
     STATIC_SIMPLE_FN(x);
     CONST_SIMPLE_FN(x);
 
-    STATIC_BAZ(BYTES); // neees static lifetime
+    STATIC_BAZ(BYTES); // needs static lifetime
     CONST_BAZ(BYTES);
 
     // make sure this works with different lifetimes
diff --git a/tests/pass/shims/fs.rs b/tests/pass/shims/fs.rs
index 7a9974f393..0b90a15adb 100644
--- a/tests/pass/shims/fs.rs
+++ b/tests/pass/shims/fs.rs
@@ -366,7 +366,7 @@ fn test_directory() {
 
     // Deleting the directory should succeed.
     remove_dir(&dir_path).unwrap();
-    // Reading the metadata of a non-existent directory should fail with a "not found" error.
+    // Reading the metadata of a nonexistent directory should fail with a "not found" error.
     assert_eq!(ErrorKind::NotFound, check_metadata(&[], &dir_path).unwrap_err().kind());
 
     // To test remove_dir_all, re-create the directory with a file and a directory in it.
diff --git a/tests/pass/stacked-borrows/stacked-borrows.rs b/tests/pass/stacked-borrows/stacked-borrows.rs
index 8e78efa73c..d7d7d1f97d 100644
--- a/tests/pass/stacked-borrows/stacked-borrows.rs
+++ b/tests/pass/stacked-borrows/stacked-borrows.rs
@@ -90,7 +90,7 @@ fn mut_raw_mut() {
         assert_eq!(unsafe { *xraw }, 4);
         assert_eq!(*xref1, 4);
         assert_eq!(unsafe { *xraw }, 4);
-        // we cannot use xref2; see `compile-fail/stacked-borows/illegal_read4.rs`
+        // we cannot use xref2; see `compile-fail/stacked-borrows/illegal_read4.rs`
     }
     assert_eq!(x, 4);
 }
@@ -104,7 +104,7 @@ fn partially_invalidate_mut() {
     assert_eq!(*data, (1, 1));
 }
 
-// Make sure that we can handle the situation where a loaction is frozen when being dropped.
+// Make sure that we can handle the situation where a location is frozen when being dropped.
 fn drop_after_sharing() {
     let x = String::from("hello!");
     let _len = x.len();
@@ -224,7 +224,7 @@ fn wide_raw_ptr_in_tuple() {
 fn not_unpin_not_protected() {
     // `&mut !Unpin`, at least for now, does not get `noalias` nor `dereferenceable`, so we also
     // don't add protectors. (We could, but until we have a better idea for where we want to go with
-    // the self-referntial-generator situation, it does not seem worth the potential trouble.)
+    // the self-referential-generator situation, it does not seem worth the potential trouble.)
     use std::marker::PhantomPinned;
 
     pub struct NotUnpin(i32, PhantomPinned);
diff --git a/tests/pass/strings.rs b/tests/pass/strings.rs
index 5e2d2e9b5b..72cdbe7ed5 100644
--- a/tests/pass/strings.rs
+++ b/tests/pass/strings.rs
@@ -29,7 +29,7 @@ fn unique_aliasing() {
     // This is a regression test for the aliasing rules of a `Unique` pointer.
     // At the time of writing this test case, Miri does not treat `Unique`
     // pointers as a special case, these are treated like any other raw pointer.
-    // However, there are existing Github issues which may lead to `Unique`
+    // However, there are existing GitHub issues which may lead to `Unique`
    // becoming a special case through asserting unique ownership over the pointee:
     // - https://github.com/rust-lang/unsafe-code-guidelines/issues/258
     // - https://github.com/rust-lang/unsafe-code-guidelines/issues/262
diff --git a/tests/pass/weak_memory/extra_cpp.rs b/tests/pass/weak_memory/extra_cpp.rs
index 07cbb4a803..94df730808 100644
--- a/tests/pass/weak_memory/extra_cpp.rs
+++ b/tests/pass/weak_memory/extra_cpp.rs
@@ -1,6 +1,6 @@
 //@compile-flags: -Zmiri-ignore-leaks
 
-// Tests operations not perfomable through C++'s atomic API
+// Tests operations not performable through C++'s atomic API
 // but doable in safe (at least sound) Rust.
 
 #![feature(atomic_from_mut)]
diff --git a/tests/pass/weak_memory/extra_cpp_unsafe.rs b/tests/pass/weak_memory/extra_cpp_unsafe.rs
index f7e2748408..48b15191b3 100644
--- a/tests/pass/weak_memory/extra_cpp_unsafe.rs
+++ b/tests/pass/weak_memory/extra_cpp_unsafe.rs
@@ -1,6 +1,6 @@
 //@compile-flags: -Zmiri-ignore-leaks
 
-// Tests operations not perfomable through C++'s atomic API
+// Tests operations not performable through C++'s atomic API
 // but doable in unsafe Rust which we think *should* be fine.
 // Nonetheless they may be determined as inconsistent with the
 // memory model in the future.
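
The `extra_cpp` tests touched above revolve around the point made in the `weak_memory.rs`
comments earlier in this patch: Rust can legally perform non-atomic (or differently sized)
accesses to a location that was previously accessed atomically, as long as they go through a
`&mut` reference. A minimal sketch of one such operation, not taken from the test suite and
using only stable `std::sync::atomic` APIs (`AtomicU32::get_mut`); the constant values are
illustrative only:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let mut a = AtomicU32::new(0);
        a.store(0xdead_beef, Ordering::SeqCst);
        // Holding `&mut AtomicU32` proves all earlier accesses happened-before,
        // so a plain, non-atomic access to (part of) the value is allowed.
        let plain: &mut u32 = a.get_mut();
        *plain &= 0xffff; // keep only the low 16 bits, non-atomically
        assert_eq!(a.load(Ordering::Relaxed), 0xbeef);
    }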