diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index 35d58d2f638bf..04e68b9645525 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -1,8 +1,12 @@ use rustc_hir::def::DefKind; +use rustc_hir::LangItem; use rustc_middle::mir; +use rustc_middle::mir::interpret::PointerArithmetic; +use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::{self, Ty, TyCtxt}; use std::borrow::Borrow; use std::hash::Hash; +use std::ops::ControlFlow; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::fx::IndexEntry; @@ -17,58 +21,12 @@ use rustc_target::abi::{Align, Size}; use rustc_target::spec::abi::Abi as CallAbi; use crate::interpret::{ - self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult, - OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind, + self, compile_time_machine, AllocId, ConstAllocation, FnVal, Frame, ImmTy, InterpCx, + InterpResult, OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind, }; use super::error::*; -impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> { - /// "Intercept" a function call to a panic-related function - /// because we have something special to do for it. - /// If this returns successfully (`Ok`), the function should just be evaluated normally. - fn hook_special_const_fn( - &mut self, - instance: ty::Instance<'tcx>, - args: &[OpTy<'tcx>], - ) -> InterpResult<'tcx, Option>> { - // All `#[rustc_do_not_const_check]` functions should be hooked here. - let def_id = instance.def_id(); - - if Some(def_id) == self.tcx.lang_items().panic_display() - || Some(def_id) == self.tcx.lang_items().begin_panic_fn() - { - // &str or &&str - assert!(args.len() == 1); - - let mut msg_place = self.deref_operand(&args[0])?; - while msg_place.layout.ty.is_ref() { - msg_place = self.deref_operand(&msg_place.into())?; - } - - let msg = Symbol::intern(self.read_str(&msg_place)?); - let span = self.find_closest_untracked_caller_location(); - let (file, line, col) = self.location_triple_for_span(span); - return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into()); - } else if Some(def_id) == self.tcx.lang_items().panic_fmt() { - // For panic_fmt, call const_panic_fmt instead. - if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() { - return Ok(Some( - ty::Instance::resolve( - *self.tcx, - ty::ParamEnv::reveal_all(), - const_panic_fmt, - self.tcx.intern_substs(&[]), - ) - .unwrap() - .unwrap(), - )); - } - } - Ok(None) - } -} - /// Extra machine state for CTFE, and the Machine instance pub struct CompileTimeInterpreter<'mir, 'tcx> { /// For now, the number of terminators that can be evaluated before we throw a resource @@ -191,6 +149,125 @@ impl interpret::MayLeak for ! { } impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> { + /// "Intercept" a function call, because we have something special to do for it. + /// All `#[rustc_do_not_const_check]` functions should be hooked here. + /// If this returns `Some` function, which may be `instance` or a different function with + /// compatible arguments, then evaluation should continue with that function. + /// If this returns `None`, the function call has been handled and the function has returned. 
+    fn hook_special_const_fn(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &PlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+        let def_id = instance.def_id();
+
+        if Some(def_id) == self.tcx.lang_items().panic_display()
+            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+        {
+            // &str or &&str
+            assert!(args.len() == 1);
+
+            let mut msg_place = self.deref_operand(&args[0])?;
+            while msg_place.layout.ty.is_ref() {
+                msg_place = self.deref_operand(&msg_place.into())?;
+            }
+
+            let msg = Symbol::intern(self.read_str(&msg_place)?);
+            let span = self.find_closest_untracked_caller_location();
+            let (file, line, col) = self.location_triple_for_span(span);
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+            // For panic_fmt, call const_panic_fmt instead.
+            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
+            let new_instance = ty::Instance::resolve(
+                *self.tcx,
+                ty::ParamEnv::reveal_all(),
+                const_def_id,
+                instance.substs,
+            )
+            .unwrap()
+            .unwrap();
+
+            return Ok(Some(new_instance));
+        } else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
+            // For align_offset, we replace the function call if the pointer has no address.
+            match self.align_offset(instance, args, dest, ret)? {
+                ControlFlow::Continue(()) => return Ok(Some(instance)),
+                ControlFlow::Break(()) => return Ok(None),
+            }
+        }
+        Ok(Some(instance))
+    }
+
+    /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
+    /// may not have an address.
+    ///
+    /// If `ptr` does have a known address, then we return `CONTINUE` and the function call should
+    /// proceed as normal.
+    ///
+    /// If `ptr` doesn't have an address, but its underlying allocation's alignment is at most
+    /// `target_align`, then we call the function again with a dummy address relative to the
+    /// allocation.
+    ///
+    /// If `ptr` doesn't have an address and `target_align` is stricter than the underlying
+    /// allocation's alignment, then we return `usize::MAX` immediately.
+    fn align_offset(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        args: &[OpTy<'tcx>],
+        dest: &PlaceTy<'tcx>,
+        ret: Option<mir::BasicBlock>,
+    ) -> InterpResult<'tcx, ControlFlow<()>> {
+        assert_eq!(args.len(), 2);
+
+        let ptr = self.read_pointer(&args[0])?;
+        let target_align = self.read_scalar(&args[1])?.to_machine_usize(self)?;
+
+        if !target_align.is_power_of_two() {
+            throw_ub_format!("`align_offset` called with non-power-of-two align: {}", target_align);
+        }
+
+        match self.ptr_try_get_alloc_id(ptr) {
+            Ok((alloc_id, offset, _extra)) => {
+                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);
+
+                if target_align <= alloc_align.bytes() {
+                    // Extract the address relative to the allocation base that is definitely
+                    // sufficiently aligned and call `align_offset` again.
+                    let addr = ImmTy::from_uint(offset.bytes(), args[0].layout).into();
+                    let align = ImmTy::from_uint(target_align, args[1].layout).into();
+                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+                    // We replace the entire function call with a "tail call".
+                    // Note that this happens before the frame of the original function
+                    // is pushed on the stack.
+ self.eval_fn_call( + FnVal::Instance(instance), + (CallAbi::Rust, fn_abi), + &[addr, align], + /* with_caller_location = */ false, + dest, + ret, + StackPopUnwind::NotAllowed, + )?; + Ok(ControlFlow::BREAK) + } else { + // Not alignable in const, return `usize::MAX`. + let usize_max = Scalar::from_machine_usize(self.machine_usize_max(), self); + self.write_scalar(usize_max, dest)?; + self.return_to_block(ret)?; + Ok(ControlFlow::BREAK) + } + } + Err(_addr) => { + // The pointer has an address, continue with function call. + Ok(ControlFlow::CONTINUE) + } + } + } + /// See documentation on the `ptr_guaranteed_cmp` intrinsic. fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> { Ok(match (a, b) { @@ -271,8 +348,8 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, instance: ty::Instance<'tcx>, _abi: CallAbi, args: &[OpTy<'tcx>], - _dest: &PlaceTy<'tcx>, - _ret: Option, + dest: &PlaceTy<'tcx>, + ret: Option, _unwind: StackPopUnwind, // unwinding is not supported in consts ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> { debug!("find_mir_or_eval_fn: {:?}", instance); @@ -291,7 +368,11 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } } - if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? { + let Some(new_instance) = ecx.hook_special_const_fn(instance, args, dest, ret)? else { + return Ok(None); + }; + + if new_instance != instance { // We call another const fn instead. // However, we return the *original* instance to make backtraces work out // (and we hope this does not confuse the FnAbi checks too much). @@ -300,13 +381,14 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, new_instance, _abi, args, - _dest, - _ret, + dest, + ret, _unwind, )? .map(|(body, _instance)| (body, instance))); } } + // This is a const fn. Call it. Ok(Some((ecx.load_mir(instance.def, None)?, instance))) } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 6fc2407b77803..7940efcd2b11f 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -243,6 +243,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let discr_val = self.read_discriminant(&place.into())?.0; self.write_scalar(discr_val, dest)?; } + sym::exact_div => { + let l = self.read_immediate(&args[0])?; + let r = self.read_immediate(&args[1])?; + self.exact_div(&l, &r, dest)?; + } sym::unchecked_shl | sym::unchecked_shr | sym::unchecked_add diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index 819ccf5a3e9e7..7ed7d767f2fb5 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -1851,6 +1851,7 @@ extern "rust-intrinsic" { /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1` /// /// This intrinsic does not have a stable counterpart. 
+ #[rustc_const_unstable(feature = "const_exact_div", issue = "none")] pub fn exact_div(x: T, y: T) -> T; /// Performs an unchecked division, resulting in undefined behavior diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 7191bbb93447c..848eccd7f2908 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -109,6 +109,7 @@ #![feature(const_cmp)] #![feature(const_discriminant)] #![feature(const_eval_select)] +#![feature(const_exact_div)] #![feature(const_float_bits_conv)] #![feature(const_float_classify)] #![feature(const_fmt_arguments_new)] @@ -129,6 +130,7 @@ #![feature(const_option)] #![feature(const_option_ext)] #![feature(const_pin)] +#![feature(const_pointer_is_aligned)] #![feature(const_ptr_sub_ptr)] #![feature(const_replace)] #![feature(const_result_drop)] diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index bff270b787ece..8a3eee0dc529f 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -1320,6 +1320,8 @@ impl *const T { /// } /// # } /// ``` + #[must_use] + #[inline] #[stable(feature = "align_offset", since = "1.36.0")] #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")] pub const fn align_offset(self, align: usize) -> usize @@ -1330,32 +1332,149 @@ impl *const T { panic!("align_offset: align is not a power-of-two"); } - fn rt_impl(p: *const T, align: usize) -> usize { - // SAFETY: `align` has been checked to be a power of 2 above - unsafe { align_offset(p, align) } - } + #[cfg(bootstrap)] + { + fn rt_impl(p: *const T, align: usize) -> usize { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(p, align) } + } + + const fn ctfe_impl(_: *const T, _: usize) -> usize { + usize::MAX + } - const fn ctfe_impl(_: *const T, _: usize) -> usize { - usize::MAX + // SAFETY: + // It is permissible for `align_offset` to always return `usize::MAX`, + // algorithm correctness can not depend on `align_offset` returning non-max values. + // + // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. + unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } } - // SAFETY: - // It is permissible for `align_offset` to always return `usize::MAX`, - // algorithm correctness can not depend on `align_offset` returning non-max values. - // - // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. - unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } + #[cfg(not(bootstrap))] + { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(self, align) } + } } /// Returns whether the pointer is properly aligned for `T`. + /// + /// # Examples + /// + /// Basic usage: + /// ``` + /// #![feature(pointer_is_aligned)] + /// #![feature(pointer_byte_offsets)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// let data = AlignedI32(42); + /// let ptr = &data as *const AlignedI32; + /// + /// assert!(ptr.is_aligned()); + /// assert!(!ptr.wrapping_byte_add(1).is_aligned()); + /// ``` + /// + /// # At compiletime + /// **Note: Alignment at compiletime is experimental and subject to change. See the + /// [tracking issue] for details.** + /// + /// At compiletime, the compiler may not know where a value will end up in memory. 
+ /// Calling this function on a pointer created from a reference at compiletime will only + /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer + /// is never aligned if cast to a type with a stricter alignment than the reference's + /// underlying allocation. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of primitives is less than their size. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// const _: () = { + /// let data = AlignedI32(42); + /// let ptr = &data as *const AlignedI32; + /// assert!(ptr.is_aligned()); + /// + /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned. + /// let ptr1 = ptr.cast::(); + /// let ptr2 = ptr.wrapping_add(1).cast::(); + /// assert!(!ptr1.is_aligned()); + /// assert!(!ptr2.is_aligned()); + /// }; + /// ``` + /// + /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime + /// pointer is aligned, even if the compiletime pointer wasn't aligned. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of primitives is less than their size. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned. + /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42); + /// const _: () = assert!(!COMPTIME_PTR.cast::().is_aligned()); + /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::().is_aligned()); + /// + /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned. + /// let runtime_ptr = COMPTIME_PTR; + /// assert_ne!( + /// runtime_ptr.cast::().is_aligned(), + /// runtime_ptr.wrapping_add(1).cast::().is_aligned(), + /// ); + /// ``` + /// + /// If a pointer is created from a fixed address, this function behaves the same during + /// runtime and compiletime. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of primitives is less than their size. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// const _: () = { + /// let ptr = 40 as *const AlignedI32; + /// assert!(ptr.is_aligned()); + /// + /// // For pointers with a known address, runtime and compiletime behavior are identical. + /// let ptr1 = ptr.cast::(); + /// let ptr2 = ptr.wrapping_add(1).cast::(); + /// assert!(ptr1.is_aligned()); + /// assert!(!ptr2.is_aligned()); + /// }; + /// ``` + /// + /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203 #[must_use] #[inline] #[unstable(feature = "pointer_is_aligned", issue = "96284")] - pub fn is_aligned(self) -> bool + #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")] + pub const fn is_aligned(self) -> bool where T: Sized, { - self.is_aligned_to(core::mem::align_of::()) + self.is_aligned_to(mem::align_of::()) } /// Returns whether the pointer is aligned to `align`. 
@@ -1366,16 +1485,121 @@ impl *const T { /// # Panics /// /// The function panics if `align` is not a power-of-two (this includes 0). + /// + /// # Examples + /// + /// Basic usage: + /// ``` + /// #![feature(pointer_is_aligned)] + /// #![feature(pointer_byte_offsets)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// let data = AlignedI32(42); + /// let ptr = &data as *const AlignedI32; + /// + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// + /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2)); + /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4)); + /// + /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8)); + /// ``` + /// + /// # At compiletime + /// **Note: Alignment at compiletime is experimental and subject to change. See the + /// [tracking issue] for details.** + /// + /// At compiletime, the compiler may not know where a value will end up in memory. + /// Calling this function on a pointer created from a reference at compiletime will only + /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer + /// cannot be stricter aligned than the reference's underlying allocation. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// const _: () = { + /// let data = AlignedI32(42); + /// let ptr = &data as *const AlignedI32; + /// + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// + /// // At compiletime, we know for sure that the pointer isn't aligned to 8. + /// assert!(!ptr.is_aligned_to(8)); + /// assert!(!ptr.wrapping_add(1).is_aligned_to(8)); + /// }; + /// ``` + /// + /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime + /// pointer is aligned, even if the compiletime pointer wasn't aligned. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned. + /// const COMPTIME_PTR: *const AlignedI32 = &AlignedI32(42); + /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8)); + /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8)); + /// + /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned. + /// let runtime_ptr = COMPTIME_PTR; + /// assert_ne!( + /// runtime_ptr.is_aligned_to(8), + /// runtime_ptr.wrapping_add(1).is_aligned_to(8), + /// ); + /// ``` + /// + /// If a pointer is created from a fixed address, this function behaves the same during + /// runtime and compiletime. 
+ /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// const _: () = { + /// let ptr = 40 as *const u8; + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// assert!(ptr.is_aligned_to(8)); + /// assert!(!ptr.is_aligned_to(16)); + /// }; + /// ``` + /// + /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203 #[must_use] #[inline] #[unstable(feature = "pointer_is_aligned", issue = "96284")] - pub fn is_aligned_to(self, align: usize) -> bool { + #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")] + pub const fn is_aligned_to(self, align: usize) -> bool { if !align.is_power_of_two() { panic!("is_aligned_to: align is not a power-of-two"); } - // Cast is needed for `T: !Sized` - self.cast::().addr() & align - 1 == 0 + // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead. + // The cast to `()` is used to + // 1. deal with fat pointers; and + // 2. ensure that `align_offset` doesn't actually try to compute an offset. + self.cast::<()>().align_offset(align) == 0 } } diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index e0ddb3154de12..73923753a3020 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -1574,10 +1574,14 @@ pub unsafe fn write_volatile(dst: *mut T, src: T) { /// Align pointer `p`. /// -/// Calculate offset (in terms of elements of `stride` stride) that has to be applied +/// Calculate offset (in terms of elements of `size_of::()` stride) that has to be applied /// to pointer `p` so that pointer `p` would get aligned to `a`. /// -/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. +/// # Safety +/// `a` must be a power of two. +/// +/// # Notes +/// This implementation has been carefully tailored to not panic. It is UB for this to panic. /// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated /// constants. /// @@ -1587,7 +1591,7 @@ pub unsafe fn write_volatile(dst: *mut T, src: T) { /// /// Any questions go to @nagisa. #[lang = "align_offset"] -pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { +pub(crate) const unsafe fn align_offset(p: *const T, a: usize) -> usize { // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <= // 1, where the method versions of these operations are not inlined. use intrinsics::{ @@ -1604,7 +1608,7 @@ pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { /// /// Implementation of this function shall not panic. Ever. #[inline] - unsafe fn mod_inv(x: usize, m: usize) -> usize { + const unsafe fn mod_inv(x: usize, m: usize) -> usize { /// Multiplicative modular inverse table modulo 2⁴ = 16. /// /// Note, that this table does not contain values where inverse does not exist (i.e., for @@ -1646,8 +1650,14 @@ pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { inverse & m_minus_one } - let addr = p.addr(); let stride = mem::size_of::(); + + // SAFETY: This is just an inlined `p.addr()` (which is not + // a `const fn` so we cannot call it). + // During const eval, we hook this function to ensure that the pointer never + // has provenance, making this sound. + let addr: usize = unsafe { mem::transmute(p) }; + // SAFETY: `a` is a power-of-two, therefore non-zero. 
let a_minus_one = unsafe { unchecked_sub(a, 1) }; diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index 8f4809ec4baa4..8472b05ddbd40 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -1588,6 +1588,8 @@ impl *mut T { /// } /// # } /// ``` + #[must_use] + #[inline] #[stable(feature = "align_offset", since = "1.36.0")] #[rustc_const_unstable(feature = "const_align_offset", issue = "90962")] pub const fn align_offset(self, align: usize) -> usize @@ -1598,32 +1600,151 @@ impl *mut T { panic!("align_offset: align is not a power-of-two"); } - fn rt_impl(p: *mut T, align: usize) -> usize { - // SAFETY: `align` has been checked to be a power of 2 above - unsafe { align_offset(p, align) } + #[cfg(bootstrap)] + { + fn rt_impl(p: *mut T, align: usize) -> usize { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(p, align) } + } + + const fn ctfe_impl(_: *mut T, _: usize) -> usize { + usize::MAX + } + + // SAFETY: + // It is permissible for `align_offset` to always return `usize::MAX`, + // algorithm correctness can not depend on `align_offset` returning non-max values. + // + // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. + unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } } - const fn ctfe_impl(_: *mut T, _: usize) -> usize { - usize::MAX + #[cfg(not(bootstrap))] + { + // SAFETY: `align` has been checked to be a power of 2 above + unsafe { align_offset(self, align) } } - - // SAFETY: - // It is permissible for `align_offset` to always return `usize::MAX`, - // algorithm correctness can not depend on `align_offset` returning non-max values. - // - // As such the behaviour can't change after replacing `align_offset` with `usize::MAX`, only performance can. - unsafe { intrinsics::const_eval_select((self, align), ctfe_impl, rt_impl) } } /// Returns whether the pointer is properly aligned for `T`. + /// + /// # Examples + /// + /// Basic usage: + /// ``` + /// #![feature(pointer_is_aligned)] + /// #![feature(pointer_byte_offsets)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// let mut data = AlignedI32(42); + /// let ptr = &mut data as *mut AlignedI32; + /// + /// assert!(ptr.is_aligned()); + /// assert!(!ptr.wrapping_byte_add(1).is_aligned()); + /// ``` + /// + /// # At compiletime + /// **Note: Alignment at compiletime is experimental and subject to change. See the + /// [tracking issue] for details.** + /// + /// At compiletime, the compiler may not know where a value will end up in memory. + /// Calling this function on a pointer created from a reference at compiletime will only + /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer + /// is never aligned if cast to a type with a stricter alignment than the reference's + /// underlying allocation. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// #![feature(const_mut_refs)] + /// + /// // On some platforms, the alignment of primitives is less than their size. 
+ /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// const _: () = { + /// let mut data = AlignedI32(42); + /// let ptr = &mut data as *mut AlignedI32; + /// assert!(ptr.is_aligned()); + /// + /// // At runtime either `ptr1` or `ptr2` would be aligned, but at compiletime neither is aligned. + /// let ptr1 = ptr.cast::(); + /// let ptr2 = ptr.wrapping_add(1).cast::(); + /// assert!(!ptr1.is_aligned()); + /// assert!(!ptr2.is_aligned()); + /// }; + /// ``` + /// + /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime + /// pointer is aligned, even if the compiletime pointer wasn't aligned. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of primitives is less than their size. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned. + /// // Also, note that mutable references are not allowed in the final value of constants. + /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut(); + /// const _: () = assert!(!COMPTIME_PTR.cast::().is_aligned()); + /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).cast::().is_aligned()); + /// + /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned. + /// let runtime_ptr = COMPTIME_PTR; + /// assert_ne!( + /// runtime_ptr.cast::().is_aligned(), + /// runtime_ptr.wrapping_add(1).cast::().is_aligned(), + /// ); + /// ``` + /// + /// If a pointer is created from a fixed address, this function behaves the same during + /// runtime and compiletime. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of primitives is less than their size. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// #[repr(align(8))] + /// struct AlignedI64(i64); + /// + /// const _: () = { + /// let ptr = 40 as *mut AlignedI32; + /// assert!(ptr.is_aligned()); + /// + /// // For pointers with a known address, runtime and compiletime behavior are identical. + /// let ptr1 = ptr.cast::(); + /// let ptr2 = ptr.wrapping_add(1).cast::(); + /// assert!(ptr1.is_aligned()); + /// assert!(!ptr2.is_aligned()); + /// }; + /// ``` + /// + /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203 #[must_use] #[inline] #[unstable(feature = "pointer_is_aligned", issue = "96284")] - pub fn is_aligned(self) -> bool + #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")] + pub const fn is_aligned(self) -> bool where T: Sized, { - self.is_aligned_to(core::mem::align_of::()) + self.is_aligned_to(mem::align_of::()) } /// Returns whether the pointer is aligned to `align`. @@ -1634,16 +1755,123 @@ impl *mut T { /// # Panics /// /// The function panics if `align` is not a power-of-two (this includes 0). + /// + /// # Examples + /// + /// Basic usage: + /// ``` + /// #![feature(pointer_is_aligned)] + /// #![feature(pointer_byte_offsets)] + /// + /// // On some platforms, the alignment of i32 is less than 4. 
+ /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// let mut data = AlignedI32(42); + /// let ptr = &mut data as *mut AlignedI32; + /// + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// + /// assert!(ptr.wrapping_byte_add(2).is_aligned_to(2)); + /// assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4)); + /// + /// assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8)); + /// ``` + /// + /// # At compiletime + /// **Note: Alignment at compiletime is experimental and subject to change. See the + /// [tracking issue] for details.** + /// + /// At compiletime, the compiler may not know where a value will end up in memory. + /// Calling this function on a pointer created from a reference at compiletime will only + /// return `true` if the pointer is guaranteed to be aligned. This means that the pointer + /// cannot be stricter aligned than the reference's underlying allocation. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// #![feature(const_mut_refs)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// const _: () = { + /// let mut data = AlignedI32(42); + /// let ptr = &mut data as *mut AlignedI32; + /// + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// + /// // At compiletime, we know for sure that the pointer isn't aligned to 8. + /// assert!(!ptr.is_aligned_to(8)); + /// assert!(!ptr.wrapping_add(1).is_aligned_to(8)); + /// }; + /// ``` + /// + /// Due to this behavior, it is possible that a runtime pointer derived from a compiletime + /// pointer is aligned, even if the compiletime pointer wasn't aligned. + /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// // On some platforms, the alignment of i32 is less than 4. + /// #[repr(align(4))] + /// struct AlignedI32(i32); + /// + /// // At compiletime, neither `COMPTIME_PTR` nor `COMPTIME_PTR + 1` is aligned. + /// // Also, note that mutable references are not allowed in the final value of constants. + /// const COMPTIME_PTR: *mut AlignedI32 = (&AlignedI32(42) as *const AlignedI32).cast_mut(); + /// const _: () = assert!(!COMPTIME_PTR.is_aligned_to(8)); + /// const _: () = assert!(!COMPTIME_PTR.wrapping_add(1).is_aligned_to(8)); + /// + /// // At runtime, either `runtime_ptr` or `runtime_ptr + 1` is aligned. + /// let runtime_ptr = COMPTIME_PTR; + /// assert_ne!( + /// runtime_ptr.is_aligned_to(8), + /// runtime_ptr.wrapping_add(1).is_aligned_to(8), + /// ); + /// ``` + /// + /// If a pointer is created from a fixed address, this function behaves the same during + /// runtime and compiletime. 
+ /// + #[cfg_attr(bootstrap, doc = "```ignore")] + #[cfg_attr(not(bootstrap), doc = "```")] + /// #![feature(pointer_is_aligned)] + /// #![feature(const_pointer_is_aligned)] + /// + /// const _: () = { + /// let ptr = 40 as *mut u8; + /// assert!(ptr.is_aligned_to(1)); + /// assert!(ptr.is_aligned_to(2)); + /// assert!(ptr.is_aligned_to(4)); + /// assert!(ptr.is_aligned_to(8)); + /// assert!(!ptr.is_aligned_to(16)); + /// }; + /// ``` + /// + /// [tracking issue]: https://github.com/rust-lang/rust/issues/104203 #[must_use] #[inline] #[unstable(feature = "pointer_is_aligned", issue = "96284")] - pub fn is_aligned_to(self, align: usize) -> bool { + #[rustc_const_unstable(feature = "const_pointer_is_aligned", issue = "104203")] + pub const fn is_aligned_to(self, align: usize) -> bool { if !align.is_power_of_two() { panic!("is_aligned_to: align is not a power-of-two"); } - // Cast is needed for `T: !Sized` - self.cast::().addr() & align - 1 == 0 + // We can't use the address of `self` in a `const fn`, so we use `align_offset` instead. + // The cast to `()` is used to + // 1. deal with fat pointers; and + // 2. ensure that `align_offset` doesn't actually try to compute an offset. + self.cast::<()>().align_offset(align) == 0 } } diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index 61d31b3448734..66d28770b87f9 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -4,6 +4,7 @@ #![feature(array_windows)] #![feature(bigint_helper_methods)] #![feature(cell_update)] +#![feature(const_align_offset)] #![feature(const_assume)] #![feature(const_align_of_val_raw)] #![feature(const_black_box)] @@ -18,6 +19,7 @@ #![feature(const_nonnull_new)] #![feature(const_num_from_num)] #![feature(const_pointer_byte_offsets)] +#![feature(const_pointer_is_aligned)] #![feature(const_ptr_as_ref)] #![feature(const_ptr_read)] #![feature(const_ptr_write)] @@ -81,6 +83,7 @@ #![feature(never_type)] #![feature(unwrap_infallible)] #![feature(pointer_byte_offsets)] +#![feature(pointer_is_aligned)] #![feature(portable_simd)] #![feature(ptr_metadata)] #![feature(once_cell)] diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs index 0977980ba47bf..390148550a4b3 100644 --- a/library/core/tests/ptr.rs +++ b/library/core/tests/ptr.rs @@ -358,6 +358,23 @@ fn align_offset_zst() { } } +#[test] +#[cfg(not(bootstrap))] +fn align_offset_zst_const() { + const { + // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at + // all, because no amount of elements will align the pointer. + let mut p = 1; + while p < 1024 { + assert!(ptr::invalid::<()>(p).align_offset(p) == 0); + if p != 1 { + assert!(ptr::invalid::<()>(p + 1).align_offset(p) == !0); + } + p = (p + 1).next_power_of_two(); + } + } +} + #[test] fn align_offset_stride_one() { // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to @@ -379,6 +396,26 @@ fn align_offset_stride_one() { } } +#[test] +#[cfg(not(bootstrap))] +fn align_offset_stride_one_const() { + const { + // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to + // number of bytes. 
+        let mut align = 1;
+        while align < 1024 {
+            let mut ptr = 1;
+            while ptr < 2 * align {
+                let expected = ptr % align;
+                let offset = if expected == 0 { 0 } else { align - expected };
+                assert!(ptr::invalid::<u8>(ptr).align_offset(align) == offset);
+                ptr += 1;
+            }
+            align = (align + 1).next_power_of_two();
+        }
+    }
+}
+
 #[test]
 fn align_offset_various_strides() {
     unsafe fn test_stride<T>(ptr: *const T, align: usize) -> bool {
@@ -455,6 +492,182 @@ fn align_offset_various_strides() {
     assert!(!x);
 }
 
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_various_strides_const() {
+    const unsafe fn test_stride<T>(ptr: *const T, numptr: usize, align: usize) {
+        let mut expected = usize::MAX;
+        // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
+        let mut el = 0;
+        while el < align {
+            if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+                expected = el;
+                break;
+            }
+            el += 1;
+        }
+        let got = ptr.align_offset(align);
+        assert!(got == expected);
+    }
+
+    const {
+        // For pointers of stride != 1, we verify the algorithm against the naivest possible
+        // implementation
+        let mut align = 1;
+        let limit = 32;
+        while align < limit {
+            let mut ptr = 1;
+            while ptr < 4 * align {
+                unsafe {
+                    #[repr(packed)]
+                    struct A3(u16, u8);
+                    test_stride::<A3>(ptr::invalid::<A3>(ptr), ptr, align);
+
+                    struct A4(u32);
+                    test_stride::<A4>(ptr::invalid::<A4>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A5(u32, u8);
+                    test_stride::<A5>(ptr::invalid::<A5>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A6(u32, u16);
+                    test_stride::<A6>(ptr::invalid::<A6>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A7(u32, u16, u8);
+                    test_stride::<A7>(ptr::invalid::<A7>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A8(u32, u32);
+                    test_stride::<A8>(ptr::invalid::<A8>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A9(u32, u32, u8);
+                    test_stride::<A9>(ptr::invalid::<A9>(ptr), ptr, align);
+
+                    #[repr(packed)]
+                    struct A10(u32, u32, u16);
+                    test_stride::<A10>(ptr::invalid::<A10>(ptr), ptr, align);
+
+                    test_stride::<u32>(ptr::invalid::<u32>(ptr), ptr, align);
+                    test_stride::<u128>(ptr::invalid::<u128>(ptr), ptr, align);
+                }
+                ptr += 1;
+            }
+            align = (align + 1).next_power_of_two();
+        }
+    }
+}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn align_offset_with_provenance_const() {
+    const {
+        // On some platforms (e.g. msp430-none-elf), the alignment of `i32` is less than 4.
+ #[repr(align(4))] + struct AlignedI32(i32); + + let data = AlignedI32(42); + + // `stride % align == 0` (usual case) + + let ptr: *const i32 = &data.0; + assert!(ptr.align_offset(1) == 0); + assert!(ptr.align_offset(2) == 0); + assert!(ptr.align_offset(4) == 0); + assert!(ptr.align_offset(8) == usize::MAX); + assert!(ptr.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr.wrapping_byte_add(1).align_offset(2) == usize::MAX); + assert!(ptr.wrapping_byte_add(2).align_offset(1) == 0); + assert!(ptr.wrapping_byte_add(2).align_offset(2) == 0); + assert!(ptr.wrapping_byte_add(2).align_offset(4) == usize::MAX); + assert!(ptr.wrapping_byte_add(3).align_offset(1) == 0); + assert!(ptr.wrapping_byte_add(3).align_offset(2) == usize::MAX); + + assert!(ptr.wrapping_add(42).align_offset(4) == 0); + assert!(ptr.wrapping_add(42).align_offset(8) == usize::MAX); + + let ptr1: *const i8 = ptr.cast(); + assert!(ptr1.align_offset(1) == 0); + assert!(ptr1.align_offset(2) == 0); + assert!(ptr1.align_offset(4) == 0); + assert!(ptr1.align_offset(8) == usize::MAX); + assert!(ptr1.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr1.wrapping_byte_add(1).align_offset(2) == 1); + assert!(ptr1.wrapping_byte_add(1).align_offset(4) == 3); + assert!(ptr1.wrapping_byte_add(1).align_offset(8) == usize::MAX); + assert!(ptr1.wrapping_byte_add(2).align_offset(1) == 0); + assert!(ptr1.wrapping_byte_add(2).align_offset(2) == 0); + assert!(ptr1.wrapping_byte_add(2).align_offset(4) == 2); + assert!(ptr1.wrapping_byte_add(2).align_offset(8) == usize::MAX); + assert!(ptr1.wrapping_byte_add(3).align_offset(1) == 0); + assert!(ptr1.wrapping_byte_add(3).align_offset(2) == 1); + assert!(ptr1.wrapping_byte_add(3).align_offset(4) == 1); + assert!(ptr1.wrapping_byte_add(3).align_offset(8) == usize::MAX); + + let ptr2: *const i16 = ptr.cast(); + assert!(ptr2.align_offset(1) == 0); + assert!(ptr2.align_offset(2) == 0); + assert!(ptr2.align_offset(4) == 0); + assert!(ptr2.align_offset(8) == usize::MAX); + assert!(ptr2.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr2.wrapping_byte_add(1).align_offset(2) == usize::MAX); + assert!(ptr2.wrapping_byte_add(2).align_offset(1) == 0); + assert!(ptr2.wrapping_byte_add(2).align_offset(2) == 0); + assert!(ptr2.wrapping_byte_add(2).align_offset(4) == 1); + assert!(ptr2.wrapping_byte_add(2).align_offset(8) == usize::MAX); + assert!(ptr2.wrapping_byte_add(3).align_offset(1) == 0); + assert!(ptr2.wrapping_byte_add(3).align_offset(2) == usize::MAX); + + let ptr3: *const i64 = ptr.cast(); + assert!(ptr3.align_offset(1) == 0); + assert!(ptr3.align_offset(2) == 0); + assert!(ptr3.align_offset(4) == 0); + assert!(ptr3.align_offset(8) == usize::MAX); + assert!(ptr3.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr3.wrapping_byte_add(1).align_offset(2) == usize::MAX); + + // `stride % align != 0` (edge case) + + let ptr4: *const [u8; 3] = ptr.cast(); + assert!(ptr4.align_offset(1) == 0); + assert!(ptr4.align_offset(2) == 0); + assert!(ptr4.align_offset(4) == 0); + assert!(ptr4.align_offset(8) == usize::MAX); + assert!(ptr4.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr4.wrapping_byte_add(1).align_offset(2) == 1); + assert!(ptr4.wrapping_byte_add(1).align_offset(4) == 1); + assert!(ptr4.wrapping_byte_add(1).align_offset(8) == usize::MAX); + assert!(ptr4.wrapping_byte_add(2).align_offset(1) == 0); + assert!(ptr4.wrapping_byte_add(2).align_offset(2) == 0); + assert!(ptr4.wrapping_byte_add(2).align_offset(4) == 2); + assert!(ptr4.wrapping_byte_add(2).align_offset(8) == 
usize::MAX); + assert!(ptr4.wrapping_byte_add(3).align_offset(1) == 0); + assert!(ptr4.wrapping_byte_add(3).align_offset(2) == 1); + assert!(ptr4.wrapping_byte_add(3).align_offset(4) == 3); + assert!(ptr4.wrapping_byte_add(3).align_offset(8) == usize::MAX); + + let ptr5: *const [u8; 5] = ptr.cast(); + assert!(ptr5.align_offset(1) == 0); + assert!(ptr5.align_offset(2) == 0); + assert!(ptr5.align_offset(4) == 0); + assert!(ptr5.align_offset(8) == usize::MAX); + assert!(ptr5.wrapping_byte_add(1).align_offset(1) == 0); + assert!(ptr5.wrapping_byte_add(1).align_offset(2) == 1); + assert!(ptr5.wrapping_byte_add(1).align_offset(4) == 3); + assert!(ptr5.wrapping_byte_add(1).align_offset(8) == usize::MAX); + assert!(ptr5.wrapping_byte_add(2).align_offset(1) == 0); + assert!(ptr5.wrapping_byte_add(2).align_offset(2) == 0); + assert!(ptr5.wrapping_byte_add(2).align_offset(4) == 2); + assert!(ptr5.wrapping_byte_add(2).align_offset(8) == usize::MAX); + assert!(ptr5.wrapping_byte_add(3).align_offset(1) == 0); + assert!(ptr5.wrapping_byte_add(3).align_offset(2) == 1); + assert!(ptr5.wrapping_byte_add(3).align_offset(4) == 1); + assert!(ptr5.wrapping_byte_add(3).align_offset(8) == usize::MAX); + } +} + #[test] fn align_offset_issue_103361() { #[cfg(target_pointer_width = "64")] @@ -467,6 +680,72 @@ fn align_offset_issue_103361() { let _ = (SIZE as *const HugeSize).align_offset(SIZE); } +#[test] +#[cfg(not(bootstrap))] +fn align_offset_issue_103361_const() { + #[cfg(target_pointer_width = "64")] + const SIZE: usize = 1 << 47; + #[cfg(target_pointer_width = "32")] + const SIZE: usize = 1 << 30; + #[cfg(target_pointer_width = "16")] + const SIZE: usize = 1 << 13; + struct HugeSize([u8; SIZE - 1]); + + const { + assert!(ptr::invalid::(SIZE - 1).align_offset(SIZE) == SIZE - 1); + assert!(ptr::invalid::(SIZE).align_offset(SIZE) == 0); + assert!(ptr::invalid::(SIZE + 1).align_offset(SIZE) == 1); + } +} + +#[test] +fn is_aligned() { + let data = 42; + let ptr: *const i32 = &data; + assert!(ptr.is_aligned()); + assert!(ptr.is_aligned_to(1)); + assert!(ptr.is_aligned_to(2)); + assert!(ptr.is_aligned_to(4)); + assert!(ptr.wrapping_byte_add(2).is_aligned_to(1)); + assert!(ptr.wrapping_byte_add(2).is_aligned_to(2)); + assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4)); + + // At runtime either `ptr` or `ptr+1` is aligned to 8. + assert_ne!(ptr.is_aligned_to(8), ptr.wrapping_add(1).is_aligned_to(8)); +} + +#[test] +#[cfg(not(bootstrap))] +fn is_aligned_const() { + const { + let data = 42; + let ptr: *const i32 = &data; + assert!(ptr.is_aligned()); + assert!(ptr.is_aligned_to(1)); + assert!(ptr.is_aligned_to(2)); + assert!(ptr.is_aligned_to(4)); + assert!(ptr.wrapping_byte_add(2).is_aligned_to(1)); + assert!(ptr.wrapping_byte_add(2).is_aligned_to(2)); + assert!(!ptr.wrapping_byte_add(2).is_aligned_to(4)); + + // At comptime neither `ptr` nor `ptr+1` is aligned to 8. + assert!(!ptr.is_aligned_to(8)); + assert!(!ptr.wrapping_add(1).is_aligned_to(8)); + } +} + +#[test] +#[cfg(bootstrap)] +fn is_aligned_const() { + const { + let data = 42; + let ptr: *const i32 = &data; + // The bootstrap compiler always returns false for is_aligned. 
+ assert!(!ptr.is_aligned()); + assert!(!ptr.is_aligned_to(1)); + } +} + #[test] fn offset_from() { let mut a = [0; 5]; diff --git a/src/test/assembly/is_aligned.rs b/src/test/assembly/is_aligned.rs new file mode 100644 index 0000000000000..04b5de8342370 --- /dev/null +++ b/src/test/assembly/is_aligned.rs @@ -0,0 +1,58 @@ +// assembly-output: emit-asm +// min-llvm-version: 14.0 +// only-x86_64 +// revisions: opt-speed opt-size +// [opt-speed] compile-flags: -Copt-level=1 +// [opt-size] compile-flags: -Copt-level=s +#![crate_type="rlib"] + +#![feature(core_intrinsics)] +#![feature(pointer_is_aligned)] + +// CHECK-LABEL: is_aligned_to_unchecked +// CHECK: decq +// CHECK-NEXT: testq +// CHECK-NEXT: sete +// CHECK: retq +#[no_mangle] +pub unsafe fn is_aligned_to_unchecked(ptr: *const u8, align: usize) -> bool { + unsafe { + std::intrinsics::assume(align.is_power_of_two()) + } + ptr.is_aligned_to(align) +} + +// CHECK-LABEL: is_aligned_1 +// CHECK: movb $1 +// CHECK: retq +#[no_mangle] +pub fn is_aligned_1(ptr: *const u8) -> bool { + ptr.is_aligned() +} + +// CHECK-LABEL: is_aligned_2 +// CHECK: testb $1 +// CHECK-NEXT: sete +// CHECK: retq +#[no_mangle] +pub fn is_aligned_2(ptr: *const u16) -> bool { + ptr.is_aligned() +} + +// CHECK-LABEL: is_aligned_4 +// CHECK: testb $3 +// CHECK-NEXT: sete +// CHECK: retq +#[no_mangle] +pub fn is_aligned_4(ptr: *const u32) -> bool { + ptr.is_aligned() +} + +// CHECK-LABEL: is_aligned_8 +// CHECK: testb $7 +// CHECK-NEXT: sete +// CHECK: retq +#[no_mangle] +pub fn is_aligned_8(ptr: *const u64) -> bool { + ptr.is_aligned() +} diff --git a/src/tools/miri/src/shims/intrinsics/mod.rs b/src/tools/miri/src/shims/intrinsics/mod.rs index 6004e2078ad4f..5ea82adb9c69c 100644 --- a/src/tools/miri/src/shims/intrinsics/mod.rs +++ b/src/tools/miri/src/shims/intrinsics/mod.rs @@ -368,11 +368,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } // Other - "exact_div" => { - let [num, denom] = check_arg_count(args)?; - this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?; - } - "breakpoint" => { let [] = check_arg_count(args)?; // normally this would raise a SIGTRAP, which aborts if no debugger is connected
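
Taken together, the changes above make `align_offset`, `is_aligned`, and `is_aligned_to` callable in const contexts. The following standalone sketch illustrates the resulting compile-time semantics; it mirrors the doc examples and tests added in this diff and assumes a nightly toolchain that includes this change (the feature names come from the diff itself, and the `AlignedI32`/`AlignedI64` structs are purely illustrative).

#![feature(const_align_offset)]
#![feature(pointer_is_aligned)]
#![feature(const_pointer_is_aligned)]

// On some platforms, the alignment of primitives is less than their size.
#[repr(align(4))]
struct AlignedI32(i32);
#[repr(align(8))]
struct AlignedI64(i64);

const _: () = {
    // A pointer derived from a reference has no concrete address at compile time;
    // const eval only knows the alignment of its allocation (4 here).
    let data = AlignedI32(42);
    let ptr = &data as *const AlignedI32;

    // Alignments up to the allocation's alignment are answered exactly ...
    assert!(ptr.align_offset(4) == 0);
    assert!(ptr.is_aligned());

    // ... but stricter alignments cannot be decided without an address, so
    // `align_offset` returns `usize::MAX` and `is_aligned` reports false.
    assert!(ptr.cast::<AlignedI64>().align_offset(8) == usize::MAX);
    assert!(!ptr.cast::<AlignedI64>().is_aligned());

    // Pointers created from a fixed address behave the same at compile time and runtime.
    let fixed = 40 as *const AlignedI32;
    assert!(fixed.is_aligned());
    assert!(fixed.cast::<AlignedI64>().is_aligned()); // 40 % 8 == 0
    assert!(!fixed.wrapping_add(1).cast::<AlignedI64>().is_aligned()); // 44 % 8 != 0
};

fn main() {}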