avoid the core_intrinsics feature (for a transition)
RalfJung committed Nov 21, 2023
1 parent 2a331a2 · commit 16d366a
Showing 7 changed files with 58 additions and 7 deletions.
src/arm_linux.rs (2 additions, 1 deletion)
@@ -1,6 +1,7 @@
-use core::intrinsics;
 use core::mem;
 
+use crate::intrinsics;
+
 // Kernel-provided user-mode helper functions:
 // https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
 unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
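
The helper's body is truncated in this excerpt. For orientation only (a sketch, not code from this file), a read-modify-write loop built on such a compare-and-exchange primitive typically looks like the following; it assumes the wrapper returns `true` when the exchange succeeded, and the name `fetch_add_sketch` is made up:

// Sketch: fetch-and-add on top of a kernel cmpxchg helper. The plain read is
// racy, but the cmpxchg re-validates the old value before storing, so the
// loop only exits once an un-raced update has been applied.
unsafe fn fetch_add_sketch(ptr: *mut u32, val: u32) -> u32 {
    loop {
        let old = *ptr;
        if __kuser_cmpxchg(old, old.wrapping_add(val), ptr) {
            return old; // previous value, as a fetch_add conventionally returns
        }
        // another thread or interrupt won the race; retry with the fresh value
    }
}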
src/int/specialized_div_rem/mod.rs (1 addition, 1 deletion)
@@ -75,7 +75,7 @@ fn zero_div_fn() -> ! {
     // Calling the intrinsic directly, to avoid the `assert_unsafe_precondition` that cannot be used
     // here because it involves non-`inline` functions
     // (https://github.com/rust-lang/compiler-builtins/issues/491).
-    unsafe { core::intrinsics::unreachable() }
+    unsafe { crate::intrinsics::unreachable() }
 }
 
 const USE_LZ: bool = {
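
For context (not part of the commit): outside compiler-builtins the same effect would normally be obtained through the stable wrapper, which, as the doc comment added in src/lib.rs below notes, wraps this very intrinsic. A hedged sketch:

// Sketch only: ordinary code would write this instead of calling the
// intrinsic directly, but the stable wrapper carries a debug-build
// `assert_unsafe_precondition` check that this crate cannot reference.
fn zero_div_fn_sketch() -> ! {
    // Safety: the caller guarantees this point is never reached.
    unsafe { core::hint::unreachable_unchecked() }
}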
src/lib.rs (49 additions, 1 deletion)
@@ -5,7 +5,8 @@
 #![feature(cfg_target_has_atomic)]
 #![feature(compiler_builtins)]
 #![feature(core_ffi_c)]
-#![feature(core_intrinsics)]
+#![feature(intrinsics)]
+#![feature(rustc_attrs)]
 #![feature(inline_const)]
 #![feature(lang_items)]
 #![feature(linkage)]
@@ -80,3 +81,50 @@ pub mod x86;
 pub mod x86_64;
 
 pub mod probestack;
+
+// `core` is changing the feature name for the `intrinsics` module.
+// To permit that transition, we avoid using that feature for now.
+mod intrinsics {
+    extern "rust-intrinsic" {
+        #[rustc_nounwind]
+        pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
+
+        #[rustc_nounwind]
+        pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
+
+        /// Informs the optimizer that this point in the code is not reachable,
+        /// enabling further optimizations.
+        ///
+        /// N.B., this is very different from the `unreachable!()` macro: Unlike the
+        /// macro, which panics when it is executed, it is *undefined behavior* to
+        /// reach code marked with this function.
+        ///
+        /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
+        #[rustc_nounwind]
+        pub fn unreachable() -> !;
+
+        /// Performs an exact division, resulting in undefined behavior where
+        /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// This intrinsic does not have a stable counterpart.
+        #[rustc_nounwind]
+        pub fn exact_div<T: Copy>(x: T, y: T) -> T;
+
+        /// Performs an unchecked division, resulting in undefined behavior
+        /// where `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// Safe wrappers for this intrinsic are available on the integer
+        /// primitives via the `checked_div` method. For example,
+        /// [`u32::checked_div`]
+        #[rustc_nounwind]
+        pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
+        /// Returns the remainder of an unchecked division, resulting in
+        /// undefined behavior when `y == 0` or `x == T::MIN && y == -1`
+        ///
+        /// Safe wrappers for this intrinsic are available on the integer
+        /// primitives via the `checked_rem` method. For example,
+        /// [`u32::checked_rem`]
+        #[rustc_nounwind]
+        pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
+    }
+}
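
To illustrate the intent of the shim (a sketch, not part of the diff): call sites elsewhere in the crate keep the same shape and only swap `core::` for `crate::`, and the safety obligations spelled out in the doc comments above still fall on the caller. The helper name and the divisor below are made up for the example:

// Sketch: an exact division routed through the crate-local shim. exact_div is
// UB if `x % y != 0`, `y == 0`, or `x == T::MIN && y == -1`, so the caller
// must already have established that the division is exact.
fn quarter_exact_sketch(x: u32) -> u32 {
    debug_assert!(x % 4 == 0);
    // was: unsafe { core::intrinsics::exact_div(x, 4) }
    unsafe { crate::intrinsics::exact_div(x, 4) }
}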
src/mem/impls.rs (1 addition, 1 deletion)
@@ -65,7 +65,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
 // cfg needed because not all targets will have atomic loads that can be lowered
 // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
 #[cfg(target_has_atomic_load_store = "ptr")]
-let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
+let mut prev_word = crate::intrinsics::atomic_load_unordered(src_aligned);
 #[cfg(not(target_has_atomic_load_store = "ptr"))]
 let mut prev_word = core::ptr::read_volatile(src_aligned);
 
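
The cfg split above is the whole trick; pulled out into a standalone helper it would look roughly like this (a sketch with a made-up name, using `usize` as a stand-in for the word type):

// Sketch: load one word, tearing-free where the target has pointer-sized
// atomics, and via a volatile read where it does not.
#[cfg(target_has_atomic_load_store = "ptr")]
unsafe fn load_word_sketch(p: *const usize) -> usize {
    crate::intrinsics::atomic_load_unordered(p)
}

#[cfg(not(target_has_atomic_load_store = "ptr"))]
unsafe fn load_word_sketch(p: *const usize) -> usize {
    core::ptr::read_volatile(p)
}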
src/mem/mod.rs (2 additions, 1 deletion)
@@ -8,10 +8,11 @@ type c_int = i16;
 #[cfg(not(target_pointer_width = "16"))]
 type c_int = i32;
 
-use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
 use core::mem;
 use core::ops::{BitOr, Shl};
 
+use crate::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
+
 // memcpy/memmove/memset have optimized implementations on some architectures
 #[cfg_attr(
     all(not(feature = "no-asm"), target_arch = "x86_64"),
src/mem/x86_64.rs (2 additions, 1 deletion)
@@ -17,9 +17,10 @@
 // Note that ERMSB does not enhance the backwards (DF=1) "rep movsb".
 
 use core::arch::asm;
-use core::intrinsics;
 use core::mem;
 
+use crate::intrinsics;
+
 #[inline(always)]
 #[cfg(target_feature = "ermsb")]
 pub unsafe fn copy_forward(dest: *mut u8, src: *const u8, count: usize) {
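
The function body is truncated here. For reference only (a sketch, not the hidden code), a forward byte copy with `rep movsb` on x86-64 typically looks like the following; the register choices are fixed by the instruction itself (count in rcx, destination in rdi, source in rsi):

// Sketch: copy `count` bytes forward using the ERMSB-friendly `rep movsb`.
use core::arch::asm;

unsafe fn rep_movsb_sketch(dest: *mut u8, src: *const u8, count: usize) {
    asm!(
        "rep movsb",
        inout("rcx") count => _,
        inout("rdi") dest => _,
        inout("rsi") src => _,
        options(nostack, preserves_flags)
    );
}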
src/x86_64.rs (1 addition, 1 deletion)
@@ -1,6 +1,6 @@
 #![allow(unused_imports)]
 
-use core::intrinsics;
+use crate::intrinsics;
 
 // NOTE These functions are implemented using assembly because they using a custom
 // calling convention which can't be implemented using a normal Rust function