Skip to content

Commit 2a1e192

Browse files
scottmcm
authored and
gitbot
committed
Update a bunch of library types for MCP807
This greatly reduces the number of places that actually use the `rustc_layout_scalar_valid_range_*` attributes down to just 3: ``` library/core/src/ptr/non_null.rs 68:#[rustc_layout_scalar_valid_range_start(1)] library/core/src/num/niche_types.rs 19: #[rustc_layout_scalar_valid_range_start($low)] 20: #[rustc_layout_scalar_valid_range_end($high)] ``` Everything else -- PAL `Nanoseconds`, alloc's `Cap`, niched FDs, etc. -- all just wrap those `niche_types` types.
1 parent 35a3f89 commit 2a1e192

File tree

12 files changed

+296
-192
lines changed

12 files changed

+296
-192
lines changed

alloc/src/lib.rs

+1
Original file line number | Diff line number | Diff line change
@@ -142,6 +142,7 @@
142142
#![feature(slice_range)]
143143
#![feature(std_internals)]
144144
#![feature(str_internals)]
145+
#![feature(temporary_niche_types)]
145146
#![feature(trusted_fused)]
146147
#![feature(trusted_len)]
147148
#![feature(trusted_random_access)]

alloc/src/raw_vec.rs

+22-24
Original file line number | Diff line number | Diff line change
@@ -33,21 +33,15 @@ enum AllocInit {
3333
Zeroed,
3434
}
3535

36-
#[repr(transparent)]
37-
#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
38-
#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
39-
#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
40-
struct Cap(usize);
36+
type Cap = core::num::niche_types::UsizeNoHighBit;
4137

42-
impl Cap {
43-
const ZERO: Cap = unsafe { Cap(0) };
38+
const ZERO_CAP: Cap = unsafe { Cap::new_unchecked(0) };
4439

45-
/// `Cap(cap)`, except if `T` is a ZST then `Cap::ZERO`.
46-
///
47-
/// # Safety: cap must be <= `isize::MAX`.
48-
unsafe fn new<T>(cap: usize) -> Self {
49-
if T::IS_ZST { Cap::ZERO } else { unsafe { Self(cap) } }
50-
}
40+
/// `Cap(cap)`, except if `T` is a ZST then `Cap::ZERO`.
41+
///
42+
/// # Safety: cap must be <= `isize::MAX`.
43+
unsafe fn new_cap<T>(cap: usize) -> Cap {
44+
if T::IS_ZST { ZERO_CAP } else { unsafe { Cap::new_unchecked(cap) } }
5145
}
5246

5347
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
@@ -257,7 +251,7 @@ impl<T, A: Allocator> RawVec<T, A> {
257251
// SAFETY: Precondition passed to the caller
258252
unsafe {
259253
let ptr = ptr.cast();
260-
let capacity = Cap::new::<T>(capacity);
254+
let capacity = new_cap::<T>(capacity);
261255
Self {
262256
inner: RawVecInner::from_raw_parts_in(ptr, capacity, alloc),
263257
_marker: PhantomData,
@@ -275,7 +269,7 @@ impl<T, A: Allocator> RawVec<T, A> {
275269
// SAFETY: Precondition passed to the caller
276270
unsafe {
277271
let ptr = ptr.cast();
278-
let capacity = Cap::new::<T>(capacity);
272+
let capacity = new_cap::<T>(capacity);
279273
Self { inner: RawVecInner::from_nonnull_in(ptr, capacity, alloc), _marker: PhantomData }
280274
}
281275
}
@@ -410,7 +404,7 @@ impl<A: Allocator> RawVecInner<A> {
410404
const fn new_in(alloc: A, align: usize) -> Self {
411405
let ptr = unsafe { core::mem::transmute(align) };
412406
// `cap: 0` means "unallocated". zero-sized types are ignored.
413-
Self { ptr, cap: Cap::ZERO, alloc }
407+
Self { ptr, cap: ZERO_CAP, alloc }
414408
}
415409

416410
#[cfg(not(no_global_oom_handling))]
@@ -483,7 +477,11 @@ impl<A: Allocator> RawVecInner<A> {
483477
// Allocators currently return a `NonNull<[u8]>` whose length
484478
// matches the size requested. If that ever changes, the capacity
485479
// here should change to `ptr.len() / mem::size_of::<T>()`.
486-
Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
480+
Ok(Self {
481+
ptr: Unique::from(ptr.cast()),
482+
cap: unsafe { Cap::new_unchecked(capacity) },
483+
alloc,
484+
})
487485
}
488486

489487
#[inline]
@@ -508,7 +506,7 @@ impl<A: Allocator> RawVecInner<A> {
508506

509507
#[inline]
510508
const fn capacity(&self, elem_size: usize) -> usize {
511-
if elem_size == 0 { usize::MAX } else { self.cap.0 }
509+
if elem_size == 0 { usize::MAX } else { self.cap.as_inner() }
512510
}
513511

514512
#[inline]
@@ -518,15 +516,15 @@ impl<A: Allocator> RawVecInner<A> {
518516

519517
#[inline]
520518
fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
521-
if elem_layout.size() == 0 || self.cap.0 == 0 {
519+
if elem_layout.size() == 0 || self.cap.as_inner() == 0 {
522520
None
523521
} else {
524522
// We could use Layout::array here which ensures the absence of isize and usize overflows
525523
// and could hypothetically handle differences between stride and size, but this memory
526524
// has already been allocated so we know it can't overflow and currently Rust does not
527525
// support such types. So we can do better by skipping some checks and avoid an unwrap.
528526
unsafe {
529-
let alloc_size = elem_layout.size().unchecked_mul(self.cap.0);
527+
let alloc_size = elem_layout.size().unchecked_mul(self.cap.as_inner());
530528
let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align());
531529
Some((self.ptr.into(), layout))
532530
}
@@ -562,7 +560,7 @@ impl<A: Allocator> RawVecInner<A> {
562560
#[inline]
563561
#[track_caller]
564562
fn grow_one(&mut self, elem_layout: Layout) {
565-
if let Err(err) = self.grow_amortized(self.cap.0, 1, elem_layout) {
563+
if let Err(err) = self.grow_amortized(self.cap.as_inner(), 1, elem_layout) {
566564
handle_error(err);
567565
}
568566
}
@@ -627,7 +625,7 @@ impl<A: Allocator> RawVecInner<A> {
627625
// the size requested. If that ever changes, the capacity here should
628626
// change to `ptr.len() / mem::size_of::<T>()`.
629627
self.ptr = Unique::from(ptr.cast());
630-
self.cap = unsafe { Cap(cap) };
628+
self.cap = unsafe { Cap::new_unchecked(cap) };
631629
}
632630

633631
fn grow_amortized(
@@ -650,7 +648,7 @@ impl<A: Allocator> RawVecInner<A> {
650648

651649
// This guarantees exponential growth. The doubling cannot overflow
652650
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
653-
let cap = cmp::max(self.cap.0 * 2, required_cap);
651+
let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
654652
let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
655653

656654
let new_layout = layout_array(cap, elem_layout)?;
@@ -719,7 +717,7 @@ impl<A: Allocator> RawVecInner<A> {
719717
unsafe { self.alloc.deallocate(ptr, layout) };
720718
self.ptr =
721719
unsafe { Unique::new_unchecked(ptr::without_provenance_mut(elem_layout.align())) };
722-
self.cap = Cap::ZERO;
720+
self.cap = ZERO_CAP;
723721
} else {
724722
let ptr = unsafe {
725723
// Layout cannot overflow here because it would have

core/src/num/mod.rs

+4
Original file line number | Diff line number | Diff line change
@@ -51,6 +51,10 @@ mod overflow_panic;
5151
mod saturating;
5252
mod wrapping;
5353

54+
/// 100% perma-unstable
55+
#[doc(hidden)]
56+
pub mod niche_types;
57+
5458
#[stable(feature = "rust1", since = "1.0.0")]
5559
#[cfg(not(no_fp_fmt_parse))]
5660
pub use dec2flt::ParseFloatError;

core/src/num/niche_types.rs

+162
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,162 @@
1+
#![unstable(
2+
feature = "temporary_niche_types",
3+
issue = "none",
4+
reason = "for core, alloc, and std internals until pattern types are further along"
5+
)]
6+
7+
use crate::cmp::Ordering;
8+
use crate::fmt;
9+
use crate::hash::{Hash, Hasher};
10+
use crate::marker::StructuralPartialEq;
11+
12+
macro_rules! define_valid_range_type {
13+
($(
14+
$(#[$m:meta])*
15+
$vis:vis struct $name:ident($int:ident as $uint:ident in $low:literal..=$high:literal);
16+
)+) => {$(
17+
#[derive(Clone, Copy, Eq)]
18+
#[repr(transparent)]
19+
#[rustc_layout_scalar_valid_range_start($low)]
20+
#[rustc_layout_scalar_valid_range_end($high)]
21+
$(#[$m])*
22+
$vis struct $name($int);
23+
24+
const _: () = {
25+
// With the `valid_range` attributes, it's always specified as unsigned
26+
assert!(<$uint>::MIN == 0);
27+
let ulow: $uint = $low;
28+
let uhigh: $uint = $high;
29+
assert!(ulow <= uhigh);
30+
31+
assert!(size_of::<$int>() == size_of::<$uint>());
32+
};
33+
34+
impl $name {
35+
#[inline]
36+
pub const unsafe fn new_unchecked(val: $int) -> Self {
37+
// SAFETY: same precondition
38+
unsafe { $name(val) }
39+
}
40+
41+
#[inline]
42+
pub const fn as_inner(self) -> $int {
43+
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
44+
// (Not using `.0` due to MCP#807.)
45+
unsafe { crate::mem::transmute(self) }
46+
}
47+
}
48+
49+
// This is required to allow matching a constant. We don't get it from a derive
50+
// because the derived `PartialEq` would do a field projection, which is banned
51+
// by <https://github.com/rust-lang/compiler-team/issues/807>.
52+
impl StructuralPartialEq for $name {}
53+
54+
impl PartialEq for $name {
55+
#[inline]
56+
fn eq(&self, other: &Self) -> bool {
57+
self.as_inner() == other.as_inner()
58+
}
59+
}
60+
61+
impl Ord for $name {
62+
#[inline]
63+
fn cmp(&self, other: &Self) -> Ordering {
64+
Ord::cmp(&self.as_inner(), &other.as_inner())
65+
}
66+
}
67+
68+
impl PartialOrd for $name {
69+
#[inline]
70+
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
71+
Some(Ord::cmp(self, other))
72+
}
73+
}
74+
75+
impl Hash for $name {
76+
// Required method
77+
fn hash<H: Hasher>(&self, state: &mut H) {
78+
Hash::hash(&self.as_inner(), state);
79+
}
80+
}
81+
82+
impl fmt::Debug for $name {
83+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
84+
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
85+
}
86+
}
87+
)+};
88+
}
89+
90+
define_valid_range_type! {
91+
pub struct Nanoseconds(u32 as u32 in 0..=999_999_999);
92+
}
93+
94+
impl Nanoseconds {
95+
// SAFETY: 0 is within the valid range
96+
pub const ZERO: Self = unsafe { Nanoseconds::new_unchecked(0) };
97+
}
98+
99+
impl Default for Nanoseconds {
100+
#[inline]
101+
fn default() -> Self {
102+
Self::ZERO
103+
}
104+
}
105+
106+
define_valid_range_type! {
107+
pub struct NonZeroU8Inner(u8 as u8 in 1..=0xff);
108+
pub struct NonZeroU16Inner(u16 as u16 in 1..=0xff_ff);
109+
pub struct NonZeroU32Inner(u32 as u32 in 1..=0xffff_ffff);
110+
pub struct NonZeroU64Inner(u64 as u64 in 1..=0xffffffff_ffffffff);
111+
pub struct NonZeroU128Inner(u128 as u128 in 1..=0xffffffffffffffff_ffffffffffffffff);
112+
113+
pub struct NonZeroI8Inner(i8 as u8 in 1..=0xff);
114+
pub struct NonZeroI16Inner(i16 as u16 in 1..=0xff_ff);
115+
pub struct NonZeroI32Inner(i32 as u32 in 1..=0xffff_ffff);
116+
pub struct NonZeroI64Inner(i64 as u64 in 1..=0xffffffff_ffffffff);
117+
pub struct NonZeroI128Inner(i128 as u128 in 1..=0xffffffffffffffff_ffffffffffffffff);
118+
}
119+
120+
#[cfg(target_pointer_width = "16")]
121+
define_valid_range_type! {
122+
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff);
123+
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff);
124+
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff);
125+
}
126+
#[cfg(target_pointer_width = "32")]
127+
define_valid_range_type! {
128+
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff_ffff);
129+
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff_ffff);
130+
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff_ffff);
131+
}
132+
#[cfg(target_pointer_width = "64")]
133+
define_valid_range_type! {
134+
pub struct UsizeNoHighBit(usize as usize in 0..=0x7fff_ffff_ffff_ffff);
135+
pub struct NonZeroUsizeInner(usize as usize in 1..=0xffff_ffff_ffff_ffff);
136+
pub struct NonZeroIsizeInner(isize as usize in 1..=0xffff_ffff_ffff_ffff);
137+
}
138+
139+
define_valid_range_type! {
140+
pub struct U32NotAllOnes(u32 as u32 in 0..=0xffff_fffe);
141+
pub struct I32NotAllOnes(i32 as u32 in 0..=0xffff_fffe);
142+
143+
pub struct U64NotAllOnes(u64 as u64 in 0..=0xffff_ffff_ffff_fffe);
144+
pub struct I64NotAllOnes(i64 as u64 in 0..=0xffff_ffff_ffff_fffe);
145+
}
146+
147+
pub trait NotAllOnesHelper {
148+
type Type;
149+
}
150+
pub type NotAllOnes<T> = <T as NotAllOnesHelper>::Type;
151+
impl NotAllOnesHelper for u32 {
152+
type Type = U32NotAllOnes;
153+
}
154+
impl NotAllOnesHelper for i32 {
155+
type Type = I32NotAllOnes;
156+
}
157+
impl NotAllOnesHelper for u64 {
158+
type Type = U64NotAllOnes;
159+
}
160+
impl NotAllOnesHelper for i64 {
161+
type Type = I64NotAllOnes;
162+
}

core/src/num/nonzero.rs

+1-30
Original file line number | Diff line number | Diff line change
@@ -37,41 +37,12 @@ pub unsafe trait ZeroablePrimitive: Sized + Copy + private::Sealed {
3737
macro_rules! impl_zeroable_primitive {
3838
($($NonZeroInner:ident ( $primitive:ty )),+ $(,)?) => {
3939
mod private {
40-
use super::*;
41-
4240
#[unstable(
4341
feature = "nonzero_internals",
4442
reason = "implementation detail which may disappear or be replaced at any time",
4543
issue = "none"
4644
)]
4745
pub trait Sealed {}
48-
49-
$(
50-
// This inner type is never shown directly, so intentionally does not have Debug
51-
#[expect(missing_debug_implementations)]
52-
// Since this struct is non-generic and derives Copy,
53-
// the derived Clone is `*self` and thus doesn't field-project.
54-
#[derive(Clone, Copy)]
55-
#[repr(transparent)]
56-
#[rustc_layout_scalar_valid_range_start(1)]
57-
#[rustc_nonnull_optimization_guaranteed]
58-
#[unstable(
59-
feature = "nonzero_internals",
60-
reason = "implementation detail which may disappear or be replaced at any time",
61-
issue = "none"
62-
)]
63-
pub struct $NonZeroInner($primitive);
64-
65-
// This is required to allow matching a constant. We don't get it from a derive
66-
// because the derived `PartialEq` would do a field projection, which is banned
67-
// by <https://github.com/rust-lang/compiler-team/issues/807>.
68-
#[unstable(
69-
feature = "nonzero_internals",
70-
reason = "implementation detail which may disappear or be replaced at any time",
71-
issue = "none"
72-
)]
73-
impl StructuralPartialEq for $NonZeroInner {}
74-
)+
7546
}
7647

7748
$(
@@ -88,7 +59,7 @@ macro_rules! impl_zeroable_primitive {
8859
issue = "none"
8960
)]
9061
unsafe impl ZeroablePrimitive for $primitive {
91-
type NonZeroInner = private::$NonZeroInner;
62+
type NonZeroInner = super::niche_types::$NonZeroInner;
9263
}
9364
)+
9465
};

0 commit comments

Comments (0)