From 5e6184cdb7685cbfceb2cfe80357715d0d04ec53 Mon Sep 17 00:00:00 2001 From: Ralf Jung <post@ralfj.de> Date: Thu, 18 Apr 2024 00:33:46 +0200 Subject: [PATCH 1/4] interpret/binary_int_op: avoid dropping to raw ints until we determined the sign --- .../rustc_const_eval/src/interpret/operand.rs | 24 +++- .../src/interpret/operator.rs | 113 +++++++++--------- .../rustc_middle/src/mir/interpret/value.rs | 22 ++-- compiler/rustc_middle/src/ty/consts/int.rs | 31 +++-- 4 files changed, 116 insertions(+), 74 deletions(-) diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index c120154ce2a70..8283c29207fcc 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -6,9 +6,10 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; use rustc_hir::def::Namespace; +use rustc_middle::mir::interpret::ScalarSizeMismatch; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter}; -use rustc_middle::ty::{ConstInt, Ty, TyCtxt}; +use rustc_middle::ty::{ConstInt, ScalarInt, Ty, TyCtxt}; use rustc_middle::{mir, ty}; use rustc_target::abi::{self, Abi, HasDataLayout, Size}; @@ -210,6 +211,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { ImmTy { imm: Immediate::Uninit, layout } } + #[inline] + pub fn from_scalar_int(s: ScalarInt, layout: TyAndLayout<'tcx>) -> Self { + assert_eq!(s.size(), layout.size); + Self::from_scalar(Scalar::from(s), layout) + } + #[inline] pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> { Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout)) @@ -223,7 +230,6 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> { Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout)) } - #[inline] pub fn from_int(i: impl 
Into<i128>, layout: TyAndLayout<'tcx>) -> Self { Self::from_scalar(Scalar::from_int(i, layout.size), layout) @@ -242,6 +248,20 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { Self::from_scalar(Scalar::from_i8(c as i8), layout) } + /// Return the immediate as a `ScalarInt`. Ensures that it has the size that the layout of the + /// immediate indicates. + #[inline] + pub fn to_scalar_int(&self) -> InterpResult<'tcx, ScalarInt> { + let s = self.to_scalar().to_scalar_int()?; + if s.size() != self.layout.size { + throw_ub!(ScalarSizeMismatch(ScalarSizeMismatch { + target_size: self.layout.size.bytes(), + data_size: s.size().bytes(), + })); + } + Ok(s) + } + #[inline] pub fn to_const_int(self) -> ConstInt { assert!(self.layout.ty.is_integral()); diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 5665bb4999f59..1a126d1fbb096 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -2,7 +2,7 @@ use rustc_apfloat::{Float, FloatConvert}; use rustc_middle::mir; use rustc_middle::mir::interpret::{InterpResult, Scalar}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; -use rustc_middle::ty::{self, FloatTy, Ty}; +use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty}; use rustc_span::symbol::sym; use rustc_target::abi::Abi; @@ -146,14 +146,20 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn binary_int_op( &self, bin_op: mir::BinOp, - // passing in raw bits - l: u128, - left_layout: TyAndLayout<'tcx>, - r: u128, - right_layout: TyAndLayout<'tcx>, + left: &ImmTy<'tcx, M::Provenance>, + right: &ImmTy<'tcx, M::Provenance>, ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> { use rustc_middle::mir::BinOp::*; + // This checks the size, so that we can just assert it below. + let l = left.to_scalar_int()?; + let r = right.to_scalar_int()?; + // Prepare to convert the values to signed or unsigned form. 
+ let l_signed = || l.try_to_int(left.layout.size).unwrap(); + let l_unsigned = || l.try_to_uint(left.layout.size).unwrap(); + let r_signed = || r.try_to_int(right.layout.size).unwrap(); + let r_unsigned = || r.try_to_uint(right.layout.size).unwrap(); + let throw_ub_on_overflow = match bin_op { AddUnchecked => Some(sym::unchecked_add), SubUnchecked => Some(sym::unchecked_sub), @@ -165,69 +171,72 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Shift ops can have an RHS with a different numeric type. if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) { - let size = left_layout.size.bits(); + let size = left.layout.size.bits(); // The shift offset is implicitly masked to the type size. (This is the one MIR operator // that does *not* directly map to a single LLVM operation.) Compute how much we // actually shift and whether there was an overflow due to shifting too much. - let (shift_amount, overflow) = if right_layout.abi.is_signed() { - let shift_amount = self.sign_extend(r, right_layout) as i128; + let (shift_amount, overflow) = if right.layout.abi.is_signed() { + let shift_amount = r_signed(); let overflow = shift_amount < 0 || shift_amount >= i128::from(size); + // Deliberately wrapping `as` casts: shift_amount *can* be negative, but the result + // of the `as` will be equal modulo `size` (since it is a power of two). let masked_amount = (shift_amount as u128) % u128::from(size); - debug_assert_eq!(overflow, shift_amount != (masked_amount as i128)); + assert_eq!(overflow, shift_amount != (masked_amount as i128)); (masked_amount, overflow) } else { - let shift_amount = r; + let shift_amount = r_unsigned(); let masked_amount = shift_amount % u128::from(size); (masked_amount, shift_amount != masked_amount) }; let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit // Compute the shifted result. 
- let result = if left_layout.abi.is_signed() { - let l = self.sign_extend(l, left_layout) as i128; + let result = if left.layout.abi.is_signed() { + let l = l_signed(); let result = match bin_op { Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(), Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(), _ => bug!(), }; - result as u128 + ScalarInt::truncate_from_int(result, left.layout.size).0 } else { - match bin_op { + let l = l_unsigned(); + let result = match bin_op { Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(), Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(), _ => bug!(), - } + }; + ScalarInt::truncate_from_uint(result, left.layout.size).0 }; - let truncated = self.truncate(result, left_layout); if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { throw_ub_custom!( fluent::const_eval_overflow_shift, - val = if right_layout.abi.is_signed() { - (self.sign_extend(r, right_layout) as i128).to_string() + val = if right.layout.abi.is_signed() { + r_signed().to_string() } else { - r.to_string() + r_unsigned().to_string() }, name = intrinsic_name ); } - return Ok((ImmTy::from_uint(truncated, left_layout), overflow)); + return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); } // For the remaining ops, the types must be the same on both sides - if left_layout.ty != right_layout.ty { + if left.layout.ty != right.layout.ty { span_bug!( self.cur_span(), "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})", - l_ty = left_layout.ty, - r_ty = right_layout.ty, + l_ty = left.layout.ty, + r_ty = right.layout.ty, ) } - let size = left_layout.size; + let size = left.layout.size; // Operations that need special treatment for signed integers - if left_layout.abi.is_signed() { + if left.layout.abi.is_signed() { let op: Option<fn(&i128, &i128) -> bool> = match bin_op { Lt => Some(i128::lt), Le => Some(i128::le), @@ -236,18 +245,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, 
M> { _ => None, }; if let Some(op) = op { - let l = self.sign_extend(l, left_layout) as i128; - let r = self.sign_extend(r, right_layout) as i128; - return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false)); + return Ok((ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx), false)); } if bin_op == Cmp { - let l = self.sign_extend(l, left_layout) as i128; - let r = self.sign_extend(r, right_layout) as i128; - return Ok(self.three_way_compare(l, r)); + return Ok(self.three_way_compare(l_signed(), r_signed())); } let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op { - Div if r == 0 => throw_ub!(DivisionByZero), - Rem if r == 0 => throw_ub!(RemainderByZero), + Div if r.is_null() => throw_ub!(DivisionByZero), + Rem if r.is_null() => throw_ub!(RemainderByZero), Div => Some(i128::overflowing_div), Rem => Some(i128::overflowing_rem), Add | AddUnchecked => Some(i128::overflowing_add), @@ -256,8 +261,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { _ => None, }; if let Some(op) = op { - let l = self.sign_extend(l, left_layout) as i128; - let r = self.sign_extend(r, right_layout) as i128; + let l = l_signed(); + let r = r_signed(); // We need a special check for overflowing Rem and Div since they are *UB* // on overflow, which can happen with "int_min $OP -1". @@ -272,17 +277,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { } let (result, oflo) = op(l, r); - // This may be out-of-bounds for the result type, so we have to truncate ourselves. + // This may be out-of-bounds for the result type, so we have to truncate. // If that truncation loses any information, we have an overflow. 
- let result = result as u128; - let truncated = self.truncate(result, left_layout); - let overflow = oflo || self.sign_extend(truncated, left_layout) != result; + let (result, lossy) = ScalarInt::truncate_from_int(result, left.layout.size); + let overflow = oflo || lossy; if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); } - return Ok((ImmTy::from_uint(truncated, left_layout), overflow)); + return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); } } + // From here on it's okay to treat everything as unsigned. + let l = l_unsigned(); + let r = r_unsigned(); if bin_op == Cmp { return Ok(self.three_way_compare(l, r)); @@ -297,12 +304,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Gt => ImmTy::from_bool(l > r, *self.tcx), Ge => ImmTy::from_bool(l >= r, *self.tcx), - BitOr => ImmTy::from_uint(l | r, left_layout), - BitAnd => ImmTy::from_uint(l & r, left_layout), - BitXor => ImmTy::from_uint(l ^ r, left_layout), + BitOr => ImmTy::from_uint(l | r, left.layout), + BitAnd => ImmTy::from_uint(l & r, left.layout), + BitXor => ImmTy::from_uint(l ^ r, left.layout), Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => { - assert!(!left_layout.abi.is_signed()); + assert!(!left.layout.abi.is_signed()); let op: fn(u128, u128) -> (u128, bool) = match bin_op { Add | AddUnchecked => u128::overflowing_add, Sub | SubUnchecked => u128::overflowing_sub, @@ -316,21 +323,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let (result, oflo) = op(l, r); // Truncate to target type. // If that truncation loses any information, we have an overflow. 
- let truncated = self.truncate(result, left_layout); - let overflow = oflo || truncated != result; + let (result, lossy) = ScalarInt::truncate_from_uint(result, left.layout.size); + let overflow = oflo || lossy; if overflow && let Some(intrinsic_name) = throw_ub_on_overflow { throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name); } - return Ok((ImmTy::from_uint(truncated, left_layout), overflow)); + return Ok((ImmTy::from_scalar_int(result, left.layout), overflow)); } _ => span_bug!( self.cur_span(), "invalid binary op {:?}: {:?}, {:?} (both {})", bin_op, - l, - r, - right_layout.ty, + left, + right, + right.layout.ty, ), }; @@ -427,9 +434,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { right.layout.ty ); - let l = left.to_scalar().to_bits(left.layout.size)?; - let r = right.to_scalar().to_bits(right.layout.size)?; - self.binary_int_op(bin_op, l, left.layout, r, right.layout) + self.binary_int_op(bin_op, left, right) } _ if left.layout.ty.is_any_ptr() => { // The RHS type must be a `pointer` *or an integer type* (for `Offset`). 
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index 9f9433e483bc0..38ddd34777a9e 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -300,6 +300,11 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { } } + #[inline(always)] + pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> { + self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into()) + } + #[inline(always)] #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn assert_int(self) -> ScalarInt { @@ -311,16 +316,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { #[inline] pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); - self.try_to_int() - .map_err(|_| err_unsup!(ReadPointerAsInt(None)))? - .to_bits(target_size) - .map_err(|size| { - err_ub!(ScalarSizeMismatch(ScalarSizeMismatch { - target_size: target_size.bytes(), - data_size: size.bytes(), - })) - .into() - }) + self.to_scalar_int()?.to_bits(target_size).map_err(|size| { + err_ub!(ScalarSizeMismatch(ScalarSizeMismatch { + target_size: target_size.bytes(), + data_size: size.bytes(), + })) + .into() + }) } #[inline(always)] diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 71d7dfd8b01ef..046ced36b64a4 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -167,9 +167,12 @@ impl<D: Decoder> Decodable<D> for ScalarInt { impl ScalarInt { pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZero::new(1).unwrap() }; - pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZero::new(1).unwrap() }; + fn raw(data: u128, size: Size) -> Self { + Self { data, size: NonZero::new(size.bytes() as u8).unwrap() } + } + #[inline] pub fn size(self) -> Size { 
Size::from_bytes(self.size.get()) @@ -196,7 +199,7 @@ impl ScalarInt { #[inline] pub fn null(size: Size) -> Self { - Self { data: 0, size: NonZero::new(size.bytes() as u8).unwrap() } + Self::raw(0, size) } #[inline] @@ -207,11 +210,15 @@ impl ScalarInt { #[inline] pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> { let data = i.into(); - if size.truncate(data) == data { - Some(Self { data, size: NonZero::new(size.bytes() as u8).unwrap() }) - } else { - None - } + if size.truncate(data) == data { Some(Self::raw(data, size)) } else { None } + } + + /// Returns the truncated result, and whether truncation changed the value. + #[inline] + pub fn truncate_from_uint(i: impl Into<u128>, size: Size) -> (Self, bool) { + let data = i.into(); + let r = Self::raw(size.truncate(data), size); + (r, r.data != data) } #[inline] @@ -220,12 +227,20 @@ impl ScalarInt { // `into` performed sign extension, we have to truncate let truncated = size.truncate(i as u128); if size.sign_extend(truncated) as i128 == i { - Some(Self { data: truncated, size: NonZero::new(size.bytes() as u8).unwrap() }) + Some(Self::raw(truncated, size)) } else { None } } + /// Returns the truncated result, and whether truncation changed the value. 
+ #[inline] + pub fn truncate_from_int(i: impl Into<i128>, size: Size) -> (Self, bool) { + let data = i.into(); + let r = Self::raw(size.truncate(data as u128), size); + (r, size.sign_extend(r.data) as i128 != data) + } + #[inline] pub fn try_from_target_usize(i: impl Into<u128>, tcx: TyCtxt<'_>) -> Option<Self> { Self::try_from_uint(i, tcx.data_layout.pointer_size) From 28693258ae49ae64aab4937e303fa60a8d6efe05 Mon Sep 17 00:00:00 2001 From: Ralf Jung <post@ralfj.de> Date: Thu, 18 Apr 2024 08:38:37 +0200 Subject: [PATCH 2/4] ScalarInt: add methods to assert being a (u)int of given size --- .../rustc_codegen_cranelift/src/constant.rs | 41 ++++++------ .../src/value_and_place.rs | 4 +- .../src/interpret/discriminant.rs | 3 +- .../src/interpret/operator.rs | 8 +-- compiler/rustc_middle/src/mir/consts.rs | 6 +- .../rustc_middle/src/mir/interpret/value.rs | 4 +- compiler/rustc_middle/src/ty/consts.rs | 2 +- compiler/rustc_middle/src/ty/consts/int.rs | 67 +++++++++++-------- .../src/known_panics_lint.rs | 2 +- .../rustc_mir_transform/src/match_branches.rs | 3 +- .../rustc_mir_transform/src/promote_consts.rs | 6 +- compiler/rustc_transmute/src/layout/tree.rs | 2 +- 12 files changed, 77 insertions(+), 71 deletions(-) diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs index 635ed6c8e88b1..cb05c17ec2a88 100644 --- a/compiler/rustc_codegen_cranelift/src/constant.rs +++ b/compiler/rustc_codegen_cranelift/src/constant.rs @@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>( if fx.clif_type(layout.ty).is_some() { return CValue::const_val(fx, layout, int); } else { - let raw_val = int.size().truncate(int.to_bits(int.size()).unwrap()); + let raw_val = int.size().truncate(int.assert_bits(int.size())); let val = match int.size().bytes() { 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64), 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64), @@ -491,27 +491,24 @@ pub(crate) fn mir_operand_get_const_val<'tcx>( 
return None; } let scalar_int = mir_operand_get_const_val(fx, operand)?; - let scalar_int = match fx - .layout_of(*ty) - .size - .cmp(&scalar_int.size()) - { - Ordering::Equal => scalar_int, - Ordering::Less => match ty.kind() { - ty::Uint(_) => ScalarInt::try_from_uint( - scalar_int.try_to_uint(scalar_int.size()).unwrap(), - fx.layout_of(*ty).size, - ) - .unwrap(), - ty::Int(_) => ScalarInt::try_from_int( - scalar_int.try_to_int(scalar_int.size()).unwrap(), - fx.layout_of(*ty).size, - ) - .unwrap(), - _ => unreachable!(), - }, - Ordering::Greater => return None, - }; + let scalar_int = + match fx.layout_of(*ty).size.cmp(&scalar_int.size()) { + Ordering::Equal => scalar_int, + Ordering::Less => match ty.kind() { + ty::Uint(_) => ScalarInt::try_from_uint( + scalar_int.assert_uint(scalar_int.size()), + fx.layout_of(*ty).size, + ) + .unwrap(), + ty::Int(_) => ScalarInt::try_from_int( + scalar_int.assert_int(scalar_int.size()), + fx.layout_of(*ty).size, + ) + .unwrap(), + _ => unreachable!(), + }, + Ordering::Greater => return None, + }; computed_scalar_int = Some(scalar_int); } Rvalue::Use(operand) => { diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index fc5b88a54fe5a..ad863903ceecf 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -326,7 +326,7 @@ impl<'tcx> CValue<'tcx> { let val = match layout.ty.kind() { ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => { - let const_val = const_val.to_bits(layout.size).unwrap(); + let const_val = const_val.assert_bits(layout.size); let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64); let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64); fx.bcx.ins().iconcat(lsb, msb) @@ -338,7 +338,7 @@ impl<'tcx> CValue<'tcx> { | ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) 
=> { - let raw_val = const_val.size().truncate(const_val.to_bits(layout.size).unwrap()); + let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size)); fx.bcx.ins().iconst(clif_ty, raw_val as i64) } ty::Float(FloatTy::F32) => { diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index 704f597cfdb6b..caacc6f57d3c2 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -295,8 +295,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { &niche_start_val, )? .to_scalar() - .try_to_int() - .unwrap(); + .assert_int(); Ok(Some((tag, tag_field))) } } diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 1a126d1fbb096..9af755e40de87 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -155,10 +155,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let l = left.to_scalar_int()?; let r = right.to_scalar_int()?; // Prepare to convert the values to signed or unsigned form. 
- let l_signed = || l.try_to_int(left.layout.size).unwrap(); - let l_unsigned = || l.try_to_uint(left.layout.size).unwrap(); - let r_signed = || r.try_to_int(right.layout.size).unwrap(); - let r_unsigned = || r.try_to_uint(right.layout.size).unwrap(); + let l_signed = || l.assert_int(left.layout.size); + let l_unsigned = || l.assert_uint(left.layout.size); + let r_signed = || r.assert_int(right.layout.size); + let r_unsigned = || r.assert_uint(right.layout.size); let throw_ub_on_overflow = match bin_op { AddUnchecked => Some(sym::unchecked_add), diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 155af06201273..0af012e90cbae 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -87,7 +87,7 @@ impl<'tcx> ConstValue<'tcx> { } pub fn try_to_bits(&self, size: Size) -> Option<u128> { - self.try_to_scalar_int()?.to_bits(size).ok() + self.try_to_scalar_int()?.try_to_bits(size).ok() } pub fn try_to_bool(&self) -> Option<bool> { @@ -260,7 +260,7 @@ impl<'tcx> Const<'tcx> { #[inline] pub fn try_to_bits(self, size: Size) -> Option<u128> { - self.try_to_scalar_int()?.to_bits(size).ok() + self.try_to_scalar_int()?.try_to_bits(size).ok() } #[inline] @@ -334,7 +334,7 @@ impl<'tcx> Const<'tcx> { let int = self.try_eval_scalar_int(tcx, param_env)?; let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size; - int.to_bits(size).ok() + int.try_to_bits(size).ok() } /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type. 
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index 38ddd34777a9e..22779b388308d 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -236,7 +236,7 @@ impl<Prov> Scalar<Prov> { ) -> Result<Either<u128, Pointer<Prov>>, ScalarSizeMismatch> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); Ok(match self { - Scalar::Int(int) => Left(int.to_bits(target_size).map_err(|size| { + Scalar::Int(int) => Left(int.try_to_bits(target_size).map_err(|size| { ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() } })?), Scalar::Ptr(ptr, sz) => { @@ -316,7 +316,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { #[inline] pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); - self.to_scalar_int()?.to_bits(target_size).map_err(|size| { + self.to_scalar_int()?.try_to_bits(target_size).map_err(|size| { err_ub!(ScalarSizeMismatch(ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes(), diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs index 49b806b8369f8..6d2d91dc9ab47 100644 --- a/compiler/rustc_middle/src/ty/consts.rs +++ b/compiler/rustc_middle/src/ty/consts.rs @@ -406,7 +406,7 @@ impl<'tcx> Const<'tcx> { let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size; // if `ty` does not depend on generic parameters, use an empty param_env - int.to_bits(size).ok() + int.try_to_bits(size).ok() } #[inline] diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 046ced36b64a4..2bf2341cbc46c 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -247,14 +247,7 @@ impl ScalarInt { } #[inline] - pub fn 
assert_bits(self, target_size: Size) -> u128 { - self.to_bits(target_size).unwrap_or_else(|size| { - bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes()) - }) - } - - #[inline] - pub fn to_bits(self, target_size: Size) -> Result<u128, Size> { + pub fn try_to_bits(self, target_size: Size) -> Result<u128, Size> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); if target_size.bytes() == u64::from(self.size.get()) { self.check_data(); @@ -264,16 +257,28 @@ impl ScalarInt { } } + #[inline] + pub fn assert_bits(self, target_size: Size) -> u128 { + self.try_to_bits(target_size).unwrap_or_else(|size| { + bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes()) + }) + } + /// Tries to convert the `ScalarInt` to an unsigned integer of the given size. /// Fails if the size of the `ScalarInt` is not equal to `size` and returns the /// `ScalarInt`s size in that case. #[inline] pub fn try_to_uint(self, size: Size) -> Result<u128, Size> { - self.to_bits(size) + self.try_to_bits(size) + } + + #[inline] + pub fn assert_uint(self, size: Size) -> u128 { + self.assert_bits(size) } // Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt` - // in not equal to `Size { raw: 1 }` and returns the `size` value of the `ScalarInt` in + // in not equal to 1 byte and returns the `size` value of the `ScalarInt` in // that case. #[inline] pub fn try_to_u8(self) -> Result<u8, Size> { @@ -281,7 +286,7 @@ impl ScalarInt { } /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt` - /// in not equal to `Size { raw: 2 }` and returns the `size` value of the `ScalarInt` in + /// in not equal to 2 bytes and returns the `size` value of the `ScalarInt` in /// that case. #[inline] pub fn try_to_u16(self) -> Result<u16, Size> { @@ -289,7 +294,7 @@ impl ScalarInt { } /// Tries to convert the `ScalarInt` to `u32`. 
Fails if the `size` of the `ScalarInt` - /// in not equal to `Size { raw: 4 }` and returns the `size` value of the `ScalarInt` in + /// in not equal to 4 bytes and returns the `size` value of the `ScalarInt` in /// that case. #[inline] pub fn try_to_u32(self) -> Result<u32, Size> { @@ -297,7 +302,7 @@ impl ScalarInt { } /// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt` - /// in not equal to `Size { raw: 8 }` and returns the `size` value of the `ScalarInt` in + /// in not equal to 8 bytes and returns the `size` value of the `ScalarInt` in /// that case. #[inline] pub fn try_to_u64(self) -> Result<u64, Size> { @@ -305,7 +310,7 @@ impl ScalarInt { } /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt` - /// in not equal to `Size { raw: 16 }` and returns the `size` value of the `ScalarInt` in + /// in not equal to 16 bytes and returns the `size` value of the `ScalarInt` in /// that case. #[inline] pub fn try_to_u128(self) -> Result<u128, Size> { @@ -318,7 +323,7 @@ impl ScalarInt { } // Tries to convert the `ScalarInt` to `bool`. Fails if the `size` of the `ScalarInt` - // in not equal to `Size { raw: 1 }` or if the value is not 0 or 1 and returns the `size` + // in not equal to 1 byte or if the value is not 0 or 1 and returns the `size` // value of the `ScalarInt` in that case. #[inline] pub fn try_to_bool(self) -> Result<bool, Size> { @@ -334,40 +339,46 @@ impl ScalarInt { /// `ScalarInt`s size in that case. #[inline] pub fn try_to_int(self, size: Size) -> Result<i128, Size> { - let b = self.to_bits(size)?; + let b = self.try_to_bits(size)?; Ok(size.sign_extend(b) as i128) } + #[inline] + pub fn assert_int(self, size: Size) -> i128 { + let b = self.assert_bits(size); + size.sign_extend(b) as i128 + } + /// Tries to convert the `ScalarInt` to i8. 
- /// Fails if the size of the `ScalarInt` is not equal to `Size { raw: 1 }` + /// Fails if the size of the `ScalarInt` is not equal to 1 byte /// and returns the `ScalarInt`s size in that case. pub fn try_to_i8(self) -> Result<i8, Size> { self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap()) } /// Tries to convert the `ScalarInt` to i16. - /// Fails if the size of the `ScalarInt` is not equal to `Size { raw: 2 }` + /// Fails if the size of the `ScalarInt` is not equal to 2 bytes /// and returns the `ScalarInt`s size in that case. pub fn try_to_i16(self) -> Result<i16, Size> { self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap()) } /// Tries to convert the `ScalarInt` to i32. - /// Fails if the size of the `ScalarInt` is not equal to `Size { raw: 4 }` + /// Fails if the size of the `ScalarInt` is not equal to 4 bytes /// and returns the `ScalarInt`s size in that case. pub fn try_to_i32(self) -> Result<i32, Size> { self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap()) } /// Tries to convert the `ScalarInt` to i64. - /// Fails if the size of the `ScalarInt` is not equal to `Size { raw: 8 }` + /// Fails if the size of the `ScalarInt` is not equal to 8 bytes /// and returns the `ScalarInt`s size in that case. pub fn try_to_i64(self) -> Result<i64, Size> { self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap()) } /// Tries to convert the `ScalarInt` to i128. - /// Fails if the size of the `ScalarInt` is not equal to `Size { raw: 16 }` + /// Fails if the size of the `ScalarInt` is not equal to 16 bytes /// and returns the `ScalarInt`s size in that case. pub fn try_to_i128(self) -> Result<i128, Size> { self.try_to_int(Size::from_bits(128)) @@ -381,7 +392,7 @@ impl ScalarInt { #[inline] pub fn try_to_float<F: Float>(self) -> Result<F, Size> { // Going through `to_uint` to check size and truncation. 
- Ok(F::from_bits(self.to_bits(Size::from_bits(F::BITS))?)) + Ok(F::from_bits(self.try_to_bits(Size::from_bits(F::BITS))?)) } #[inline] @@ -430,7 +441,7 @@ macro_rules! try_from { fn try_from(int: ScalarInt) -> Result<Self, Size> { // The `unwrap` cannot fail because to_bits (if it succeeds) // is guaranteed to return a value that fits into the size. - int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>())) + int.try_to_bits(Size::from_bytes(std::mem::size_of::<$ty>())) .map(|u| u.try_into().unwrap()) } } @@ -465,7 +476,7 @@ impl TryFrom<ScalarInt> for char { #[inline] fn try_from(int: ScalarInt) -> Result<Self, Self::Error> { - let Ok(bits) = int.to_bits(Size::from_bytes(std::mem::size_of::<char>())) else { + let Ok(bits) = int.try_to_bits(Size::from_bytes(std::mem::size_of::<char>())) else { return Err(CharTryFromScalarInt); }; match char::from_u32(bits.try_into().unwrap()) { @@ -487,7 +498,7 @@ impl TryFrom<ScalarInt> for Half { type Error = Size; #[inline] fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.to_bits(Size::from_bytes(2)).map(Self::from_bits) + int.try_to_bits(Size::from_bytes(2)).map(Self::from_bits) } } @@ -503,7 +514,7 @@ impl TryFrom<ScalarInt> for Single { type Error = Size; #[inline] fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.to_bits(Size::from_bytes(4)).map(Self::from_bits) + int.try_to_bits(Size::from_bytes(4)).map(Self::from_bits) } } @@ -519,7 +530,7 @@ impl TryFrom<ScalarInt> for Double { type Error = Size; #[inline] fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.to_bits(Size::from_bytes(8)).map(Self::from_bits) + int.try_to_bits(Size::from_bytes(8)).map(Self::from_bits) } } @@ -535,7 +546,7 @@ impl TryFrom<ScalarInt> for Quad { type Error = Size; #[inline] fn try_from(int: ScalarInt) -> Result<Self, Size> { - int.to_bits(Size::from_bytes(16)).map(Self::from_bits) + int.try_to_bits(Size::from_bytes(16)).map(Self::from_bits) } } diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs 
b/compiler/rustc_mir_transform/src/known_panics_lint.rs index 2218154ea5e78..2744026a7c9de 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -796,7 +796,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> { if let Some(ref value) = self.eval_operand(discr) && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) && let Ok(constant) = value_const.try_to_int() - && let Ok(constant) = constant.to_bits(constant.size()) + && let Ok(constant) = constant.try_to_bits(constant.size()) { // We managed to evaluate the discriminant, so we know we only need to visit // one target. diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 4d9a198eeb293..a8a576e4efe4a 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -369,8 +369,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { } fn int_equal(l: ScalarInt, r: impl Into<u128>, size: Size) -> bool { - l.try_to_int(l.size()).unwrap() - == ScalarInt::try_from_uint(r, size).unwrap().try_to_int(size).unwrap() + l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size) } // We first compare the two branches, and then the other branches need to fulfill the same conditions. 
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index a9d4b860b7ad8..1f4af0ec63dd9 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -490,14 +490,14 @@ impl<'tcx> Validator<'_, 'tcx> { } _ => None, }; - match rhs_val.map(|x| x.try_to_uint(sz).unwrap()) { + match rhs_val.map(|x| x.assert_uint(sz)) { // for the zero test, int vs uint does not matter Some(x) if x != 0 => {} // okay _ => return Err(Unpromotable), // value not known or 0 -- not okay } // Furthermore, for signed divison, we also have to exclude `int::MIN / -1`. if lhs_ty.is_signed() { - match rhs_val.map(|x| x.try_to_int(sz).unwrap()) { + match rhs_val.map(|x| x.assert_int(sz)) { Some(-1) | None => { // The RHS is -1 or unknown, so we have to be careful. // But is the LHS int::MIN? @@ -508,7 +508,7 @@ impl<'tcx> Validator<'_, 'tcx> { _ => None, }; let lhs_min = sz.signed_int_min(); - match lhs_val.map(|x| x.try_to_int(sz).unwrap()) { + match lhs_val.map(|x| x.assert_int(sz)) { Some(x) if x != lhs_min => {} // okay _ => return Err(Unpromotable), // value not known or int::MIN -- not okay } diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index 12c984f16032a..edd3227210b25 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -420,7 +420,7 @@ pub(crate) mod rustc { fn from_tag(tag: ScalarInt, tcx: TyCtxt<'tcx>) -> Self { use rustc_target::abi::Endian; let size = tag.size(); - let bits = tag.to_bits(size).unwrap(); + let bits = tag.assert_bits(size); let bytes: [u8; 16]; let bytes = match tcx.data_layout.endian { Endian::Little => { From a1049bdfe830ab90f456f2b21647848561514dc2 Mon Sep 17 00:00:00 2001 From: Ralf Jung <post@ralfj.de> Date: Thu, 18 Apr 2024 08:44:17 +0200 Subject: [PATCH 3/4] avoid PartialOrd on ScalarInt we don't know their sign so we cannot, 
in general, order them properly --- compiler/rustc_middle/src/thir.rs | 17 ++++++++--------- compiler/rustc_middle/src/ty/consts/int.rs | 2 +- compiler/rustc_middle/src/ty/consts/valtree.rs | 2 +- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index 8763e94c8b05e..36eb99750b68f 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -16,7 +16,7 @@ use rustc_hir::{BindingAnnotation, ByRef, HirId, MatchSource, RangeEnd}; use rustc_index::newtype_index; use rustc_index::IndexVec; use rustc_middle::middle::region; -use rustc_middle::mir::interpret::{AllocId, Scalar}; +use rustc_middle::mir::interpret::AllocId; use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, UnOp}; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::layout::IntegerExt; @@ -1012,15 +1012,14 @@ impl<'tcx> PatRangeBoundary<'tcx> { // raw data comparisons are appropriate. E.g. `unicode-normalization` has // many ranges such as '\u{037A}'..='\u{037F}', and chars can be compared // in this way. 
- (Finite(mir::Const::Ty(a)), Finite(mir::Const::Ty(b))) - if matches!(ty.kind(), ty::Uint(_) | ty::Char) => - { - return Some(a.to_valtree().cmp(&b.to_valtree())); + (Finite(a), Finite(b)) if matches!(ty.kind(), ty::Uint(_) | ty::Char) => { + if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) { + let sz = ty.primitive_size(tcx); + let a = a.assert_uint(sz); + let b = b.assert_uint(sz); + return Some(a.cmp(&b)); + } } - ( - Finite(mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(a)), _)), - Finite(mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(b)), _)), - ) if matches!(ty.kind(), ty::Uint(_) | ty::Char) => return Some(a.cmp(&b)), _ => {} } diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 2bf2341cbc46c..40ac87873a0a4 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -126,7 +126,7 @@ impl IntoDiagArg for ConstInt { /// /// This is a packed struct in order to allow this type to be optimally embedded in enums /// (like Scalar). -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] #[repr(packed)] pub struct ScalarInt { /// The first `size` bytes of `data` are the value. diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index ffa0e89c47328..96bc5515a5695 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -3,7 +3,7 @@ use crate::mir::interpret::Scalar; use crate::ty::{self, Ty, TyCtxt}; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; -#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)] +#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq)] #[derive(HashStable)] /// This datastructure is used to represent the value of constants used in the type system. 
/// From 12b540a1cce0e8b2a6570025680b5c0d7db9823c Mon Sep 17 00:00:00 2001 From: Ralf Jung <post@ralfj.de> Date: Thu, 18 Apr 2024 19:24:14 +0200 Subject: [PATCH 4/4] Revert "avoid PartialOrd on ScalarInt" This reverts commit a1049bdfe830ab90f456f2b21647848561514dc2. --- compiler/rustc_middle/src/thir.rs | 17 +++++++++-------- compiler/rustc_middle/src/ty/consts/int.rs | 2 +- compiler/rustc_middle/src/ty/consts/valtree.rs | 2 +- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index 36eb99750b68f..8763e94c8b05e 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -16,7 +16,7 @@ use rustc_hir::{BindingAnnotation, ByRef, HirId, MatchSource, RangeEnd}; use rustc_index::newtype_index; use rustc_index::IndexVec; use rustc_middle::middle::region; -use rustc_middle::mir::interpret::AllocId; +use rustc_middle::mir::interpret::{AllocId, Scalar}; use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, UnOp}; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::layout::IntegerExt; @@ -1012,14 +1012,15 @@ impl<'tcx> PatRangeBoundary<'tcx> { // raw data comparisons are appropriate. E.g. `unicode-normalization` has // many ranges such as '\u{037A}'..='\u{037F}', and chars can be compared // in this way. 
- (Finite(a), Finite(b)) if matches!(ty.kind(), ty::Uint(_) | ty::Char) => { - if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) { - let sz = ty.primitive_size(tcx); - let a = a.assert_uint(sz); - let b = b.assert_uint(sz); - return Some(a.cmp(&b)); - } + (Finite(mir::Const::Ty(a)), Finite(mir::Const::Ty(b))) + if matches!(ty.kind(), ty::Uint(_) | ty::Char) => + { + return Some(a.to_valtree().cmp(&b.to_valtree())); } + ( + Finite(mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(a)), _)), + Finite(mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(b)), _)), + ) if matches!(ty.kind(), ty::Uint(_) | ty::Char) => return Some(a.cmp(&b)), _ => {} } diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 40ac87873a0a4..2bf2341cbc46c 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -126,7 +126,7 @@ impl IntoDiagArg for ConstInt { /// /// This is a packed struct in order to allow this type to be optimally embedded in enums /// (like Scalar). -#[derive(Clone, Copy, Eq, PartialEq, Hash)] +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] #[repr(packed)] pub struct ScalarInt { /// The first `size` bytes of `data` are the value. diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index 96bc5515a5695..ffa0e89c47328 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -3,7 +3,7 @@ use crate::mir::interpret::Scalar; use crate::ty::{self, Ty, TyCtxt}; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; -#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)] #[derive(HashStable)] /// This datastructure is used to represent the value of constants used in the type system. ///