diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 881607f65ba65..da4b76a4d527d 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -122,7 +122,7 @@
 #![feature(on_unimplemented)]
 #![feature(exact_chunks)]
 #![feature(pointer_methods)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 #![cfg_attr(stage0, feature(generic_param_attrs))]
 #![feature(rustc_const_unstable)]
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index 3227216930700..1c8ff316e55aa 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -25,7 +25,7 @@
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(exact_chunks)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 
 extern crate alloc_system;
 extern crate core;
diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index f4ed24cc3a372..04dd42583d406 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -102,6 +102,7 @@
 #![feature(untagged_unions)]
 #![feature(unwind_attributes)]
 #![feature(doc_alias)]
+#![feature(inclusive_range_methods)]
 
 #![cfg_attr(not(stage0), feature(mmx_target_feature))]
 #![cfg_attr(not(stage0), feature(tbm_target_feature))]
diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs
index d70f7ae66f903..b01a769eda7fd 100644
--- a/src/libcore/ops/range.rs
+++ b/src/libcore/ops/range.rs
@@ -318,9 +318,9 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
 /// # Examples
 ///
 /// ```
-/// #![feature(inclusive_range_fields)]
+/// #![feature(inclusive_range_methods)]
 ///
-/// assert_eq!((3..=5), std::ops::RangeInclusive { start: 3, end: 5 });
+/// assert_eq!((3..=5), std::ops::RangeInclusive::new(3, 5));
 /// assert_eq!(3 + 4 + 5, (3..=5).sum());
 ///
 /// let arr = [0, 1, 2, 3];
@@ -331,14 +331,88 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
 #[derive(Clone, PartialEq, Eq, Hash)]  // not Copy -- see #27186
 #[stable(feature = "inclusive_range", since = "1.26.0")]
 pub struct RangeInclusive<Idx> {
+    // FIXME: The current representation follows RFC 1980,
+    // but it is known that LLVM is not able to optimize loops following that RFC.
+    // Consider adding an extra `bool` field to indicate emptiness of the range.
+    // See #45222 for performance test cases.
+    #[cfg(not(stage0))]
+    pub(crate) start: Idx,
+    #[cfg(not(stage0))]
+    pub(crate) end: Idx,
     /// The lower bound of the range (inclusive).
+    #[cfg(stage0)]
     #[unstable(feature = "inclusive_range_fields", issue = "49022")]
     pub start: Idx,
     /// The upper bound of the range (inclusive).
+    #[cfg(stage0)]
     #[unstable(feature = "inclusive_range_fields", issue = "49022")]
     pub end: Idx,
 }
 
+impl<Idx> RangeInclusive<Idx> {
+    /// Creates a new inclusive range. Equivalent to writing `start..=end`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(inclusive_range_methods)]
+    /// use std::ops::RangeInclusive;
+    ///
+    /// assert_eq!(3..=5, RangeInclusive::new(3, 5));
+    /// ```
+    #[unstable(feature = "inclusive_range_methods", issue = "49022")]
+    #[inline]
+    pub const fn new(start: Idx, end: Idx) -> Self {
+        Self { start, end }
+    }
+
+    /// Returns the lower bound of the range (inclusive).
+    ///
+    /// When using an inclusive range for iteration, the values of `start()` and
+    /// [`end()`] are unspecified after the iteration ended. To determine
+    /// whether the inclusive range is empty, use the [`is_empty()`] method
+    /// instead of comparing `start() > end()`.
+    ///
+    /// [`end()`]: #method.end
+    /// [`is_empty()`]: #method.is_empty
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(inclusive_range_methods)]
+    ///
+    /// assert_eq!((3..=5).start(), &3);
+    /// ```
+    #[unstable(feature = "inclusive_range_methods", issue = "49022")]
+    #[inline]
+    pub fn start(&self) -> &Idx {
+        &self.start
+    }
+
+    /// Returns the upper bound of the range (inclusive).
+    ///
+    /// When using an inclusive range for iteration, the values of [`start()`]
+    /// and `end()` are unspecified after the iteration ended. To determine
+    /// whether the inclusive range is empty, use the [`is_empty()`] method
+    /// instead of comparing `start() > end()`.
+    ///
+    /// [`start()`]: #method.start
+    /// [`is_empty()`]: #method.is_empty
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(inclusive_range_methods)]
+    ///
+    /// assert_eq!((3..=5).end(), &5);
+    /// ```
+    #[unstable(feature = "inclusive_range_methods", issue = "49022")]
+    #[inline]
+    pub fn end(&self) -> &Idx {
+        &self.end
+    }
+}
+
 #[stable(feature = "inclusive_range", since = "1.26.0")]
 impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index e4d277179382f..f6750c590b33a 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -44,7 +44,7 @@
 #![feature(exact_chunks)]
 #![cfg_attr(stage0, feature(atomic_nand))]
 #![feature(reverse_bits)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 #![feature(iterator_find_map)]
 
 extern crate core;
diff --git a/src/libcore/tests/ops.rs b/src/libcore/tests/ops.rs
index bed08f86d72c1..d66193b1687c8 100644
--- a/src/libcore/tests/ops.rs
+++ b/src/libcore/tests/ops.rs
@@ -50,21 +50,21 @@ fn test_full_range() {
 
 #[test]
 fn test_range_inclusive() {
-    let mut r = RangeInclusive { start: 1i8, end: 2 };
+    let mut r = RangeInclusive::new(1i8, 2);
     assert_eq!(r.next(), Some(1));
     assert_eq!(r.next(), Some(2));
     assert_eq!(r.next(), None);
 
-    r = RangeInclusive { start: 127i8, end: 127 };
+    r = RangeInclusive::new(127i8, 127);
     assert_eq!(r.next(), Some(127));
     assert_eq!(r.next(), None);
 
-    r = RangeInclusive { start: -128i8, end: -128 };
+    r = RangeInclusive::new(-128i8, -128);
     assert_eq!(r.next_back(), Some(-128));
     assert_eq!(r.next_back(), None);
 
     // degenerate
-    r = RangeInclusive { start: 1, end: -1 };
+    r = RangeInclusive::new(1, -1);
     assert_eq!(r.size_hint(), (0, Some(0)));
     assert_eq!(r.next(), None);
 }
diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs
index e4b9fc1385d40..196f7879980e8 100644
--- a/src/librustc/hir/lowering.rs
+++ b/src/librustc/hir/lowering.rs
@@ -3119,6 +3119,20 @@ impl<'a> LoweringContext<'a> {
             ExprKind::Index(ref el, ref er) => {
                 hir::ExprIndex(P(self.lower_expr(el)), P(self.lower_expr(er)))
             }
+            // Desugar `<start>..=<end>` to `std::ops::RangeInclusive::new(<start>, <end>)`
+            ExprKind::Range(Some(ref e1), Some(ref e2), RangeLimits::Closed) => {
+                // FIXME: Use head_sp directly after RangeInclusive::new() is stabilized in stage0.
+                let span = self.allow_internal_unstable(CompilerDesugaringKind::DotFill, e.span);
+                let id = self.lower_node_id(e.id);
+                let e1 = self.lower_expr(e1);
+                let e2 = self.lower_expr(e2);
+                let ty_path = P(self.std_path(span, &["ops", "RangeInclusive"], false));
+                let ty = self.ty_path(id, span, hir::QPath::Resolved(None, ty_path));
+                let new_seg = P(hir::PathSegment::from_name(Symbol::intern("new")));
+                let new_path = hir::QPath::TypeRelative(ty, new_seg);
+                let new = P(self.expr(span, hir::ExprPath(new_path), ThinVec::new()));
+                hir::ExprCall(new, hir_vec![e1, e2])
+            }
             ExprKind::Range(ref e1, ref e2, lims) => {
                 use syntax::ast::RangeLimits::*;
 
@@ -3128,7 +3142,7 @@ impl<'a> LoweringContext<'a> {
                     (&None, &Some(..), HalfOpen) => "RangeTo",
                     (&Some(..), &Some(..), HalfOpen) => "Range",
                     (&None, &Some(..), Closed) => "RangeToInclusive",
-                    (&Some(..), &Some(..), Closed) => "RangeInclusive",
+                    (&Some(..), &Some(..), Closed) => unreachable!(),
                     (_, &None, Closed) => self.diagnostic()
                         .span_fatal(e.span, "inclusive range with no end")
                         .raise(),
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index 24892dfcc8f7d..9dc9fb1144efa 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -69,7 +69,7 @@
 #![feature(trusted_len)]
 #![feature(catch_expr)]
 #![feature(test)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 
 #![recursion_limit="512"]
 
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 55137e2891123..47b52cacd56bd 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -19,7 +19,6 @@ use std::cmp;
 use std::fmt;
 use std::i128;
 use std::mem;
-use std::ops::RangeInclusive;
 
 use ich::StableHashingContext;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
@@ -492,7 +491,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             ty::TyFloat(FloatTy::F64) => scalar(F64),
             ty::TyFnPtr(_) => {
                 let mut ptr = scalar_unit(Pointer);
-                ptr.valid_range.start = 1;
+                ptr.valid_range = 1..=*ptr.valid_range.end();
                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
             }
 
@@ -506,7 +505,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                 let mut data_ptr = scalar_unit(Pointer);
                 if !ty.is_unsafe_ptr() {
-                    data_ptr.valid_range.start = 1;
+                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                 }
 
                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
@@ -524,7 +523,7 @@
                     }
                     ty::TyDynamic(..) => {
                         let mut vtable = scalar_unit(Pointer);
-                        vtable.valid_range.start = 1;
+                        vtable.valid_range = 1..=*vtable.valid_range.end();
                         vtable
                     }
                     _ => return Err(LayoutError::Unknown(unsized_part))
@@ -751,8 +750,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                     match st.abi {
                         Abi::Scalar(ref mut scalar) |
                         Abi::ScalarPair(ref mut scalar, _) => {
-                            if scalar.valid_range.start == 0 {
-                                scalar.valid_range.start = 1;
+                            if *scalar.valid_range.start() == 0 {
+                                scalar.valid_range = 1..=*scalar.valid_range.end();
                             }
                         }
                         _ => {}
@@ -788,18 +787,15 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                             }
                         }
                     }
-                    if niche_variants.start > v {
-                        niche_variants.start = v;
-                    }
-                    niche_variants.end = v;
+                    niche_variants = *niche_variants.start().min(&v)..=v;
                 }
 
-                if niche_variants.start > niche_variants.end {
+                if niche_variants.start() > niche_variants.end() {
                     dataful_variant = None;
                 }
 
                 if let Some(i) = dataful_variant {
-                    let count = (niche_variants.end - niche_variants.start + 1) as u128;
+                    let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
                     for (field_index, &field) in variants[i].iter().enumerate() {
                         let (offset, niche, niche_start) =
                             match self.find_niche(field, count)? {
@@ -1659,10 +1655,10 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             let max_value = !0u128 >> (128 - bits);
 
             // Find out how many values are outside the valid range.
-            let niches = if v.start <= v.end {
-                v.start + (max_value - v.end)
+            let niches = if v.start() <= v.end() {
+                v.start() + (max_value - v.end())
             } else {
-                v.start - v.end - 1
+                v.start() - v.end() - 1
            };
 
             // Give up if we can't fit `count` consecutive niches.
@@ -1670,11 +1666,11 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                 return None;
             }
 
-            let niche_start = v.end.wrapping_add(1) & max_value;
-            let niche_end = v.end.wrapping_add(count) & max_value;
+            let niche_start = v.end().wrapping_add(1) & max_value;
+            let niche_end = v.end().wrapping_add(count) & max_value;
 
             Some((offset, Scalar {
                 value,
-                valid_range: v.start..=niche_end
+                valid_range: *v.start()..=niche_end
             }, niche_start))
         };
@@ -1744,14 +1740,14 @@ impl<'a> HashStable<StableHashingContext<'a>> for Variants {
             }
             NicheFilling {
                 dataful_variant,
-                niche_variants: RangeInclusive { start, end },
+                ref niche_variants,
                 ref niche,
                 niche_start,
                 ref variants,
             } => {
                 dataful_variant.hash_stable(hcx, hasher);
-                start.hash_stable(hcx, hasher);
-                end.hash_stable(hcx, hasher);
+                niche_variants.start().hash_stable(hcx, hasher);
+                niche_variants.end().hash_stable(hcx, hasher);
                 niche.hash_stable(hcx, hasher);
                 niche_start.hash_stable(hcx, hasher);
                 variants.hash_stable(hcx, hasher);
@@ -1814,10 +1810,10 @@ impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>,
                    hasher: &mut StableHasher) {
-        let Scalar { value, valid_range: RangeInclusive { start, end } } = *self;
+        let Scalar { value, ref valid_range } = *self;
         value.hash_stable(hcx, hasher);
-        start.hash_stable(hcx, hasher);
-        end.hash_stable(hcx, hasher);
+        valid_range.start().hash_stable(hcx, hasher);
+        valid_range.end().hash_stable(hcx, hasher);
     }
 }
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index f6e9994b5da3f..c8cebf8328d59 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -917,8 +917,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
                 niche_start,
                 ..
             } => {
-                let variants_start = niche_variants.start as u128;
-                let variants_end = niche_variants.end as u128;
+                let variants_start = *niche_variants.start() as u128;
+                let variants_end = *niche_variants.end() as u128;
                 match raw_discr {
                     PrimVal::Ptr(_) => {
                         assert!(niche_start == 0);
@@ -984,7 +984,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
                 if variant_index != dataful_variant {
                     let (niche_dest, niche) =
                         self.place_field(dest, mir::Field::new(0), layout)?;
-                    let niche_value = ((variant_index - niche_variants.start) as u128)
+                    let niche_value = ((variant_index - niche_variants.start()) as u128)
                         .wrapping_add(niche_start);
                     self.write_primval(niche_dest, PrimVal::Bytes(niche_value), niche.ty)?;
                 }
diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs
index 75b7a10097df4..a6dc4c74f3634 100644
--- a/src/librustc_mir/lib.rs
+++ b/src/librustc_mir/lib.rs
@@ -31,7 +31,7 @@ Rust MIR: a lowered representation of Rust. Also: an experiment!
 #![feature(range_contains)]
 #![feature(rustc_diagnostic_macros)]
 #![feature(nonzero)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 #![feature(crate_visibility_modifier)]
 #![feature(never_type)]
 #![cfg_attr(stage0, feature(try_trait))]
diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs
index 346e5667a7bab..f73085196f4e6 100644
--- a/src/librustc_target/abi/mod.rs
+++ b/src/librustc_target/abi/mod.rs
@@ -555,8 +555,8 @@ impl Scalar {
         let bits = self.value.size(cx).bits();
         assert!(bits <= 128);
         let mask = !0u128 >> (128 - bits);
-        let start = self.valid_range.start;
-        let end = self.valid_range.end;
+        let start = *self.valid_range.start();
+        let end = *self.valid_range.end();
         assert_eq!(start, start & mask);
         assert_eq!(end, end & mask);
         start..(end.wrapping_add(1) & mask)
diff --git a/src/librustc_target/lib.rs b/src/librustc_target/lib.rs
index 927d5c7e15a20..45f2ee13bbdc9 100644
--- a/src/librustc_target/lib.rs
+++ b/src/librustc_target/lib.rs
@@ -29,7 +29,7 @@
 #![feature(const_fn)]
 #![feature(fs_read_write)]
 #![feature(inclusive_range)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 #![feature(slice_patterns)]
 
 #[macro_use]
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 1d0d7ec601f1f..1838dae049ad7 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -388,8 +388,8 @@ impl<'a, 'tcx> FnTypeExt<'a, 'tcx> for FnType<'tcx, Ty<'tcx>> {
                 return;
             }
 
-            if scalar.valid_range.start < scalar.valid_range.end {
-                if scalar.valid_range.start > 0 {
+            if scalar.valid_range.start() < scalar.valid_range.end() {
+                if *scalar.valid_range.start() > 0 {
                     attrs.set(ArgAttribute::NonNull);
                 }
             }
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs
index 123b9cf7931a5..2fc6c9d443301 100644
--- a/src/librustc_trans/debuginfo/metadata.rs
+++ b/src/librustc_trans/debuginfo/metadata.rs
@@ -1236,7 +1236,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
                     self.layout,
                     self.layout.fields.offset(0),
                     self.layout.field(cx, 0).size);
-                name.push_str(&adt.variants[niche_variants.start].name.as_str());
+                name.push_str(&adt.variants[*niche_variants.start()].name.as_str());
 
                 // Create the (singleton) list of descriptions of union members.
                 vec![
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index 96a10e8b99d32..9259fef279f1a 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -29,7 +29,7 @@
 #![feature(rustc_diagnostic_macros)]
 #![feature(slice_sort_by_cached_key)]
 #![feature(optin_builtin_traits)]
-#![feature(inclusive_range_fields)]
+#![feature(inclusive_range_methods)]
 
 use rustc::dep_graph::WorkProduct;
 use syntax_pos::symbol::Symbol;
diff --git a/src/librustc_trans/mir/place.rs b/src/librustc_trans/mir/place.rs
index 8532c0b149ded..79859aee64d87 100644
--- a/src/librustc_trans/mir/place.rs
+++ b/src/librustc_trans/mir/place.rs
@@ -99,7 +99,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                         bx.range_metadata(load, range);
                     }
                 }
-                layout::Pointer if vr.start < vr.end && !vr.contains(&0) => {
+                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}
@@ -287,7 +287,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                 ..
             } => {
                 let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
-                if niche_variants.start == niche_variants.end {
+                if niche_variants.start() == niche_variants.end() {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_start == 0 {
                         // HACK(eddyb) Using `C_null` as it works on all types.
@@ -296,13 +296,13 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
                         C_uint_big(niche_llty, niche_start)
                     };
                     bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval),
-                        C_uint(cast_to, niche_variants.start as u64),
+                        C_uint(cast_to, *niche_variants.start() as u64),
                         C_uint(cast_to, dataful_variant as u64))
                 } else {
                     // Rebase from niche values to discriminant values.
-                    let delta = niche_start.wrapping_sub(niche_variants.start as u128);
+                    let delta = niche_start.wrapping_sub(*niche_variants.start() as u128);
                     let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
-                    let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
+                    let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64);
                     bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
                         bx.intcast(lldiscr, cast_to, false),
                         C_uint(cast_to, dataful_variant as u64))
@@ -352,7 +352,7 @@ impl<'a, 'tcx> PlaceRef<'tcx> {
 
                     let niche = self.project_field(bx, 0);
                     let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
-                    let niche_value = ((variant_index - niche_variants.start) as u128)
+                    let niche_value = ((variant_index - *niche_variants.start()) as u128)
                         .wrapping_add(niche_start);
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_value == 0 {
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index 79e906ca975fb..4fa54dc276d20 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -301,7 +301,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
                         if let layout::Int(_, s) = scalar.value {
                             signed = s;
 
-                            if scalar.valid_range.end > scalar.valid_range.start {
+                            if scalar.valid_range.end() > scalar.valid_range.start() {
                                 // We want `table[e as usize]` to not
                                 // have bound checks, and this is the most
                                 // convenient place to put the `assume`.
@@ -309,7 +309,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
                                 base::call_assume(&bx, bx.icmp(
                                     llvm::IntULE,
                                     llval,
-                                    C_uint_big(ll_t_in, scalar.valid_range.end)
+                                    C_uint_big(ll_t_in, *scalar.valid_range.end())
                                 ));
                             }
                         }
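
Migration note (not part of the patch): the sketch below shows roughly how downstream nightly code that touched the now-private `start`/`end` fields of `RangeInclusive` would move to the new API, assuming a toolchain where the `inclusive_range_methods` gate introduced here is available. The `shifted` rebuild at the end is only an illustrative pattern, mirroring the `valid_range = 1..=*end()` rewrites in `layout.rs`, not code taken from this patch.

```rust
// Illustrative only: assumes a nightly with the `inclusive_range_methods`
// feature gate from this patch; on later toolchains the methods are stable
// and the gate can be dropped.
#![feature(inclusive_range_methods)]

use std::ops::RangeInclusive;

fn main() {
    // Before: RangeInclusive { start: 3, end: 5 }
    // After: the associated function, which is also what `3..=5` now lowers to.
    let r: RangeInclusive<i32> = RangeInclusive::new(3, 5);
    assert_eq!(r, 3..=5);

    // Bounds are now read through accessors that return references.
    assert_eq!(r.start(), &3);
    assert_eq!(r.end(), &5);

    // Instead of mutating the fields, rebuild the range from adjusted bounds.
    let shifted = *r.start() + 1..=*r.end();
    assert_eq!(shifted, 4..=5);
}
```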