diff --git a/mk/main.mk b/mk/main.mk index b70926388caa4..f2234e0920348 100644 --- a/mk/main.mk +++ b/mk/main.mk @@ -294,7 +294,7 @@ LLVM_VERSION_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --version) LLVM_BINDIR_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --bindir) LLVM_INCDIR_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --includedir) LLVM_LIBDIR_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --libdir) -LLVM_LIBDIR_RUSTFLAGS_$(1)=-L "$$(LLVM_LIBDIR_$(1))" +LLVM_LIBDIR_RUSTFLAGS_$(1)=-L native="$$(LLVM_LIBDIR_$(1))" LLVM_LDFLAGS_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --ldflags) ifeq ($$(findstring freebsd,$(1)),freebsd) # On FreeBSD, it may search wrong headers (that are for pre-installed LLVM), diff --git a/src/doc/reference.md b/src/doc/reference.md index 284fcf6aed0c2..dbcfafaf1c17a 100644 --- a/src/doc/reference.md +++ b/src/doc/reference.md @@ -1452,7 +1452,7 @@ fn draw_twice(surface: Surface, sh: T) { } ``` -Traits also define an [trait object](#trait-objects) with the same +Traits also define a [trait object](#trait-objects) with the same name as the trait. Values of this type are created by coercing from a pointer of some specific type to a pointer of trait type. For example, `&T` could be coerced to `&Shape` if `T: Shape` holds (and similarly @@ -1881,11 +1881,15 @@ type int8_t = i8; - `no_start` - disable linking to the `native` crate, which specifies the "start" language item. - `no_std` - disable linking to the `std` crate. -- `plugin` — load a list of named crates as compiler plugins, e.g. +- `plugin` - load a list of named crates as compiler plugins, e.g. `#![plugin(foo, bar)]`. Optional arguments for each plugin, i.e. `#![plugin(foo(... args ...))]`, are provided to the plugin's registrar function. The `plugin` feature gate is required to use this attribute. +- `recursion_limit` - Sets the maximum depth for potentially + infinitely-recursive compile-time operations like + auto-dereference or macro expansion. The default is + `#![recursion_limit="64"]`. ### Module-only attributes diff --git a/src/doc/trpl/crates-and-modules.md b/src/doc/trpl/crates-and-modules.md index 6989099206586..7e90252456366 100644 --- a/src/doc/trpl/crates-and-modules.md +++ b/src/doc/trpl/crates-and-modules.md @@ -115,7 +115,7 @@ $ ls target/debug build deps examples libphrases-a7448e02a0468eaa.rlib native ``` -`libphrase-hash.rlib` is the compiled crate. Before we see how to use this +`libphrases-hash.rlib` is the compiled crate. Before we see how to use this crate from another crate, let’s break it up into multiple files. # Multiple file crates diff --git a/src/doc/trpl/error-handling.md b/src/doc/trpl/error-handling.md index 8dd5a3650ef52..518e65f35c008 100644 --- a/src/doc/trpl/error-handling.md +++ b/src/doc/trpl/error-handling.md @@ -208,8 +208,8 @@ Because these kinds of situations are relatively rare, use panics sparingly. In certain circumstances, even though a function may fail, we may want to treat it as a panic instead. For example, `io::stdin().read_line(&mut buffer)` returns -a `Result`, when there is an error reading the line. This allows us to -handle and possibly recover from error. +a `Result`, which can indicate an error if one occurs when reading the line. +This allows us to handle and possibly recover from errors. 
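As a brief illustration of the recoverable path described above (a sketch, not part of the patch), one might match on the `Result` instead of unwrapping it:

```rust
use std::io;

fn main() {
    let mut buffer = String::new();
    // Handle the error explicitly rather than panicking on it.
    match io::stdin().read_line(&mut buffer) {
        Ok(n) => println!("read {} bytes: {}", n, buffer.trim()),
        Err(e) => println!("failed to read line: {}", e),
    }
}
```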
If we don't want to handle this error, and would rather just abort the program, we can use the `unwrap()` method: diff --git a/src/doc/trpl/testing.md b/src/doc/trpl/testing.md index cbf33febf876f..587f60343c344 100644 --- a/src/doc/trpl/testing.md +++ b/src/doc/trpl/testing.md @@ -120,13 +120,26 @@ And that's reflected in the summary line: test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured ``` -We also get a non-zero status code: +We also get a non-zero status code. We can use `$?` on OS X and Linux: ```bash $ echo $? 101 ``` +On Windows, if you’re using `cmd`: + +```bash +> echo %ERRORLEVEL% +``` + +And if you’re using PowerShell: + +```bash +> echo $LASTEXITCODE # the code itself +> echo $? # a boolean, fail or succeed +``` + This is useful if you want to integrate `cargo test` into other tooling. We can invert our test's failure with another attribute: `should_panic`: @@ -219,6 +232,66 @@ fn it_works() { This is a very common use of `assert_eq!`: call some function with some known arguments and compare it to the expected output. +# The `ignore` attribute + +Sometimes a few specific tests can be very time-consuming to execute. These +can be disabled by default by using the `ignore` attribute: + +```rust +#[test] +fn it_works() { + assert_eq!(4, add_two(2)); +} + +#[test] +#[ignore] +fn expensive_test() { + // code that takes an hour to run +} +``` + +Now we run our tests and see that `it_works` is run, but `expensive_test` is +not: + +```bash +$ cargo test + Compiling adder v0.0.1 (file:///home/you/projects/adder) + Running target/adder-91b3e234d4ed382a + +running 2 tests +test expensive_test ... ignored +test it_works ... ok + +test result: ok. 1 passed; 0 failed; 1 ignored; 0 measured + + Doc-tests adder + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured +``` + +The expensive tests can be run explicitly using `cargo test -- --ignored`: + +```bash +$ cargo test -- --ignored + Running target/adder-91b3e234d4ed382a + +running 1 test +test expensive_test ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured + + Doc-tests adder + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured +``` + +The `--ignored` argument is an argument to the test binary, and not to cargo, +which is why the command is `cargo test -- --ignored`. 
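For readers wiring this into other tooling, a small sketch (not part of the patch) of how the exit code and the `--ignored` flag might be combined in a script:

```bash
#!/bin/sh
# Run the fast tests first; a non-zero exit code aborts the script here.
set -e
cargo test
# Then opt in to the slow, ignored tests.
cargo test -- --ignored
```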
+ # The `tests` module There is one way in which our existing example is not idiomatic: it's diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index bb2daa2a1d742..b68d7976540a0 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -73,6 +73,7 @@ use boxed::Box; use core::sync::atomic; use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst}; +use core::borrow; use core::fmt; use core::cmp::Ordering; use core::mem::{align_of_val, size_of_val}; @@ -1109,3 +1110,7 @@ mod tests { assert!(y.upgrade().is_none()); } } + +impl borrow::Borrow for Arc { + fn borrow(&self) -> &T { &**self } +} diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index e3019f952fe70..a6e0f3a9bd97d 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -57,6 +57,7 @@ use heap; use raw_vec::RawVec; use core::any::Any; +use core::borrow; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; @@ -562,3 +563,10 @@ impl Clone for Box<[T]> { } } +impl borrow::Borrow for Box { + fn borrow(&self) -> &T { &**self } +} + +impl borrow::BorrowMut for Box { + fn borrow_mut(&mut self) -> &mut T { &mut **self } +} diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 9649d0f71a14d..b1fb5be4d21bf 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -158,6 +158,7 @@ use boxed::Box; #[cfg(test)] use std::boxed::Box; +use core::borrow; use core::cell::Cell; use core::cmp::Ordering; use core::fmt; @@ -1091,3 +1092,7 @@ mod tests { assert_eq!(foo, foo.clone()); } } + +impl borrow::Borrow for Rc { + fn borrow(&self) -> &T { &**self } +} diff --git a/src/libcollections/borrow.rs b/src/libcollections/borrow.rs index bfd069152509d..bd1864b28cdd3 100644 --- a/src/libcollections/borrow.rs +++ b/src/libcollections/borrow.rs @@ -21,119 +21,10 @@ use core::ops::Deref; use core::option::Option; use fmt; -use alloc::{boxed, rc, arc}; use self::Cow::*; -/// A trait for borrowing data. -/// -/// In general, there may be several ways to "borrow" a piece of data. The -/// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T` -/// (a mutable borrow). But types like `Vec` provide additional kinds of -/// borrows: the borrowed slices `&[T]` and `&mut [T]`. -/// -/// When writing generic code, it is often desirable to abstract over all ways -/// of borrowing data from a given type. That is the role of the `Borrow` -/// trait: if `T: Borrow`, then `&U` can be borrowed from `&T`. A given -/// type can be borrowed as multiple different types. In particular, `Vec: -/// Borrow>` and `Vec: Borrow<[T]>`. -/// -/// If you are implementing `Borrow` and both `Self` and `Borrowed` implement -/// `Hash`, `Eq`, and/or `Ord`, they must produce the same result. -/// -/// `Borrow` is very similar to, but different than, `AsRef`. See -/// [the book][book] for more. -/// -/// [book]: ../../book/borrow-and-asref.html -#[stable(feature = "rust1", since = "1.0.0")] -pub trait Borrow { - /// Immutably borrows from an owned value. - /// - /// # Examples - /// - /// ``` - /// use std::borrow::Borrow; - /// - /// fn check>(s: T) { - /// assert_eq!("Hello", s.borrow()); - /// } - /// - /// let s = "Hello".to_string(); - /// - /// check(s); - /// - /// let s = "Hello"; - /// - /// check(s); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn borrow(&self) -> &Borrowed; -} - -/// A trait for mutably borrowing data. -/// -/// Similar to `Borrow`, but for mutable borrows. 
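As an aside on what `Borrow`-bounded generic code looks like in practice (a sketch, not part of the patch), `HashMap::get` is the canonical consumer: because `String: Borrow<str>`, a map keyed by `String` can be queried with a plain `&str`:

```rust
use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;

// Generic over any key type that can be viewed as a `str`.
fn lookup<'a, K, V>(map: &'a HashMap<K, V>, key: &str) -> Option<&'a V>
    where K: Borrow<str> + Hash + Eq
{
    map.get(key)
}

fn main() {
    let mut map = HashMap::new();
    map.insert("hello".to_string(), 1);
    assert_eq!(lookup(&map, "hello"), Some(&1));
}
```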
-#[stable(feature = "rust1", since = "1.0.0")] -pub trait BorrowMut : Borrow { - /// Mutably borrows from an owned value. - /// - /// # Examples - /// - /// ``` - /// use std::borrow::BorrowMut; - /// - /// fn check>(mut v: T) { - /// assert_eq!(&mut [1, 2, 3], v.borrow_mut()); - /// } - /// - /// let v = vec![1, 2, 3]; - /// - /// check(v); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn borrow_mut(&mut self) -> &mut Borrowed; -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Borrow for T { - fn borrow(&self) -> &T { self } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl BorrowMut for T { - fn borrow_mut(&mut self) -> &mut T { self } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T: ?Sized> Borrow for &'a T { - fn borrow(&self) -> &T { &**self } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T: ?Sized> Borrow for &'a mut T { - fn borrow(&self) -> &T { &**self } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T: ?Sized> BorrowMut for &'a mut T { - fn borrow_mut(&mut self) -> &mut T { &mut **self } -} - -impl Borrow for boxed::Box { - fn borrow(&self) -> &T { &**self } -} - -impl BorrowMut for boxed::Box { - fn borrow_mut(&mut self) -> &mut T { &mut **self } -} - -impl Borrow for rc::Rc { - fn borrow(&self) -> &T { &**self } -} - -impl Borrow for arc::Arc { - fn borrow(&self) -> &T { &**self } -} +pub use core::borrow::{Borrow, BorrowMut}; #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Borrow for Cow<'a, B> where B: ToOwned, ::Owned: 'a { diff --git a/src/libcollections/btree/map.rs b/src/libcollections/btree/map.rs index 2835e28a9462c..11d389d85bad9 100644 --- a/src/libcollections/btree/map.rs +++ b/src/libcollections/btree/map.rs @@ -22,7 +22,7 @@ use core::fmt::Debug; use core::hash::{Hash, Hasher}; use core::iter::{Map, FromIterator}; use core::ops::Index; -use core::{iter, fmt, mem, usize}; +use core::{fmt, mem, usize}; use Bound::{self, Included, Excluded, Unbounded}; use borrow::Borrow; @@ -915,7 +915,7 @@ impl Eq for BTreeMap {} impl PartialOrd for BTreeMap { #[inline] fn partial_cmp(&self, other: &BTreeMap) -> Option { - iter::order::partial_cmp(self.iter(), other.iter()) + self.iter().partial_cmp(other.iter()) } } @@ -923,7 +923,7 @@ impl PartialOrd for BTreeMap { impl Ord for BTreeMap { #[inline] fn cmp(&self, other: &BTreeMap) -> Ordering { - iter::order::cmp(self.iter(), other.iter()) + self.iter().cmp(other.iter()) } } diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 2639e6dce468f..d1e91b28c46fd 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -56,6 +56,7 @@ #![feature(unicode)] #![feature(unique)] #![feature(unsafe_no_drop_flag, filling_drop)] +#![feature(decode_utf16)] #![feature(utf8_error)] #![cfg_attr(test, feature(rand, test))] diff --git a/src/libcollections/linked_list.rs b/src/libcollections/linked_list.rs index 80ef2067819cf..891e8b7b2c961 100644 --- a/src/libcollections/linked_list.rs +++ b/src/libcollections/linked_list.rs @@ -25,7 +25,7 @@ use alloc::boxed::Box; use core::cmp::Ordering; use core::fmt; use core::hash::{Hasher, Hash}; -use core::iter::{self, FromIterator}; +use core::iter::FromIterator; use core::mem; use core::ptr; @@ -917,12 +917,12 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList { impl PartialEq for LinkedList { fn eq(&self, other: &LinkedList) -> bool { self.len() == other.len() && - iter::order::eq(self.iter(), other.iter()) + self.iter().eq(other.iter()) } fn ne(&self, other: &LinkedList) -> 
bool { self.len() != other.len() || - iter::order::ne(self.iter(), other.iter()) + self.iter().ne(other.iter()) } } @@ -932,7 +932,7 @@ impl Eq for LinkedList {} #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for LinkedList { fn partial_cmp(&self, other: &LinkedList) -> Option { - iter::order::partial_cmp(self.iter(), other.iter()) + self.iter().partial_cmp(other.iter()) } } @@ -940,7 +940,7 @@ impl PartialOrd for LinkedList { impl Ord for LinkedList { #[inline] fn cmp(&self, other: &LinkedList) -> Ordering { - iter::order::cmp(self.iter(), other.iter()) + self.iter().cmp(other.iter()) } } diff --git a/src/libcollections/string.rs b/src/libcollections/string.rs index 5c5f6cace6a4b..08ac64778bb02 100644 --- a/src/libcollections/string.rs +++ b/src/libcollections/string.rs @@ -20,8 +20,8 @@ use core::ops::{self, Deref, Add, Index}; use core::ptr; use core::slice; use core::str::pattern::Pattern; +use rustc_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; use rustc_unicode::str as unicode_str; -use rustc_unicode::str::Utf16Item; use borrow::{Cow, IntoCow}; use range::RangeArgument; @@ -267,14 +267,7 @@ impl String { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16(v: &[u16]) -> Result { - let mut s = String::with_capacity(v.len()); - for c in unicode_str::utf16_items(v) { - match c { - Utf16Item::ScalarValue(c) => s.push(c), - Utf16Item::LoneSurrogate(_) => return Err(FromUtf16Error(())), - } - } - Ok(s) + decode_utf16(v.iter().cloned()).collect::>().map_err(|_| FromUtf16Error(())) } /// Decode a UTF-16 encoded vector `v` into a string, replacing @@ -294,7 +287,7 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16_lossy(v: &[u16]) -> String { - unicode_str::utf16_items(v).map(|c| c.to_char_lossy()).collect() + decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect() } /// Creates a new `String` from a length, capacity, and pointer. @@ -813,11 +806,7 @@ impl<'a> Extend<&'a char> for String { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> Extend<&'a str> for String { fn extend>(&mut self, iterable: I) { - let iterator = iterable.into_iter(); - // A guess that at least one byte per iterator element will be needed. - let (lower_bound, _) = iterator.size_hint(); - self.reserve(lower_bound); - for s in iterator { + for s in iterable { self.push_str(s) } } diff --git a/src/libcollections/vec_deque.rs b/src/libcollections/vec_deque.rs index 96e24b412d525..79e8988679184 100644 --- a/src/libcollections/vec_deque.rs +++ b/src/libcollections/vec_deque.rs @@ -20,7 +20,7 @@ use core::cmp::Ordering; use core::fmt; -use core::iter::{self, repeat, FromIterator}; +use core::iter::{repeat, FromIterator}; use core::ops::{Index, IndexMut}; use core::ptr; use core::slice; @@ -1676,7 +1676,7 @@ impl Eq for VecDeque {} #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for VecDeque { fn partial_cmp(&self, other: &VecDeque) -> Option { - iter::order::partial_cmp(self.iter(), other.iter()) + self.iter().partial_cmp(other.iter()) } } @@ -1684,7 +1684,7 @@ impl PartialOrd for VecDeque { impl Ord for VecDeque { #[inline] fn cmp(&self, other: &VecDeque) -> Ordering { - iter::order::cmp(self.iter(), other.iter()) + self.iter().cmp(other.iter()) } } diff --git a/src/libcore/any.rs b/src/libcore/any.rs index 899e32d29a65b..16760f7170708 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -13,11 +13,12 @@ //! //! 
`Any` itself can be used to get a `TypeId`, and has more features when used //! as a trait object. As `&Any` (a borrowed trait object), it has the `is` and -//! `as_ref` methods, to test if the contained value is of a given type, and to -//! get a reference to the inner value as a type. As `&mut Any`, there is also -//! the `as_mut` method, for getting a mutable reference to the inner value. -//! `Box` adds the `move` method, which will unwrap a `Box` from the -//! object. See the extension traits (`*Ext`) for the full details. +//! `downcast_ref` methods, to test if the contained value is of a given type, +//! and to get a reference to the inner value as a type. As `&mut Any`, there +//! is also the `downcast_mut` method, for getting a mutable reference to the +//! inner value. `Box` adds the `move` method, which will unwrap a +//! `Box` from the object. See the extension traits (`*Ext`) for the full +//! details. //! //! Note that &Any is limited to testing whether a value is of a specified //! concrete type, and cannot be used to test whether a type implements a trait. diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs new file mode 100644 index 0000000000000..79330d3a61ea7 --- /dev/null +++ b/src/libcore/borrow.rs @@ -0,0 +1,109 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A module for working with borrowed data. + +#![stable(feature = "rust1", since = "1.0.0")] + +use marker::Sized; + +/// A trait for borrowing data. +/// +/// In general, there may be several ways to "borrow" a piece of data. The +/// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T` +/// (a mutable borrow). But types like `Vec` provide additional kinds of +/// borrows: the borrowed slices `&[T]` and `&mut [T]`. +/// +/// When writing generic code, it is often desirable to abstract over all ways +/// of borrowing data from a given type. That is the role of the `Borrow` +/// trait: if `T: Borrow`, then `&U` can be borrowed from `&T`. A given +/// type can be borrowed as multiple different types. In particular, `Vec: +/// Borrow>` and `Vec: Borrow<[T]>`. +/// +/// If you are implementing `Borrow` and both `Self` and `Borrowed` implement +/// `Hash`, `Eq`, and/or `Ord`, they must produce the same result. +/// +/// `Borrow` is very similar to, but different than, `AsRef`. See +/// [the book][book] for more. +/// +/// [book]: ../../book/borrow-and-asref.html +#[stable(feature = "rust1", since = "1.0.0")] +pub trait Borrow { + /// Immutably borrows from an owned value. + /// + /// # Examples + /// + /// ``` + /// use std::borrow::Borrow; + /// + /// fn check>(s: T) { + /// assert_eq!("Hello", s.borrow()); + /// } + /// + /// let s = "Hello".to_string(); + /// + /// check(s); + /// + /// let s = "Hello"; + /// + /// check(s); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn borrow(&self) -> &Borrowed; +} + +/// A trait for mutably borrowing data. +/// +/// Similar to `Borrow`, but for mutable borrows. +#[stable(feature = "rust1", since = "1.0.0")] +pub trait BorrowMut : Borrow { + /// Mutably borrows from an owned value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::borrow::BorrowMut; + /// + /// fn check>(mut v: T) { + /// assert_eq!(&mut [1, 2, 3], v.borrow_mut()); + /// } + /// + /// let v = vec![1, 2, 3]; + /// + /// check(v); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn borrow_mut(&mut self) -> &mut Borrowed; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Borrow for T { + fn borrow(&self) -> &T { self } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl BorrowMut for T { + fn borrow_mut(&mut self) -> &mut T { self } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T: ?Sized> Borrow for &'a T { + fn borrow(&self) -> &T { &**self } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T: ?Sized> Borrow for &'a mut T { + fn borrow(&self) -> &T { &**self } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T: ?Sized> BorrowMut for &'a mut T { + fn borrow_mut(&mut self) -> &mut T { &mut **self } +} diff --git a/src/libcore/default.rs b/src/libcore/default.rs index f5103d394eef0..0e318f204eb9d 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -78,7 +78,7 @@ #![stable(feature = "rust1", since = "1.0.0")] -/// A trait that types which have a useful default value should implement. +/// A trait for giving a type a useful default value. /// /// A struct can derive default implementations of `Default` for basic types using /// `#[derive(Default)]`. diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs index ee32999ba8fba..3d17b10ba3a85 100644 --- a/src/libcore/iter.rs +++ b/src/libcore/iter.rs @@ -58,7 +58,7 @@ use clone::Clone; use cmp; -use cmp::{Ord, PartialOrd, PartialEq}; +use cmp::{Ord, PartialOrd, PartialEq, Ordering}; use default::Default; use marker; use mem; @@ -98,6 +98,13 @@ pub trait Iterator { /// /// An upper bound of `None` means either there is no known upper bound, or /// the upper bound does not fit within a `usize`. + /// + /// # Examples + /// + /// ``` + /// let it = (0..10).filter(|x| x % 2 == 0).chain(15..20); + /// assert_eq!((5, Some(15)), it.size_hint()); + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn size_hint(&self) -> (usize, Option) { (0, None) } @@ -184,7 +191,7 @@ pub trait Iterator { fn chain(self, other: U) -> Chain where Self: Sized, U: IntoIterator, { - Chain{a: self, b: other.into_iter(), flag: false} + Chain{a: self, b: other.into_iter(), state: ChainState::Both} } /// Creates an iterator that iterates over both this and the specified @@ -1005,6 +1012,198 @@ pub trait Iterator { { self.fold(One::one(), |p, e| p * e) } + + /// Lexicographically compares the elements of this `Iterator` with those + /// of another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn cmp(mut self, other: I) -> Ordering where + I: IntoIterator, + Self::Item: Ord, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return Ordering::Equal, + (None, _ ) => return Ordering::Less, + (_ , None) => return Ordering::Greater, + (Some(x), Some(y)) => match x.cmp(&y) { + Ordering::Equal => (), + non_eq => return non_eq, + }, + } + } + } + + /// Lexicographically compares the elements of this `Iterator` with those + /// of another. 
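As a call-site illustration of these new comparison methods (a sketch, not part of the patch; at the time they were gated behind the unstable `iter_order` feature and have since been stabilized):

```rust
use std::cmp::Ordering;

fn main() {
    // A total, lexicographic comparison over `Ord` items.
    assert_eq!([1, 2, 3].iter().cmp([1, 2, 4].iter()), Ordering::Less);

    // With only `PartialOrd` (e.g. floats), a NaN makes the result `None`.
    let a = [1.0_f64, 0.0_f64 / 0.0]; // second element is NaN
    let b = [1.0_f64, 2.0];
    assert_eq!(a.iter().partial_cmp(b.iter()), None);
}
```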
+ #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn partial_cmp(mut self, other: I) -> Option where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return Some(Ordering::Equal), + (None, _ ) => return Some(Ordering::Less), + (_ , None) => return Some(Ordering::Greater), + (Some(x), Some(y)) => match x.partial_cmp(&y) { + Some(Ordering::Equal) => (), + non_eq => return non_eq, + }, + } + } + } + + /// Determines if the elements of this `Iterator` are equal to those of + /// another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn eq(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialEq, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _) | (_, None) => return false, + (Some(x), Some(y)) => if x != y { return false }, + } + } + } + + /// Determines if the elements of this `Iterator` are unequal to those of + /// another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn ne(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialEq, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _) | (_, None) => return true, + (Some(x), Some(y)) => if x.ne(&y) { return true }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// less than those of another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn lt(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _ ) => return true, + (_ , None) => return false, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return true, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return false, + None => return false, + } + }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// less or equal to those of another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn le(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _ ) => return true, + (_ , None) => return false, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return true, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return false, + None => return false, + } + }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// greater than those of another. 
+ #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn gt(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _ ) => return false, + (_ , None) => return true, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return false, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return true, + None => return false, + } + } + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// greater than or equal to those of another. + #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] + fn ge(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _ ) => return false, + (_ , None) => return true, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return false, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return true, + None => return false, + } + }, + } + } + } } /// Select an element from an iterator based on the given projection @@ -1277,7 +1476,30 @@ impl Iterator for Cycle where I: Clone + Iterator { pub struct Chain { a: A, b: B, - flag: bool, + state: ChainState, +} + +// The iterator protocol specifies that iteration ends with the return value +// `None` from `.next()` (or `.next_back()`) and it is unspecified what +// further calls return. The chain adaptor must account for this since it uses +// two subiterators. +// +// It uses three states: +// +// - Both: `a` and `b` are remaining +// - Front: `a` remaining +// - Back: `b` remaining +// +// The fourth state (neither iterator is remaining) only occurs after Chain has +// returned None once, so we don't need to store this state. +#[derive(Clone)] +enum ChainState { + // both front and back iterator are remaining + Both, + // only front is remaining + Front, + // only back is remaining + Back, } #[stable(feature = "rust1", since = "1.0.0")] @@ -1289,42 +1511,58 @@ impl Iterator for Chain where #[inline] fn next(&mut self) -> Option { - if self.flag { - self.b.next() - } else { - match self.a.next() { - Some(x) => return Some(x), - _ => () - } - self.flag = true; - self.b.next() + match self.state { + ChainState::Both => match self.a.next() { + elt @ Some(..) 
=> return elt, + None => { + self.state = ChainState::Back; + self.b.next() + } + }, + ChainState::Front => self.a.next(), + ChainState::Back => self.b.next(), } } #[inline] fn count(self) -> usize { - (if !self.flag { self.a.count() } else { 0 }) + self.b.count() + match self.state { + ChainState::Both => self.a.count() + self.b.count(), + ChainState::Front => self.a.count(), + ChainState::Back => self.b.count(), + } } #[inline] fn nth(&mut self, mut n: usize) -> Option { - if !self.flag { - for x in self.a.by_ref() { - if n == 0 { - return Some(x) + match self.state { + ChainState::Both | ChainState::Front => { + for x in self.a.by_ref() { + if n == 0 { + return Some(x) + } + n -= 1; + } + if let ChainState::Both = self.state { + self.state = ChainState::Back; } - n -= 1; } - self.flag = true; + ChainState::Back => {} + } + if let ChainState::Back = self.state { + self.b.nth(n) + } else { + None } - self.b.nth(n) } #[inline] fn last(self) -> Option { - let a_last = if self.flag { None } else { self.a.last() }; - let b_last = self.b.last(); - b_last.or(a_last) + match self.state { + ChainState::Both => self.b.last().or(self.a.last()), + ChainState::Front => self.a.last(), + ChainState::Back => self.b.last() + } } #[inline] @@ -1350,9 +1588,16 @@ impl DoubleEndedIterator for Chain where { #[inline] fn next_back(&mut self) -> Option { - match self.b.next_back() { - Some(x) => Some(x), - None => self.a.next_back() + match self.state { + ChainState::Both => match self.b.next_back() { + elt @ Some(..) => return elt, + None => { + self.state = ChainState::Front; + self.a.next_back() + } + }, + ChainState::Front => self.a.next_back(), + ChainState::Back => self.b.next_back(), } } } @@ -2654,146 +2899,79 @@ pub fn once(value: T) -> Once { /// /// If two sequences are equal up until the point where one ends, /// the shorter sequence compares less. 
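For downstream code, the change this enables is mostly mechanical: calls through the `iter::order` free functions (deprecated just below) become inherent method calls, as the test updates later in this patch show. A sketch, not part of the patch:

```rust
use std::cmp::Ordering;

fn main() {
    let xs = [1, 2, 3];
    let ys = [1, 2];
    // Formerly spelled `iter::order::cmp(xs.iter(), ys.iter())`.
    assert_eq!(xs.iter().cmp(ys.iter()), Ordering::Greater);
    // A strict prefix of another sequence compares less.
    assert!(ys.iter().lt(xs.iter()));
}
```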
+#[deprecated(since = "1.4.0", reason = "use the equivalent methods on `Iterator` instead")] #[unstable(feature = "iter_order", reason = "needs review and revision", issue = "27737")] pub mod order { use cmp; use cmp::{Eq, Ord, PartialOrd, PartialEq}; - use cmp::Ordering::{Equal, Less, Greater}; use option::Option; - use option::Option::{Some, None}; use super::Iterator; /// Compare `a` and `b` for equality using `Eq` - pub fn equals(mut a: L, mut b: R) -> bool where + pub fn equals(a: L, b: R) -> bool where A: Eq, L: Iterator, R: Iterator, { - loop { - match (a.next(), b.next()) { - (None, None) => return true, - (None, _) | (_, None) => return false, - (Some(x), Some(y)) => if x != y { return false }, - } - } + a.eq(b) } /// Order `a` and `b` lexicographically using `Ord` - pub fn cmp(mut a: L, mut b: R) -> cmp::Ordering where + pub fn cmp(a: L, b: R) -> cmp::Ordering where A: Ord, L: Iterator, R: Iterator, { - loop { - match (a.next(), b.next()) { - (None, None) => return Equal, - (None, _ ) => return Less, - (_ , None) => return Greater, - (Some(x), Some(y)) => match x.cmp(&y) { - Equal => (), - non_eq => return non_eq, - }, - } - } + a.cmp(b) } /// Order `a` and `b` lexicographically using `PartialOrd` - pub fn partial_cmp(mut a: L, mut b: R) -> Option where + pub fn partial_cmp(a: L, b: R) -> Option where L::Item: PartialOrd { - loop { - match (a.next(), b.next()) { - (None, None) => return Some(Equal), - (None, _ ) => return Some(Less), - (_ , None) => return Some(Greater), - (Some(x), Some(y)) => match x.partial_cmp(&y) { - Some(Equal) => (), - non_eq => return non_eq, - }, - } - } + a.partial_cmp(b) } /// Compare `a` and `b` for equality (Using partial equality, `PartialEq`) - pub fn eq(mut a: L, mut b: R) -> bool where + pub fn eq(a: L, b: R) -> bool where L::Item: PartialEq, { - loop { - match (a.next(), b.next()) { - (None, None) => return true, - (None, _) | (_, None) => return false, - (Some(x), Some(y)) => if !x.eq(&y) { return false }, - } - } + a.eq(b) } /// Compares `a` and `b` for nonequality (Using partial equality, `PartialEq`) - pub fn ne(mut a: L, mut b: R) -> bool where + pub fn ne(a: L, b: R) -> bool where L::Item: PartialEq, { - loop { - match (a.next(), b.next()) { - (None, None) => return false, - (None, _) | (_, None) => return true, - (Some(x), Some(y)) => if x.ne(&y) { return true }, - } - } + a.ne(b) } /// Returns `a` < `b` lexicographically (Using partial order, `PartialOrd`) - pub fn lt(mut a: L, mut b: R) -> bool where + pub fn lt(a: L, b: R) -> bool where L::Item: PartialOrd, { - loop { - match (a.next(), b.next()) { - (None, None) => return false, - (None, _ ) => return true, - (_ , None) => return false, - (Some(x), Some(y)) => if x.ne(&y) { return x.lt(&y) }, - } - } + a.lt(b) } /// Returns `a` <= `b` lexicographically (Using partial order, `PartialOrd`) - pub fn le(mut a: L, mut b: R) -> bool where + pub fn le(a: L, b: R) -> bool where L::Item: PartialOrd, { - loop { - match (a.next(), b.next()) { - (None, None) => return true, - (None, _ ) => return true, - (_ , None) => return false, - (Some(x), Some(y)) => if x.ne(&y) { return x.le(&y) }, - } - } + a.le(b) } /// Returns `a` > `b` lexicographically (Using partial order, `PartialOrd`) - pub fn gt(mut a: L, mut b: R) -> bool where + pub fn gt(a: L, b: R) -> bool where L::Item: PartialOrd, { - loop { - match (a.next(), b.next()) { - (None, None) => return false, - (None, _ ) => return false, - (_ , None) => return true, - (Some(x), Some(y)) => if x.ne(&y) { return x.gt(&y) }, - } - } + a.gt(b) } /// 
Returns `a` >= `b` lexicographically (Using partial order, `PartialOrd`) - pub fn ge(mut a: L, mut b: R) -> bool where + pub fn ge(a: L, b: R) -> bool where L::Item: PartialOrd, { - loop { - match (a.next(), b.next()) { - (None, None) => return true, - (None, _ ) => return false, - (_ , None) => return true, - (Some(x), Some(y)) => if x.ne(&y) { return x.ge(&y) }, - } - } + a.ge(b) } } diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 551ea233f7627..78a467e365799 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -139,6 +139,7 @@ pub mod cmp; pub mod clone; pub mod default; pub mod convert; +pub mod borrow; /* Core types and methods on primitives */ diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index 9f4d61a50d57a..21038f25be3cf 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -254,6 +254,51 @@ macro_rules! unreachable { /// A standardised placeholder for marking unfinished code. It panics with the /// message `"not yet implemented"` when executed. +/// +/// This can be useful if you are prototyping and are just looking to have your +/// code typecheck, or if you're implementing a trait that requires multiple +/// methods, and you're only planning on using one of them. +/// +/// # Examples +/// +/// Here's an example of some in-progress code. We have a trait `Foo`: +/// +/// ``` +/// trait Foo { +/// fn bar(&self); +/// fn baz(&self); +/// } +/// ``` +/// +/// We want to implement `Foo` on one of our types, but we also want to work on +/// just `bar()` first. In order for our code to compile, we need to implement +/// `baz()`, so we can use `unimplemented!`: +/// +/// ``` +/// # trait Foo { +/// # fn foo(&self); +/// # fn bar(&self); +/// # } +/// struct MyStruct; +/// +/// impl Foo for MyStruct { +/// fn foo(&self) { +/// // implementation goes here +/// } +/// +/// fn bar(&self) { +/// // let's not worry about implementing bar() for now +/// unimplemented!(); +/// } +/// } +/// +/// fn main() { +/// let s = MyStruct; +/// s.foo(); +/// +/// // we aren't even using bar() yet, so this is fine. +/// } +/// ``` #[macro_export] #[unstable(feature = "core", reason = "relationship with panic is unclear")] diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index aa5eda68e9822..35dde63e52b52 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -45,7 +45,7 @@ pub trait Sized { } /// Types that can be "unsized" to a dynamically sized type. -#[unstable(feature = "unsize", issue = "27779")] +#[unstable(feature = "unsize", issue = "27732")] #[lang="unsize"] pub trait Unsize { // Empty. diff --git a/src/libcore/num/flt2dec/bignum.rs b/src/libcore/num/flt2dec/bignum.rs index ee1f6ffdd0aef..ee2ffbffab654 100644 --- a/src/libcore/num/flt2dec/bignum.rs +++ b/src/libcore/num/flt2dec/bignum.rs @@ -448,12 +448,10 @@ macro_rules! define_bignum { impl ::cmp::Ord for $name { fn cmp(&self, other: &$name) -> ::cmp::Ordering { use cmp::max; - use iter::order; - let sz = max(self.size, other.size); let lhs = self.base[..sz].iter().cloned().rev(); let rhs = other.base[..sz].iter().cloned().rev(); - order::cmp(lhs, rhs) + lhs.cmp(rhs) } } diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index 7c386c6c33e43..3fb720ab6c83c 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -423,7 +423,7 @@ pub trait Rem { fn rem(self, rhs: RHS) -> Self::Output; } -macro_rules! rem_impl { +macro_rules! rem_impl_integer { ($($t:ty)*) => ($( /// This operation satisfies `n % d == n - (n / d) * d`. The /// result has the same sign as the left operand. 
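A quick numeric check (a sketch, not part of the patch) of the identity and sign rule stated in that comment, which the floating-point impls added below share:

```rust
fn main() {
    // `%` is a remainder, not a mathematical modulus: the result takes
    // the sign of the left operand, and n % d == n - (n / d) * d.
    let (n, d) = (-7, 3);
    assert_eq!(n % d, -1);
    assert_eq!(n % d, n - (n / d) * d);
    // The same sign rule holds for the float impls.
    assert_eq!(-7.5_f64 % 2.0, -1.5);
}
```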
@@ -439,9 +439,28 @@ macro_rules! rem_impl { )*) } -rem_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 } +rem_impl_integer! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 } + +#[cfg(not(stage0))] +macro_rules! rem_impl_float { + ($($t:ty)*) => ($( + #[stable(feature = "rust1", since = "1.0.0")] + impl Rem for $t { + type Output = $t; + + #[inline] + fn rem(self, other: $t) -> $t { self % other } + } + + forward_ref_binop! { impl Rem, rem for $t, $t } + )*) +} + +#[cfg(not(stage0))] +rem_impl_float! { f32 f64 } #[stable(feature = "rust1", since = "1.0.0")] +#[cfg(stage0)] impl Rem for f32 { type Output = f32; @@ -463,6 +482,7 @@ impl Rem for f32 { } #[stable(feature = "rust1", since = "1.0.0")] +#[cfg(stage0)] impl Rem for f64 { type Output = f64; @@ -473,7 +493,9 @@ impl Rem for f64 { } } +#[cfg(stage0)] forward_ref_binop! { impl Rem, rem for f64, f64 } +#[cfg(stage0)] forward_ref_binop! { impl Rem, rem for f32, f32 } /// The `Neg` trait is used to specify the functionality of unary `-`. diff --git a/src/libcore/option.rs b/src/libcore/option.rs index e64048c82d839..a36a120689cc6 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -779,6 +779,26 @@ impl IntoIterator for Option { } } +#[stable(since = "1.4.0", feature = "option_iter")] +impl<'a, T> IntoIterator for &'a Option { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +#[stable(since = "1.4.0", feature = "option_iter")] +impl<'a, T> IntoIterator for &'a mut Option { + type Item = &'a mut T; + type IntoIter = IterMut<'a, T>; + + fn into_iter(mut self) -> IterMut<'a, T> { + self.iter_mut() + } +} + ///////////////////////////////////////////////////////////////////////////// // The Option Iterators ///////////////////////////////////////////////////////////////////////////// diff --git a/src/libcore/result.rs b/src/libcore/result.rs index 8300faa5a16fe..2546d9cd63d83 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -815,6 +815,26 @@ impl IntoIterator for Result { } } +#[stable(since = "1.4.0", feature = "result_iter")] +impl<'a, T, E> IntoIterator for &'a Result { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +#[stable(since = "1.4.0", feature = "result_iter")] +impl<'a, T, E> IntoIterator for &'a mut Result { + type Item = &'a mut T; + type IntoIter = IterMut<'a, T>; + + fn into_iter(mut self) -> IterMut<'a, T> { + self.iter_mut() + } +} + ///////////////////////////////////////////////////////////////////////////// // The Result Iterators ///////////////////////////////////////////////////////////////////////////// diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index e63eb9f4cf835..fdd5e61c8f27b 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -1557,7 +1557,7 @@ impl Eq for [T] {} #[stable(feature = "rust1", since = "1.0.0")] impl Ord for [T] { fn cmp(&self, other: &[T]) -> Ordering { - order::cmp(self.iter(), other.iter()) + self.iter().cmp(other.iter()) } } @@ -1565,22 +1565,22 @@ impl Ord for [T] { impl PartialOrd for [T] { #[inline] fn partial_cmp(&self, other: &[T]) -> Option { - order::partial_cmp(self.iter(), other.iter()) + self.iter().partial_cmp(other.iter()) } #[inline] fn lt(&self, other: &[T]) -> bool { - order::lt(self.iter(), other.iter()) + self.iter().lt(other.iter()) } #[inline] fn le(&self, other: &[T]) -> bool { - order::le(self.iter(), other.iter()) + self.iter().le(other.iter()) } #[inline] fn ge(&self, other: &[T]) -> bool 
{ - order::ge(self.iter(), other.iter()) + self.iter().ge(other.iter()) } #[inline] fn gt(&self, other: &[T]) -> bool { - order::gt(self.iter(), other.iter()) + self.iter().gt(other.iter()) } } diff --git a/src/libcoretest/char.rs b/src/libcoretest/char.rs index f03f8e0ee782e..d23442379bcd5 100644 --- a/src/libcoretest/char.rs +++ b/src/libcoretest/char.rs @@ -207,3 +207,12 @@ fn test_len_utf16() { assert!('\u{a66e}'.len_utf16() == 1); assert!('\u{1f4a9}'.len_utf16() == 2); } + +#[test] +fn test_decode_utf16() { + fn check(s: &[u16], expected: &[Result]) { + assert_eq!(::std::char::decode_utf16(s.iter().cloned()).collect::>(), expected); + } + check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]); + check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]); +} diff --git a/src/libcoretest/iter.rs b/src/libcoretest/iter.rs index ea65c118e5e98..9def44191db05 100644 --- a/src/libcoretest/iter.rs +++ b/src/libcoretest/iter.rs @@ -9,7 +9,6 @@ // except according to those terms. use core::iter::*; -use core::iter::order::*; use core::{i8, i16, isize}; use core::usize; @@ -21,51 +20,51 @@ fn test_lt() { let xs = [1,2,3]; let ys = [1,2,0]; - assert!(!lt(xs.iter(), ys.iter())); - assert!(!le(xs.iter(), ys.iter())); - assert!( gt(xs.iter(), ys.iter())); - assert!( ge(xs.iter(), ys.iter())); + assert!(!xs.iter().lt(ys.iter())); + assert!(!xs.iter().le(ys.iter())); + assert!( xs.iter().gt(ys.iter())); + assert!( xs.iter().ge(ys.iter())); - assert!( lt(ys.iter(), xs.iter())); - assert!( le(ys.iter(), xs.iter())); - assert!(!gt(ys.iter(), xs.iter())); - assert!(!ge(ys.iter(), xs.iter())); + assert!( ys.iter().lt(xs.iter())); + assert!( ys.iter().le(xs.iter())); + assert!(!ys.iter().gt(xs.iter())); + assert!(!ys.iter().ge(xs.iter())); - assert!( lt(empty.iter(), xs.iter())); - assert!( le(empty.iter(), xs.iter())); - assert!(!gt(empty.iter(), xs.iter())); - assert!(!ge(empty.iter(), xs.iter())); + assert!( empty.iter().lt(xs.iter())); + assert!( empty.iter().le(xs.iter())); + assert!(!empty.iter().gt(xs.iter())); + assert!(!empty.iter().ge(xs.iter())); // Sequence with NaN let u = [1.0f64, 2.0]; let v = [0.0f64/0.0, 3.0]; - assert!(!lt(u.iter(), v.iter())); - assert!(!le(u.iter(), v.iter())); - assert!(!gt(u.iter(), v.iter())); - assert!(!ge(u.iter(), v.iter())); + assert!(!u.iter().lt(v.iter())); + assert!(!u.iter().le(v.iter())); + assert!(!u.iter().gt(v.iter())); + assert!(!u.iter().ge(v.iter())); let a = [0.0f64/0.0]; let b = [1.0f64]; let c = [2.0f64]; - assert!(lt(a.iter(), b.iter()) == (a[0] < b[0])); - assert!(le(a.iter(), b.iter()) == (a[0] <= b[0])); - assert!(gt(a.iter(), b.iter()) == (a[0] > b[0])); - assert!(ge(a.iter(), b.iter()) == (a[0] >= b[0])); + assert!(a.iter().lt(b.iter()) == (a[0] < b[0])); + assert!(a.iter().le(b.iter()) == (a[0] <= b[0])); + assert!(a.iter().gt(b.iter()) == (a[0] > b[0])); + assert!(a.iter().ge(b.iter()) == (a[0] >= b[0])); - assert!(lt(c.iter(), b.iter()) == (c[0] < b[0])); - assert!(le(c.iter(), b.iter()) == (c[0] <= b[0])); - assert!(gt(c.iter(), b.iter()) == (c[0] > b[0])); - assert!(ge(c.iter(), b.iter()) == (c[0] >= b[0])); + assert!(c.iter().lt(b.iter()) == (c[0] < b[0])); + assert!(c.iter().le(b.iter()) == (c[0] <= b[0])); + assert!(c.iter().gt(b.iter()) == (c[0] > b[0])); + assert!(c.iter().ge(b.iter()) == (c[0] >= b[0])); } #[test] fn test_multi_iter() { let xs = [1,2,3,4]; let ys = [4,3,2,1]; - assert!(eq(xs.iter(), ys.iter().rev())); - assert!(lt(xs.iter(), xs.iter().skip(2))); + assert!(xs.iter().eq(ys.iter().rev())); + 
assert!(xs.iter().lt(xs.iter().skip(2))); } #[test] @@ -729,6 +728,26 @@ fn test_double_ended_chain() { assert_eq!(it.next_back().unwrap(), &5); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back(), None); + + + // test that .chain() is well behaved with an unfused iterator + struct CrazyIterator(bool); + impl CrazyIterator { fn new() -> CrazyIterator { CrazyIterator(false) } } + impl Iterator for CrazyIterator { + type Item = i32; + fn next(&mut self) -> Option { + if self.0 { Some(99) } else { self.0 = true; None } + } + } + + impl DoubleEndedIterator for CrazyIterator { + fn next_back(&mut self) -> Option { + self.next() + } + } + + assert_eq!(CrazyIterator::new().chain(0..10).rev().last(), Some(0)); + assert!((0..10).chain(CrazyIterator::new()).rev().any(|i| i == 0)); } #[test] diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs index 6313e42e0edc5..dda1b096e88ed 100644 --- a/src/libcoretest/lib.rs +++ b/src/libcoretest/lib.rs @@ -19,6 +19,7 @@ #![feature(float_from_str_radix)] #![feature(flt2dec)] #![feature(dec2flt)] +#![feature(decode_utf16)] #![feature(fmt_radix)] #![feature(iter_arith)] #![feature(iter_arith)] diff --git a/src/libcoretest/option.rs b/src/libcoretest/option.rs index 04271ed5dd1a1..3e564cf197061 100644 --- a/src/libcoretest/option.rs +++ b/src/libcoretest/option.rs @@ -180,11 +180,14 @@ fn test_iter() { assert_eq!(it.next(), Some(&val)); assert_eq!(it.size_hint(), (0, Some(0))); assert!(it.next().is_none()); + + let mut it = (&x).into_iter(); + assert_eq!(it.next(), Some(&val)); } #[test] fn test_mut_iter() { - let val = 5; + let mut val = 5; let new_val = 11; let mut x = Some(val); @@ -205,6 +208,10 @@ fn test_mut_iter() { assert!(it.next().is_none()); } assert_eq!(x, Some(new_val)); + + let mut y = Some(val); + let mut it = (&mut y).into_iter(); + assert_eq!(it.next(), Some(&mut val)); } #[test] diff --git a/src/libcoretest/result.rs b/src/libcoretest/result.rs index 02ea6b10e6e03..6e9f653dcd8ac 100644 --- a/src/libcoretest/result.rs +++ b/src/libcoretest/result.rs @@ -150,3 +150,36 @@ pub fn test_expect_err() { let err: Result = Err("All good"); err.expect("Got expected error"); } + +#[test] +pub fn test_iter() { + let ok: Result = Ok(100); + let mut it = ok.iter(); + assert_eq!(it.size_hint(), (1, Some(1))); + assert_eq!(it.next(), Some(&100)); + assert_eq!(it.size_hint(), (0, Some(0))); + assert!(it.next().is_none()); + assert_eq!((&ok).into_iter().next(), Some(&100)); + + let err: Result = Err("error"); + assert_eq!(err.iter().next(), None); +} + +#[test] +pub fn test_iter_mut() { + let mut ok: Result = Ok(100); + for loc in ok.iter_mut() { + *loc = 200; + } + assert_eq!(ok, Ok(200)); + for loc in &mut ok { + *loc = 300; + } + assert_eq!(ok, Ok(300)); + + let mut err: Result = Err("error"); + for loc in err.iter_mut() { + *loc = 200; + } + assert_eq!(err, Err("error")); +} diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index baa9750d311aa..5f907c6cbeee8 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -731,9 +731,14 @@ type X = u32; // ok! "##, E0133: r##" -Using unsafe functionality, such as dereferencing raw pointers and calling -functions via FFI or marked as unsafe, is potentially dangerous and disallowed -by safety checks. These safety checks can be relaxed for a section of the code +Using unsafe functionality, is potentially dangerous and disallowed +by safety checks. 
Examples: + +- Dereferencing raw pointers +- Calling functions via FFI +- Calling functions marked unsafe + +These safety checks can be relaxed for a section of the code by wrapping the unsafe instructions with an `unsafe` block. For instance: ``` @@ -831,9 +836,7 @@ is a size mismatch in one of the impls. It is also possible to manually transmute: ``` -let result: SomeType = mem::uninitialized(); -unsafe { copy_nonoverlapping(&v, &result) }; -result // `v` transmuted to type `SomeType` +ptr::read(&v as *const _ as *const SomeType) // `v` transmuted to `SomeType` ``` "##, diff --git a/src/librustc/middle/check_const.rs b/src/librustc/middle/check_const.rs index 1ed43a570410c..9153fd6484e2e 100644 --- a/src/librustc/middle/check_const.rs +++ b/src/librustc/middle/check_const.rs @@ -548,7 +548,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &ast::Expr, node_ty: Ty<'tcx>) { match node_ty.sty { ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor(v.tcx) => { + ty::TyEnum(def, _) if def.has_dtor() => { v.add_qualif(ConstQualif::NEEDS_DROP); if v.mode != Mode::Var { v.tcx.sess.span_err(e.span, diff --git a/src/librustc/middle/entry.rs b/src/librustc/middle/entry.rs index c6e5b654f9a5c..8cdd4f7fe74c5 100644 --- a/src/librustc/middle/entry.rs +++ b/src/librustc/middle/entry.rs @@ -11,20 +11,19 @@ use ast_map; use session::{config, Session}; -use syntax::ast::{Name, NodeId, Item, ItemFn}; +use syntax; +use syntax::ast::{NodeId, Item}; use syntax::attr; use syntax::codemap::Span; -use syntax::parse::token; +use syntax::entry::EntryPointType; use syntax::visit; use syntax::visit::Visitor; -struct EntryContext<'a, 'ast: 'a> { +struct EntryContext<'a> { session: &'a Session, - ast_map: &'a ast_map::Map<'ast>, - - // The interned Name for "main". - main_name: Name, + // The current depth in the ast + depth: usize, // The top-level function called 'main' main_fn: Option<(NodeId, Span)>, @@ -40,9 +39,11 @@ struct EntryContext<'a, 'ast: 'a> { non_main_fns: Vec<(NodeId, Span)> , } -impl<'a, 'ast, 'v> Visitor<'v> for EntryContext<'a, 'ast> { +impl<'a, 'v> Visitor<'v> for EntryContext<'a> { fn visit_item(&mut self, item: &Item) { + self.depth += 1; find_item(item, self); + self.depth -= 1; } } @@ -63,8 +64,7 @@ pub fn find_entry_point(session: &Session, ast_map: &ast_map::Map) { let mut ctxt = EntryContext { session: session, - main_name: token::intern("main"), - ast_map: ast_map, + depth: 0, main_fn: None, attr_main_fn: None, start_fn: None, @@ -77,44 +77,35 @@ pub fn find_entry_point(session: &Session, ast_map: &ast_map::Map) { } fn find_item(item: &Item, ctxt: &mut EntryContext) { - match item.node { - ItemFn(..) 
=> { - if item.ident.name == ctxt.main_name { - ctxt.ast_map.with_path(item.id, |path| { - if path.count() == 1 { - // This is a top-level function so can be 'main' - if ctxt.main_fn.is_none() { - ctxt.main_fn = Some((item.id, item.span)); - } else { - span_err!(ctxt.session, item.span, E0136, - "multiple 'main' functions"); - } - } else { - // This isn't main - ctxt.non_main_fns.push((item.id, item.span)); - } - }); + match syntax::entry::entry_point_type(item, ctxt.depth) { + EntryPointType::MainNamed => { + if ctxt.main_fn.is_none() { + ctxt.main_fn = Some((item.id, item.span)); + } else { + span_err!(ctxt.session, item.span, E0136, + "multiple 'main' functions"); } - - if attr::contains_name(&item.attrs, "main") { - if ctxt.attr_main_fn.is_none() { - ctxt.attr_main_fn = Some((item.id, item.span)); - } else { - span_err!(ctxt.session, item.span, E0137, - "multiple functions with a #[main] attribute"); - } + }, + EntryPointType::OtherMain => { + ctxt.non_main_fns.push((item.id, item.span)); + }, + EntryPointType::MainAttr => { + if ctxt.attr_main_fn.is_none() { + ctxt.attr_main_fn = Some((item.id, item.span)); + } else { + span_err!(ctxt.session, item.span, E0137, + "multiple functions with a #[main] attribute"); } - - if attr::contains_name(&item.attrs, "start") { - if ctxt.start_fn.is_none() { - ctxt.start_fn = Some((item.id, item.span)); - } else { - span_err!(ctxt.session, item.span, E0138, - "multiple 'start' functions"); - } + }, + EntryPointType::Start => { + if ctxt.start_fn.is_none() { + ctxt.start_fn = Some((item.id, item.span)); + } else { + span_err!(ctxt.session, item.span, E0138, + "multiple 'start' functions"); } - } - _ => () + }, + EntryPointType::None => () } visit::walk_item(ctxt, item); diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index ce03e10418d2e..d8f3ff3d9cb26 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -239,8 +239,9 @@ impl OverloadedCallType { // mem_categorization, it requires a TYPER, which is a type that // supplies types from the tree. After type checking is complete, you // can just use the tcx as the typer. - -pub struct ExprUseVisitor<'d, 't, 'a: 't, 'tcx:'a+'d> { +// +// FIXME(stage0): the :'t here is probably only important for stage0 +pub struct ExprUseVisitor<'d, 't, 'a: 't, 'tcx:'a+'d+'t> { typer: &'t infer::InferCtxt<'a, 'tcx>, mc: mc::MemCategorizationContext<'t, 'a, 'tcx>, delegate: &'d mut (Delegate<'tcx>+'d), diff --git a/src/librustc/middle/infer/higher_ranked/mod.rs b/src/librustc/middle/infer/higher_ranked/mod.rs index fb8da9b65daee..0c539a5d0e0c2 100644 --- a/src/librustc/middle/infer/higher_ranked/mod.rs +++ b/src/librustc/middle/infer/higher_ranked/mod.rs @@ -14,7 +14,6 @@ use super::{CombinedSnapshot, InferCtxt, HigherRankedType, SkolemizationMap}; use super::combine::CombineFields; -use middle::subst; use middle::ty::{self, TypeError, Binder}; use middle::ty_fold::{self, TypeFoldable}; use middle::ty_relate::{Relate, RelateResult, TypeRelation}; @@ -455,63 +454,6 @@ impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> { } } -/// Constructs and returns a substitution that, for a given type -/// scheme parameterized by `generics`, will replace every generic -/// parameter in the type with a skolemized type/region (which one can -/// think of as a "fresh constant", except at the type/region level of -/// reasoning). 
-/// -/// Since we currently represent bound/free type parameters in the -/// same way, this only has an effect on regions. -/// -/// (Note that unlike a substitution from `ty::construct_free_substs`, -/// this inserts skolemized regions rather than free regions; this -/// allows one to use `fn leak_check` to catch attmepts to unify the -/// skolemized regions with e.g. the `'static` lifetime) -pub fn construct_skolemized_substs<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - generics: &ty::Generics<'tcx>, - snapshot: &CombinedSnapshot) - -> (subst::Substs<'tcx>, SkolemizationMap) -{ - let mut map = FnvHashMap(); - - // map T => T - let mut types = subst::VecPerParamSpace::empty(); - push_types_from_defs(infcx.tcx, &mut types, generics.types.as_slice()); - - // map early- or late-bound 'a => fresh 'a - let mut regions = subst::VecPerParamSpace::empty(); - push_region_params(infcx, &mut map, &mut regions, generics.regions.as_slice(), snapshot); - - let substs = subst::Substs { types: types, - regions: subst::NonerasedRegions(regions) }; - return (substs, map); - - fn push_region_params<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - map: &mut SkolemizationMap, - regions: &mut subst::VecPerParamSpace, - region_params: &[ty::RegionParameterDef], - snapshot: &CombinedSnapshot) - { - for r in region_params { - let br = r.to_bound_region(); - let skol_var = infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot); - let sanity_check = map.insert(br, skol_var); - assert!(sanity_check.is_none()); - regions.push(r.space, skol_var); - } - } - - fn push_types_from_defs<'tcx>(tcx: &ty::ctxt<'tcx>, - types: &mut subst::VecPerParamSpace>, - defs: &[ty::TypeParameterDef<'tcx>]) { - for def in defs { - let ty = tcx.mk_param_from_def(def); - types.push(def.space, ty); - } - } -} - pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, binder: &ty::Binder, snapshot: &CombinedSnapshot) diff --git a/src/librustc/middle/infer/mod.rs b/src/librustc/middle/infer/mod.rs index 4e8ed01c6b9e0..158ef745de33f 100644 --- a/src/librustc/middle/infer/mod.rs +++ b/src/librustc/middle/infer/mod.rs @@ -948,15 +948,6 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { }) } - pub fn construct_skolemized_subst(&self, - generics: &ty::Generics<'tcx>, - snapshot: &CombinedSnapshot) - -> (subst::Substs<'tcx>, SkolemizationMap) { - /*! See `higher_ranked::construct_skolemized_subst` */ - - higher_ranked::construct_skolemized_substs(self, generics, snapshot) - } - pub fn skolemize_late_bound_regions(&self, value: &ty::Binder, snapshot: &CombinedSnapshot) diff --git a/src/librustc/middle/infer/region_inference/mod.rs b/src/librustc/middle/infer/region_inference/mod.rs index 1785fe09f87a4..d81f8e0ae9093 100644 --- a/src/librustc/middle/infer/region_inference/mod.rs +++ b/src/librustc/middle/infer/region_inference/mod.rs @@ -373,7 +373,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> { let sc = self.skolemization_count.get(); self.skolemization_count.set(sc + 1); - ReSkolemized(sc, br) + ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br) } pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region { diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 206e1f2ba641b..4eef000169668 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -355,9 +355,11 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // this properly would result in the necessity of computing *type* // reachability, which might result in a compile time loss. 
fn mark_destructors_reachable(&mut self) { - for (_, destructor_def_id) in self.tcx.destructor_for_type.borrow().iter() { - if destructor_def_id.is_local() { - self.reachable_symbols.insert(destructor_def_id.node); + for adt in self.tcx.adt_defs() { + if let Some(destructor_def_id) = adt.destructor() { + if destructor_def_id.is_local() { + self.reachable_symbols.insert(destructor_def_id.node); + } } } } diff --git a/src/librustc/middle/ty.rs b/src/librustc/middle/ty.rs index eff560653c140..e9be9010d4a8c 100644 --- a/src/librustc/middle/ty.rs +++ b/src/librustc/middle/ty.rs @@ -112,7 +112,7 @@ pub struct CrateAnalysis { #[derive(Copy, Clone)] pub enum DtorKind { NoDtor, - TraitDtor(DefId, bool) + TraitDtor(bool) } impl DtorKind { @@ -126,7 +126,7 @@ impl DtorKind { pub fn has_drop_flag(&self) -> bool { match self { &NoDtor => false, - &TraitDtor(_, flag) => flag + &TraitDtor(flag) => flag } } } @@ -797,12 +797,6 @@ pub struct ctxt<'tcx> { /// True if the variance has been computed yet; false otherwise. pub variance_computed: Cell, - /// A mapping from the def ID of an enum or struct type to the def ID - /// of the method that implements its destructor. If the type is not - /// present in this map, it does not have a destructor. This map is - /// populated during the coherence phase of typechecking. - pub destructor_for_type: RefCell>, - /// A method will be in this list if and only if it is a destructor. pub destructors: RefCell, @@ -1502,7 +1496,62 @@ pub struct DebruijnIndex { pub depth: u32, } -/// Representation of regions: +/// Representation of regions. +/// +/// Unlike types, most region variants are "fictitious", not concrete, +/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only +/// ones representing concrete regions. +/// +/// ## Bound Regions +/// +/// These are regions that are stored behind a binder and must be substituted +/// with some concrete region before being used. There are 2 kind of +/// bound regions: early-bound, which are bound in a TypeScheme/TraitDef, +/// and are substituted by a Substs, and late-bound, which are part of +/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by +/// the likes of `liberate_late_bound_regions`. The distinction exists +/// because higher-ranked lifetimes aren't supported in all places. See [1][2]. +/// +/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild" +/// outside their binder, e.g. in types passed to type inference, and +/// should first be substituted (by skolemized regions, free regions, +/// or region variables). +/// +/// ## Skolemized and Free Regions +/// +/// One often wants to work with bound regions without knowing their precise +/// identity. For example, when checking a function, the lifetime of a borrow +/// can end up being assigned to some region parameter. In these cases, +/// it must be ensured that bounds on the region can't be accidentally +/// assumed without being checked. +/// +/// The process of doing that is called "skolemization". The bound regions +/// are replaced by skolemized markers, which don't satisfy any relation +/// not explicity provided. +/// +/// There are 2 kinds of skolemized regions in rustc: `ReFree` and +/// `ReSkolemized`. When checking an item's body, `ReFree` is supposed +/// to be used. These also support explicit bounds: both the internally-stored +/// *scope*, which the region is assumed to outlive, as well as other +/// relations stored in the `FreeRegionMap`. 
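The documentation being added here distinguishes early-bound region parameters from the late-bound ones that appear in higher-ranked types such as `for<'a> fn(&'a ())`. A small illustration of what that quantification means to a caller in ordinary user code (hypothetical example, not part of the patch):

```rust
// A late-bound (higher-ranked) lifetime: the function type quantifies over
// every possible 'a, so the callee may apply `f` to a reference of any
// lifetime, including one local to its own body.
fn call_with_local(f: for<'a> fn(&'a str) -> usize) -> usize {
    let local = String::from("local");
    f(&local)
}

fn str_len(s: &str) -> usize {
    s.len()
}

fn main() {
    assert_eq!(call_with_local(str_len), 5);
}
```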
Note that these relations +/// aren't checked when you `make_subregion` (or `mk_eqty`), only by +/// `resolve_regions_and_report_errors`. +/// +/// When working with higher-ranked types, some region relations aren't +/// yet known, so you can't just call `resolve_regions_and_report_errors`. +/// `ReSkolemized` is designed for this purpose. In these contexts, +/// there's also the risk that some inference variable laying around will +/// get unified with your skolemized region: if you want to check whether +/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a` +/// with a skolemized region `'%a`, the variable `'_` would just be +/// instantiated to the skolemized region `'%a`, which is wrong because +/// the inference variable is supposed to satisfy the relation +/// *for every value of the skolemized region*. To ensure that doesn't +/// happen, you can use `leak_check`. This is more clearly explained +/// by infer/higher_ranked/README.md. +/// +/// [1] http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ +/// [2] http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ #[derive(Clone, PartialEq, Eq, Hash, Copy)] pub enum Region { // Region bound in a type or fn declaration which will be @@ -1532,7 +1581,7 @@ pub enum Region { /// A skolemized region - basically the higher-ranked version of ReFree. /// Should not exist after typeck. - ReSkolemized(u32, BoundRegion), + ReSkolemized(SkolemizedRegionVid, BoundRegion), /// Empty lifetime is for data that is never accessed. /// Bottom in the region lattice. We treat ReEmpty somewhat @@ -2168,6 +2217,11 @@ pub struct RegionVid { pub index: u32 } +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct SkolemizedRegionVid { + pub index: u32 +} + #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum InferTy { TyVar(TyVid), @@ -2997,7 +3051,7 @@ impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> { _ => return Err(TypeIsStructural), }; - if adt.has_dtor(tcx) { + if adt.has_dtor() { return Err(TypeHasDestructor) } @@ -3202,6 +3256,7 @@ bitflags! { const IS_PHANTOM_DATA = 1 << 3, const IS_SIMD = 1 << 4, const IS_FUNDAMENTAL = 1 << 5, + const IS_NO_DROP_FLAG = 1 << 6, } } @@ -3252,6 +3307,7 @@ pub struct FieldDefData<'tcx, 'container: 'tcx> { pub struct AdtDefData<'tcx, 'container: 'tcx> { pub did: DefId, pub variants: Vec>, + destructor: Cell>, flags: Cell, } @@ -3287,6 +3343,9 @@ impl<'tcx, 'container> AdtDefData<'tcx, 'container> { if attr::contains_name(&attrs, "fundamental") { flags = flags | AdtFlags::IS_FUNDAMENTAL; } + if attr::contains_name(&attrs, "unsafe_no_drop_flag") { + flags = flags | AdtFlags::IS_NO_DROP_FLAG; + } if tcx.lookup_simd(did) { flags = flags | AdtFlags::IS_SIMD; } @@ -3300,6 +3359,7 @@ impl<'tcx, 'container> AdtDefData<'tcx, 'container> { did: did, variants: variants, flags: Cell::new(flags), + destructor: Cell::new(None) } } @@ -3350,8 +3410,11 @@ impl<'tcx, 'container> AdtDefData<'tcx, 'container> { } /// Returns whether this type has a destructor. - pub fn has_dtor(&self, tcx: &ctxt<'tcx>) -> bool { - tcx.destructor_for_type.borrow().contains_key(&self.did) + pub fn has_dtor(&self) -> bool { + match self.dtor_kind() { + NoDtor => false, + TraitDtor(..) 
=> true + } } /// Asserts this is a struct and returns the struct's unique @@ -3413,6 +3476,24 @@ impl<'tcx, 'container> AdtDefData<'tcx, 'container> { _ => panic!("unexpected def {:?} in variant_of_def", def) } } + + pub fn destructor(&self) -> Option { + self.destructor.get() + } + + pub fn set_destructor(&self, dtor: DefId) { + assert!(self.destructor.get().is_none()); + self.destructor.set(Some(dtor)); + } + + pub fn dtor_kind(&self) -> DtorKind { + match self.destructor.get() { + Some(_) => { + TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG)) + } + None => NoDtor, + } + } } impl<'tcx, 'container> VariantDefData<'tcx, 'container> { @@ -3796,7 +3877,6 @@ impl<'tcx> ctxt<'tcx> { normalized_cache: RefCell::new(FnvHashMap()), lang_items: lang_items, provided_method_sources: RefCell::new(DefIdMap()), - destructor_for_type: RefCell::new(DefIdMap()), destructors: RefCell::new(DefIdSet()), inherent_impls: RefCell::new(DefIdMap()), impl_items: RefCell::new(DefIdMap()), @@ -4619,7 +4699,7 @@ impl<'tcx> TyS<'tcx> { }) }); - if def.has_dtor(cx) { + if def.has_dtor() { res = res | TC::OwnsDtor; } @@ -5957,18 +6037,6 @@ impl<'tcx> ctxt<'tcx> { self.with_path(id, |path| ast_map::path_to_string(path)) } - /* If struct_id names a struct with a dtor. */ - pub fn ty_dtor(&self, struct_id: DefId) -> DtorKind { - match self.destructor_for_type.borrow().get(&struct_id) { - Some(&method_def_id) => { - let flag = !self.has_attr(struct_id, "unsafe_no_drop_flag"); - - TraitDtor(method_def_id, flag) - } - None => NoDtor, - } - } - pub fn with_path(&self, id: DefId, f: F) -> T where F: FnOnce(ast_map::PathElems) -> T, { @@ -6053,6 +6121,11 @@ impl<'tcx> ctxt<'tcx> { self.lookup_adt_def_master(did) } + /// Return the list of all interned ADT definitions + pub fn adt_defs(&self) -> Vec> { + self.adt_defs.borrow().values().cloned().collect() + } + /// Given the did of an item, returns its full set of predicates. pub fn lookup_predicates(&self, did: DefId) -> GenericPredicates<'tcx> { lookup_locally_or_in_crate_store( @@ -6700,8 +6773,8 @@ impl<'tcx> ctxt<'tcx> { /// Returns true if this ADT is a dtorck type, i.e. 
whether it being /// safe for destruction requires it to be alive fn is_adt_dtorck(&self, adt: AdtDef<'tcx>) -> bool { - let dtor_method = match self.destructor_for_type.borrow().get(&adt.did) { - Some(dtor) => *dtor, + let dtor_method = match adt.destructor() { + Some(dtor) => dtor, None => return false }; let impl_did = self.impl_of_method(dtor_method).unwrap_or_else(|| { diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 48c2e1e6dca7a..ac51f46a7e943 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -418,7 +418,7 @@ impl fmt::Debug for ty::Region { } ty::ReSkolemized(id, ref bound_region) => { - write!(f, "ReSkolemized({}, {:?})", id, bound_region) + write!(f, "ReSkolemized({}, {:?})", id.index, bound_region) } ty::ReEmpty => write!(f, "ReEmpty") diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 3e3ce5c7be002..7f9128228a576 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -747,7 +747,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } LpExtend(ref lp_base, _, LpInterior(InteriorField(_))) => { match lp_base.to_type().sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) if def.has_dtor(self.tcx()) => { + ty::TyStruct(def, _) | ty::TyEnum(def, _) if def.has_dtor() => { // In the case where the owner implements drop, then // the path must be initialized to prevent a case of // partial reinitialization diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 308ae42c16f07..2d08183ba6eca 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -180,7 +180,7 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, mc::cat_interior(ref b, mc::InteriorElement(Kind::Pattern, _)) => { match b.ty.sty { ty::TyStruct(def, _) | ty::TyEnum(def, _) => { - if def.has_dtor(bccx.tcx) { + if def.has_dtor() { Some(cmt.clone()) } else { check_and_get_illegal_move_origin(bccx, b) diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index 1246449327d96..465fffe33851f 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -137,7 +137,7 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, mc::cat_interior(ref b, mc::InteriorField(_)) => { match b.ty.sty { ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor(bccx.tcx) => { + ty::TyEnum(def, _) if def.has_dtor() => { bccx.span_err( move_from.span, &format!("cannot move out of type `{}`, \ diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 7af2b3c1bfcc9..e582b9266cd94 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -1952,26 +1952,26 @@ impl LintPass for MissingCopyImplementations { if !cx.exported_items.contains(&item.id) { return; } - if cx.tcx.destructor_for_type.borrow().contains_key(&DefId::local(item.id)) { - return; - } - let ty = match item.node { + let (def, ty) = match item.node { ast::ItemStruct(_, ref ast_generics) => { if ast_generics.is_parameterized() { return; } - cx.tcx.mk_struct(cx.tcx.lookup_adt_def(DefId::local(item.id)), - cx.tcx.mk_substs(Substs::empty())) + let def = cx.tcx.lookup_adt_def(DefId::local(item.id)); + (def, cx.tcx.mk_struct(def, + 
cx.tcx.mk_substs(Substs::empty()))) } ast::ItemEnum(_, ref ast_generics) => { if ast_generics.is_parameterized() { return; } - cx.tcx.mk_enum(cx.tcx.lookup_adt_def(DefId::local(item.id)), - cx.tcx.mk_substs(Substs::empty())) + let def = cx.tcx.lookup_adt_def(DefId::local(item.id)); + (def, cx.tcx.mk_enum(def, + cx.tcx.mk_substs(Substs::empty()))) } _ => return, }; + if def.has_dtor() { return; } let parameter_environment = cx.tcx.empty_parameter_environment(); // FIXME (@jroesch) should probably inver this so that the parameter env still impls this // method @@ -2583,7 +2583,7 @@ impl LintPass for DropWithReprExtern { let self_type_did = self_type_def.did; let hints = ctx.tcx.lookup_repr_hints(self_type_did); if hints.iter().any(|attr| *attr == attr::ReprExtern) && - ctx.tcx.ty_dtor(self_type_did).has_drop_flag() { + self_type_def.dtor_kind().has_drop_flag() { let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); let self_defn_span = ctx.tcx.map.def_id_span(self_type_did, diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs index def9fdbb5219a..e964afc7b699e 100644 --- a/src/librustc_trans/trans/_match.rs +++ b/src/librustc_trans/trans/_match.rs @@ -875,8 +875,10 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, debug_loc: DebugLoc) -> Result<'blk, 'tcx> { fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, + lhs_data: ValueRef, + lhs_len: ValueRef, + rhs_data: ValueRef, + rhs_len: ValueRef, rhs_t: Ty<'tcx>, debug_loc: DebugLoc) -> Result<'blk, 'tcx> { @@ -884,10 +886,6 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, None, &format!("comparison of `{}`", rhs_t), StrEqFnLangItem); - let lhs_data = Load(cx, expr::get_dataptr(cx, lhs)); - let lhs_len = Load(cx, expr::get_meta(cx, lhs)); - let rhs_data = Load(cx, expr::get_dataptr(cx, rhs)); - let rhs_len = Load(cx, expr::get_meta(cx, rhs)); callee::trans_lang_call(cx, did, &[lhs_data, lhs_len, rhs_data, rhs_len], None, debug_loc) } @@ -899,7 +897,13 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, match rhs_t.sty { ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyStr => compare_str(cx, lhs, rhs, rhs_t, debug_loc), + ty::TyStr => { + let lhs_data = Load(cx, expr::get_dataptr(cx, lhs)); + let lhs_len = Load(cx, expr::get_meta(cx, lhs)); + let rhs_data = Load(cx, expr::get_dataptr(cx, rhs)); + let rhs_len = Load(cx, expr::get_meta(cx, rhs)); + compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) + } ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty { ty::TyUint(ast::TyU8) => { // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item, @@ -907,24 +911,24 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, let pat_len = val_ty(rhs).element_type().array_length(); let ty_str_slice = cx.tcx().mk_static_str(); - let rhs_str = alloc_ty(cx, ty_str_slice, "rhs_str"); - Store(cx, expr::get_dataptr(cx, rhs), expr::get_dataptr(cx, rhs_str)); - Store(cx, C_uint(cx.ccx(), pat_len), expr::get_meta(cx, rhs_str)); + let rhs_data = GEPi(cx, rhs, &[0, 0]); + let rhs_len = C_uint(cx.ccx(), pat_len); - let lhs_str; + let lhs_data; + let lhs_len; if val_ty(lhs) == val_ty(rhs) { // Both the discriminant and the pattern are thin pointers - lhs_str = alloc_ty(cx, ty_str_slice, "lhs_str"); - Store(cx, expr::get_dataptr(cx, lhs), expr::get_dataptr(cx, lhs_str)); - Store(cx, C_uint(cx.ccx(), pat_len), expr::get_meta(cx, lhs_str)); - } - else { + lhs_data = GEPi(cx, lhs, &[0, 0]); + lhs_len = C_uint(cx.ccx(), pat_len); + } else { 
// The discriminant is a fat pointer let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to(); - lhs_str = PointerCast(cx, lhs, llty_str_slice); + let lhs_str = PointerCast(cx, lhs, llty_str_slice); + lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str)); + lhs_len = Load(cx, expr::get_meta(cx, lhs_str)); } - compare_str(cx, lhs_str, rhs_str, rhs_t, debug_loc) + compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) }, _ => cx.sess().bug("only byte strings supported in compare_values"), }, @@ -1192,8 +1196,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let unsized_ty = def.struct_variant().fields.last().map(|field| { monomorphize::field_ty(bcx.tcx(), substs, field) }).unwrap(); - let llty = type_of::type_of(bcx.ccx(), unsized_ty); - let scratch = alloca_no_lifetime(bcx, llty, "__struct_field_fat_ptr"); + let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr"); let data = adt::trans_field_ptr(bcx, &*repr, struct_val, 0, arg_count); let len = Load(bcx, expr::get_meta(bcx, val.val)); Store(bcx, data, expr::get_dataptr(bcx, scratch)); @@ -1520,12 +1523,8 @@ fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &ast::Pat, match bm { ast::BindByValue(_) if !moves_by_default || reassigned => { - llmatch = alloca_no_lifetime(bcx, - llvariable_ty.ptr_to(), - "__llmatch"); - let llcopy = alloca_no_lifetime(bcx, - llvariable_ty, - &bcx.name(name)); + llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch"); + let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name)); trmode = if moves_by_default { TrByMoveIntoCopy(llcopy) } else { @@ -1536,15 +1535,11 @@ fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &ast::Pat, // in this case, the final type of the variable will be T, // but during matching we need to store a *T as explained // above - llmatch = alloca_no_lifetime(bcx, - llvariable_ty.ptr_to(), - &bcx.name(name)); + llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name)); trmode = TrByMoveRef; } ast::BindByRef(_) => { - llmatch = alloca_no_lifetime(bcx, - llvariable_ty, - &bcx.name(name)); + llmatch = alloca(bcx, llvariable_ty, &bcx.name(name)); trmode = TrByRef; } }; @@ -1745,6 +1740,7 @@ fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, // Subtle: be sure that we *populate* the memory *before* // we schedule the cleanup. 
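The `compare_str` refactor above passes the string's data pointer and length as separate values instead of loading them back out of a stack slot holding the fat pointer. For reference, a `&str` really is just that pair; a small user-level sketch of splitting and recombining it (illustrative only):

```rust
fn main() {
    let s = "hello world";

    // The two components the refactor threads separately:
    let data: *const u8 = s.as_ptr();
    let len: usize = s.len();

    // Recombining them is unsafe because we must uphold the slice and UTF-8
    // invariants ourselves; the generated string comparison works on exactly
    // this (data, len) pair.
    let rebuilt = unsafe {
        std::str::from_utf8_unchecked(std::slice::from_raw_parts(data, len))
    };
    assert_eq!(rebuilt, s);
}
```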
+ call_lifetime_start(bcx, llval); let bcx = populate(arg, bcx, datum); bcx.fcx.schedule_lifetime_end(cleanup_scope, llval); bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx)); diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs index 46211e6bd01ae..e425ffcaebf64 100644 --- a/src/librustc_trans/trans/adt.rs +++ b/src/librustc_trans/trans/adt.rs @@ -250,7 +250,7 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, monomorphize::field_ty(cx.tcx(), substs, field) }).collect::>(); let packed = cx.tcx().lookup_packed(def.did); - let dtor = cx.tcx().ty_dtor(def.did).has_drop_flag(); + let dtor = def.dtor_kind().has_drop_flag(); if dtor { ftys.push(cx.tcx().dtor_type()); } @@ -265,7 +265,7 @@ fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) .unwrap_or(&attr::ReprAny); - let dtor = cx.tcx().ty_dtor(def.did).has_drop_flag(); + let dtor = def.dtor_kind().has_drop_flag(); if cases.is_empty() { // Uninhabitable; represent as unit diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index 7cbb7862c61a4..28047ee5812e7 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -1020,17 +1020,10 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> let ccx = bcx.ccx(); let ty = type_of::type_of(ccx, t); assert!(!t.has_param_types()); - let val = alloca(bcx, ty, name); - return val; + alloca(bcx, ty, name) } pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - let p = alloca_no_lifetime(cx, ty, name); - call_lifetime_start(cx, p); - p -} - -pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef { let _icx = push_ctxt("alloca"); if cx.unreachable.get() { unsafe { @@ -1742,7 +1735,9 @@ pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, expr::SaveIn(d) => d, expr::Ignore => { if !type_is_zero_size(ccx, result_ty) { - alloc_ty(bcx, result_ty, "constructor_result") + let llresult = alloc_ty(bcx, result_ty, "constructor_result"); + call_lifetime_start(bcx, llresult); + llresult } else { C_undef(type_of::type_of(ccx, result_ty).ptr_to()) } diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs index efbe542a5e531..266038990ff13 100644 --- a/src/librustc_trans/trans/callee.rs +++ b/src/librustc_trans/trans/callee.rs @@ -725,7 +725,9 @@ pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, let llty = type_of::type_of(ccx, ret_ty); Some(common::C_undef(llty.ptr_to())) } else { - Some(alloc_ty(bcx, ret_ty, "__llret")) + let llresult = alloc_ty(bcx, ret_ty, "__llret"); + call_lifetime_start(bcx, llresult); + Some(llresult) } } else { None diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs index ecfbaf5790306..d226bc3f155df 100644 --- a/src/librustc_trans/trans/cleanup.rs +++ b/src/librustc_trans/trans/cleanup.rs @@ -730,8 +730,9 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx let prev_bcx = self.new_block(true, "resume", None); let personality = self.personality.get().expect( "create_landing_pad() should have set this"); - build::Resume(prev_bcx, - build::Load(prev_bcx, personality)); + let lp = build::Load(prev_bcx, personality); + base::call_lifetime_end(prev_bcx, personality); + build::Resume(prev_bcx, lp); prev_llbb = prev_bcx.llbb; break; } @@ -883,6 +884,7 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for 
FunctionContext<'blk, 'tcx } None => { let addr = base::alloca(pad_bcx, common::val_ty(llretval), ""); + base::call_lifetime_start(pad_bcx, addr); self.personality.set(Some(addr)); build::Store(pad_bcx, llretval, addr); } diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs index 0ae518fea2bd3..80e618861073e 100644 --- a/src/librustc_trans/trans/common.rs +++ b/src/librustc_trans/trans/common.rs @@ -504,7 +504,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { output: ty::FnOutput<'tcx>, name: &str) -> ValueRef { if self.needs_ret_allocas { - base::alloca_no_lifetime(bcx, match output { + base::alloca(bcx, match output { ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type), ty::FnDiverging => Type::void(bcx.ccx()) }, name) diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs index 2c8123412cddc..a57b5d1bbde26 100644 --- a/src/librustc_trans/trans/datum.rs +++ b/src/librustc_trans/trans/datum.rs @@ -101,7 +101,6 @@ use trans::cleanup; use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; use trans::expr; use trans::tvec; -use trans::type_of; use middle::ty::Ty; use std::fmt; @@ -302,12 +301,10 @@ pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, -> DatumBlock<'blk, 'tcx, Lvalue> where F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, { - let llty = type_of::type_of(bcx.ccx(), ty); - let scratch = alloca(bcx, llty, name); + let scratch = alloc_ty(bcx, ty, name); // Subtle. Populate the scratch memory *before* scheduling cleanup. let bcx = populate(arg, bcx, scratch); - bcx.fcx.schedule_lifetime_end(scope, scratch); bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) @@ -322,8 +319,8 @@ pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> Datum<'tcx, Rvalue> { - let llty = type_of::type_of(bcx.ccx(), ty); - let scratch = alloca(bcx, llty, name); + let scratch = alloc_ty(bcx, ty, name); + call_lifetime_start(bcx, scratch); Datum::new(scratch, ty, Rvalue::new(ByRef)) } @@ -500,7 +497,12 @@ impl<'tcx> Datum<'tcx, Rvalue> { ByValue => { lvalue_scratch_datum( bcx, self.ty, name, scope, self, - |this, bcx, llval| this.store_to(bcx, llval)) + |this, bcx, llval| { + call_lifetime_start(bcx, llval); + let bcx = this.store_to(bcx, llval); + bcx.fcx.schedule_lifetime_end(scope, llval); + bcx + }) } } } diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs index efccba91650ca..20d189a5cd75e 100644 --- a/src/librustc_trans/trans/expr.rs +++ b/src/librustc_trans/trans/expr.rs @@ -246,9 +246,8 @@ pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Maybe just get the value directly, instead of loading it? immediate_rvalue(load_ty(bcx, global, const_ty), const_ty) } else { - let llty = type_of::type_of(bcx.ccx(), const_ty); - // HACK(eddyb) get around issues with lifetime intrinsics. - let scratch = alloca_no_lifetime(bcx, llty, "const"); + let scratch = alloc_ty(bcx, const_ty, "const"); + call_lifetime_start(bcx, scratch); let lldest = if !const_ty.is_structural() { // Cast pointer to slot, because constants have different types. PointerCast(bcx, scratch, val_ty(global)) @@ -390,14 +389,7 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // (You might think there is a more elegant way to do this than a // skip_reborrows bool, but then you remember that the borrow checker exists). 
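The `apply_adjustments` and `__coerce_target` changes around here are the backend side of autoref and unsizing coercions. The surface-level behaviour they implement looks like this in ordinary code (hypothetical example, unrelated to the compiler internals themselves):

```rust
fn takes_str(s: &str) -> usize {
    s.len()
}

fn takes_slice(xs: &[u32]) -> usize {
    xs.len()
}

fn main() {
    let owned = String::from("hello");
    let arr = [1u32, 2, 3];

    assert_eq!(takes_str(&owned), 5);   // &String coerces to &str
    assert_eq!(takes_slice(&arr), 3);   // &[u32; 3] unsizes to the fat pointer &[u32]
}
```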
if skip_reborrows == 0 && adj.autoref.is_some() { - if !type_is_sized(bcx.tcx(), datum.ty) { - // Arrange cleanup - let lval = unpack_datum!(bcx, - datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id)); - datum = unpack_datum!(bcx, ref_fat_ptr(bcx, lval)); - } else { - datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); - } + datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); } if let Some(target) = adj.unsize { @@ -410,10 +402,9 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, datum.to_rvalue_datum(bcx, "__coerce_source")); let target = bcx.monomorphize(&target); - let llty = type_of::type_of(bcx.ccx(), target); - // HACK(eddyb) get around issues with lifetime intrinsics. - let scratch = alloca_no_lifetime(bcx, llty, "__coerce_target"); + let scratch = alloc_ty(bcx, target, "__coerce_target"); + call_lifetime_start(bcx, scratch); let target_datum = Datum::new(scratch, target, Rvalue::new(ByRef)); bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum); @@ -1267,7 +1258,7 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def::DefStruct(_) => { let ty = expr_ty(bcx, ref_expr); match ty.sty { - ty::TyStruct(def, _) if def.has_dtor(bcx.tcx()) => { + ty::TyStruct(def, _) if def.has_dtor() => { let repr = adt::represent_type(bcx.ccx(), ty); adt::trans_set_discr(bcx, &*repr, lldest, 0); } @@ -1447,7 +1438,11 @@ pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // temporary stack slot let addr = match dest { SaveIn(pos) => pos, - Ignore => alloc_ty(bcx, ty, "temp"), + Ignore => { + let llresult = alloc_ty(bcx, ty, "temp"); + call_lifetime_start(bcx, llresult); + llresult + } }; // This scope holds intermediates that must be cleaned should diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs index b1c85ce54b73f..4949539c136d0 100644 --- a/src/librustc_trans/trans/foreign.rs +++ b/src/librustc_trans/trans/foreign.rs @@ -296,10 +296,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Ensure that we always have the Rust value indirectly, // because it makes bitcasting easier. if !rust_indirect { - let scratch = - base::alloca(bcx, - type_of::type_of(ccx, passed_arg_tys[i]), - "__arg"); + let scratch = base::alloc_ty(bcx, passed_arg_tys[i], "__arg"); if type_is_fat_ptr(ccx.tcx(), passed_arg_tys[i]) { Store(bcx, llargs_rust[i + offset], expr::get_dataptr(bcx, scratch)); Store(bcx, llargs_rust[i + offset + 1], expr::get_meta(bcx, scratch)); @@ -432,6 +429,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // - Truncating foreign type to correct integral type and then // bitcasting to the struct type yields invalid cast errors. 
let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast"); + base::call_lifetime_start(bcx, llscratch); Store(bcx, llforeign_retval, llscratch); let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to()); let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to()); @@ -442,6 +440,7 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debug!("llrust_size={}", llrust_size); base::call_memcpy(bcx, llretptr_i8, llscratch_i8, C_uint(ccx, llrust_size), llalign as u32); + base::call_lifetime_end(bcx, llscratch); } } diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs index 93b637ecb4f65..40a290a27150e 100644 --- a/src/librustc_trans/trans/glue.rs +++ b/src/librustc_trans/trans/glue.rs @@ -16,11 +16,9 @@ use back::link::*; use llvm; use llvm::{ValueRef, get_param}; -use metadata::csearch; -use middle::def_id::{DefId, LOCAL_CRATE}; use middle::lang_items::ExchangeFreeFnLangItem; -use middle::subst; -use middle::subst::{Subst, Substs}; +use middle::subst::{Substs}; +use middle::traits; use middle::ty::{self, Ty}; use trans::adt; use trans::adt::GetDtorType; // for tcx.dtor_type() @@ -33,16 +31,15 @@ use trans::common::*; use trans::debuginfo::DebugLoc; use trans::declare; use trans::expr; -use trans::foreign; -use trans::inline; use trans::machine::*; use trans::monomorphize; -use trans::type_of::{type_of, type_of_dtor, sizing_type_of, align_of}; +use trans::type_of::{type_of, sizing_type_of, align_of}; use trans::type_::Type; use arena::TypedArena; use libc::c_uint; use syntax::ast; +use syntax::codemap::DUMMY_SP; pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, @@ -187,9 +184,12 @@ pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, skip_dtor: bool) -> Block<'blk, 'tcx> { let _icx = push_ctxt("drop_ty_immediate"); - let vp = alloca(bcx, type_of(bcx.ccx(), t), ""); + let vp = alloc_ty(bcx, t, ""); + call_lifetime_start(bcx, vp); store_ty(bcx, v, vp, t); - drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None) + let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None); + call_lifetime_end(bcx, vp); + bcx } pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { @@ -287,10 +287,7 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - struct_data: ValueRef, - dtor_did: DefId, - class_did: DefId, - substs: &subst::Substs<'tcx>) + struct_data: ValueRef) -> Block<'blk, 'tcx> { assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized"); @@ -318,59 +315,19 @@ fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None); with_cond(bcx, drop_flag_dtor_needed, |cx| { - trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs) + trans_struct_drop(cx, t, struct_data) }) } - -pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - parent_id: DefId, - substs: &Substs<'tcx>) - -> ValueRef { - let _icx = push_ctxt("trans_res_dtor"); - let did = inline::maybe_instantiate_inline(ccx, did); - - if !substs.types.is_empty() { - assert_eq!(did.krate, LOCAL_CRATE); - - // Since we're in trans we don't care for any region parameters - let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone())); - - let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None); - - val - } else if did.is_local() { - get_item_val(ccx, did.node) - } else { - let 
tcx = ccx.tcx(); - let name = csearch::get_symbol(&ccx.sess().cstore, did); - let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs); - let llty = type_of_dtor(ccx, class_ty); - foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv, - llty, ccx.tcx().mk_nil()) - } -} - fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - v0: ValueRef, - dtor_did: DefId, - class_did: DefId, - substs: &subst::Substs<'tcx>) + v0: ValueRef) -> Block<'blk, 'tcx> { debug!("trans_struct_drop t: {}", t); + let tcx = bcx.tcx(); + let mut bcx = bcx; - // Find and call the actual destructor - let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs); - - // Class dtors have no explicit args, so the params should - // just consist of the environment (self). - let params = unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr)); - ty.element_type().func_params() - }; - assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 }); + let def = t.ty_adt_def().unwrap(); // Be sure to put the contents into a scope so we can use an invoke // instruction to call the user destructor but still call the field @@ -384,15 +341,37 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // discriminant (if any) in case of variant swap in drop code. bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t); - let glue_type = get_drop_glue_type(bcx.ccx(), t); - let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil()); - let (_, bcx) = if type_is_sized(bcx.tcx(), t) { - invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None) + let (sized_args, unsized_args); + let args: &[ValueRef] = if type_is_sized(tcx, t) { + sized_args = [v0]; + &sized_args } else { - let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))]; - invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None) + unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))]; + &unsized_args }; + bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| { + let trait_ref = ty::Binder(ty::TraitRef { + def_id: tcx.lang_items.drop_trait().unwrap(), + substs: tcx.mk_substs(Substs::trans_empty().with_self_ty(t)) + }); + let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) { + traits::VtableImpl(data) => data, + _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t)) + }; + let dtor_did = def.destructor().unwrap(); + let datum = callee::trans_fn_ref_with_substs(bcx.ccx(), + dtor_did, + ExprId(0), + bcx.fcx.param_substs, + vtbl.substs); + callee::Callee { + bcx: bcx, + data: callee::Fn(datum.val), + ty: datum.ty + } + }, callee::ArgVals(args), Some(expr::Ignore)).bcx; + bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } @@ -557,27 +536,26 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK }) } } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { - let tcx = bcx.tcx(); - match (tcx.ty_dtor(def.did), skip_dtor) { - (ty::TraitDtor(dtor, true), false) => { + ty::TyStruct(def, _) | ty::TyEnum(def, _) => { + match (def.dtor_kind(), skip_dtor) { + (ty::TraitDtor(true), false) => { // FIXME(16758) Since the struct is unsized, it is hard to // find the drop flag (which is at the end of the struct). // Lets just ignore the flag and pretend everything will be // OK. 
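The rewritten `trans_struct_drop` above finds the destructor by selecting the type's `Drop` impl through the trait system rather than looking it up in the removed `destructor_for_type` side table. From the language's point of view a destructor is exactly that impl; a small sketch of the user-visible contract, using the later-stabilized `std::mem::needs_drop` query as a stand-in for the compiler's internal `has_dtor` check:

```rust
use std::mem;

struct Plain(u32);

struct Logged(String);

impl Drop for Logged {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    // `Plain` has no destructor of its own and owns nothing that needs one.
    assert!(!mem::needs_drop::<Plain>());
    // `Logged` has an explicit Drop impl; `String` needs drop glue for its buffer.
    assert!(mem::needs_drop::<Logged>());
    assert!(mem::needs_drop::<String>());

    let _x = Logged(String::from("x")); // prints when it goes out of scope
}
```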
if type_is_sized(bcx.tcx(), t) { - trans_struct_drop_flag(bcx, t, v0, dtor, def.did, substs) + trans_struct_drop_flag(bcx, t, v0) } else { // Give the user a heads up that we are doing something // stupid and dangerous. bcx.sess().warn(&format!("Ignoring drop flag in destructor for {}\ because the struct is unsized. See issue\ #16758", t)); - trans_struct_drop(bcx, t, v0, dtor, def.did, substs) + trans_struct_drop(bcx, t, v0) } } - (ty::TraitDtor(dtor, false), false) => { - trans_struct_drop(bcx, t, v0, dtor, def.did, substs) + (ty::TraitDtor(false), false) => { + trans_struct_drop(bcx, t, v0) } (ty::NoDtor, _) | (_, true) => { // No dtor? Just the default case diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs index 8b4b810214d2f..aab22290efed4 100644 --- a/src/librustc_trans/trans/intrinsic.rs +++ b/src/librustc_trans/trans/intrinsic.rs @@ -393,7 +393,9 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, expr::SaveIn(d) => d, expr::Ignore => { if !type_is_zero_size(ccx, ret_ty) { - alloc_ty(bcx, ret_ty, "intrinsic_result") + let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result"); + call_lifetime_start(bcx, llresult); + llresult } else { C_undef(llret_ty.ptr_to()) } @@ -964,6 +966,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, match dest { expr::Ignore => { bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location); + call_lifetime_end(bcx, llresult); } expr::SaveIn(_) => {} } diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs index f3a3268bebbd5..019c38869b27d 100644 --- a/src/librustc_trans/trans/tvec.rs +++ b/src/librustc_trans/trans/tvec.rs @@ -106,11 +106,11 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, debug!(" vt={}, count={}", vt.to_string(ccx), count); let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); - let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty); // Always create an alloca even if zero-sized, to preserve // the non-null invariant of the inner slice ptr - let llfixed = base::alloca(bcx, llfixed_ty, ""); + let llfixed = base::alloc_ty(bcx, fixed_ty, ""); + call_lifetime_start(bcx, llfixed); if count > 0 { // Arrange for the backing array to be cleaned up. 
diff --git a/src/librustc_trans/trans/type_of.rs b/src/librustc_trans/trans/type_of.rs index c8ea6e6ec4271..171d6961470bf 100644 --- a/src/librustc_trans/trans/type_of.rs +++ b/src/librustc_trans/trans/type_of.rs @@ -487,11 +487,3 @@ fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, format!("{}.{}", did.krate, tstr) } } - -pub fn type_of_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, self_ty: Ty<'tcx>) -> Type { - if type_is_sized(ccx.tcx(), self_ty) { - Type::func(&[type_of(ccx, self_ty).ptr_to()], &Type::void(ccx)) - } else { - Type::func(&type_of(ccx, self_ty).field_types(), &Type::void(ccx)) - } -} diff --git a/src/librustc_typeck/check/dropck.rs b/src/librustc_typeck/check/dropck.rs index b6a91ce8a64e3..a8c77f863b700 100644 --- a/src/librustc_typeck/check/dropck.rs +++ b/src/librustc_typeck/check/dropck.rs @@ -11,9 +11,11 @@ use check::regionck::{self, Rcx}; use middle::def_id::{DefId, LOCAL_CRATE}; +use middle::free_region::FreeRegionMap; use middle::infer; use middle::region; use middle::subst::{self, Subst}; +use middle::traits; use middle::ty::{self, Ty}; use util::nodemap::FnvHashSet; @@ -75,53 +77,23 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>( drop_impl_ty: &ty::Ty<'tcx>, self_type_did: DefId) -> Result<(), ()> { - // New strategy based on review suggestion from nikomatsakis. - // - // (In the text and code below, "named" denotes "struct/enum", and - // "generic params" denotes "type and region params") - // - // 1. Create fresh skolemized type/region "constants" for each of - // the named type's generic params. Instantiate the named type - // with the fresh constants, yielding `named_skolem`. - // - // 2. Create unification variables for each of the Drop impl's - // generic params. Instantiate the impl's Self's type with the - // unification-vars, yielding `drop_unifier`. - // - // 3. Attempt to unify Self_unif with Type_skolem. If unification - // succeeds, continue (i.e. with the predicate checks). - - let ty::TypeScheme { generics: ref named_type_generics, - ty: named_type } = - tcx.lookup_item_type(self_type_did); - - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None, false); - - infcx.commit_if_ok(|snapshot| { - let (named_type_to_skolem, skol_map) = - infcx.construct_skolemized_subst(named_type_generics, snapshot); - let named_type_skolem = named_type.subst(tcx, &named_type_to_skolem); - - let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); - let drop_to_unifier = - infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics); - let drop_unifier = drop_impl_ty.subst(tcx, &drop_to_unifier); - - if let Ok(()) = infer::mk_eqty(&infcx, true, infer::TypeOrigin::Misc(drop_impl_span), - named_type_skolem, drop_unifier) { - // Even if we did manage to equate the types, the process - // may have just gathered unsolvable region constraints - // like `R == 'static` (represented as a pair of subregion - // constraints) for some skolemization constant R. - // - // However, the leak_check method allows us to confirm - // that no skolemized regions escaped (i.e. were related - // to other regions in the constraint graph). - if let Ok(()) = infcx.leak_check(&skol_map, snapshot) { - return Ok(()) - } - } + assert!(drop_impl_did.is_local() && self_type_did.is_local()); + + // check that the impl type can be made to match the trait type. 
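The new dropck strategy above simply equates a freshly-instantiated copy of the impl's self type with the struct's declared type, so a `Drop` impl that is more specific than the type definition still fails with E0366. Illustrative user code (not part of the patch):

```rust
struct Wrapper<T>(T);

// Accepted: the impl repeats exactly the generic parameters of the
// definition, so the equality check described above succeeds.
impl<T> Drop for Wrapper<T> {
    fn drop(&mut self) {}
}

// Rejected if written instead of the impl above:
//
//     impl Drop for Wrapper<u32> {
//         fn drop(&mut self) {}
//     }
//
//     error[E0366]: Implementations of Drop cannot be specialized

fn main() {
    let _w = Wrapper(7u32);
}
```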
+ + let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_did.node); + let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(impl_param_env), true); + + let named_type = tcx.lookup_item_type(self_type_did).ty; + let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); + let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); + let fresh_impl_substs = + infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics); + let fresh_impl_self_ty = drop_impl_ty.subst(tcx, &fresh_impl_substs); + + if let Err(_) = infer::mk_eqty(&infcx, true, infer::TypeOrigin::Misc(drop_impl_span), + named_type, fresh_impl_self_ty) { span_err!(tcx.sess, drop_impl_span, E0366, "Implementations of Drop cannot be specialized"); let item_span = tcx.map.span(self_type_did.node); @@ -129,7 +101,17 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>( "Use same sequence of generic type and region \ parameters that is on the struct/enum definition"); return Err(()); - }) + } + + if let Err(ref errors) = infcx.fulfillment_cx.borrow_mut().select_all_or_error(&infcx) { + // this could be reached when we get lazy normalization + traits::report_fulfillment_errors(&infcx, errors); + return Err(()); + } + + let free_regions = FreeRegionMap::new(); + infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_did.node); + Ok(()) } /// Confirms that every predicate imposed by dtor_predicates is diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index aadd74708abc4..f8778fbc42d2d 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -311,9 +311,7 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { match self_type.ty.sty { ty::TyEnum(type_def, _) | ty::TyStruct(type_def, _) => { - tcx.destructor_for_type - .borrow_mut() - .insert(type_def.did, method_def_id.def_id()); + type_def.set_destructor(method_def_id.def_id()); tcx.destructors .borrow_mut() .insert(method_def_id.def_id()); diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 0223079b8bf39..0dbfffe5c644a 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -73,10 +73,39 @@ the enum. "##, E0025: r##" -Each field of a struct can only be bound once in a pattern. Each occurrence of a -field name binds the value of that field, so to fix this error you will have to -remove or alter the duplicate uses of the field name. Perhaps you misspelt -another field name? +Each field of a struct can only be bound once in a pattern. Erroneous code +example: + +``` +struct Foo { + a: u8, + b: u8, +} + +fn main(){ + let x = Foo { a:1, b:2 }; + + let Foo { a: x, a: y } = x; + // error: field `a` bound multiple times in the pattern +} +``` + +Each occurrence of a field name binds the value of that field, so to fix this +error you will have to remove or alter the duplicate uses of the field name. +Perhaps you misspelled another field name? Example: + +``` +struct Foo { + a: u8, + b: u8, +} + +fn main(){ + let x = Foo { a:1, b:2 }; + + let Foo { a: x, b: y } = x; // ok! +} +``` "##, E0026: r##" @@ -401,10 +430,35 @@ extern "C" { "##, E0046: r##" +Items are missing in a trait implementation. 
Erroneous code example: + +``` +trait Foo { + fn foo(); +} + +struct Bar; + +impl Foo for Bar {} +// error: not all trait items implemented, missing: `foo` +``` + When trying to make some type implement a trait `Foo`, you must, at minimum, provide implementations for all of `Foo`'s required methods (meaning the methods that do not have default implementations), as well as any required -trait items like associated types or constants. +trait items like associated types or constants. Example: + +``` +trait Foo { + fn foo(); +} + +struct Bar; + +impl Foo for Bar { + fn foo() {} // ok! +} +``` "##, E0049: r##" @@ -615,14 +669,62 @@ variadic functions (except for its C-FFI). E0062: r##" This error indicates that during an attempt to build a struct or struct-like -enum variant, one of the fields was specified more than once. Each field should -be specified exactly one time. +enum variant, one of the fields was specified more than once. Erroneous code +example: + +``` +struct Foo { + x: i32 +} + +fn main() { + let x = Foo { + x: 0, + x: 0, // error: field `x` specified more than once + }; +} +``` + +Each field should be specified exactly one time. Example: + +``` +struct Foo { + x: i32 +} + +fn main() { + let x = Foo { x: 0 }; // ok! +} +``` "##, E0063: r##" This error indicates that during an attempt to build a struct or struct-like -enum variant, one of the fields was not provided. Each field should be -specified exactly once. +enum variant, one of the fields was not provided. Erroneous code example: + +``` +struct Foo { + x: i32, + y: i32 +} + +fn main() { + let x = Foo { x: 0 }; // error: missing field: `y` +} +``` + +Each field should be specified exactly once. Example: + +``` +struct Foo { + x: i32, + y: i32 +} + +fn main() { + let x = Foo { x: 0, y: 0 }; // ok! +} +``` "##, E0066: r##" @@ -1025,7 +1127,7 @@ fn main() { } ``` -The number of supplied parameters much exactly match the number of defined type +The number of supplied parameters must exactly match the number of defined type parameters. "##, @@ -1620,6 +1722,12 @@ extern { E0131: r##" It is not possible to define `main` with type parameters, or even with function parameters. When `main` is present, it must take no arguments and return `()`. +Erroneous code example: + +``` +fn main() { // error: main function is not allowed to have type parameters +} +``` "##, E0132: r##" @@ -1627,7 +1735,7 @@ It is not possible to declare type parameters on a function that has the `start` attribute. Such a function must have the following type signature: ``` -fn(isize, *const *const u8) -> isize +fn(isize, *const *const u8) -> isize; ``` "##, @@ -1779,7 +1887,7 @@ rfcs/blob/master/text/0019-opt-in-builtin-traits.md). E0193: r##" `where` clauses must use generic type parameters: it does not make sense to use -them otherwise. An example causing this error: +them otherwise. An example causing this error: ``` trait Foo { @@ -1881,7 +1989,6 @@ unsafe impl Foo { } // converting it to this will fix it impl Foo { } ``` - "##, E0198: r##" @@ -1898,7 +2005,6 @@ unsafe impl !Clone for Foo { } // this will compile impl !Clone for Foo { } ``` - "##, E0199: r##" @@ -1916,7 +2022,6 @@ unsafe impl Bar for Foo { } // this will compile impl Bar for Foo { } ``` - "##, E0200: r##" @@ -1934,7 +2039,6 @@ impl Bar for Foo { } // this will compile unsafe impl Bar for Foo { } ``` - "##, E0201: r##" @@ -2717,6 +2821,36 @@ It is also possible to overload most operators for your own type by implementing traits from `std::ops`. 
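As a quick, hypothetical illustration of that last point about `std::ops` (user code, not part of this patch):

```rust
use std::ops::Add;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Meters(f64);

impl Add for Meters {
    type Output = Meters;

    fn add(self, rhs: Meters) -> Meters {
        Meters(self.0 + rhs.0)
    }
}

fn main() {
    assert_eq!(Meters(1.5) + Meters(2.5), Meters(4.0));
}
```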
"##, +E0370: r##" +The maximum value of an enum was reached, so it cannot be automatically +set in the next enum value. Erroneous code example: + +``` +enum Foo { + X = 0x7fffffffffffffff, + Y // error: enum discriminant overflowed on value after + // 9223372036854775807: i64; set explicitly via + // Y = -9223372036854775808 if that is desired outcome +} +``` + +To fix this, please set manually the next enum value or put the enum variant +with the maximum value at the end of the enum. Examples: + +``` +enum Foo { + X = 0x7fffffffffffffff, + Y = 0, // ok! +} + +// or: +enum Foo { + Y = 0, // ok! + X = 0x7fffffffffffffff, +} +``` +"##, + E0371: r##" When `Trait2` is a subtrait of `Trait1` (for example, when `Trait2` has a definition like `trait Trait2: Trait1 { ... }`), it is not allowed to implement @@ -2869,44 +3003,44 @@ https://doc.rust-lang.org/std/marker/struct.PhantomData.html } register_diagnostics! { - E0068, - E0085, - E0086, +// E0068, +// E0085, +// E0086, E0090, E0103, // @GuillaumeGomez: I was unable to get this error, try your best! E0104, E0118, E0122, - E0123, - E0127, - E0129, - E0141, +// E0123, +// E0127, +// E0129, +// E0141, // E0159, // use of trait `{}` as struct constructor E0163, E0164, E0167, // E0168, - E0173, // manual implementations of unboxed closure traits are experimental +// E0173, // manual implementations of unboxed closure traits are experimental E0174, // explicit use of unboxed closure methods are experimental E0182, E0183, - E0187, // can't infer the kind of the closure - E0188, // can not cast a immutable reference to a mutable pointer - E0189, // deprecated: can only cast a boxed pointer to a boxed object - E0190, // deprecated: can only cast a &-pointer to an &-object +// E0187, // can't infer the kind of the closure +// E0188, // can not cast a immutable reference to a mutable pointer +// E0189, // deprecated: can only cast a boxed pointer to a boxed object +// E0190, // deprecated: can only cast a &-pointer to an &-object E0196, // cannot determine a type for this closure E0203, // type parameter has more than one relaxed default bound, // and only one is supported E0208, - E0209, // builtin traits can only be implemented on structs or enums +// E0209, // builtin traits can only be implemented on structs or enums E0212, // cannot extract an associated type from a higher-ranked trait bound - E0213, // associated types are not accepted in this context +// E0213, // associated types are not accepted in this context E0214, // parenthesized parameters may only be used with a trait // E0215, // angle-bracket notation is not stable with `Fn` // E0216, // parenthetical notation is only stable with `Fn` - E0217, // ambiguous associated type, defined in multiple supertraits - E0218, // no associated type defined - E0219, // associated type defined in higher-ranked supertrait +// E0217, // ambiguous associated type, defined in multiple supertraits +// E0218, // no associated type defined +// E0219, // associated type defined in higher-ranked supertrait // E0222, // Error code E0045 (variadic function must have C calling // convention) duplicate E0224, // at least one non-builtin train is required for an object type @@ -2916,25 +3050,24 @@ register_diagnostics! 
{ E0229, // associated type bindings are not allowed here E0230, // there is no type parameter on trait E0231, // only named substitution parameters are allowed - E0233, - E0234, +// E0233, +// E0234, // E0235, // structure constructor specifies a structure of type but E0236, // no lang item for range syntax E0237, // no lang item for range syntax E0238, // parenthesized parameters may only be used with a trait - E0239, // `next` method of `Iterator` trait has unexpected type - E0240, - E0241, +// E0239, // `next` method of `Iterator` trait has unexpected type +// E0240, +// E0241, E0242, // internal error looking up a definition E0245, // not a trait - E0246, // invalid recursive type +// E0246, // invalid recursive type E0247, // found module name used as a type - E0319, // trait impls for defaulted traits allowed just for structs/enums +// E0319, // trait impls for defaulted traits allowed just for structs/enums E0320, // recursive overflow during dropck E0321, // extended coherence rules for defaulted traits violated E0328, // cannot implement Unsize explicitly E0329, // associated const depends on type parameter or Self. - E0370, // discriminant overflow E0374, // the trait `CoerceUnsized` may only be implemented for a coercion // between structures with one field being coerced, none found E0375, // the trait `CoerceUnsized` may only be implemented for a coercion diff --git a/src/librustc_unicode/char.rs b/src/librustc_unicode/char.rs index 780f8aa5be936..e08b3244109d5 100644 --- a/src/librustc_unicode/char.rs +++ b/src/librustc_unicode/char.rs @@ -503,3 +503,116 @@ impl char { ToUppercase(CaseMappingIter::new(conversions::to_upper(self))) } } + +/// An iterator that decodes UTF-16 encoded codepoints from an iterator of `u16`s. +#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] +#[derive(Clone)] +pub struct DecodeUtf16 where I: Iterator { + iter: I, + buf: Option, +} + +/// Create an iterator over the UTF-16 encoded codepoints in `iterable`, +/// returning unpaired surrogates as `Err`s. 
+/// +/// # Examples +/// +/// ``` +/// #![feature(decode_utf16)] +/// +/// use std::char::decode_utf16; +/// +/// fn main() { +/// // 𝄞music +/// let v = [0xD834, 0xDD1E, 0x006d, 0x0075, +/// 0x0073, 0xDD1E, 0x0069, 0x0063, +/// 0xD834]; +/// +/// assert_eq!(decode_utf16(v.iter().cloned()).collect::>(), +/// vec![Ok('𝄞'), +/// Ok('m'), Ok('u'), Ok('s'), +/// Err(0xDD1E), +/// Ok('i'), Ok('c'), +/// Err(0xD834)]); +/// } +/// ``` +/// +/// A lossy decoder can be obtained by replacing `Err` results with the replacement character: +/// +/// ``` +/// #![feature(decode_utf16)] +/// +/// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; +/// +/// fn main() { +/// // 𝄞music +/// let v = [0xD834, 0xDD1E, 0x006d, 0x0075, +/// 0x0073, 0xDD1E, 0x0069, 0x0063, +/// 0xD834]; +/// +/// assert_eq!(decode_utf16(v.iter().cloned()) +/// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)) +/// .collect::(), +/// "𝄞mus�ic�"); +/// } +/// ``` +#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] +#[inline] +pub fn decode_utf16>(iterable: I) -> DecodeUtf16 { + DecodeUtf16 { + iter: iterable.into_iter(), + buf: None, + } +} + +#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] +impl> Iterator for DecodeUtf16 { + type Item = Result; + + fn next(&mut self) -> Option> { + let u = match self.buf.take() { + Some(buf) => buf, + None => match self.iter.next() { + Some(u) => u, + None => return None + } + }; + + if u < 0xD800 || 0xDFFF < u { + // not a surrogate + Some(Ok(unsafe { from_u32_unchecked(u as u32) })) + } else if u >= 0xDC00 { + // a trailing surrogate + Some(Err(u)) + } else { + let u2 = match self.iter.next() { + Some(u2) => u2, + // eof + None => return Some(Err(u)) + }; + if u2 < 0xDC00 || u2 > 0xDFFF { + // not a trailing surrogate so we're not a valid + // surrogate pair, so rewind to redecode u2 next time. + self.buf = Some(u2); + return Some(Err(u)) + } + + // all ok, so lets decode it. + let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000; + Some(Ok(unsafe { from_u32_unchecked(c) })) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (low, high) = self.iter.size_hint(); + // we could be entirely valid surrogates (2 elements per + // char), or entirely non-surrogates (1 element per char) + (low / 2, high) + } +} + +/// U+FFFD REPLACEMENT CHARACTER (�) is used in Unicode to represent a decoding error. +/// It can occur, for example, when giving ill-formed UTF-8 bytes to `String::from_utf8_lossy`. +#[unstable(feature = "decode_utf16", reason = "recently added", issue = "27830")] +pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}'; diff --git a/src/librustc_unicode/lib.rs b/src/librustc_unicode/lib.rs index d046393cdeb08..4f0aa69d77199 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/librustc_unicode/lib.rs @@ -46,6 +46,7 @@ mod tables; mod u_str; pub mod char; +#[allow(deprecated)] pub mod str { pub use u_str::{UnicodeStr, SplitWhitespace}; pub use u_str::{utf8_char_width, is_utf16, Utf16Items, Utf16Item}; diff --git a/src/librustc_unicode/u_str.rs b/src/librustc_unicode/u_str.rs index f6e6ac508a75c..67333c98fcf84 100644 --- a/src/librustc_unicode/u_str.rs +++ b/src/librustc_unicode/u_str.rs @@ -13,8 +13,9 @@ //! This module provides functionality to `str` that requires the Unicode methods provided by the //! unicode parts of the CharExt trait. 
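The surrogate-pair branch of `DecodeUtf16::next` above combines a leading and a trailing surrogate into one scalar value. A quick standalone check of that arithmetic on a known pair (an illustrative test, not part of the patch):

```rust
fn main() {
    // U+1D11E MUSICAL SYMBOL G CLEF is encoded in UTF-16 as the pair below.
    let (hi, lo): (u16, u16) = (0xD834, 0xDD1E);

    // Same arithmetic as the decoder's pair-combining step.
    let c = (((hi - 0xD800) as u32) << 10 | (lo - 0xDC00) as u32) + 0x1_0000;

    assert_eq!(c, 0x1D11E);
    assert_eq!(std::char::from_u32(c), Some('\u{1D11E}'));
}
```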
+use char::{DecodeUtf16, decode_utf16}; use core::char; -use core::iter::Filter; +use core::iter::{Cloned, Filter}; use core::slice; use core::str::Split; @@ -119,11 +120,18 @@ pub fn is_utf16(v: &[u16]) -> bool { /// An iterator that decodes UTF-16 encoded codepoints from a vector /// of `u16`s. +#[deprecated(since = "1.4.0", reason = "renamed to `char::DecodeUtf16`")] +#[unstable(feature = "decode_utf16", reason = "not exposed in std", issue = "27830")] +#[allow(deprecated)] #[derive(Clone)] pub struct Utf16Items<'a> { - iter: slice::Iter<'a, u16> + decoder: DecodeUtf16>> } + /// The possibilities for values decoded from a `u16` stream. +#[deprecated(since = "1.4.0", reason = "`char::DecodeUtf16` uses `Result` instead")] +#[unstable(feature = "decode_utf16", reason = "not exposed in std", issue = "27830")] +#[allow(deprecated)] #[derive(Copy, PartialEq, Eq, Clone, Debug)] pub enum Utf16Item { /// A valid codepoint. @@ -132,6 +140,7 @@ pub enum Utf16Item { LoneSurrogate(u16) } +#[allow(deprecated)] impl Utf16Item { /// Convert `self` to a `char`, taking `LoneSurrogate`s to the /// replacement character (U+FFFD). @@ -144,49 +153,22 @@ impl Utf16Item { } } +#[deprecated(since = "1.4.0", reason = "use `char::DecodeUtf16` instead")] +#[unstable(feature = "decode_utf16", reason = "not exposed in std", issue = "27830")] +#[allow(deprecated)] impl<'a> Iterator for Utf16Items<'a> { type Item = Utf16Item; fn next(&mut self) -> Option { - let u = match self.iter.next() { - Some(u) => *u, - None => return None - }; - - if u < 0xD800 || 0xDFFF < u { - // not a surrogate - Some(Utf16Item::ScalarValue(unsafe { char::from_u32_unchecked(u as u32) })) - } else if u >= 0xDC00 { - // a trailing surrogate - Some(Utf16Item::LoneSurrogate(u)) - } else { - // preserve state for rewinding. - let old = self.iter.clone(); - - let u2 = match self.iter.next() { - Some(u2) => *u2, - // eof - None => return Some(Utf16Item::LoneSurrogate(u)) - }; - if u2 < 0xDC00 || u2 > 0xDFFF { - // not a trailing surrogate so we're not a valid - // surrogate pair, so rewind to redecode u2 next time. - self.iter = old.clone(); - return Some(Utf16Item::LoneSurrogate(u)) - } - - // all ok, so lets decode it. - let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000; - Some(Utf16Item::ScalarValue(unsafe { char::from_u32_unchecked(c) })) - } + self.decoder.next().map(|result| match result { + Ok(c) => Utf16Item::ScalarValue(c), + Err(s) => Utf16Item::LoneSurrogate(s), + }) } #[inline] fn size_hint(&self) -> (usize, Option) { - let (low, high) = self.iter.size_hint(); - // we could be entirely valid surrogates (2 elements per - // char), or entirely non-surrogates (1 element per char) - (low / 2, high) + self.decoder.size_hint() } } @@ -196,7 +178,7 @@ impl<'a> Iterator for Utf16Items<'a> { /// # Examples /// /// ``` -/// #![feature(unicode)] +/// #![feature(unicode, decode_utf16)] /// /// extern crate rustc_unicode; /// @@ -216,8 +198,11 @@ impl<'a> Iterator for Utf16Items<'a> { /// LoneSurrogate(0xD834)]); /// } /// ``` +#[deprecated(since = "1.4.0", reason = "renamed to `char::decode_utf16`")] +#[unstable(feature = "decode_utf16", reason = "not exposed in std", issue = "27830")] +#[allow(deprecated)] pub fn utf16_items<'a>(v: &'a [u16]) -> Utf16Items<'a> { - Utf16Items { iter : v.iter() } + Utf16Items { decoder: decode_utf16(v.iter().cloned()) } } /// Iterator adaptor for encoding `char`s to UTF-16. 
diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs
index e474f47a1b5aa..09f98978653e3 100644
--- a/src/libserialize/json.rs
+++ b/src/libserialize/json.rs
@@ -209,8 +209,6 @@ use std::str::FromStr;
 use std::string;
 use std::{char, f64, fmt, str};
 use std;
-use rustc_unicode::str as unicode_str;
-use rustc_unicode::str::Utf16Item;
 
 use Encodable;
 
@@ -1712,11 +1710,13 @@ impl<T: Iterator<Item = char>> Parser<T> {
                             _ => return self.error(UnexpectedEndOfHexEscape),
                         }
 
-                        let buf = [n1, try!(self.decode_hex_escape())];
-                        match unicode_str::utf16_items(&buf).next() {
-                            Some(Utf16Item::ScalarValue(c)) => res.push(c),
-                            _ => return self.error(LoneLeadingSurrogateInHexEscape),
+                        let n2 = try!(self.decode_hex_escape());
+                        if n2 < 0xDC00 || n2 > 0xDFFF {
+                            return self.error(LoneLeadingSurrogateInHexEscape)
                         }
+                        let c = (((n1 - 0xD800) as u32) << 10 |
+                                 (n2 - 0xDC00) as u32) + 0x1_0000;
+                        res.push(char::from_u32(c).unwrap());
                     }
 
                     n => match char::from_u32(n as u32) {
diff --git a/src/libstd/collections/mod.rs b/src/libstd/collections/mod.rs
index 4367dda84663e..83e28e39a7253 100644
--- a/src/libstd/collections/mod.rs
+++ b/src/libstd/collections/mod.rs
@@ -25,9 +25,9 @@
 //!
 //! Rust's collections can be grouped into four major categories:
 //!
-//! * Sequences: `Vec`, `VecDeque`, `LinkedList`, `BitVec`
-//! * Maps: `HashMap`, `BTreeMap`, `VecMap`
-//! * Sets: `HashSet`, `BTreeSet`, `BitSet`
+//! * Sequences: `Vec`, `VecDeque`, `LinkedList`
+//! * Maps: `HashMap`, `BTreeMap`
+//! * Sets: `HashSet`, `BTreeSet`
 //! * Misc: `BinaryHeap`
 //!
 //! # When Should You Use Which Collection?
@@ -70,22 +70,11 @@
 //! * You want to be able to get all of the entries in order on-demand.
 //! * You want a sorted map.
 //!
-//! ### Use a `VecMap` when:
-//! * You want a `HashMap` but with known to be small `usize` keys.
-//! * You want a `BTreeMap`, but with known to be small `usize` keys.
-//!
 //! ### Use the `Set` variant of any of these `Map`s when:
 //! * You just want to remember which keys you've seen.
 //! * There is no meaningful value to associate with your keys.
 //! * You just want a set.
 //!
-//! ### Use a `BitVec` when:
-//! * You want to store an unbounded number of booleans in a small space.
-//! * You want a bit vector.
-//!
-//! ### Use a `BitSet` when:
-//! * You want a `BitVec`, but want `Set` properties
-//!
 //! ### Use a `BinaryHeap` when:
 //!
 //! * You want to store a bunch of elements, but only ever want to process the
@@ -123,31 +112,20 @@
 //! | Vec        | O(1)           | O(n-i)*         | O(n-i)          | O(m)*  | O(n-i)          |
 //! | VecDeque   | O(1)           | O(min(i, n-i))* | O(min(i, n-i))  | O(m)*  | O(min(i, n-i))  |
 //! | LinkedList | O(min(i, n-i)) | O(min(i, n-i))  | O(min(i, n-i))  | O(1)   | O(min(i, n-i))  |
-//! | BitVec     | O(1)           | O(n-i)*         | O(n-i)          | O(m)*  | O(n-i)          |
 //!
 //! Note that where ties occur, Vec is generally going to be faster than VecDeque, and VecDeque
-//! is generally going to be faster than LinkedList. BitVec is not a general purpose collection, and
-//! therefore cannot reasonably be compared.
+//! is generally going to be faster than LinkedList.
 //!
 //! ## Maps
 //!
-//! For Sets, all operations have the cost of the equivalent Map operation. For
-//! BitSet,
-//! refer to VecMap.
+//! For Sets, all operations have the cost of the equivalent Map operation.
 //!
 //! |          | get       | insert   | remove   | predecessor |
 //! |----------|-----------|----------|----------|-------------|
 //! | HashMap  | O(1)~     | O(1)~*   | O(1)~    | N/A         |
 //! | BTreeMap | O(log n)  | O(log n) | O(log n) | O(log n)    |
-//! | VecMap   | O(1)      | O(1)?    | O(1)     | O(n)        |
-//!
-//! Note that VecMap is *incredibly* inefficient in terms of space. The O(1)
-//! insertion time assumes space for the element is already allocated.
-//! Otherwise, a large key may require a massive reallocation, with no direct
-//! relation to the number of elements in the collection. VecMap should only be
-//! seriously considered for small keys.
 //!
-//! Note also that BTreeMap's precise performance depends on the value of B.
+//! Note that BTreeMap's precise performance depends on the value of B.
 //!
 //! # Correct and Efficient Usage of Collections
 //!
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index 179f0727d46f3..fca4c66112eb6 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -242,6 +242,7 @@
 #![feature(unicode)]
 #![feature(unique)]
 #![feature(unsafe_no_drop_flag, filling_drop)]
+#![feature(decode_utf16)]
 #![feature(vec_push_all)]
 #![feature(vec_resize)]
 #![feature(wrapping)]
diff --git a/src/libstd/path.rs b/src/libstd/path.rs
index 71aed0408711e..66893ffd33071 100644
--- a/src/libstd/path.rs
+++ b/src/libstd/path.rs
@@ -598,8 +598,11 @@ impl<'a> Components<'a> {
     /// how much of the prefix is left from the point of view of iteration?
     #[inline]
     fn prefix_remaining(&self) -> usize {
-        if self.front == State::Prefix { self.prefix_len() }
-        else { 0 }
+        if self.front == State::Prefix {
+            self.prefix_len()
+        } else {
+            0
+        }
     }
 
     // Given the iteration so far, how much of the pre-State::Body path is left?
@@ -632,9 +635,11 @@ impl<'a> Components<'a> {
     /// ```
     /// use std::path::Path;
     ///
-    /// let path = Path::new("/tmp/foo/bar.txt");
+    /// let mut components = Path::new("/tmp/foo/bar.txt").components();
+    /// components.next();
+    /// components.next();
     ///
-    /// println!("{:?}", path.components().as_path());
+    /// assert_eq!(Path::new("foo/bar.txt"), components.as_path());
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn as_path(&self) -> &'a Path {
@@ -882,7 +887,7 @@ impl<'a> DoubleEndedIterator for Components<'a> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> cmp::PartialEq for Components<'a> {
     fn eq(&self, other: &Components<'a>) -> bool {
-        iter::order::eq(self.clone(), other.clone())
+        Iterator::eq(self.clone(), other.clone())
     }
 }
 
@@ -892,14 +897,14 @@ impl<'a> cmp::Eq for Components<'a> {}
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> cmp::PartialOrd for Components<'a> {
     fn partial_cmp(&self, other: &Components<'a>) -> Option<cmp::Ordering> {
-        iter::order::partial_cmp(self.clone(), other.clone())
+        Iterator::partial_cmp(self.clone(), other.clone())
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<'a> cmp::Ord for Components<'a> {
     fn cmp(&self, other: &Components<'a>) -> cmp::Ordering {
-        iter::order::cmp(self.clone(), other.clone())
+        Iterator::cmp(self.clone(), other.clone())
     }
 }
 
@@ -1162,14 +1167,14 @@ impl cmp::Eq for PathBuf {}
 #[stable(feature = "rust1", since = "1.0.0")]
 impl cmp::PartialOrd for PathBuf {
     fn partial_cmp(&self, other: &PathBuf) -> Option<cmp::Ordering> {
-        self.components().partial_cmp(&other.components())
+        self.components().partial_cmp(other.components())
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl cmp::Ord for PathBuf {
     fn cmp(&self, other: &PathBuf) -> cmp::Ordering {
-        self.components().cmp(&other.components())
+        self.components().cmp(other.components())
     }
 }
 
@@ -1691,7 +1696,7 @@ impl<'a> fmt::Display for Display<'a> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl cmp::PartialEq for Path {
     fn eq(&self, other: &Path) -> bool {
-        iter::order::eq(self.components(), other.components())
+        self.components().eq(other.components())
     }
 }
 
@@ -1701,14 +1706,14 @@ impl cmp::Eq for Path {}
 #[stable(feature = "rust1", since = "1.0.0")]
 impl cmp::PartialOrd for Path {
     fn partial_cmp(&self, other: &Path) -> Option<cmp::Ordering> {
-        self.components().partial_cmp(&other.components())
+        self.components().partial_cmp(other.components())
     }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl cmp::Ord for Path {
     fn cmp(&self, other: &Path) -> cmp::Ordering {
-        self.components().cmp(&other.components())
+        self.components().cmp(other.components())
     }
 }
 
diff --git a/src/libstd/sys/common/wtf8.rs b/src/libstd/sys/common/wtf8.rs
index 9e4a80a411bb1..eb313d275a191 100644
--- a/src/libstd/sys/common/wtf8.rs
+++ b/src/libstd/sys/common/wtf8.rs
@@ -37,7 +37,6 @@ use hash::{Hash, Hasher};
 use iter::FromIterator;
 use mem;
 use ops;
-use rustc_unicode::str::{Utf16Item, utf16_items};
 use slice;
 use str;
 use string::String;
@@ -186,14 +185,14 @@ impl Wtf8Buf {
     /// will always return the original code units.
     pub fn from_wide(v: &[u16]) -> Wtf8Buf {
         let mut string = Wtf8Buf::with_capacity(v.len());
-        for item in utf16_items(v) {
+        for item in char::decode_utf16(v.iter().cloned()) {
             match item {
-                Utf16Item::ScalarValue(c) => string.push_char(c),
-                Utf16Item::LoneSurrogate(s) => {
+                Ok(ch) => string.push_char(ch),
+                Err(surrogate) => {
                     // Surrogates are known to be in the code point range.
-                    let code_point = unsafe { CodePoint::from_u32_unchecked(s as u32) };
+                    let code_point = unsafe { CodePoint::from_u32_unchecked(surrogate as u32) };
                     // Skip the WTF-8 concatenation check,
-                    // surrogate pairs are already decoded by utf16_items
+                    // surrogate pairs are already decoded by decode_utf16
                     string.push_code_point_unchecked(code_point)
                 }
             }
diff --git a/src/libstd/sys/windows/os.rs b/src/libstd/sys/windows/os.rs
index 3e640ceaddddd..1680ea88d0bdb 100644
--- a/src/libstd/sys/windows/os.rs
+++ b/src/libstd/sys/windows/os.rs
@@ -74,7 +74,7 @@ pub fn error_string(errnum: i32) -> String {
                                  langId,
                                  buf.as_mut_ptr(),
                                  buf.len() as DWORD,
-                                 ptr::null());
+                                 ptr::null()) as usize;
         if res == 0 {
             // Sometimes FormatMessageW can fail e.g. system doesn't like langId,
             let fm_err = errno();
@@ -82,8 +82,7 @@ pub fn error_string(errnum: i32) -> String {
                           errnum, fm_err);
         }
 
-        let b = buf.iter().position(|&b| b == 0).unwrap_or(buf.len());
-        match String::from_utf16(&buf[..b]) {
+        match String::from_utf16(&buf[..res]) {
             Ok(mut msg) => {
                 // Trim trailing CRLF inserted by FormatMessageW
                 let len = msg.trim_right().len();
diff --git a/src/libsyntax/entry.rs b/src/libsyntax/entry.rs
new file mode 100644
index 0000000000000..b6c5d0066a233
--- /dev/null
+++ b/src/libsyntax/entry.rs
@@ -0,0 +1,42 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use attr;
+use ast::{Item, ItemFn};
+
+pub enum EntryPointType {
+    None,
+    MainNamed,
+    MainAttr,
+    Start,
+    OtherMain, // Not an entry point, but some other function named main
+}
+
+pub fn entry_point_type(item: &Item, depth: usize) -> EntryPointType {
+    match item.node {
+        ItemFn(..) => {
+            if attr::contains_name(&item.attrs, "start") {
+                EntryPointType::Start
+            } else if attr::contains_name(&item.attrs, "main") {
+                EntryPointType::MainAttr
+            } else if item.ident.name == "main" {
+                if depth == 1 {
+                    // This is a top-level function so can be 'main'
+                    EntryPointType::MainNamed
+                } else {
+                    EntryPointType::OtherMain
+                }
+            } else {
+                EntryPointType::None
+            }
+        }
+        _ => EntryPointType::None,
+    }
+}
diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs
index 0d1fa6dd7265a..d1c862ad40b25 100644
--- a/src/libsyntax/lib.rs
+++ b/src/libsyntax/lib.rs
@@ -90,6 +90,7 @@ pub mod attr;
 pub mod codemap;
 pub mod config;
 pub mod diagnostic;
+pub mod entry;
 pub mod feature_gate;
 pub mod fold;
 pub mod owned_slice;
diff --git a/src/libsyntax/test.rs b/src/libsyntax/test.rs
index c24c1364bd2a1..7cd44e5fb4eff 100644
--- a/src/libsyntax/test.rs
+++ b/src/libsyntax/test.rs
@@ -14,6 +14,7 @@
 #![allow(unused_imports)]
 use self::HasTestSignature::*;
 
+use std::iter;
 use std::slice;
 use std::mem;
 use std::vec;
@@ -24,6 +25,7 @@ use codemap::{DUMMY_SP, Span, ExpnInfo, NameAndSpan, MacroAttribute};
 use codemap;
 use diagnostic;
 use config;
+use entry::{self, EntryPointType};
 use ext::base::ExtCtxt;
 use ext::build::AstBuilder;
 use ext::expand::ExpansionConfig;
@@ -173,28 +175,6 @@ impl<'a> fold::Folder for TestHarnessGenerator<'a> {
         let tests = mem::replace(&mut self.tests, tests);
         let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
 
-        // Remove any #[main] from the AST so it doesn't clash with
-        // the one we're going to add. Only if compiling an executable.
-
-        mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
-            item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
-                ast::Item {
-                    id: id,
-                    ident: ident,
-                    attrs: attrs.into_iter().filter_map(|attr| {
-                        if !attr.check_name("main") {
-                            Some(attr)
-                        } else {
-                            None
-                        }
-                    }).collect(),
-                    node: node,
-                    vis: vis,
-                    span: span
-                }
-            })
-        });
-
         if !tests.is_empty() || !tested_submods.is_empty() {
             let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
             mod_folded.items.push(it);
@@ -211,6 +191,55 @@ impl<'a> fold::Folder for TestHarnessGenerator<'a> {
     }
 }
 
+struct EntryPointCleaner {
+    // Current depth in the ast
+    depth: usize,
+}
+
+impl fold::Folder for EntryPointCleaner {
+    fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
+        self.depth += 1;
+        let folded = fold::noop_fold_item(i, self).expect_one("noop did something");
+        self.depth -= 1;
+
+        // Remove any #[main] or #[start] from the AST so it doesn't
+        // clash with the one we're going to add, but mark it as
+        // #[allow(dead_code)] to avoid printing warnings.
+        let folded = match entry::entry_point_type(&*folded, self.depth) {
+            EntryPointType::MainNamed |
+            EntryPointType::MainAttr |
+            EntryPointType::Start =>
+                folded.map(|ast::Item {id, ident, attrs, node, vis, span}| {
+                    let allow_str = InternedString::new("allow");
+                    let dead_code_str = InternedString::new("dead_code");
+                    let allow_dead_code_item =
+                        attr::mk_list_item(allow_str,
+                                           vec![attr::mk_word_item(dead_code_str)]);
+                    let allow_dead_code = attr::mk_attr_outer(attr::mk_attr_id(),
+                                                              allow_dead_code_item);
+
+                    ast::Item {
+                        id: id,
+                        ident: ident,
+                        attrs: attrs.into_iter()
+                                    .filter(|attr| {
+                                        !attr.check_name("main") && !attr.check_name("start")
+                                    })
+                                    .chain(iter::once(allow_dead_code))
+                                    .collect(),
+                        node: node,
+                        vis: vis,
+                        span: span
+                    }
+                }),
+            EntryPointType::None |
+            EntryPointType::OtherMain => folded,
+        };
+
+        SmallVector::one(folded)
+    }
+}
+
 fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
                    tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
     let super_ = token::str_to_ident("super");
 
@@ -246,6 +275,10 @@ fn generate_test_harness(sess: &ParseSess,
                          krate: ast::Crate,
                          cfg: &ast::CrateConfig,
                          sd: &diagnostic::SpanHandler) -> ast::Crate {
+    // Remove the entry points
+    let mut cleaner = EntryPointCleaner { depth: 0 };
+    let krate = cleaner.fold_crate(krate);
+
     let mut feature_gated_cfgs = vec![];
     let mut cx: TestCtxt = TestCtxt {
         sess: sess,
diff --git a/src/rustbook/static/rustbook.css b/src/rustbook/static/rustbook.css
index 3e0537c5551f6..6b9e7aa58f247 100644
--- a/src/rustbook/static/rustbook.css
+++ b/src/rustbook/static/rustbook.css
@@ -27,7 +27,7 @@ h1, h2, h3, h4, h5, h6 {
 
 @media only screen {
     #toc {
-        position: absolute;
+        position: fixed;
         left: 0px;
        top: 0px;
        bottom: 0px;
@@ -44,11 +44,9 @@ h1, h2, h3, h4, h5, h6 {
 
     #page-wrapper {
        position: absolute;
-       overflow-y: auto;
        left: 310px;
        right: 0px;
        top: 0px;
-       bottom: 0px;
        box-sizing: border-box;
        background: none repeat scroll 0% 0% #FFF;
        -webkit-overflow-scrolling: touch;
diff --git a/src/test/compile-fail/reject-specialized-drops-8142.rs b/src/test/compile-fail/reject-specialized-drops-8142.rs
index 1e189528f18c7..b12e26fddf6d2 100644
--- a/src/test/compile-fail/reject-specialized-drops-8142.rs
+++ b/src/test/compile-fail/reject-specialized-drops-8142.rs
@@ -37,7 +37,9 @@ impl<'al,'adds_bnd> Drop for L<'al,'adds_bnd> where 'adds_bnd:'al {  // RE
 impl<'ml> Drop for M<'ml> { fn drop(&mut self) { } }    // ACCEPT
 
 impl Drop for N<'static> { fn drop(&mut self) { } }     // REJECT
-//~^ ERROR Implementations of Drop cannot be specialized
+//~^ ERROR mismatched types
+//~| expected `N<'n>`
+//~| found `N<'static>`
 
 impl Drop for O { fn drop(&mut self) { } }              // ACCEPT
 
@@ -57,9 +59,9 @@ impl<'t,Bt:'t> Drop for T<'t,Bt> { fn drop(&mut self) { } }   // ACCEPT
 impl Drop for U { fn drop(&mut self) { } }              // ACCEPT
 
 impl Drop for V { fn drop(&mut self) { } }              // REJECT
-//~^ERROR Implementations of Drop cannot be specialized
+//~^ ERROR Implementations of Drop cannot be specialized
 
 impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } }   // REJECT
-//~^ERROR Implementations of Drop cannot be specialized
+//~^ ERROR cannot infer an appropriate lifetime
 
 pub fn main() { }
diff --git a/src/test/compile-fail/test-warns-dead-code.rs b/src/test/compile-fail/test-warns-dead-code.rs
new file mode 100644
index 0000000000000..0e25f1e965ab9
--- /dev/null
+++ b/src/test/compile-fail/test-warns-dead-code.rs
@@ -0,0 +1,17 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --test
+
+#![deny(dead_code)]
+
+fn dead() {} //~ error: function is never used: `dead`
+
+fn main() {}
diff --git a/src/test/run-pass/issue-27997.rs b/src/test/run-pass/issue-27997.rs
new file mode 100644
index 0000000000000..cd81f68969377
--- /dev/null
+++ b/src/test/run-pass/issue-27997.rs
@@ -0,0 +1,48 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+
+use std::sync::atomic::{Ordering, AtomicUsize};
+
+use std::mem;
+struct S<U, V> {
+    _u: U,
+    size_of_u: usize,
+    _v: V,
+    size_of_v: usize
+}
+
+impl<U, V> S<U, V> {
+    fn new(u: U, v: V) -> Self {
+        S {
+            _u: u,
+            size_of_u: mem::size_of::<U>(),
+            _v: v,
+            size_of_v: mem::size_of::<V>()
+        }
+    }
+}
+
+static COUNT: AtomicUsize = AtomicUsize::new(0);
+
+impl<U, V> Drop for S<U, V> {
+    fn drop(&mut self) {
+        assert_eq!(mem::size_of::<U>(), self.size_of_u);
+        assert_eq!(mem::size_of::<V>(), self.size_of_v);
+        COUNT.store(COUNT.load(Ordering::SeqCst)+1, Ordering::SeqCst);
+    }
+}
+
+fn main() {
+    assert_eq!(COUNT.load(Ordering::SeqCst), 0);
+    { S::new(0u8, 1u16); }
+    assert_eq!(COUNT.load(Ordering::SeqCst), 1);
+}
diff --git a/src/test/run-pass/test-main-not-dead-attr.rs b/src/test/run-pass/test-main-not-dead-attr.rs
new file mode 100644
index 0000000000000..295559b6ddb6f
--- /dev/null
+++ b/src/test/run-pass/test-main-not-dead-attr.rs
@@ -0,0 +1,18 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --test
+
+#![feature(main)]
+
+#![deny(dead_code)]
+
+#[main]
+fn foo() { panic!(); }
diff --git a/src/test/run-pass/test-main-not-dead.rs b/src/test/run-pass/test-main-not-dead.rs
new file mode 100644
index 0000000000000..7de3ca7479659
--- /dev/null
+++ b/src/test/run-pass/test-main-not-dead.rs
@@ -0,0 +1,15 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --test
+
+#![deny(dead_code)]
+
+fn main() { panic!(); }
diff --git a/src/test/run-pass/test-runner-hides-buried-main.rs b/src/test/run-pass/test-runner-hides-buried-main.rs
new file mode 100644
index 0000000000000..7ba10850403e0
--- /dev/null
+++ b/src/test/run-pass/test-runner-hides-buried-main.rs
@@ -0,0 +1,24 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --test
+
+#![feature(main)]
+
+#![allow(dead_code)]
+
+mod a {
+    fn b() {
+        || {
+            #[main]
+            fn c() { panic!(); }
+        };
+    }
+}
diff --git a/src/test/run-pass/test-runner-hides-start.rs b/src/test/run-pass/test-runner-hides-start.rs
new file mode 100644
index 0000000000000..fc94b19ada1fb
--- /dev/null
+++ b/src/test/run-pass/test-runner-hides-start.rs
@@ -0,0 +1,16 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: --test
+
+#![feature(start)]
+
+#[start]
+fn start(_: isize, _: *const *const u8) -> isize { panic!(); }
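The new run-pass tests above exercise the `EntryPointCleaner` pass added to `libsyntax/test.rs`. As a rough, hand-written illustration of its effect (sketched from the fold logic, not actual compiler output), a `#[main]` function in a crate built with `--test` survives the pass but loses its entry-point attribute and gains `#[allow(dead_code)]`:

```rust
// What the user wrote in a crate compiled with `rustc --test`:
//
//     #[main]
//     fn foo() { panic!(); }
//
// Roughly what EntryPointCleaner leaves behind: the #[main]/#[start]
// attribute is filtered out so it cannot clash with the harness's
// synthesized entry point, and #[allow(dead_code)] is appended so the
// now-unreferenced function does not trip the dead_code lint.
#[allow(dead_code)]
fn foo() { panic!(); }

// Stand-in for the entry point the test harness generates.
fn main() {}
```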