
Commit 28d6623 (2 parents: 2cdbd5e + 2e038ed)

Auto merge of #38148 - frewsxcv:rollup, r=frewsxcv

Rollup of 15 pull requests

- Successful merges: #37859, #37919, #38020, #38028, #38029, #38065, #38073, #38077, #38089, #38090, #38096, #38112, #38113, #38130, #38141
- Failed merges:

21 files changed (+738 −93 lines)

src/bootstrap/README.md (+1 −1)

@@ -32,7 +32,7 @@ The script accepts commands, flags, and filters to determine what to do:
 # build the whole compiler
 ./x.py build

-# build the stage1 compier
+# build the stage1 compiler
 ./x.py build --stage 1

 # build stage0 libstd

src/doc/book/testing.md (+5 −5)

@@ -589,11 +589,11 @@ please see the [Documentation chapter](documentation.html).

 # Testing and concurrency

-One thing that is important to note when writing tests are run concurrently
-using threads. For this reason you should take care that your tests are written
-in such a way as to not depend on each-other, or on any shared state. "Shared
-state" can also include the environment, such as the current working directory,
-or environment variables.
+One thing that is important to note when writing tests is that they may be run
+concurrently using threads. For this reason you should take care that your tests
+are written in such a way as to not depend on each-other, or on any shared
+state. "Shared state" can also include the environment, such as the current
+working directory, or environment variables.

 If this is an issue it is possible to control this concurrency, either by
 setting the environment variable `RUST_TEST_THREADS`, or by passing the argument
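As a hedged aside (our addition, not part of the commit): the rewritten paragraph is easy to satisfy in practice, since tests that keep all their state local stay correct however the runner schedules them, and `RUST_TEST_THREADS=1 cargo test` remains the escape hatch when serialization is unavoidable.

```rust
// Hedged illustration, not part of the commit: each test owns its
// state, so the default concurrent test runner cannot make them
// interfere with one another.
#[test]
fn parses_pair_independently() {
    let input = "key=value";                 // local data, no shared files
    assert_eq!(input.split('=').count(), 2);
}

#[test]
fn builds_string_independently() {
    let out = format!("{}-{}", "a", "b");    // no env vars, no cwd access
    assert_eq!(out, "a-b");
}
```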

src/doc/reference.md (+5 −4)

@@ -740,13 +740,14 @@ There are several kinds of item:
 * [`extern crate` declarations](#extern-crate-declarations)
 * [`use` declarations](#use-declarations)
 * [modules](#modules)
-* [functions](#functions)
+* [function definitions](#functions)
+* [`extern` blocks](#external-blocks)
 * [type definitions](grammar.html#type-definitions)
-* [structs](#structs)
-* [enumerations](#enumerations)
+* [struct definitions](#structs)
+* [enumeration definitions](#enumerations)
 * [constant items](#constant-items)
 * [static items](#static-items)
-* [traits](#traits)
+* [trait definitions](#traits)
 * [implementations](#implementations)

 Some items form an implicit scope for the declaration of sub-items. In other

src/grammar/verify.rs (+19 −17)

@@ -23,23 +23,23 @@ use std::fs::File;
 use std::io::{BufRead, Read};
 use std::path::Path;

-use syntax::parse;
 use syntax::parse::lexer;
 use rustc::dep_graph::DepGraph;
 use rustc::session::{self, config};
 use rustc::middle::cstore::DummyCrateStore;

 use std::rc::Rc;
 use syntax::ast;
-use syntax::ast::Name;
 use syntax::codemap;
 use syntax::parse::token::{self, BinOpToken, DelimToken, Lit, Token};
 use syntax::parse::lexer::TokenAndSpan;
 use syntax_pos::Pos;

+use syntax::symbol::{Symbol, keywords};
+
 fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
     fn id() -> token::Token {
-        Token::Ident(ast::Ident::with_empty_ctxt(Name(0)))
+        Token::Ident(ast::Ident::with_empty_ctxt(keywords::Invalid.name()))
     }

     let mut res = HashMap::new();
@@ -65,7 +65,7 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
         "SHL" => Token::BinOp(BinOpToken::Shl),
         "LBRACE" => Token::OpenDelim(DelimToken::Brace),
         "RARROW" => Token::RArrow,
-        "LIT_STR" => Token::Literal(Lit::Str_(Name(0)), None),
+        "LIT_STR" => Token::Literal(Lit::Str_(keywords::Invalid.name()), None),
         "DOTDOT" => Token::DotDot,
         "MOD_SEP" => Token::ModSep,
         "DOTDOTDOT" => Token::DotDotDot,
@@ -75,21 +75,22 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
         "ANDAND" => Token::AndAnd,
         "AT" => Token::At,
         "LBRACKET" => Token::OpenDelim(DelimToken::Bracket),
-        "LIT_STR_RAW" => Token::Literal(Lit::StrRaw(Name(0), 0), None),
+        "LIT_STR_RAW" => Token::Literal(Lit::StrRaw(keywords::Invalid.name(), 0), None),
         "RPAREN" => Token::CloseDelim(DelimToken::Paren),
         "SLASH" => Token::BinOp(BinOpToken::Slash),
         "COMMA" => Token::Comma,
-        "LIFETIME" => Token::Lifetime(ast::Ident::with_empty_ctxt(Name(0))),
+        "LIFETIME" => Token::Lifetime(
+            ast::Ident::with_empty_ctxt(keywords::Invalid.name())),
         "CARET" => Token::BinOp(BinOpToken::Caret),
         "TILDE" => Token::Tilde,
         "IDENT" => id(),
         "PLUS" => Token::BinOp(BinOpToken::Plus),
-        "LIT_CHAR" => Token::Literal(Lit::Char(Name(0)), None),
-        "LIT_BYTE" => Token::Literal(Lit::Byte(Name(0)), None),
+        "LIT_CHAR" => Token::Literal(Lit::Char(keywords::Invalid.name()), None),
+        "LIT_BYTE" => Token::Literal(Lit::Byte(keywords::Invalid.name()), None),
         "EQ" => Token::Eq,
         "RBRACKET" => Token::CloseDelim(DelimToken::Bracket),
         "COMMENT" => Token::Comment,
-        "DOC_COMMENT" => Token::DocComment(Name(0)),
+        "DOC_COMMENT" => Token::DocComment(keywords::Invalid.name()),
         "DOT" => Token::Dot,
         "EQEQ" => Token::EqEq,
         "NE" => Token::Ne,
@@ -99,9 +100,9 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
         "BINOP" => Token::BinOp(BinOpToken::Plus),
         "POUND" => Token::Pound,
         "OROR" => Token::OrOr,
-        "LIT_INTEGER" => Token::Literal(Lit::Integer(Name(0)), None),
+        "LIT_INTEGER" => Token::Literal(Lit::Integer(keywords::Invalid.name()), None),
         "BINOPEQ" => Token::BinOpEq(BinOpToken::Plus),
-        "LIT_FLOAT" => Token::Literal(Lit::Float(Name(0)), None),
+        "LIT_FLOAT" => Token::Literal(Lit::Float(keywords::Invalid.name()), None),
         "WHITESPACE" => Token::Whitespace,
         "UNDERSCORE" => Token::Underscore,
         "MINUS" => Token::BinOp(BinOpToken::Minus),
@@ -111,10 +112,11 @@ fn parse_token_list(file: &str) -> HashMap<String, token::Token> {
         "OR" => Token::BinOp(BinOpToken::Or),
         "GT" => Token::Gt,
         "LE" => Token::Le,
-        "LIT_BINARY" => Token::Literal(Lit::ByteStr(Name(0)), None),
-        "LIT_BINARY_RAW" => Token::Literal(Lit::ByteStrRaw(Name(0), 0), None),
+        "LIT_BINARY" => Token::Literal(Lit::ByteStr(keywords::Invalid.name()), None),
+        "LIT_BINARY_RAW" => Token::Literal(
+            Lit::ByteStrRaw(keywords::Invalid.name(), 0), None),
         "QUESTION" => Token::Question,
-        "SHEBANG" => Token::Shebang(Name(0)),
+        "SHEBANG" => Token::Shebang(keywords::Invalid.name()),
         _ => continue,
     };

@@ -158,7 +160,7 @@ fn fix(mut lit: &str) -> ast::Name {
     let leading_hashes = count(lit);

     // +1/-1 to adjust for single quotes
-    parse::token::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1])
+    Symbol::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1])
 }

 /// Assuming a char/byte literal, strip the 'b' prefix and the single quotes.
@@ -168,7 +170,7 @@ fn fixchar(mut lit: &str) -> ast::Name {
         lit = &lit[1..];
     }

-    parse::token::intern(&lit[1..lit.len() - 1])
+    Symbol::intern(&lit[1..lit.len() - 1])
 }

 fn count(lit: &str) -> usize {
@@ -196,7 +198,7 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, token::Token>, surrogate_
     let not_found = format!("didn't find token {:?} in the map", toknum);
     let proto_tok = tokens.get(toknum).expect(&not_found[..]);

-    let nm = parse::token::intern(content);
+    let nm = Symbol::intern(content);

     debug!("What we got: content (`{}`), proto: {:?}", content, proto_tok);

src/libcore/option.rs (+10)

@@ -659,6 +659,16 @@ impl<T> Option<T> {
 impl<'a, T: Clone> Option<&'a T> {
     /// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
     /// option.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let x = 12;
+    /// let opt_x = Some(&x);
+    /// assert_eq!(opt_x, Some(&12));
+    /// let cloned = opt_x.cloned();
+    /// assert_eq!(cloned, Some(12));
+    /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn cloned(self) -> Option<T> {
         self.map(|t| t.clone())

src/librustc/session/config.rs (+2)

@@ -886,6 +886,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
           "enable incremental compilation (experimental)"),
     incremental_info: bool = (false, parse_bool, [UNTRACKED],
           "print high-level information about incremental reuse (or the lack thereof)"),
+    incremental_dump_hash: bool = (false, parse_bool, [UNTRACKED],
+          "dump hash information in textual format to stdout"),
     dump_dep_graph: bool = (false, parse_bool, [UNTRACKED],
           "dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv)"),
     query_dep_graph: bool = (false, parse_bool, [UNTRACKED],

src/librustc_incremental/persist/file_format.rs (+15 −1)

@@ -24,6 +24,7 @@ use std::path::Path;
 use std::fs::File;
 use std::env;

+use rustc::session::Session;
 use rustc::session::config::nightly_options;

 /// The first few bytes of files generated by incremental compilation
@@ -59,7 +60,7 @@ pub fn write_file_header<W: io::Write>(stream: &mut W) -> io::Result<()> {
 /// incompatible version of the compiler.
 /// - Returns `Err(..)` if some kind of IO error occurred while reading the
 ///   file.
-pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
+pub fn read_file(sess: &Session, path: &Path) -> io::Result<Option<Vec<u8>>> {
     if !path.exists() {
         return Ok(None);
     }
@@ -72,6 +73,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
        let mut file_magic = [0u8; 4];
        file.read_exact(&mut file_magic)?;
        if file_magic != FILE_MAGIC {
+           report_format_mismatch(sess, path, "Wrong FILE_MAGIC");
            return Ok(None)
        }
    }
@@ -85,6 +87,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
            ((header_format_version[1] as u16) << 8);

        if header_format_version != HEADER_FORMAT_VERSION {
+           report_format_mismatch(sess, path, "Wrong HEADER_FORMAT_VERSION");
            return Ok(None)
        }
    }
@@ -99,6 +102,7 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
        file.read_exact(&mut buffer[..])?;

        if &buffer[..] != rustc_version().as_bytes() {
+           report_format_mismatch(sess, path, "Different compiler version");
            return Ok(None);
        }
    }
@@ -109,6 +113,16 @@ pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
     Ok(Some(data))
 }

+fn report_format_mismatch(sess: &Session, file: &Path, message: &str) {
+    debug!("read_file: {}", message);
+
+    if sess.opts.debugging_opts.incremental_info {
+        println!("incremental: ignoring cache artifact `{}`: {}",
+                 file.file_name().unwrap().to_string_lossy(),
+                 message);
+    }
+}
+
 fn rustc_version() -> String {
     if nightly_options::is_nightly_build() {
         if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") {
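A hedged sketch of the new contract (not part of the diff; `use_cached_bytes`, `start_from_clean_slate`, and `report_io_error` are hypothetical stand-ins): `read_file` now takes the `Session` so it can explain why a cache file was rejected, while callers still distinguish the same three outcomes, as the call sites in hash.rs and load.rs below show.

```rust
// Hedged sketch of a read_file call site (rustc-internal API as of this
// commit; helper names are hypothetical stand-ins, not real functions).
match file_format::read_file(sess, &path) {
    // The cache file exists and matches this compiler's format.
    Ok(Some(data)) => use_cached_bytes(data),
    // Missing, or written by an incompatible compiler; with
    // -Z incremental-info the reason is now printed to stdout.
    Ok(None) => start_from_clean_slate(),
    // An actual I/O error while reading the file.
    Err(err) => report_io_error(err),
}
```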

src/librustc_incremental/persist/fs.rs (+2 −2)

@@ -435,8 +435,8 @@ fn copy_files(target_dir: &Path,
     }

     if print_stats_on_success {
-        println!("incr. comp. session directory: {} files hard-linked", files_linked);
-        println!("incr. comp. session directory: {} files copied", files_copied);
+        println!("incremental: session directory: {} files hard-linked", files_linked);
+        println!("incremental: session directory: {} files copied", files_copied);
     }

     Ok(files_linked > 0 || files_copied == 0)

src/librustc_incremental/persist/hash.rs (+1 −1)

@@ -156,7 +156,7 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> {

        let hashes_file_path = metadata_hash_import_path(&session_dir);

-       match file_format::read_file(&hashes_file_path)
+       match file_format::read_file(self.tcx.sess, &hashes_file_path)
        {
            Ok(Some(data)) => {
                match self.load_from_data(cnum, &data, svh) {

src/librustc_incremental/persist/load.rs (+34 −11)

@@ -93,7 +93,7 @@ fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 }

 fn load_data(sess: &Session, path: &Path) -> Option<Vec<u8>> {
-    match file_format::read_file(path) {
+    match file_format::read_file(sess, path) {
        Ok(Some(data)) => return Some(data),
        Ok(None) => {
            // The file either didn't exist or was produced by an incompatible
@@ -132,6 +132,10 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?;

     if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() {
+        if tcx.sess.opts.debugging_opts.incremental_info {
+            println!("incremental: completely ignoring cache because of \
+                      differing commandline arguments");
+        }
         // We can't reuse the cache, purge it.
         debug!("decode_dep_graph: differing commandline arg hashes");
         for swp in work_products {
@@ -192,7 +196,8 @@
         if tcx.sess.opts.debugging_opts.incremental_info {
             // It'd be nice to pretty-print these paths better than just
             // using the `Debug` impls, but wev.
-            println!("module {:?} is dirty because {:?} changed or was removed",
+            println!("incremental: module {:?} is dirty because {:?} \
+                      changed or was removed",
                      target_node,
                      raw_source_node.map_def(|&index| {
                          Some(directory.def_path_string(tcx, index))
@@ -250,11 +255,24 @@ fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        current_hash);
                 continue;
             }
+
+            if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+                println!("node {:?} is dirty as hash is {:?} was {:?}",
+                         dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
+                         current_hash,
+                         hash.hash);
+            }
+
             debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}",
                    dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(),
                    current_hash,
                    hash.hash);
         } else {
+            if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+                println!("node {:?} is dirty as it was removed",
+                         hash.dep_node);
+            }
+
             debug!("initial_dirty_nodes: {:?} is dirty as it was removed",
                    hash.dep_node);
         }
@@ -277,14 +295,19 @@ fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             debug!("reconcile_work_products: dep-node for {:?} is dirty", swp);
             delete_dirty_work_product(tcx, swp);
         } else {
-            let all_files_exist =
-                swp.work_product
-                   .saved_files
-                   .iter()
-                   .all(|&(_, ref file_name)| {
-                       let path = in_incr_comp_dir_sess(tcx.sess, &file_name);
-                       path.exists()
-                   });
+            let mut all_files_exist = true;
+            for &(_, ref file_name) in swp.work_product.saved_files.iter() {
+                let path = in_incr_comp_dir_sess(tcx.sess, file_name);
+                if !path.exists() {
+                    all_files_exist = false;
+
+                    if tcx.sess.opts.debugging_opts.incremental_info {
+                        println!("incremental: could not find file for up-to-date work product: {}",
+                                 path.display());
+                    }
+                }
+            }
+
             if all_files_exist {
                 debug!("reconcile_work_products: all files for {:?} exist", swp);
                 tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product);
@@ -331,7 +354,7 @@ fn load_prev_metadata_hashes(tcx: TyCtxt,

     debug!("load_prev_metadata_hashes() - File: {}", file_path.display());

-    let data = match file_format::read_file(&file_path) {
+    let data = match file_format::read_file(tcx.sess, &file_path) {
        Ok(Some(data)) => data,
        Ok(None) => {
            debug!("load_prev_metadata_hashes() - File produced by incompatible \

src/librustc_incremental/persist/save.rs (+15)

@@ -159,6 +159,12 @@ pub fn encode_dep_graph(preds: &Predecessors,
         }
     }

+    if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+        for (dep_node, hash) in &preds.hashes {
+            println!("HIR hash for {:?} is {}", dep_node, hash);
+        }
+    }
+
     // Create the serialized dep-graph.
     let graph = SerializedDepGraph {
         edges: edges,
@@ -248,6 +254,15 @@ pub fn encode_metadata_hashes(tcx: TyCtxt,
         let hash = state.finish();

         debug!("save: metadata hash for {:?} is {}", def_id, hash);
+
+        if tcx.sess.opts.debugging_opts.incremental_dump_hash {
+            println!("metadata hash for {:?} is {}", def_id, hash);
+            for dep_node in sources {
+                println!("metadata hash for {:?} depends on {:?} with hash {}",
+                         def_id, dep_node, preds.hashes[dep_node]);
+            }
+        }
+
         serialized_hashes.hashes.push(SerializedMetadataHash {
             def_index: def_id.index,
             hash: hash,

src/librustc_trans/base.rs (+5)

@@ -1981,6 +1981,11 @@ fn trans_reuse_previous_work_products(tcx: TyCtxt,
             debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
             return Some(work_product);
         } else {
+            if tcx.sess.opts.debugging_opts.incremental_info {
+                println!("incremental: CGU `{}` invalidated because of \
+                          changed partitioning hash.",
+                         cgu.name());
+            }
             debug!("trans_reuse_previous_work_products: \
                     not reusing {:?} because hash changed to {:?}",
                    work_product, hash);
