From cb12460467aa98cb82587308341939679ff2a7d1 Mon Sep 17 00:00:00 2001 From: Mara Bos Date: Thu, 30 May 2019 12:49:54 +0200 Subject: [PATCH 01/32] Implement Clone::clone_from for Option. --- src/libcore/option.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 6b7f491effb30..c75ecb059e813 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -145,7 +145,7 @@ use crate::pin::Pin; // which basically means it must be `Option`. /// The `Option` type. See [the module level documentation](index.html) for more. -#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] +#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub enum Option { /// No value @@ -1040,6 +1040,25 @@ fn expect_failed(msg: &str) -> ! { // Trait implementations ///////////////////////////////////////////////////////////////////////////// +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for Option { + #[inline] + fn clone(&self) -> Self { + match self { + Some(x) => Some(x.clone()), + None => None, + } + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + match (self, source) { + (Some(to), Some(from)) => to.clone_from(from), + (to, from) => *to = from.clone(), + } + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl Default for Option { /// Returns [`None`][Option::None]. From 67fd99589ad161fcd154cfdb0f33d349ec23896d Mon Sep 17 00:00:00 2001 From: Mara Bos Date: Thu, 30 May 2019 12:50:06 +0200 Subject: [PATCH 02/32] Implement Clone::clone_from for Result. 
--- src/libcore/result.rs | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/libcore/result.rs b/src/libcore/result.rs index bf8fd63b6446f..8a09877ce1f4b 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -240,7 +240,7 @@ use crate::ops::{self, Deref}; /// /// [`Ok`]: enum.Result.html#variant.Ok /// [`Err`]: enum.Result.html#variant.Err -#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] +#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] #[must_use = "this `Result` may be an `Err` variant, which should be handled"] #[stable(feature = "rust1", since = "1.0.0")] pub enum Result { @@ -1003,6 +1003,27 @@ fn unwrap_failed(msg: &str, error: E) -> ! { // Trait implementations ///////////////////////////////////////////////////////////////////////////// +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for Result { + #[inline] + fn clone(&self) -> Self { + match self { + Ok(x) => Ok(x.clone()), + Err(x) => Err(x.clone()), + } + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + match (self, source) { + (Ok(to), Ok(from)) => to.clone_from(from), + (Err(to), Err(from)) => to.clone_from(from), + (to, from) => *to = from.clone(), + } + } +} + + #[stable(feature = "rust1", since = "1.0.0")] impl IntoIterator for Result { type Item = T; From 5fb099dc786c1bee7116fecb4965d34ad5e0a4a5 Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 10:49:46 +0200 Subject: [PATCH 03/32] use pattern matching for slices destructuring --- src/libsyntax/diagnostics/plugin.rs | 23 +++----- src/libsyntax/parse/mod.rs | 55 ++++++++----------- src/libsyntax_ext/deriving/cmp/ord.rs | 4 +- src/libsyntax_ext/deriving/cmp/partial_eq.rs | 4 +- src/libsyntax_ext/deriving/cmp/partial_ord.rs | 8 +-- src/libsyntax_ext/deriving/hash.rs | 4 +- src/libsyntax_ext/trace_macros.rs | 6 +- 7 files changed, 45 insertions(+), 59 deletions(-) diff --git a/src/libsyntax/diagnostics/plugin.rs 
b/src/libsyntax/diagnostics/plugin.rs index 9f01b9b9f9b58..b958a760e8278 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -33,8 +33,8 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt<'_>, span: Span, token_tree: &[TokenTree]) -> Box { - let code = match (token_tree.len(), token_tree.get(0)) { - (1, Some(&TokenTree::Token(Token { kind: token::Ident(code, _), .. }))) => code, + let code = match token_tree { + &[TokenTree::Token(Token { kind: token::Ident(code, _), .. })] => code, _ => unreachable!() }; @@ -66,22 +66,15 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt<'_>, span: Span, token_tree: &[TokenTree]) -> Box { - let (code, description) = match ( - token_tree.len(), - token_tree.get(0), - token_tree.get(1), - token_tree.get(2) - ) { - (1, Some(&TokenTree::Token(Token { kind: token::Ident(code, _), .. })), None, None) => { + let (code, description) = match token_tree { + &[TokenTree::Token(Token { kind: token::Ident(code, _), .. })] => { (code, None) }, - (3, Some(&TokenTree::Token(Token { kind: token::Ident(code, _), .. })), - Some(&TokenTree::Token(Token { kind: token::Comma, .. })), - Some(&TokenTree::Token(Token { - kind: token::Literal(token::Lit { symbol, .. }), .. - }))) => { + &[TokenTree::Token(Token { kind: token::Ident(code, _), .. }), + TokenTree::Token(Token { kind: token::Comma, .. }), + TokenTree::Token(Token { kind: token::Literal(token::Lit { symbol, .. }), ..})] => { (code, Some(symbol)) - } + }, _ => unreachable!() }; diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 063823bbf4d11..1d5f1001ac962 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -424,47 +424,40 @@ mod tests { string_to_stream("macro_rules! 
zip (($a)=>($a))".to_string()).trees().collect(); let tts: &[TokenTree] = &tts[..]; - match (tts.len(), tts.get(0), tts.get(1), tts.get(2), tts.get(3)) { - ( - 4, - Some(&TokenTree::Token(Token { - kind: token::Ident(name_macro_rules, false), .. - })), - Some(&TokenTree::Token(Token { kind: token::Not, .. })), - Some(&TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. })), - Some(&TokenTree::Delimited(_, macro_delim, ref macro_tts)), - ) + match tts { + &[TokenTree::Token(Token {kind: token::Ident(name_macro_rules, false), ..}), + TokenTree::Token(Token { kind: token::Not, .. }), + TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }), + TokenTree::Delimited(_, macro_delim, ref macro_tts) + ] if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { let tts = ¯o_tts.trees().collect::>(); - match (tts.len(), tts.get(0), tts.get(1), tts.get(2)) { - ( - 3, - Some(&TokenTree::Delimited(_, first_delim, ref first_tts)), - Some(&TokenTree::Token(Token { kind: token::FatArrow, .. })), - Some(&TokenTree::Delimited(_, second_delim, ref second_tts)), - ) + match tts { + &[ + TokenTree::Delimited(_, first_delim, ref first_tts), + TokenTree::Token(Token { kind: token::FatArrow, .. }), + TokenTree::Delimited(_, second_delim, ref second_tts), + ] if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::>(); - match (tts.len(), tts.get(0), tts.get(1)) { - ( - 2, - Some(&TokenTree::Token(Token { kind: token::Dollar, .. })), - Some(&TokenTree::Token(Token { + match tts { + &[ + TokenTree::Token(Token { kind: token::Dollar, .. }), + TokenTree::Token(Token { kind: token::Ident(name, false), .. - })), - ) + }), + ] if first_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), } let tts = &second_tts.trees().collect::>(); - match (tts.len(), tts.get(0), tts.get(1)) { - ( - 2, - Some(&TokenTree::Token(Token { kind: token::Dollar, .. 
})), - Some(&TokenTree::Token(Token { + match tts { + &[ + TokenTree::Token(Token { kind: token::Dollar, .. }), + TokenTree::Token(Token { kind: token::Ident(name, false), .. - })), - ) + }), + ] if second_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 4: {:?} {:?}", second_delim, second_tts), } diff --git a/src/libsyntax_ext/deriving/cmp/ord.rs b/src/libsyntax_ext/deriving/cmp/ord.rs index b25a9e4c50fbe..844865d57c7ad 100644 --- a/src/libsyntax_ext/deriving/cmp/ord.rs +++ b/src/libsyntax_ext/deriving/cmp/ord.rs @@ -82,8 +82,8 @@ pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> P< // } let new = { - let other_f = match (other_fs.len(), other_fs.get(0)) { - (1, Some(o_f)) => o_f, + let other_f = match other_fs { + [o_f] => o_f, _ => cx.span_bug(span, "not exactly 2 arguments in `derive(Ord)`"), }; diff --git a/src/libsyntax_ext/deriving/cmp/partial_eq.rs b/src/libsyntax_ext/deriving/cmp/partial_eq.rs index 6172f27261ecf..732bb234389a0 100644 --- a/src/libsyntax_ext/deriving/cmp/partial_eq.rs +++ b/src/libsyntax_ext/deriving/cmp/partial_eq.rs @@ -25,8 +25,8 @@ pub fn expand_deriving_partial_eq(cx: &mut ExtCtxt<'_>, -> P { let op = |cx: &mut ExtCtxt<'_>, span: Span, self_f: P, other_fs: &[P]| { - let other_f = match (other_fs.len(), other_fs.get(0)) { - (1, Some(o_f)) => o_f, + let other_f = match other_fs { + [o_f] => o_f, _ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialEq)`"), }; diff --git a/src/libsyntax_ext/deriving/cmp/partial_ord.rs b/src/libsyntax_ext/deriving/cmp/partial_ord.rs index 3980741f252dd..a30a7d78222f4 100644 --- a/src/libsyntax_ext/deriving/cmp/partial_ord.rs +++ b/src/libsyntax_ext/deriving/cmp/partial_ord.rs @@ -143,8 +143,8 @@ pub fn cs_partial_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_ // } let new = { - let other_f = match (other_fs.len(), other_fs.get(0)) { - (1, Some(o_f)) => o_f, + let other_f = match other_fs { + [o_f] => o_f, _ => { 
cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`") @@ -193,8 +193,8 @@ fn cs_op(less: bool, }; let par_cmp = |cx: &mut ExtCtxt<'_>, span, self_f: P, other_fs: &[P], default| { - let other_f = match (other_fs.len(), other_fs.get(0)) { - (1, Some(o_f)) => o_f, + let other_f = match other_fs { + [o_f] => o_f, _ => cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`"), }; diff --git a/src/libsyntax_ext/deriving/hash.rs b/src/libsyntax_ext/deriving/hash.rs index e7f99d4578226..7ad04aebf6e2e 100644 --- a/src/libsyntax_ext/deriving/hash.rs +++ b/src/libsyntax_ext/deriving/hash.rs @@ -52,8 +52,8 @@ pub fn expand_deriving_hash(cx: &mut ExtCtxt<'_>, } fn hash_substructure(cx: &mut ExtCtxt<'_>, trait_span: Span, substr: &Substructure<'_>) -> P { - let state_expr = match (substr.nonself_args.len(), substr.nonself_args.get(0)) { - (1, Some(o_f)) => o_f, + let state_expr = match &substr.nonself_args { + &[o_f] => o_f, _ => { cx.span_bug(trait_span, "incorrect number of arguments in `derive(Hash)`") diff --git a/src/libsyntax_ext/trace_macros.rs b/src/libsyntax_ext/trace_macros.rs index 6c74f77ff1fb5..512513e9b414c 100644 --- a/src/libsyntax_ext/trace_macros.rs +++ b/src/libsyntax_ext/trace_macros.rs @@ -16,11 +16,11 @@ pub fn expand_trace_macros(cx: &mut ExtCtxt<'_>, feature_gate::EXPLAIN_TRACE_MACROS); } - match (tt.len(), tt.first()) { - (1, Some(TokenTree::Token(token))) if token.is_keyword(kw::True) => { + match tt { + [TokenTree::Token(token)] if token.is_keyword(kw::True) => { cx.set_trace_macros(true); } - (1, Some(TokenTree::Token(token))) if token.is_keyword(kw::False) => { + [TokenTree::Token(token)] if token.is_keyword(kw::False) => { cx.set_trace_macros(false); } _ => cx.span_err(sp, "trace_macros! 
accepts only `true` or `false`"), From ad91a8e59abca232a3e4449186712c032ab12519 Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 11:38:15 +0200 Subject: [PATCH 04/32] improve style --- src/libsyntax/parse/mod.rs | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 1d5f1001ac962..72573af9c40ab 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -425,27 +425,26 @@ mod tests { let tts: &[TokenTree] = &tts[..]; match tts { - &[TokenTree::Token(Token {kind: token::Ident(name_macro_rules, false), ..}), - TokenTree::Token(Token { kind: token::Not, .. }), - TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }), - TokenTree::Delimited(_, macro_delim, ref macro_tts) + &[ + TokenTree::Token(Token {kind: token::Ident(name_macro_rules, false), ..}), + TokenTree::Token(Token {kind: token::Not, ..}), + TokenTree::Token(Token {kind: token::Ident(name_zip, false), ..}), + TokenTree::Delimited(_, macro_delim, ref macro_tts) ] if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { let tts = ¯o_tts.trees().collect::>(); match tts { &[ TokenTree::Delimited(_, first_delim, ref first_tts), - TokenTree::Token(Token { kind: token::FatArrow, .. }), + TokenTree::Token(Token {kind: token::FatArrow, ..}), TokenTree::Delimited(_, second_delim, ref second_tts), ] if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::>(); match tts { &[ - TokenTree::Token(Token { kind: token::Dollar, .. }), - TokenTree::Token(Token { - kind: token::Ident(name, false), .. 
- }), + TokenTree::Token(Token {kind: token::Dollar, ..}), + TokenTree::Token(Token {kind: token::Ident(name, false), ..}), ] if first_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), @@ -453,10 +452,8 @@ mod tests { let tts = &second_tts.trees().collect::>(); match tts { &[ - TokenTree::Token(Token { kind: token::Dollar, .. }), - TokenTree::Token(Token { - kind: token::Ident(name, false), .. - }), + TokenTree::Token(Token {kind: token::Dollar, ..}), + TokenTree::Token(Token {kind: token::Ident(name, false), ..}), ] if second_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 4: {:?} {:?}", second_delim, second_tts), From 4123b5d796345bb01f0f40b2e28e6c194371fabe Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 12:18:13 +0200 Subject: [PATCH 05/32] fix bad style for structs --- src/libsyntax/diagnostics/plugin.rs | 16 +++++++++++----- src/libsyntax/parse/mod.rs | 16 ++++++++-------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs index b958a760e8278..b4cf24f5ac519 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -34,7 +34,9 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt<'_>, token_tree: &[TokenTree]) -> Box { let code = match token_tree { - &[TokenTree::Token(Token { kind: token::Ident(code, _), .. })] => code, + &[ + TokenTree::Token(Token { kind: token::Ident(code, _), .. }) + ] => code, _ => unreachable!() }; @@ -67,12 +69,16 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt<'_>, token_tree: &[TokenTree]) -> Box { let (code, description) = match token_tree { - &[TokenTree::Token(Token { kind: token::Ident(code, _), .. })] => { + &[ + TokenTree::Token(Token { kind: token::Ident(code, _), .. }) + ] => { (code, None) }, - &[TokenTree::Token(Token { kind: token::Ident(code, _), .. 
}), - TokenTree::Token(Token { kind: token::Comma, .. }), - TokenTree::Token(Token { kind: token::Literal(token::Lit { symbol, .. }), ..})] => { + &[ + TokenTree::Token(Token { kind: token::Ident(code, _), .. }), + TokenTree::Token(Token { kind: token::Comma, .. }), + TokenTree::Token(Token { kind: token::Literal(token::Lit { symbol, .. }), ..}) + ] => { (code, Some(symbol)) }, _ => unreachable!() diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 72573af9c40ab..025972d8ca78b 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -426,9 +426,9 @@ mod tests { match tts { &[ - TokenTree::Token(Token {kind: token::Ident(name_macro_rules, false), ..}), - TokenTree::Token(Token {kind: token::Not, ..}), - TokenTree::Token(Token {kind: token::Ident(name_zip, false), ..}), + TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }), + TokenTree::Token(Token { kind: token::Not, .. }), + TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }), TokenTree::Delimited(_, macro_delim, ref macro_tts) ] if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { @@ -436,15 +436,15 @@ mod tests { match tts { &[ TokenTree::Delimited(_, first_delim, ref first_tts), - TokenTree::Token(Token {kind: token::FatArrow, ..}), + TokenTree::Token(Token { kind: token::FatArrow, .. }), TokenTree::Delimited(_, second_delim, ref second_tts), ] if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::>(); match tts { &[ - TokenTree::Token(Token {kind: token::Dollar, ..}), - TokenTree::Token(Token {kind: token::Ident(name, false), ..}), + TokenTree::Token(Token { kind: token::Dollar, .. }), + TokenTree::Token(Token { kind: token::Ident(name, false), .. 
}), ] if first_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), @@ -452,8 +452,8 @@ mod tests { let tts = &second_tts.trees().collect::>(); match tts { &[ - TokenTree::Token(Token {kind: token::Dollar, ..}), - TokenTree::Token(Token {kind: token::Ident(name, false), ..}), + TokenTree::Token(Token { kind: token::Dollar, .. }), + TokenTree::Token(Token { kind: token::Ident(name, false), .. }), ] if second_delim == token::Paren && name.as_str() == "a" => {}, _ => panic!("value 4: {:?} {:?}", second_delim, second_tts), From dd442a1fcfba98be5454c2ca6f56bce098e458ed Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 13:29:43 +0200 Subject: [PATCH 06/32] use default binding mode in match clauses --- src/libsyntax/diagnostics/plugin.rs | 10 +++++----- src/libsyntax/parse/mod.rs | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs index b4cf24f5ac519..98351048c3526 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -34,7 +34,7 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt<'_>, token_tree: &[TokenTree]) -> Box { let code = match token_tree { - &[ + [ TokenTree::Token(Token { kind: token::Ident(code, _), .. }) ] => code, _ => unreachable!() @@ -69,17 +69,17 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt<'_>, token_tree: &[TokenTree]) -> Box { let (code, description) = match token_tree { - &[ + [ TokenTree::Token(Token { kind: token::Ident(code, _), .. }) ] => { - (code, None) + (*code, None) }, - &[ + [ TokenTree::Token(Token { kind: token::Ident(code, _), .. }), TokenTree::Token(Token { kind: token::Comma, .. }), TokenTree::Token(Token { kind: token::Literal(token::Lit { symbol, .. 
}), ..}) ] => { - (code, Some(symbol)) + (*code, Some(*symbol)) }, _ => unreachable!() }; diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 025972d8ca78b..b912bf8295ae1 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -434,15 +434,15 @@ mod tests { if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { let tts = ¯o_tts.trees().collect::>(); match tts { - &[ - TokenTree::Delimited(_, first_delim, ref first_tts), + [ + TokenTree::Delimited(_, first_delim, first_tts), TokenTree::Token(Token { kind: token::FatArrow, .. }), - TokenTree::Delimited(_, second_delim, ref second_tts), + TokenTree::Delimited(_, second_delim, second_tts), ] if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::>(); match tts { - &[ + [ TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), ] @@ -451,7 +451,7 @@ mod tests { } let tts = &second_tts.trees().collect::>(); match tts { - &[ + [ TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), ] From 4c242a948c9f109c25c1ae54102f7a66271fa2ca Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 16:21:15 +0200 Subject: [PATCH 07/32] cast vec to slices --- src/libsyntax/parse/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index b912bf8295ae1..ebe5e70d1a7a4 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -425,15 +425,15 @@ mod tests { let tts: &[TokenTree] = &tts[..]; match tts { - &[ + [ TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }), TokenTree::Token(Token { kind: token::Not, .. }), TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. 
}), - TokenTree::Delimited(_, macro_delim, ref macro_tts) + TokenTree::Delimited(_, macro_delim, macro_tts) ] if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { let tts = ¯o_tts.trees().collect::>(); - match tts { + match &tts[..] { [ TokenTree::Delimited(_, first_delim, first_tts), TokenTree::Token(Token { kind: token::FatArrow, .. }), @@ -441,7 +441,7 @@ mod tests { ] if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::>(); - match tts { + match &tts[..] { [ TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), @@ -450,7 +450,7 @@ mod tests { _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), } let tts = &second_tts.trees().collect::>(); - match tts { + match &tts[..] { [ TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), From 0a4504d400805b2d102a433af98cc33f185fdcf2 Mon Sep 17 00:00:00 2001 From: Cedric Date: Sat, 8 Jun 2019 20:43:24 +0200 Subject: [PATCH 08/32] fix libsyntax test --- src/libsyntax/parse/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index ebe5e70d1a7a4..19dd8ebd0879a 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -431,7 +431,7 @@ mod tests { TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }), TokenTree::Delimited(_, macro_delim, macro_tts) ] - if name_macro_rules == sym::macro_rules && name_zip.as_str() == "zip" => { + if name_macro_rules == &sym::macro_rules && name_zip.as_str() == "zip" => { let tts = ¯o_tts.trees().collect::>(); match &tts[..] { [ @@ -439,14 +439,14 @@ mod tests { TokenTree::Token(Token { kind: token::FatArrow, .. }), TokenTree::Delimited(_, second_delim, second_tts), ] - if macro_delim == token::Paren => { + if macro_delim == &token::Paren => { let tts = &first_tts.trees().collect::>(); match &tts[..] 
{ [ TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), ] - if first_delim == token::Paren && name.as_str() == "a" => {}, + if first_delim == &token::Paren && name.as_str() == "a" => {}, _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), } let tts = &second_tts.trees().collect::>(); @@ -455,7 +455,7 @@ mod tests { TokenTree::Token(Token { kind: token::Dollar, .. }), TokenTree::Token(Token { kind: token::Ident(name, false), .. }), ] - if second_delim == token::Paren && name.as_str() == "a" => {}, + if second_delim == &token::Paren && name.as_str() == "a" => {}, _ => panic!("value 4: {:?} {:?}", second_delim, second_tts), } }, From 26d4c8f01c07fdb3b0c0354dd9b509c955a87e9c Mon Sep 17 00:00:00 2001 From: Adrian Friedli Date: Sat, 8 Jun 2019 22:30:45 +0200 Subject: [PATCH 09/32] implement nth_back for Range --- src/libcore/iter/range.rs | 13 +++++++++++++ src/libcore/tests/iter.rs | 17 +++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index 6bbf776fb8f17..e171108a146a6 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -281,6 +281,19 @@ impl DoubleEndedIterator for ops::Range { None } } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + if let Some(minus_n) = self.end.sub_usize(n) { + if minus_n > self.start { + self.end = minus_n.sub_one(); + return Some(self.end.clone()) + } + } + + self.end = self.start.clone(); + None + } } #[stable(feature = "fused", since = "1.26.0")] diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index bedb9e756129c..171a33695bc6a 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -1657,6 +1657,23 @@ fn test_range_nth() { assert_eq!(r, 20..20); } +#[test] +fn test_range_nth_back() { + assert_eq!((10..15).nth_back(0), Some(14)); + assert_eq!((10..15).nth_back(1), Some(13)); + assert_eq!((10..15).nth_back(4), Some(10)); + 
assert_eq!((10..15).nth_back(5), None); + assert_eq!((-120..80_i8).nth_back(199), Some(-120)); + + let mut r = 10..20; + assert_eq!(r.nth_back(2), Some(17)); + assert_eq!(r, 10..17); + assert_eq!(r.nth_back(2), Some(14)); + assert_eq!(r, 10..14); + assert_eq!(r.nth_back(10), None); + assert_eq!(r, 10..10); +} + #[test] fn test_range_from_nth() { assert_eq!((10..).nth(0), Some(10)); From 20efb19043335587e27a1c11bfeb596611e4bef7 Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Thu, 6 Jun 2019 08:41:31 -0600 Subject: [PATCH 10/32] Make a few methods private --- src/librustc/traits/on_unimplemented.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc/traits/on_unimplemented.rs b/src/librustc/traits/on_unimplemented.rs index 1c17ace90c2fb..14b968c83b2ea 100644 --- a/src/librustc/traits/on_unimplemented.rs +++ b/src/librustc/traits/on_unimplemented.rs @@ -52,7 +52,7 @@ fn parse_error(tcx: TyCtxt<'_, '_, '_>, span: Span, } impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { - pub fn parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, + fn parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_def_id: DefId, items: &[NestedMetaItem], span: Span, @@ -215,7 +215,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { } impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { - pub fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, + fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_def_id: DefId, from: LocalInternedString, err_sp: Span) From 7795b155e04dcf245f240f4486485b1099fb4a99 Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Tue, 4 Jun 2019 08:57:40 -0600 Subject: [PATCH 11/32] Inline raw method --- src/libfmt_macros/lib.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 6fed83021609d..d5ce389ed5f7f 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -302,22 +302,19 @@ impl<'a> Parser<'a> { } } - fn raw(&self) -> usize { - self.style.map(|raw| raw + 1).unwrap_or(0) - } - fn 
to_span_index(&self, pos: usize) -> SpanIndex { let mut pos = pos; + let raw = self.style.map(|raw| raw + 1).unwrap_or(0); for skip in &self.skips { if pos > *skip { pos += 1; - } else if pos == *skip && self.raw() == 0 { + } else if pos == *skip && raw == 0 { pos += 1; } else { break; } } - SpanIndex(self.raw() + pos + 1) + SpanIndex(raw + pos + 1) } /// Forces consumption of the specified character. If the character is not From dc13072b7b7143fe98926632b9b89e7ffff2cdb7 Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Mon, 3 Jun 2019 20:47:42 -0600 Subject: [PATCH 12/32] Use Symbol for named arguments in fmt_macros --- Cargo.lock | 3 ++ src/libfmt_macros/Cargo.toml | 3 ++ src/libfmt_macros/lib.rs | 37 +++++++++++++++---------- src/librustc/traits/error_reporting.rs | 32 ++++++++++----------- src/librustc/traits/on_unimplemented.rs | 34 +++++++++++------------ src/libsyntax_ext/format.rs | 25 ++++++++--------- src/libsyntax_pos/symbol.rs | 6 ++++ 7 files changed, 79 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f35b7344dfed..73605af7936f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -910,6 +910,9 @@ dependencies = [ [[package]] name = "fmt_macros" version = "0.0.0" +dependencies = [ + "syntax_pos 0.0.0", +] [[package]] name = "fnv" diff --git a/src/libfmt_macros/Cargo.toml b/src/libfmt_macros/Cargo.toml index 50779a2d9ad08..fc32f21ec4e0a 100644 --- a/src/libfmt_macros/Cargo.toml +++ b/src/libfmt_macros/Cargo.toml @@ -8,3 +8,6 @@ edition = "2018" name = "fmt_macros" path = "lib.rs" crate-type = ["dylib"] + +[dependencies] +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index d5ce389ed5f7f..9d6720810b818 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -24,6 +24,8 @@ use std::str; use std::string; use std::iter; +use syntax_pos::Symbol; + /// A piece is a portion of the format string which represents the next part /// to emit. 
These are emitted as a stream by the `Parser` class. #[derive(Copy, Clone, PartialEq)] @@ -39,7 +41,7 @@ pub enum Piece<'a> { #[derive(Copy, Clone, PartialEq)] pub struct Argument<'a> { /// Where to find this argument - pub position: Position<'a>, + pub position: Position, /// How to format the argument pub format: FormatSpec<'a>, } @@ -54,9 +56,9 @@ pub struct FormatSpec<'a> { /// Packed version of various flags provided pub flags: u32, /// The integer precision to use - pub precision: Count<'a>, + pub precision: Count, /// The string width requested for the resulting format - pub width: Count<'a>, + pub width: Count, /// The descriptor string representing the name of the format desired for /// this argument, this can be empty or any number of characters, although /// it is required to be one word. @@ -65,16 +67,16 @@ pub struct FormatSpec<'a> { /// Enum describing where an argument for a format can be located. #[derive(Copy, Clone, PartialEq)] -pub enum Position<'a> { +pub enum Position { /// The argument is implied to be located at an index ArgumentImplicitlyIs(usize), /// The argument is located at a specific index given in the format ArgumentIs(usize), /// The argument has a name. - ArgumentNamed(&'a str), + ArgumentNamed(Symbol), } -impl Position<'_> { +impl Position { pub fn index(&self) -> Option { match self { ArgumentIs(i) | ArgumentImplicitlyIs(i) => Some(*i), @@ -119,11 +121,11 @@ pub enum Flag { /// A count is used for the precision and width parameters of an integer, and /// can reference either an argument or a literal integer. #[derive(Copy, Clone, PartialEq)] -pub enum Count<'a> { +pub enum Count { /// The count is specified explicitly. CountIs(usize), /// The count is specified by the argument with the given name. - CountIsName(&'a str), + CountIsName(Symbol), /// The count is specified by the argument at the given index. CountIsParam(usize), /// The count is implied and cannot be explicitly specified. 
@@ -431,12 +433,14 @@ impl<'a> Parser<'a> { /// integer index of an argument, a named argument, or a blank string. /// Returns `Some(parsed_position)` if the position is not implicitly /// consuming a macro argument, `None` if it's the case. - fn position(&mut self) -> Option> { + fn position(&mut self) -> Option { if let Some(i) = self.integer() { Some(ArgumentIs(i)) } else { match self.cur.peek() { - Some(&(_, c)) if c.is_alphabetic() => Some(ArgumentNamed(self.word())), + Some(&(_, c)) if c.is_alphabetic() => { + Some(ArgumentNamed(Symbol::intern(self.word()))) + } Some(&(pos, c)) if c == '_' => { let invalid_name = self.string(pos); self.err_with_note(format!("invalid argument name `{}`", invalid_name), @@ -444,7 +448,7 @@ impl<'a> Parser<'a> { "argument names cannot start with an underscore", self.to_span_index(pos), self.to_span_index(pos + invalid_name.len())); - Some(ArgumentNamed(invalid_name)) + Some(ArgumentNamed(Symbol::intern(invalid_name))) }, // This is an `ArgumentNext`. @@ -552,7 +556,7 @@ impl<'a> Parser<'a> { /// Parses a Count parameter at the current position. This does not check /// for 'CountIsNextParam' because that is only used in precision, not /// width. 
- fn count(&mut self) -> Count<'a> { + fn count(&mut self) -> Count { if let Some(i) = self.integer() { if self.consume('$') { CountIsParam(i) @@ -566,7 +570,7 @@ impl<'a> Parser<'a> { self.cur = tmp; CountImplied } else if self.consume('$') { - CountIsName(word) + CountIsName(Symbol::intern(word)) } else { self.cur = tmp; CountImplied @@ -756,6 +760,8 @@ mod tests { } #[test] fn format_counts() { + use syntax_pos::{GLOBALS, Globals, edition}; + GLOBALS.set(&Globals::new(edition::DEFAULT_EDITION), || { same("{:10s}", &[NextArgument(Argument { position: ArgumentImplicitlyIs(0), @@ -811,11 +817,12 @@ mod tests { fill: None, align: AlignUnknown, flags: 0, - precision: CountIsName("b"), - width: CountIsName("a"), + precision: CountIsName(Symbol::intern("b")), + width: CountIsName(Symbol::intern("a")), ty: "s", }, })]); + }); } #[test] fn format_flags() { diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index 490501bde73e5..50d2eeef421c1 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -353,7 +353,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { _ => { // this is a "direct", user-specified, rather than derived, // obligation. - flags.push(("direct".to_owned(), None)); + flags.push((sym::direct, None)); } } @@ -365,27 +365,27 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Currently I'm leaving it for what I need for `try`. 
if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) { let method = self.tcx.item_name(item); - flags.push(("from_method".to_owned(), None)); - flags.push(("from_method".to_owned(), Some(method.to_string()))); + flags.push((sym::from_method, None)); + flags.push((sym::from_method, Some(method.to_string()))); } } if let Some(t) = self.get_parent_trait_ref(&obligation.cause.code) { - flags.push(("parent_trait".to_owned(), Some(t))); + flags.push((sym::parent_trait, Some(t))); } if let Some(k) = obligation.cause.span.compiler_desugaring_kind() { - flags.push(("from_desugaring".to_owned(), None)); - flags.push(("from_desugaring".to_owned(), Some(k.name().to_string()))); + flags.push((sym::from_desugaring, None)); + flags.push((sym::from_desugaring, Some(k.name().to_string()))); } let generics = self.tcx.generics_of(def_id); let self_ty = trait_ref.self_ty(); // This is also included through the generics list as `Self`, // but the parser won't allow you to use it - flags.push(("_Self".to_owned(), Some(self_ty.to_string()))); + flags.push((sym::_Self, Some(self_ty.to_string()))); if let Some(def) = self_ty.ty_adt_def() { // We also want to be able to select self's original // signature with no type arguments resolved - flags.push(("_Self".to_owned(), Some(self.tcx.type_of(def.did).to_string()))); + flags.push((sym::_Self, Some(self.tcx.type_of(def.did).to_string()))); } for param in generics.params.iter() { @@ -396,38 +396,38 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }, GenericParamDefKind::Lifetime => continue, }; - let name = param.name.to_string(); + let name = param.name.as_symbol(); flags.push((name, Some(value))); } if let Some(true) = self_ty.ty_adt_def().map(|def| def.did.is_local()) { - flags.push(("crate_local".to_owned(), None)); + flags.push((sym::crate_local, None)); } // Allow targeting all integers using `{integral}`, even if the exact type was resolved if self_ty.is_integral() { - flags.push(("_Self".to_owned(), 
Some("{integral}".to_owned()))); + flags.push((sym::_Self, Some("{integral}".to_owned()))); } if let ty::Array(aty, len) = self_ty.sty { - flags.push(("_Self".to_owned(), Some("[]".to_owned()))); - flags.push(("_Self".to_owned(), Some(format!("[{}]", aty)))); + flags.push((sym::_Self, Some("[]".to_owned()))); + flags.push((sym::_Self, Some(format!("[{}]", aty)))); if let Some(def) = aty.ty_adt_def() { // We also want to be able to select the array's type's original // signature with no type arguments resolved flags.push(( - "_Self".to_owned(), + sym::_Self, Some(format!("[{}]", self.tcx.type_of(def.did).to_string())), )); let tcx = self.tcx; if let Some(len) = len.assert_usize(tcx) { flags.push(( - "_Self".to_owned(), + sym::_Self, Some(format!("[{}; {}]", self.tcx.type_of(def.did).to_string(), len)), )); } else { flags.push(( - "_Self".to_owned(), + sym::_Self, Some(format!("[{}; _]", self.tcx.type_of(def.did).to_string())), )); } diff --git a/src/librustc/traits/on_unimplemented.rs b/src/librustc/traits/on_unimplemented.rs index 14b968c83b2ea..b78396c90dc65 100644 --- a/src/librustc/traits/on_unimplemented.rs +++ b/src/librustc/traits/on_unimplemented.rs @@ -7,7 +7,7 @@ use crate::util::nodemap::FxHashMap; use syntax::ast::{MetaItem, NestedMetaItem}; use syntax::attr; -use syntax::symbol::sym; +use syntax::symbol::{Symbol, kw, sym}; use syntax_pos::Span; use syntax_pos::symbol::LocalInternedString; @@ -167,7 +167,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { pub fn evaluate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_ref: ty::TraitRef<'tcx>, - options: &[(String, Option)]) + options: &[(Symbol, Option)]) -> OnUnimplementedNote { let mut message = None; @@ -180,7 +180,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { if !attr::eval_condition(condition, &tcx.sess.parse_sess, &mut |c| { c.ident().map_or(false, |ident| { options.contains(&( - ident.to_string(), + ident.name, c.value_str().map(|s| s.as_str().to_string()) )) }) @@ -203,8 +203,8 @@ impl<'a, 
'gcx, 'tcx> OnUnimplementedDirective { } } - let options: FxHashMap = options.into_iter() - .filter_map(|(k, v)| v.as_ref().map(|v| (k.to_owned(), v.to_owned()))) + let options: FxHashMap = options.into_iter() + .filter_map(|(k, v)| v.as_ref().map(|v| (*k, v.to_owned()))) .collect(); OnUnimplementedNote { label: label.map(|l| l.format(tcx, trait_ref, &options)), @@ -241,16 +241,16 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { Piece::String(_) => (), // Normal string, no need to check it Piece::NextArgument(a) => match a.position { // `{Self}` is allowed - Position::ArgumentNamed(s) if s == "Self" => (), + Position::ArgumentNamed(s) if s == kw::SelfUpper => (), // `{ThisTraitsName}` is allowed - Position::ArgumentNamed(s) if s == name.as_str() => (), + Position::ArgumentNamed(s) if s == name => (), // `{from_method}` is allowed - Position::ArgumentNamed(s) if s == "from_method" => (), + Position::ArgumentNamed(s) if s == sym::from_method => (), // `{from_desugaring}` is allowed - Position::ArgumentNamed(s) if s == "from_desugaring" => (), + Position::ArgumentNamed(s) if s == sym::from_desugaring => (), // So is `{A}` if A is a type parameter Position::ArgumentNamed(s) => match generics.params.iter().find(|param| { - param.name.as_str() == s + param.name.as_symbol() == s }) { Some(_) => (), None => { @@ -276,7 +276,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { &self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_ref: ty::TraitRef<'tcx>, - options: &FxHashMap, + options: &FxHashMap, ) -> String { let name = tcx.item_name(trait_ref.def_id); let trait_str = tcx.def_path_str(trait_ref.def_id); @@ -289,9 +289,9 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { }, GenericParamDefKind::Lifetime => return None }; - let name = param.name.to_string(); + let name = param.name.as_symbol(); Some((name, value)) - }).collect::>(); + }).collect::>(); let empty_string = String::new(); let parser = Parser::new(&self.0, None, vec![], false); @@ -299,15 +299,15 @@ impl<'a, 
'gcx, 'tcx> OnUnimplementedFormatString { match p { Piece::String(s) => s, Piece::NextArgument(a) => match a.position { - Position::ArgumentNamed(s) => match generic_map.get(s) { + Position::ArgumentNamed(s) => match generic_map.get(&s) { Some(val) => val, - None if s == name.as_str() => { + None if s == name => { &trait_str } None => { - if let Some(val) = options.get(s) { + if let Some(val) = options.get(&s) { val - } else if s == "from_desugaring" || s == "from_method" { + } else if s == sym::from_desugaring || s == sym::from_method { // don't break messages using these two arguments incorrectly &empty_string } else { diff --git a/src/libsyntax_ext/format.rs b/src/libsyntax_ext/format.rs index 377164728f42a..2863daa13a99c 100644 --- a/src/libsyntax_ext/format.rs +++ b/src/libsyntax_ext/format.rs @@ -28,7 +28,7 @@ enum ArgumentType { enum Position { Exact(usize), - Named(String), + Named(Symbol), } struct Context<'a, 'b: 'a> { @@ -57,7 +57,7 @@ struct Context<'a, 'b: 'a> { /// Unique format specs seen for each argument. arg_unique_types: Vec>, /// Map from named arguments to their resolved indices. - names: FxHashMap, + names: FxHashMap, /// The latest consecutive literal strings, or empty if there weren't any. 
literal: String, @@ -127,9 +127,9 @@ fn parse_args<'a>( ecx: &mut ExtCtxt<'a>, sp: Span, tts: &[tokenstream::TokenTree] -) -> Result<(P, Vec>, FxHashMap), DiagnosticBuilder<'a>> { +) -> Result<(P, Vec>, FxHashMap), DiagnosticBuilder<'a>> { let mut args = Vec::>::new(); - let mut names = FxHashMap::::default(); + let mut names = FxHashMap::::default(); let mut p = ecx.new_parser_from_tts(tts); @@ -158,11 +158,10 @@ fn parse_args<'a>( "expected ident, positional arguments cannot follow named arguments", )); }; - let name: &str = &name.as_str(); p.expect(&token::Eq)?; let e = p.parse_expr()?; - if let Some(prev) = names.get(name) { + if let Some(prev) = names.get(&name) { ecx.struct_span_err(e.span, &format!("duplicate argument named `{}`", name)) .span_note(args[*prev].span, "previously here") .emit(); @@ -174,7 +173,7 @@ fn parse_args<'a>( // if the input is valid, we can simply append to the positional // args. And remember the names. let slot = args.len(); - names.insert(name.to_string(), slot); + names.insert(name, slot); args.push(e); } else { let e = p.parse_expr()?; @@ -188,7 +187,7 @@ impl<'a, 'b> Context<'a, 'b> { fn resolve_name_inplace(&self, p: &mut parse::Piece<'_>) { // NOTE: the `unwrap_or` branch is needed in case of invalid format // arguments, e.g., `format_args!("{foo}")`. - let lookup = |s| *self.names.get(s).unwrap_or(&0); + let lookup = |s: Symbol| *self.names.get(&s).unwrap_or(&0); match *p { parse::String(_) => {} @@ -222,7 +221,7 @@ impl<'a, 'b> Context<'a, 'b> { // it's written second, so it should come after width/precision. 
let pos = match arg.position { parse::ArgumentIs(i) | parse::ArgumentImplicitlyIs(i) => Exact(i), - parse::ArgumentNamed(s) => Named(s.to_string()), + parse::ArgumentNamed(s) => Named(s), }; let ty = Placeholder(arg.format.ty.to_string()); @@ -232,7 +231,7 @@ impl<'a, 'b> Context<'a, 'b> { } } - fn verify_count(&mut self, c: parse::Count<'_>) { + fn verify_count(&mut self, c: parse::Count) { match c { parse::CountImplied | parse::CountIs(..) => {} @@ -240,7 +239,7 @@ impl<'a, 'b> Context<'a, 'b> { self.verify_arg_type(Exact(i), Count); } parse::CountIsName(s) => { - self.verify_arg_type(Named(s.to_string()), Count); + self.verify_arg_type(Named(s), Count); } } } @@ -390,7 +389,7 @@ impl<'a, 'b> Context<'a, 'b> { ecx.std_path(&[sym::fmt, sym::rt, sym::v1, Symbol::intern(s)]) } - fn build_count(&self, c: parse::Count<'_>) -> P { + fn build_count(&self, c: parse::Count) -> P { let sp = self.macsp; let count = |c, arg| { let mut path = Context::rtpath(self.ecx, "Count"); @@ -739,7 +738,7 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, sp: Span, efmt: P, args: Vec>, - names: FxHashMap, + names: FxHashMap, append_newline: bool) -> P { // NOTE: this verbose way of initializing `Vec>` is because diff --git a/src/libsyntax_pos/symbol.rs b/src/libsyntax_pos/symbol.rs index 302b3c75263cf..95d74ce054ef9 100644 --- a/src/libsyntax_pos/symbol.rs +++ b/src/libsyntax_pos/symbol.rs @@ -203,6 +203,7 @@ symbols! { core_intrinsics, crate_id, crate_in_paths, + crate_local, crate_name, crate_type, crate_visibility_modifier, @@ -221,6 +222,7 @@ symbols! { deref, deref_mut, derive, + direct, doc, doc_alias, doc_cfg, @@ -278,8 +280,10 @@ symbols! { format_args_nl, from, From, + from_desugaring, from_error, from_generator, + from_method, from_ok, from_usize, fundamental, @@ -443,6 +447,7 @@ symbols! { panic_impl, panic_implementation, panic_runtime, + parent_trait, partial_cmp, PartialOrd, passes, @@ -569,6 +574,7 @@ symbols! 
{ __rust_unstable_column, rvalue_static_promotion, sanitizer_runtime, + _Self, self_in_typedefs, self_struct_ctor, Send, From a8594400927c7363d24625ea1521b89c344ec03d Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Thu, 6 Jun 2019 16:49:51 -0600 Subject: [PATCH 13/32] Shift padding out of suggestions for format strings --- src/libsyntax_ext/format.rs | 8 ++++---- src/libsyntax_ext/format_foreign.rs | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/libsyntax_ext/format.rs b/src/libsyntax_ext/format.rs index 2863daa13a99c..1fdc2e6274a2d 100644 --- a/src/libsyntax_ext/format.rs +++ b/src/libsyntax_ext/format.rs @@ -1043,7 +1043,9 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, let mut show_doc_note = false; let mut suggestions = vec![]; - for sub in foreign::$kind::iter_subs(fmt_str) { + // account for `"` and account for raw strings `r#` + let padding = str_style.map(|i| i + 2).unwrap_or(1); + for sub in foreign::$kind::iter_subs(fmt_str, padding) { let trn = match sub.translate() { Some(trn) => trn, @@ -1064,9 +1066,7 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, } if let Some((start, end)) = pos { - // account for `"` and account for raw strings `r#` - let padding = str_style.map(|i| i + 2).unwrap_or(1); - let sp = fmt_sp.from_inner_byte_pos(start + padding, end + padding); + let sp = fmt_sp.from_inner_byte_pos(start, end); suggestions.push((sp, trn)); } else { diag.help(&format!("`{}` should be written as `{}`", sub, trn)); diff --git a/src/libsyntax_ext/format_foreign.rs b/src/libsyntax_ext/format_foreign.rs index 261b2f373cefd..b279dbced847c 100644 --- a/src/libsyntax_ext/format_foreign.rs +++ b/src/libsyntax_ext/format_foreign.rs @@ -263,10 +263,10 @@ pub mod printf { } /// Returns an iterator over all substitutions in a given string. 
- pub fn iter_subs(s: &str) -> Substitutions<'_> { + pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> { Substitutions { s, - pos: 0, + pos: start_pos, } } @@ -711,7 +711,7 @@ pub mod printf { #[test] fn test_iter() { let s = "The %d'th word %% is: `%.*s` %!\n"; - let subs: Vec<_> = iter_subs(s).map(|sub| sub.translate()).collect(); + let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate()).collect(); assert_eq!( subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::>(), vec![Some("{}"), None, Some("{:.*}"), None] @@ -804,10 +804,10 @@ pub mod shell { } /// Returns an iterator over all substitutions in a given string. - pub fn iter_subs(s: &str) -> Substitutions<'_> { + pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> { Substitutions { s, - pos: 0, + pos: start_pos, } } @@ -940,7 +940,7 @@ pub mod shell { fn test_iter() { use super::iter_subs; let s = "The $0'th word $$ is: `$WORD` $!\n"; - let subs: Vec<_> = iter_subs(s).map(|sub| sub.translate()).collect(); + let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate()).collect(); assert_eq!( subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::>(), vec![Some("{0}"), None, Some("{WORD}")] From b1c357e0c366f5fb865151a9dd144413b4bf6911 Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Tue, 4 Jun 2019 09:03:43 -0600 Subject: [PATCH 14/32] Introduce InnerSpan abstraction This should be used when trying to get at subsets of a larger span, especially when the larger span is not available in the code attempting to work with those subsets (especially common in the fmt_macros crate). This is usually a good replacement for (BytePos, BytePos) and (usize, usize) tuples. This commit also removes from_inner_byte_pos, since it took usize arguments, which is error prone. 
--- src/libfmt_macros/lib.rs | 89 +++++++++---------- .../passes/check_code_block_syntax.rs | 4 +- src/librustdoc/passes/mod.rs | 6 +- src/libsyntax_ext/format.rs | 14 ++- src/libsyntax_ext/format_foreign.rs | 26 +++--- src/libsyntax_pos/lib.rs | 18 +++- 6 files changed, 82 insertions(+), 75 deletions(-) diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 9d6720810b818..66355801b6c54 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -24,7 +24,16 @@ use std::str; use std::string; use std::iter; -use syntax_pos::Symbol; +use syntax_pos::{InnerSpan, Symbol}; + +#[derive(Copy, Clone)] +struct InnerOffset(usize); + +impl InnerOffset { + fn to(self, end: InnerOffset) -> InnerSpan { + InnerSpan::new(self.0, end.0) + } +} /// A piece is a portion of the format string which represents the next part /// to emit. These are emitted as a stream by the `Parser` class. @@ -136,9 +145,8 @@ pub struct ParseError { pub description: string::String, pub note: Option, pub label: string::String, - pub start: SpanIndex, - pub end: SpanIndex, - pub secondary_label: Option<(string::String, SpanIndex, SpanIndex)>, + pub span: InnerSpan, + pub secondary_label: Option<(string::String, InnerSpan)>, } /// The parser structure for interpreting the input format string. 
This is @@ -157,24 +165,15 @@ pub struct Parser<'a> { /// `Some(raw count)` when the string is "raw", used to position spans correctly style: Option, /// Start and end byte offset of every successfully parsed argument - pub arg_places: Vec<(SpanIndex, SpanIndex)>, + pub arg_places: Vec, /// Characters that need to be shifted skips: Vec, - /// Span offset of the last opening brace seen, used for error reporting - last_opening_brace_pos: Option, + /// Span of the last opening brace seen, used for error reporting + last_opening_brace: Option, /// Wether the source string is comes from `println!` as opposed to `format!` or `print!` append_newline: bool, } -#[derive(Clone, Copy, Debug)] -pub struct SpanIndex(pub usize); - -impl SpanIndex { - pub fn unwrap(self) -> usize { - self.0 - } -} - impl<'a> Iterator for Parser<'a> { type Item = Piece<'a>; @@ -182,19 +181,20 @@ impl<'a> Iterator for Parser<'a> { if let Some(&(pos, c)) = self.cur.peek() { match c { '{' => { - let curr_last_brace = self.last_opening_brace_pos; - self.last_opening_brace_pos = Some(self.to_span_index(pos)); + let curr_last_brace = self.last_opening_brace; + let byte_pos = self.to_span_index(pos); + self.last_opening_brace = Some(byte_pos.to(byte_pos)); self.cur.next(); if self.consume('{') { - self.last_opening_brace_pos = curr_last_brace; + self.last_opening_brace = curr_last_brace; Some(String(self.string(pos + 1))) } else { let arg = self.argument(); - if let Some(arg_pos) = self.must_consume('}').map(|end| { - (self.to_span_index(pos), self.to_span_index(end + 1)) - }) { - self.arg_places.push(arg_pos); + if let Some(end) = self.must_consume('}') { + let start = self.to_span_index(pos); + let end = self.to_span_index(end + 1); + self.arg_places.push(start.to(end)); } Some(NextArgument(arg)) } @@ -209,8 +209,7 @@ impl<'a> Iterator for Parser<'a> { "unmatched `}` found", "unmatched `}`", "if you intended to print `}`, you can escape it using `}}`", - err_pos, - err_pos, + err_pos.to(err_pos), ); 
None } @@ -242,7 +241,7 @@ impl<'a> Parser<'a> { style, arg_places: vec![], skips, - last_opening_brace_pos: None, + last_opening_brace: None, append_newline, } } @@ -254,15 +253,13 @@ impl<'a> Parser<'a> { &mut self, description: S1, label: S2, - start: SpanIndex, - end: SpanIndex, + span: InnerSpan, ) { self.errors.push(ParseError { description: description.into(), note: None, label: label.into(), - start, - end, + span, secondary_label: None, }); } @@ -275,15 +272,13 @@ impl<'a> Parser<'a> { description: S1, label: S2, note: S3, - start: SpanIndex, - end: SpanIndex, + span: InnerSpan, ) { self.errors.push(ParseError { description: description.into(), note: Some(note.into()), label: label.into(), - start, - end, + span, secondary_label: None, }); } @@ -304,7 +299,7 @@ impl<'a> Parser<'a> { } } - fn to_span_index(&self, pos: usize) -> SpanIndex { + fn to_span_index(&self, pos: usize) -> InnerOffset { let mut pos = pos; let raw = self.style.map(|raw| raw + 1).unwrap_or(0); for skip in &self.skips { @@ -316,7 +311,7 @@ impl<'a> Parser<'a> { break; } } - SpanIndex(raw + pos + 1) + InnerOffset(raw + pos + 1) } /// Forces consumption of the specified character. 
If the character is not @@ -334,8 +329,8 @@ impl<'a> Parser<'a> { let label = "expected `}`".to_owned(); let (note, secondary_label) = if c == '}' { (Some("if you intended to print `{`, you can escape it using `{{`".to_owned()), - self.last_opening_brace_pos.map(|pos| { - ("because of this opening brace".to_owned(), pos, pos) + self.last_opening_brace.map(|sp| { + ("because of this opening brace".to_owned(), sp) })) } else { (None, None) @@ -344,8 +339,7 @@ impl<'a> Parser<'a> { description, note, label, - start: pos, - end: pos, + span: pos.to(pos), secondary_label, }); None @@ -359,8 +353,8 @@ impl<'a> Parser<'a> { let label = format!("expected `{:?}`", c); let (note, secondary_label) = if c == '}' { (Some("if you intended to print `{`, you can escape it using `{{`".to_owned()), - self.last_opening_brace_pos.map(|pos| { - ("because of this opening brace".to_owned(), pos, pos) + self.last_opening_brace.map(|sp| { + ("because of this opening brace".to_owned(), sp) })) } else { (None, None) @@ -369,12 +363,11 @@ impl<'a> Parser<'a> { description, note, label, - start: pos, - end: pos, + span: pos.to(pos), secondary_label, }); } else { - self.err(description, format!("expected `{:?}`", c), pos, pos); + self.err(description, format!("expected `{:?}`", c), pos.to(pos)); } None } @@ -446,8 +439,10 @@ impl<'a> Parser<'a> { self.err_with_note(format!("invalid argument name `{}`", invalid_name), "invalid argument name", "argument names cannot start with an underscore", - self.to_span_index(pos), - self.to_span_index(pos + invalid_name.len())); + self.to_span_index(pos).to( + self.to_span_index(pos + invalid_name.len()) + ), + ); Some(ArgumentNamed(Symbol::intern(invalid_name))) }, diff --git a/src/librustdoc/passes/check_code_block_syntax.rs b/src/librustdoc/passes/check_code_block_syntax.rs index 694843ad7f71e..6d51278b4e5e8 100644 --- a/src/librustdoc/passes/check_code_block_syntax.rs +++ b/src/librustdoc/passes/check_code_block_syntax.rs @@ -2,7 +2,7 @@ use 
errors::Applicability; use syntax::parse::lexer::{StringReader as Lexer}; use syntax::parse::{ParseSess, token}; use syntax::source_map::FilePathMapping; -use syntax_pos::FileName; +use syntax_pos::{InnerSpan, FileName}; use crate::clean; use crate::core::DocContext; @@ -63,7 +63,7 @@ impl<'a, 'tcx> SyntaxChecker<'a, 'tcx> { } if code_block.syntax.is_none() && code_block.is_fenced { - let sp = sp.from_inner_byte_pos(0, 3); + let sp = sp.from_inner(InnerSpan::new(0, 3)); diag.span_suggestion( sp, "mark blocks that do not contain Rust code as text", diff --git a/src/librustdoc/passes/mod.rs b/src/librustdoc/passes/mod.rs index 018ab5dea6081..8fc6b9fdbe6b9 100644 --- a/src/librustdoc/passes/mod.rs +++ b/src/librustdoc/passes/mod.rs @@ -6,7 +6,7 @@ use rustc::lint as lint; use rustc::middle::privacy::AccessLevels; use rustc::util::nodemap::DefIdSet; use std::mem; -use syntax_pos::{DUMMY_SP, Span}; +use syntax_pos::{DUMMY_SP, InnerSpan, Span}; use std::ops::Range; use crate::clean::{self, GetDefId, Item}; @@ -440,10 +440,10 @@ crate fn source_span_for_markdown_range( } } - let sp = span_of_attrs(attrs).from_inner_byte_pos( + let sp = span_of_attrs(attrs).from_inner(InnerSpan::new( md_range.start + start_bytes, md_range.end + start_bytes + end_bytes, - ); + )); Some(sp) } diff --git a/src/libsyntax_ext/format.rs b/src/libsyntax_ext/format.rs index 1fdc2e6274a2d..85b524786b2f5 100644 --- a/src/libsyntax_ext/format.rs +++ b/src/libsyntax_ext/format.rs @@ -900,15 +900,15 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, if !parser.errors.is_empty() { let err = parser.errors.remove(0); - let sp = fmt.span.from_inner_byte_pos(err.start.unwrap(), err.end.unwrap()); + let sp = fmt.span.from_inner(err.span); let mut e = ecx.struct_span_err(sp, &format!("invalid format string: {}", err.description)); e.span_label(sp, err.label + " in format string"); if let Some(note) = err.note { e.note(¬e); } - if let Some((label, start, end)) = err.secondary_label { - let sp = 
fmt.span.from_inner_byte_pos(start.unwrap(), end.unwrap()); + if let Some((label, span)) = err.secondary_label { + let sp = fmt.span.from_inner(span); e.span_label(sp, label); } e.emit(); @@ -916,9 +916,7 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, } let arg_spans = parser.arg_places.iter() - .map(|&(parse::SpanIndex(start), parse::SpanIndex(end))| { - fmt.span.from_inner_byte_pos(start, end) - }) + .map(|span| fmt.span.from_inner(*span)) .collect(); let mut cx = Context { @@ -1065,8 +1063,8 @@ pub fn expand_preparsed_format_args(ecx: &mut ExtCtxt<'_>, show_doc_note = true; } - if let Some((start, end)) = pos { - let sp = fmt_sp.from_inner_byte_pos(start, end); + if let Some(inner_sp) = pos { + let sp = fmt_sp.from_inner(inner_sp); suggestions.push((sp, trn)); } else { diag.help(&format!("`{}` should be written as `{}`", sub, trn)); diff --git a/src/libsyntax_ext/format_foreign.rs b/src/libsyntax_ext/format_foreign.rs index b279dbced847c..7ad5997bf2c09 100644 --- a/src/libsyntax_ext/format_foreign.rs +++ b/src/libsyntax_ext/format_foreign.rs @@ -1,5 +1,6 @@ pub mod printf { use super::strcursor::StrCursor as Cur; + use syntax_pos::InnerSpan; /// Represents a single `printf`-style substitution. #[derive(Clone, PartialEq, Debug)] @@ -18,7 +19,7 @@ pub mod printf { } } - pub fn position(&self) -> Option<(usize, usize)> { + pub fn position(&self) -> Option { match *self { Substitution::Format(ref fmt) => Some(fmt.position), _ => None, @@ -28,7 +29,7 @@ pub mod printf { pub fn set_position(&mut self, start: usize, end: usize) { match self { Substitution::Format(ref mut fmt) => { - fmt.position = (start, end); + fmt.position = InnerSpan::new(start, end); } _ => {} } @@ -65,7 +66,7 @@ pub mod printf { /// Type of parameter being converted. pub type_: &'a str, /// Byte offset for the start and end of this formatting directive. 
- pub position: (usize, usize), + pub position: InnerSpan, } impl Format<'_> { @@ -282,9 +283,9 @@ pub mod printf { let (mut sub, tail) = parse_next_substitution(self.s)?; self.s = tail; match sub { - Substitution::Format(_) => if let Some((start, end)) = sub.position() { - sub.set_position(start + self.pos, end + self.pos); - self.pos += end; + Substitution::Format(_) => if let Some(inner_span) = sub.position() { + sub.set_position(inner_span.start + self.pos, inner_span.end + self.pos); + self.pos += inner_span.end; } Substitution::Escape => self.pos += 2, } @@ -373,7 +374,7 @@ pub mod printf { precision: None, length: None, type_: at.slice_between(next).unwrap(), - position: (start.at, next.at), + position: InnerSpan::new(start.at, next.at), }), next.slice_after() )); @@ -560,7 +561,7 @@ pub mod printf { drop(next); end = at; - let position = (start.at, end.at); + let position = InnerSpan::new(start.at, end.at); let f = Format { span: start.slice_between(end).unwrap(), @@ -650,7 +651,7 @@ pub mod printf { precision: $prec, length: $len, type_: $type_, - position: $pos, + position: syntax_pos::InnerSpan::new($pos.0, $pos.1), }), "!" 
)) @@ -761,6 +762,7 @@ pub mod printf { pub mod shell { use super::strcursor::StrCursor as Cur; + use syntax_pos::InnerSpan; #[derive(Clone, PartialEq, Debug)] pub enum Substitution<'a> { @@ -778,11 +780,11 @@ pub mod shell { } } - pub fn position(&self) -> Option<(usize, usize)> { + pub fn position(&self) -> Option { match self { Substitution::Ordinal(_, pos) | Substitution::Name(_, pos) | - Substitution::Escape(pos) => Some(*pos), + Substitution::Escape(pos) => Some(InnerSpan::new(pos.0, pos.1)), } } @@ -823,7 +825,7 @@ pub mod shell { match parse_next_substitution(self.s) { Some((mut sub, tail)) => { self.s = tail; - if let Some((start, end)) = sub.position() { + if let Some(InnerSpan { start, end }) = sub.position() { sub.set_position(start + self.pos, end + self.pos); self.pos += end; } diff --git a/src/libsyntax_pos/lib.rs b/src/libsyntax_pos/lib.rs index 8f5595968a738..bf0ab5fae4e87 100644 --- a/src/libsyntax_pos/lib.rs +++ b/src/libsyntax_pos/lib.rs @@ -504,10 +504,10 @@ impl Span { ) } - pub fn from_inner_byte_pos(self, start: usize, end: usize) -> Span { + pub fn from_inner(self, inner: InnerSpan) -> Span { let span = self.data(); - Span::new(span.lo + BytePos::from_usize(start), - span.lo + BytePos::from_usize(end), + Span::new(span.lo + BytePos::from_usize(inner.start), + span.lo + BytePos::from_usize(inner.end), span.ctxt) } @@ -1395,6 +1395,18 @@ pub struct MalformedSourceMapPositions { pub end_pos: BytePos } +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct InnerSpan { + pub start: usize, + pub end: usize, +} + +impl InnerSpan { + pub fn new(start: usize, end: usize) -> InnerSpan { + InnerSpan { start, end } + } +} + // Given a slice of line start positions and a position, returns the index of // the line the position is on. Returns -1 if the position is located before // the first line. 
From 6b9740bf0217ce88d0c2370e955a3c6372d39041 Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Thu, 6 Jun 2019 18:38:34 -0600 Subject: [PATCH 15/32] Add comment about raw strings to self.style --- src/libfmt_macros/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 66355801b6c54..520cc870859be 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -301,6 +301,8 @@ impl<'a> Parser<'a> { fn to_span_index(&self, pos: usize) -> InnerOffset { let mut pos = pos; + // This handles the raw string case, the raw argument is the number of # + // in r###"..."### (we need to add one because of the `r`). let raw = self.style.map(|raw| raw + 1).unwrap_or(0); for skip in &self.skips { if pos > *skip { From 45df52f2cca9bee04690f476f4a3f791ea1b04cc Mon Sep 17 00:00:00 2001 From: Mark Rousskov Date: Thu, 6 Jun 2019 19:31:24 -0600 Subject: [PATCH 16/32] Properly point at the last opening brace --- src/libfmt_macros/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 520cc870859be..b851183debfc5 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -183,7 +183,7 @@ impl<'a> Iterator for Parser<'a> { '{' => { let curr_last_brace = self.last_opening_brace; let byte_pos = self.to_span_index(pos); - self.last_opening_brace = Some(byte_pos.to(byte_pos)); + self.last_opening_brace = Some(byte_pos.to(InnerOffset(byte_pos.0 + 1))); self.cur.next(); if self.consume('{') { self.last_opening_brace = curr_last_brace; From 8590074a01364c2263f6e3c3c42e4137e2f77b65 Mon Sep 17 00:00:00 2001 From: Adrian Friedli Date: Sun, 9 Jun 2019 22:45:11 +0200 Subject: [PATCH 17/32] implement nth_back for RangeInclusive --- src/libcore/iter/range.rs | 28 ++++++++++++++++++++++++++++ src/libcore/tests/iter.rs | 20 ++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs 
index e171108a146a6..efda3b263cc97 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -451,6 +451,34 @@ impl DoubleEndedIterator for ops::RangeInclusive { }) } + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + self.compute_is_empty(); + if self.is_empty.unwrap_or_default() { + return None; + } + + if let Some(minus_n) = self.end.sub_usize(n) { + use crate::cmp::Ordering::*; + + match minus_n.partial_cmp(&self.start) { + Some(Greater) => { + self.is_empty = Some(false); + self.end = minus_n.sub_one(); + return Some(minus_n); + } + Some(Equal) => { + self.is_empty = Some(true); + return Some(minus_n); + } + _ => {} + } + } + + self.is_empty = Some(true); + None + } + #[inline] fn try_rfold(&mut self, init: B, mut f: F) -> R where Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index 171a33695bc6a..020618ae7aeed 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -1731,6 +1731,26 @@ fn test_range_inclusive_nth() { assert_eq!(ExactSizeIterator::is_empty(&r), true); } +#[test] +fn test_range_inclusive_nth_back() { + assert_eq!((10..=15).nth_back(0), Some(15)); + assert_eq!((10..=15).nth_back(1), Some(14)); + assert_eq!((10..=15).nth_back(5), Some(10)); + assert_eq!((10..=15).nth_back(6), None); + assert_eq!((-120..=80_i8).nth_back(200), Some(-120)); + + let mut r = 10_u8..=20; + assert_eq!(r.nth_back(2), Some(18)); + assert_eq!(r, 10..=17); + assert_eq!(r.nth_back(2), Some(15)); + assert_eq!(r, 10..=14); + assert_eq!(r.is_empty(), false); + assert_eq!(ExactSizeIterator::is_empty(&r), false); + assert_eq!(r.nth_back(10), None); + assert_eq!(r.is_empty(), true); + assert_eq!(ExactSizeIterator::is_empty(&r), true); +} + #[test] fn test_range_step() { #![allow(deprecated)] From 715578ea5769ccf21696fe64eed6971b19c1787c Mon Sep 17 00:00:00 2001 From: Petr Hosek Date: Sun, 9 Jun 2019 16:53:46 -0700 Subject: [PATCH 18/32] Pass cflags rather than cxxflags 
to LLVM as CMAKE_C_FLAGS We mistakenly pass cxxflags from the configuration to LLVM build as CMAKE_C_FLAGS. --- src/bootstrap/native.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index bf3601cb312fd..8b6e856a8aba8 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -416,7 +416,7 @@ fn configure_cmake(builder: &Builder<'_>, cfg.build_arg("-j").build_arg(builder.jobs().to_string()); let mut cflags = builder.cflags(target, GitRepo::Llvm).join(" "); - if let Some(ref s) = builder.config.llvm_cxxflags { + if let Some(ref s) = builder.config.llvm_cflags { cflags.push_str(&format!(" {}", s)); } cfg.define("CMAKE_C_FLAGS", cflags); From f9f8bfabf00c477d3430b276bf74b8335c92b82a Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Thu, 30 May 2019 14:27:12 -0700 Subject: [PATCH 19/32] Collect conflict information in GeneratorLayout --- src/librustc/mir/mod.rs | 23 ++++++ src/librustc_mir/dataflow/mod.rs | 56 +++++++++++++ src/librustc_mir/transform/generator.rs | 104 +++++++++++++++++++++++- 3 files changed, 179 insertions(+), 4 deletions(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 74beb1b1bc7de..149d067204fa2 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -9,6 +9,7 @@ use crate::hir::def_id::DefId; use crate::hir::{self, InlineAsm as HirInlineAsm}; use crate::mir::interpret::{ConstValue, InterpError, Scalar}; use crate::mir::visit::MirVisitable; +use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::graph::dominators::{dominators, Dominators}; use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors}; @@ -1500,6 +1501,13 @@ impl<'tcx> BasicBlockData<'tcx> { self.terminator.as_mut().expect("invalid terminator state") } + pub fn is_unreachable(&self) -> bool { + match self.terminator().kind { + TerminatorKind::Unreachable => true, + _ => false, + } + } + pub fn 
retain_statements(&mut self, mut f: F) where F: FnMut(&mut Statement<'_>) -> bool, @@ -2997,6 +3005,11 @@ pub struct GeneratorLayout<'tcx> { /// be stored in multiple variants. pub variant_fields: IndexVec>, + /// Which saved locals are storage-live at the same time. Locals that do not + /// have conflicts with each other are allowed to overlap in the computed + /// layout. + pub storage_conflicts: IndexVec>, + /// Names and scopes of all the stored generator locals. /// NOTE(tmandry) This is *strictly* a temporary hack for codegen /// debuginfo generation, and will be removed at some point. @@ -3193,6 +3206,7 @@ BraceStructTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for GeneratorLayout<'tcx> { field_tys, variant_fields, + storage_conflicts, __local_debuginfo_codegen_only_do_not_use, } } @@ -3572,6 +3586,15 @@ impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal { } } +impl<'tcx, T: Idx> TypeFoldable<'tcx> for BitSet { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { + self.clone() + } + fn super_visit_with>(&self, _: &mut V) -> bool { + false + } +} + impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { Constant { diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index 8e2068269ceaa..0c1a06b4a1f60 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -380,6 +380,62 @@ pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location, gen_set.to_dense() } +/// Calls `f` with the dataflow state at every location in `mir`. +/// Ignores blocks that terminate in `unreachable`. 
+pub fn for_each_location<'tcx, T: BitDenotation<'tcx>>( + mir: &Body<'tcx>, + analysis: &T, + result: &DataflowResults<'tcx, T>, + mut f: impl FnMut(&HybridBitSet, Location) +) { + for (block, bb_data) in mir.basic_blocks().iter_enumerated() { + if bb_data.is_unreachable() { + continue; + } + for_each_block_location(mir, block, bb_data, analysis, result, &mut f); + } +} + +fn for_each_block_location<'tcx, T: BitDenotation<'tcx>>( + mir: &Body<'tcx>, + block: BasicBlock, + bb_data: &BasicBlockData<'tcx>, + analysis: &T, + result: &DataflowResults<'tcx, T>, + f: &mut impl FnMut(&HybridBitSet, Location) +) { + let statements = &bb_data.statements; + + let mut on_entry = result.sets().on_entry_set_for(block.index()).to_owned(); + let mut kill_set = on_entry.to_hybrid(); + let mut gen_set = kill_set.clone(); + + { + let mut sets = BlockSets { + on_entry: &mut on_entry, + kill_set: &mut kill_set, + gen_set: &mut gen_set, + }; + // FIXME: This location is technically wrong, but there isn't a way to + // denote the start of a block. 
+ f(sets.gen_set, Location { block, statement_index: 0 }); + + for statement_index in 0..statements.len() { + let loc = Location { block, statement_index }; + analysis.before_statement_effect(&mut sets, loc); + f(sets.gen_set, loc); + analysis.statement_effect(&mut sets, loc); + f(sets.gen_set, loc); + } + + let term_loc = Location { block, statement_index: mir[block].statements.len() }; + analysis.before_terminator_effect(&mut sets, term_loc); + f(sets.gen_set, term_loc); + analysis.before_statement_effect(&mut sets, term_loc); + f(sets.gen_set, term_loc); + } +} + pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation<'tcx> { flow_state: DataflowState<'tcx, O>, diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index d2c75ebe8d6aa..c1fc49e42c8ed 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -66,7 +66,8 @@ use std::mem; use crate::transform::{MirPass, MirSource}; use crate::transform::simplify; use crate::transform::no_landing_pads::no_landing_pads; -use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location}; +use crate::dataflow::{DataflowResults}; +use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location, for_each_location}; use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals}; use crate::util::dump_mir; use crate::util::liveness; @@ -400,6 +401,7 @@ fn locals_live_across_suspend_points( movable: bool, ) -> ( liveness::LiveVarSet, + IndexVec>, FxHashMap, BitSet, ) { @@ -503,7 +505,99 @@ fn locals_live_across_suspend_points( // The generator argument is ignored set.remove(self_arg()); - (set, storage_liveness_map, suspending_blocks) + let storage_conflicts = compute_storage_conflicts( + body, + &set, + &ignored, + storage_live, + storage_live_analysis); + + (set, storage_conflicts, storage_liveness_map, suspending_blocks) +} + +/// For every saved local, looks for which locals are StorageLive at the same +/// time. 
Generates a bitset for every local of all the other locals that may be +/// StorageLive simultaneously with that local. This is used in the layout +/// computation; see `GeneratorLayout` for more. +fn compute_storage_conflicts( + body: &'mir Body<'tcx>, + stored_locals: &liveness::LiveVarSet, + ignored: &StorageIgnored, + storage_live: DataflowResults<'tcx, MaybeStorageLive<'mir, 'tcx>>, + storage_live_analysis: MaybeStorageLive<'mir, 'tcx>, +) -> IndexVec> { + debug!("compute_storage_conflicts({:?})", body.span); + debug!("ignored = {:?}", ignored.0); + + // Storage ignored locals are not eligible for overlap, since their storage + // is always live. + let mut ineligible_locals = ignored.0.clone(); + ineligible_locals.intersect(&stored_locals); + + // Of our remaining candidates, find out if any have overlapping storage + // liveness. Those that do must be in the same variant to remain candidates. + // FIXME(tmandry): Consider using sparse bitsets here once we have good + // benchmarks for generators. + let mut local_conflicts: IndexVec = + // Add conflicts for every ineligible local. + iter::repeat(ineligible_locals.clone()) + .take(body.local_decls.len()) + .collect(); + + for_each_location(body, &storage_live_analysis, &storage_live, |state, loc| { + let mut eligible_storage_live = state.clone().to_dense(); + eligible_storage_live.intersect(&stored_locals); + + for local in eligible_storage_live.iter() { + let mut overlaps = eligible_storage_live.clone(); + overlaps.remove(local); + local_conflicts[local].union(&overlaps); + + if !overlaps.is_empty() { + trace!("at {:?}, local {:?} conflicts with {:?}", + loc, local, overlaps); + } + } + }); + + // NOTE: Today we store a full conflict bitset for every local. Technically + // this is twice as many bits as we need, since the relation is symmetric. + // However, in practice these bitsets are not usually large. 
The layout code + // also needs to keep track of how many conflicts each local has, so it's + // simpler to keep it this way for now. + let storage_conflicts: IndexVec = stored_locals + .iter() + .map(|local_a| { + if ineligible_locals.contains(local_a) { + // Conflicts with everything. + BitSet::new_filled(stored_locals.count()) + } else { + // Keep overlap information only for stored locals. + renumber_bitset(&local_conflicts[local_a], stored_locals) + } + }) + .collect(); + + storage_conflicts +} + +/// Renumbers the items present in `stored_locals` and applies the renumbering +/// to 'input`. +/// +/// For example, if `stored_locals = [1, 3, 5]`, this would be renumbered to +/// `[0, 1, 2]`. Thus, if `input = [3, 5]` we would return `[1, 2]`. +fn renumber_bitset(input: &BitSet, stored_locals: &liveness::LiveVarSet) +-> BitSet { + assert!(stored_locals.superset(&input), "{:?} not a superset of {:?}", stored_locals, input); + let mut out = BitSet::new_empty(stored_locals.count()); + for (idx, local) in stored_locals.iter().enumerate() { + let saved_local = GeneratorSavedLocal::from(idx); + if input.contains(local) { + out.insert(saved_local); + } + } + debug!("renumber_bitset({:?}, {:?}) => {:?}", input, stored_locals, out); + out } fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -517,7 +611,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, FxHashMap) { // Use a liveness analysis to compute locals which are live across a suspension point - let (live_locals, storage_liveness, suspending_blocks) = + let (live_locals, storage_conflicts, storage_liveness, suspending_blocks) = locals_live_across_suspend_points(tcx, body, source, movable); // Erase regions from the types passed in from typeck so we can compare them with @@ -547,7 +641,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let dummy_local = LocalDecl::new_internal(tcx.mk_unit(), body.span); - // Gather live locals and their indices replacing values in mir.local_decls with 
a dummy + // Gather live locals and their indices replacing values in body.local_decls with a dummy // to avoid changing local indices let live_decls = live_locals.iter().map(|local| { let var = mem::replace(&mut body.local_decls[local], dummy_local.clone()); @@ -568,6 +662,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, remap.insert(local, (var.ty, variant_index, idx)); decls.push(var); } + debug!("generator saved local mappings: {:?}", decls); let field_tys = decls.iter().map(|field| field.ty).collect::>(); // Put every var in each variant, for now. @@ -578,6 +673,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let layout = GeneratorLayout { field_tys, variant_fields: empty_variants.chain(state_variants).collect(), + storage_conflicts, __local_debuginfo_codegen_only_do_not_use: decls, }; From 786875824ca6281b87bafb0edff099a3a05f73ae Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Tue, 7 May 2019 19:47:18 -0700 Subject: [PATCH 20/32] Overlap locals that never have storage live at the same time ...and are only included in a single variant. 
--- src/librustc/ty/layout.rs | 214 +++++++++++++++++++++++++++++++++++--- 1 file changed, 201 insertions(+), 13 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 8e2c3dd3d8ad9..208ea224242b1 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -14,6 +14,9 @@ use std::ops::Bound; use crate::hir; use crate::ich::StableHashingContext; +use crate::mir::GeneratorSavedLocal; +use crate::ty::subst::Subst; +use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; @@ -612,34 +615,219 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } ty::Generator(def_id, ref substs, _) => { - // FIXME(tmandry): For fields that are repeated in multiple - // variants in the GeneratorLayout, we need code to ensure that - // the offset of these fields never change. Right now this is - // not an issue since every variant has every field, but once we - // optimize this we have to be more careful. + // When laying out generators, we divide our saved local fields + // into two categories: overlap-eligible and overlap-ineligible. + // + // Those fields which are ineligible for overlap go in a + // "prefix" at the beginning of the layout, and always have + // space reserved for them. + // + // Overlap-eligible fields are only assigned to one variant, so + // we lay those fields out for each variant and put them right + // after the prefix. + // + // Finally, in the layout details, we point to the fields + // from the variants they are assigned to. It is possible for + // some fields to be included in multiple variants. No field + // ever "moves around" in the layout; its offset is always the + // same. + // + // Also included in the layout are the upvars and the + // discriminant. These are included as fields on the "outer" + // layout; they are not part of any variant. 
+ + let info = tcx.generator_layout(def_id); + let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) }; + + #[derive(Clone, Debug, PartialEq)] + enum SavedLocalEligibility { + Unassigned, + Assigned(VariantIdx), + // FIXME: Use newtype_index so we aren't wasting bytes + Ineligible(Option), + } + use SavedLocalEligibility::*; + + let mut assignments: IndexVec = + iter::repeat(Unassigned) + .take(info.field_tys.len()) + .collect(); + + // The saved locals not eligible for overlap. These will get + // "promoted" to the prefix of our generator. + let mut eligible_locals = BitSet::new_filled(info.field_tys.len()); + + // Figure out which of our saved locals are fields in only + // one variant. The rest are deemed ineligible for overlap. + for (variant_index, fields) in info.variant_fields.iter_enumerated() { + for local in fields { + match assignments[*local] { + Unassigned => { + assignments[*local] = Assigned(variant_index); + } + Assigned(idx) => { + // We've already seen this local at another suspension + // point, so it is no longer a candidate. + trace!("removing local {:?} in >1 variant ({:?}, {:?})", + local, variant_index, idx); + eligible_locals.remove(*local); + assignments[*local] = Ineligible(None); + } + Ineligible(_) => {}, + } + } + } + + // Next, check every pair of eligible locals to see if they + // conflict. + for (local_a, conflicts_a) in info.storage_conflicts.iter_enumerated() { + if !eligible_locals.contains(local_a) { + continue; + } + + for local_b in conflicts_a.iter() { + // local_a and local_b have overlapping storage, therefore they + // cannot overlap in the generator layout. The only way to guarantee + // this is if they are in the same variant, or one is ineligible + // (which means it is stored in every variant). + if !eligible_locals.contains(local_b) || + assignments[local_a] == assignments[local_b] + { + continue; + } + + // If they conflict, we will choose one to make ineligible. 
+ let conflicts_b = &info.storage_conflicts[local_b]; + let (remove, other) = if conflicts_a.count() > conflicts_b.count() { + (local_a, local_b) + } else { + (local_b, local_a) + }; + eligible_locals.remove(remove); + assignments[remove] = Ineligible(None); + trace!("removing local {:?} due to conflict with {:?}", remove, other); + } + } + + let mut ineligible_locals = BitSet::new_filled(info.field_tys.len()); + ineligible_locals.subtract(&eligible_locals); + // Write down the order of our locals that will be promoted to + // the prefix. + for (idx, local) in ineligible_locals.iter().enumerate() { + assignments[local] = Ineligible(Some(idx as u32)); + } + debug!("generator saved local assignments: {:?}", assignments); + + // Build a prefix layout, including "promoting" all ineligible + // locals as part of the prefix. let discr_index = substs.prefix_tys(def_id, tcx).count(); + let promoted_tys = + ineligible_locals.iter().map(|local| subst_field(info.field_tys[local])); let prefix_tys = substs.prefix_tys(def_id, tcx) - .chain(iter::once(substs.discr_ty(tcx))); + .chain(iter::once(substs.discr_ty(tcx))) + .chain(promoted_tys); let prefix = univariant_uninterned( &prefix_tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), StructKind::AlwaysSized)?; + let (prefix_size, prefix_align) = (prefix.size, prefix.align); + + // Split the prefix layout into the "outer" fields (upvars and + // discriminant) and the "promoted" fields. Promoted fields will + // get included in each variant that requested them in + // GeneratorLayout. 
+ let renumber_indices = |mut index: Vec| -> Vec { + debug!("renumber_indices({:?})", index); + let mut inverse_index = (0..index.len() as u32).collect::>(); + inverse_index.sort_unstable_by_key(|i| index[*i as usize]); + for i in 0..index.len() { + index[inverse_index[i] as usize] = i as u32; + } + debug!("renumber_indices() => {:?}", index); + index + }; + debug!("prefix = {:#?}", prefix); + let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { + FieldPlacement::Arbitrary { offsets, memory_index } => { + let (offsets_a, offsets_b) = + offsets.split_at(discr_index + 1); + let (memory_index_a, memory_index_b) = + memory_index.split_at(discr_index + 1); + let outer_fields = FieldPlacement::Arbitrary { + offsets: offsets_a.to_vec(), + memory_index: renumber_indices(memory_index_a.to_vec()) + }; + (outer_fields, + offsets_b.to_vec(), + renumber_indices(memory_index_b.to_vec())) + } + _ => bug!(), + }; let mut size = prefix.size; let mut align = prefix.align; - let variants_tys = substs.state_tys(def_id, tcx); - let variants = variants_tys.enumerate().map(|(i, variant_tys)| { + let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| { + // Only include overlap-eligible fields when we compute our variant layout. 
+ let variant_only_tys = variant_fields.iter().flat_map(|local| { + let ty = info.field_tys[*local]; + match assignments[*local] { + Unassigned => bug!(), + Assigned(v) if v == index => Some(subst_field(ty)), + Assigned(_) => bug!("assignment does not match variant"), + Ineligible(_) => None, + } + }); + let mut variant = univariant_uninterned( - &variant_tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, + &variant_only_tys + .map(|ty| self.layout_of(ty)) + .collect::, _>>()?, &ReprOptions::default(), - StructKind::Prefixed(prefix.size, prefix.align.abi))?; + StructKind::Prefixed(prefix_size, prefix_align.abi))?; + variant.variants = Variants::Single { index }; - variant.variants = Variants::Single { index: VariantIdx::new(i) }; + let (offsets, memory_index) = match variant.fields { + FieldPlacement::Arbitrary { offsets, memory_index } => + (offsets, memory_index), + _ => bug!(), + }; + + // Now, stitch the promoted and variant-only fields back + // together in the order they are mentioned by our + // GeneratorLayout. + let mut next_variant_field = 0; + let mut combined_offsets = Vec::new(); + let mut combined_memory_index = Vec::new(); + for local in variant_fields.iter() { + match assignments[*local] { + Unassigned => bug!(), + Assigned(_) => { + combined_offsets.push(offsets[next_variant_field]); + // Shift memory indices by the number of + // promoted fields, which all come first. We + // may not use all promoted fields in our + // variant but that's okay; we'll renumber them + // below. 
+ combined_memory_index.push( + promoted_memory_index.len() as u32 + + memory_index[next_variant_field]); + next_variant_field += 1; + } + Ineligible(field_idx) => { + let field_idx = field_idx.unwrap() as usize; + combined_offsets.push(promoted_offsets[field_idx]); + combined_memory_index.push(promoted_memory_index[field_idx]); + } + } + } + variant.fields = FieldPlacement::Arbitrary { + offsets: combined_offsets, + memory_index: renumber_indices(combined_memory_index), + }; size = size.max(variant.size); align = align.max(variant.align); - Ok(variant) }).collect::, _>>()?; @@ -661,7 +849,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { discr_index, variants, }, - fields: prefix.fields, + fields: outer_fields, abi, size, align, From 9de451c6d67129dc34e10d05f5bcb7983897aa2b Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Thu, 9 May 2019 18:07:30 -0700 Subject: [PATCH 21/32] Only include generator saved locals in the variants that need them --- src/librustc_mir/transform/generator.rs | 82 +++++++++++++++---------- 1 file changed, 48 insertions(+), 34 deletions(-) diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index c1fc49e42c8ed..073beafcf1fa1 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -401,9 +401,9 @@ fn locals_live_across_suspend_points( movable: bool, ) -> ( liveness::LiveVarSet, + Vec>, IndexVec>, FxHashMap, - BitSet, ) { let dead_unwinds = BitSet::new_empty(body.basic_blocks().len()); let def_id = source.def_id(); @@ -447,13 +447,10 @@ fn locals_live_across_suspend_points( ); let mut storage_liveness_map = FxHashMap::default(); - - let mut suspending_blocks = BitSet::new_empty(body.basic_blocks().len()); + let mut live_locals_at_suspension_points = Vec::new(); for (block, data) in body.basic_blocks().iter_enumerated() { if let TerminatorKind::Yield { .. 
} = data.terminator().kind { - suspending_blocks.insert(block); - let loc = Location { block: block, statement_index: data.statements.len(), @@ -494,16 +491,25 @@ fn locals_live_across_suspend_points( // and their storage is live (the `storage_liveness` variable) storage_liveness.intersect(&liveness.outs[block]); + // The generator argument is ignored + storage_liveness.remove(self_arg()); + let live_locals = storage_liveness; - // Add the locals life at this suspension point to the set of locals which live across + // Add the locals live at this suspension point to the set of locals which live across // any suspension points set.union(&live_locals); + + live_locals_at_suspension_points.push(live_locals); } } - // The generator argument is ignored - set.remove(self_arg()); + // Renumber our liveness_map bitsets to include only the locals we are + // saving. + let live_locals_at_suspension_points = live_locals_at_suspension_points + .iter() + .map(|live_locals| renumber_bitset(&live_locals, &set)) + .collect(); let storage_conflicts = compute_storage_conflicts( body, @@ -512,7 +518,7 @@ fn locals_live_across_suspend_points( storage_live, storage_live_analysis); - (set, storage_conflicts, storage_liveness_map, suspending_blocks) + (set, live_locals_at_suspension_points, storage_conflicts, storage_liveness_map) } /// For every saved local, looks for which locals are StorageLive at the same @@ -611,7 +617,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, FxHashMap) { // Use a liveness analysis to compute locals which are live across a suspension point - let (live_locals, storage_conflicts, storage_liveness, suspending_blocks) = + let (live_locals, live_locals_at_suspension_points, storage_conflicts, storage_liveness) = locals_live_across_suspend_points(tcx, body, source, movable); // Erase regions from the types passed in from typeck so we can compare them with @@ -641,38 +647,46 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let dummy_local = 
LocalDecl::new_internal(tcx.mk_unit(), body.span); - // Gather live locals and their indices replacing values in body.local_decls with a dummy - // to avoid changing local indices - let live_decls = live_locals.iter().map(|local| { + // Gather live locals and their indices replacing values in body.local_decls + // with a dummy to avoid changing local indices. + let mut locals = IndexVec::::new(); + let mut tys = IndexVec::::new(); + let mut decls = IndexVec::::new(); + for (idx, local) in live_locals.iter().enumerate() { let var = mem::replace(&mut body.local_decls[local], dummy_local.clone()); - (local, var) - }); + locals.push(local); + tys.push(var.ty); + decls.push(var); + debug!("generator saved local {:?} => {:?}", GeneratorSavedLocal::from(idx), local); + } - // For now we will access everything via variant #3, leaving empty variants - // for the UNRESUMED, RETURNED, and POISONED states. - // If there were a yield-less generator without a variant #3, it would not - // have any vars to remap, so we would never use this. - let variant_index = VariantIdx::new(3); + // Leave empty variants for the UNRESUMED, RETURNED, and POISONED states. + const RESERVED_VARIANTS: usize = 3; + // Build the generator variant field list. // Create a map from local indices to generator struct indices. - // We also create a vector of the LocalDecls of these locals. 
+ let mut variant_fields: IndexVec> = + iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect(); let mut remap = FxHashMap::default(); - let mut decls = IndexVec::new(); - for (idx, (local, var)) in live_decls.enumerate() { - remap.insert(local, (var.ty, variant_index, idx)); - decls.push(var); + for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() { + let variant_index = VariantIdx::from(RESERVED_VARIANTS + suspension_point_idx); + let mut fields = IndexVec::new(); + for (idx, saved_local) in live_locals.iter().enumerate() { + fields.push(saved_local); + // Note that if a field is included in multiple variants, it will be + // added overwritten here. That's fine; fields do not move around + // inside generators, so it doesn't matter which variant index we + // access them by. + remap.insert(locals[saved_local], (tys[saved_local], variant_index, idx)); + } + variant_fields.push(fields); } - debug!("generator saved local mappings: {:?}", decls); - let field_tys = decls.iter().map(|field| field.ty).collect::>(); - - // Put every var in each variant, for now. 
- let all_vars = (0..field_tys.len()).map(GeneratorSavedLocal::from).collect(); - let empty_variants = iter::repeat(IndexVec::new()).take(3); - let state_variants = iter::repeat(all_vars).take(suspending_blocks.count()); + debug!("generator variant_fields = {:?}", variant_fields); + debug!("generator storage_conflicts = {:?}", storage_conflicts); let layout = GeneratorLayout { - field_tys, - variant_fields: empty_variants.chain(state_variants).collect(), + field_tys: tys, + variant_fields, storage_conflicts, __local_debuginfo_codegen_only_do_not_use: decls, }; From 63d73fd70bcea43b457a9863d8b6d20ebc89c09f Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Thu, 9 May 2019 18:13:40 -0700 Subject: [PATCH 22/32] Add test suite --- src/test/run-pass/async-fn-size.rs | 106 ++++++++++++++++++ src/test/run-pass/generator/overlap-locals.rs | 27 +++++ 2 files changed, 133 insertions(+) create mode 100644 src/test/run-pass/async-fn-size.rs create mode 100644 src/test/run-pass/generator/overlap-locals.rs diff --git a/src/test/run-pass/async-fn-size.rs b/src/test/run-pass/async-fn-size.rs new file mode 100644 index 0000000000000..05afd6d401977 --- /dev/null +++ b/src/test/run-pass/async-fn-size.rs @@ -0,0 +1,106 @@ +// edition:2018 +// aux-build:arc_wake.rs + +#![feature(async_await, await_macro)] + +extern crate arc_wake; + +use std::pin::Pin; +use std::future::Future; +use std::sync::{ + Arc, + atomic::{self, AtomicUsize}, +}; +use std::task::{Context, Poll}; +use arc_wake::ArcWake; + +struct Counter { + wakes: AtomicUsize, +} + +impl ArcWake for Counter { + fn wake(self: Arc) { + Self::wake_by_ref(&self) + } + fn wake_by_ref(arc_self: &Arc) { + arc_self.wakes.fetch_add(1, atomic::Ordering::SeqCst); + } +} + +struct WakeOnceThenComplete(bool, u8); + +impl Future for WakeOnceThenComplete { + type Output = u8; + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.0 { + Poll::Ready(self.1) + } else { + cx.waker().wake_by_ref(); + self.0 = true; + 
Poll::Pending + } + } +} + +fn wait(fut: impl Future) -> u8 { + let mut fut = Box::pin(fut); + let counter = Arc::new(Counter { wakes: AtomicUsize::new(0) }); + let waker = ArcWake::into_waker(counter.clone()); + let mut cx = Context::from_waker(&waker); + loop { + match fut.as_mut().poll(&mut cx) { + Poll::Ready(out) => return out, + Poll::Pending => (), + } + } +} + +fn base() -> WakeOnceThenComplete { WakeOnceThenComplete(false, 1) } + +async fn await1_level1() -> u8 { + await!(base()) +} + +async fn await2_level1() -> u8 { + await!(base()) + await!(base()) +} + +async fn await3_level1() -> u8 { + await!(base()) + await!(base()) + await!(base()) +} + +async fn await3_level2() -> u8 { + await!(await3_level1()) + await!(await3_level1()) + await!(await3_level1()) +} + +async fn await3_level3() -> u8 { + await!(await3_level2()) + await!(await3_level2()) + await!(await3_level2()) +} + +async fn await3_level4() -> u8 { + await!(await3_level3()) + await!(await3_level3()) + await!(await3_level3()) +} + +async fn await3_level5() -> u8 { + await!(await3_level4()) + await!(await3_level4()) + await!(await3_level4()) +} + +fn main() { + assert_eq!(2, std::mem::size_of_val(&base())); + assert_eq!(8, std::mem::size_of_val(&await1_level1())); + assert_eq!(12, std::mem::size_of_val(&await2_level1())); + assert_eq!(12, std::mem::size_of_val(&await3_level1())); + assert_eq!(20, std::mem::size_of_val(&await3_level2())); + assert_eq!(28, std::mem::size_of_val(&await3_level3())); + assert_eq!(36, std::mem::size_of_val(&await3_level4())); + assert_eq!(44, std::mem::size_of_val(&await3_level5())); + + assert_eq!(1, wait(base())); + assert_eq!(1, wait(await1_level1())); + assert_eq!(2, wait(await2_level1())); + assert_eq!(3, wait(await3_level1())); + assert_eq!(9, wait(await3_level2())); + assert_eq!(27, wait(await3_level3())); + assert_eq!(81, wait(await3_level4())); + assert_eq!(243, wait(await3_level5())); +} diff --git a/src/test/run-pass/generator/overlap-locals.rs 
b/src/test/run-pass/generator/overlap-locals.rs new file mode 100644 index 0000000000000..704484a480e26 --- /dev/null +++ b/src/test/run-pass/generator/overlap-locals.rs @@ -0,0 +1,27 @@ +#![feature(generators)] + +fn main() { + let a = || { + { + let w: i32 = 4; + yield; + println!("{:?}", w); + } + { + let x: i32 = 5; + yield; + println!("{:?}", x); + } + { + let y: i32 = 6; + yield; + println!("{:?}", y); + } + { + let z: i32 = 7; + yield; + println!("{:?}", z); + } + }; + assert_eq!(8, std::mem::size_of_val(&a)); +} From a38991f755e0180101d46351a63db7b6657f08b4 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Wed, 22 May 2019 16:36:36 -0700 Subject: [PATCH 23/32] Small review fixes --- src/librustc/ty/layout.rs | 79 ++++++++++++++----------- src/librustc_mir/dataflow/mod.rs | 2 +- src/librustc_mir/transform/generator.rs | 78 ++++++++++++++---------- 3 files changed, 92 insertions(+), 67 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 208ea224242b1..74f352eb3be6f 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -649,13 +649,11 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { use SavedLocalEligibility::*; let mut assignments: IndexVec = - iter::repeat(Unassigned) - .take(info.field_tys.len()) - .collect(); + IndexVec::from_elem_n(Unassigned, info.field_tys.len()); // The saved locals not eligible for overlap. These will get // "promoted" to the prefix of our generator. - let mut eligible_locals = BitSet::new_filled(info.field_tys.len()); + let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); // Figure out which of our saved locals are fields in only // one variant. The rest are deemed ineligible for overlap. @@ -670,7 +668,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // point, so it is no longer a candidate. 
trace!("removing local {:?} in >1 variant ({:?}, {:?})", local, variant_index, idx); - eligible_locals.remove(*local); + ineligible_locals.insert(*local); assignments[*local] = Ineligible(None); } Ineligible(_) => {}, @@ -681,46 +679,50 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // Next, check every pair of eligible locals to see if they // conflict. for (local_a, conflicts_a) in info.storage_conflicts.iter_enumerated() { - if !eligible_locals.contains(local_a) { + if ineligible_locals.contains(local_a) { continue; } for local_b in conflicts_a.iter() { - // local_a and local_b have overlapping storage, therefore they + // local_a and local_b are storage live at the same time, therefore they // cannot overlap in the generator layout. The only way to guarantee // this is if they are in the same variant, or one is ineligible // (which means it is stored in every variant). - if !eligible_locals.contains(local_b) || + if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] { continue; } // If they conflict, we will choose one to make ineligible. + // This is not always optimal; it's just a greedy heuristic + // that seems to produce good results most of the time. let conflicts_b = &info.storage_conflicts[local_b]; let (remove, other) = if conflicts_a.count() > conflicts_b.count() { (local_a, local_b) } else { (local_b, local_a) }; - eligible_locals.remove(remove); + ineligible_locals.insert(remove); assignments[remove] = Ineligible(None); trace!("removing local {:?} due to conflict with {:?}", remove, other); } } - let mut ineligible_locals = BitSet::new_filled(info.field_tys.len()); - ineligible_locals.subtract(&eligible_locals); - // Write down the order of our locals that will be promoted to // the prefix. 
- for (idx, local) in ineligible_locals.iter().enumerate() { - assignments[local] = Ineligible(Some(idx as u32)); + { + let mut idx = 0u32; + for local in ineligible_locals.iter() { + assignments[local] = Ineligible(Some(idx)); + idx += 1; + } } debug!("generator saved local assignments: {:?}", assignments); // Build a prefix layout, including "promoting" all ineligible - // locals as part of the prefix. + // locals as part of the prefix. We compute the layout of all of + // these fields at once to get optimal packing. let discr_index = substs.prefix_tys(def_id, tcx).count(); let promoted_tys = ineligible_locals.iter().map(|local| subst_field(info.field_tys[local])); @@ -733,20 +735,23 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { StructKind::AlwaysSized)?; let (prefix_size, prefix_align) = (prefix.size, prefix.align); - // Split the prefix layout into the "outer" fields (upvars and - // discriminant) and the "promoted" fields. Promoted fields will - // get included in each variant that requested them in - // GeneratorLayout. - let renumber_indices = |mut index: Vec| -> Vec { - debug!("renumber_indices({:?})", index); - let mut inverse_index = (0..index.len() as u32).collect::>(); - inverse_index.sort_unstable_by_key(|i| index[*i as usize]); + let recompute_memory_index = |offsets: &Vec| -> Vec { + debug!("recompute_memory_index({:?})", offsets); + let mut inverse_index = (0..offsets.len() as u32).collect::>(); + inverse_index.sort_unstable_by_key(|i| offsets[*i as usize]); + + let mut index = vec![0; offsets.len()]; for i in 0..index.len() { index[inverse_index[i] as usize] = i as u32; } - debug!("renumber_indices() => {:?}", index); + debug!("recompute_memory_index() => {:?}", index); index }; + + // Split the prefix layout into the "outer" fields (upvars and + // discriminant) and the "promoted" fields. Promoted fields will + // get included in each variant that requested them in + // GeneratorLayout. 
debug!("prefix = {:#?}", prefix); let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { FieldPlacement::Arbitrary { offsets, memory_index } => { @@ -756,11 +761,11 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { memory_index.split_at(discr_index + 1); let outer_fields = FieldPlacement::Arbitrary { offsets: offsets_a.to_vec(), - memory_index: renumber_indices(memory_index_a.to_vec()) + memory_index: recompute_memory_index(&memory_index_a.to_vec()) }; (outer_fields, offsets_b.to_vec(), - renumber_indices(memory_index_b.to_vec())) + recompute_memory_index(&memory_index_b.to_vec())) } _ => bug!(), }; @@ -769,15 +774,17 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut align = prefix.align; let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| { // Only include overlap-eligible fields when we compute our variant layout. - let variant_only_tys = variant_fields.iter().flat_map(|local| { - let ty = info.field_tys[*local]; - match assignments[*local] { - Unassigned => bug!(), - Assigned(v) if v == index => Some(subst_field(ty)), - Assigned(_) => bug!("assignment does not match variant"), - Ineligible(_) => None, - } - }); + let variant_only_tys = variant_fields + .iter() + .filter(|local| { + match assignments[**local] { + Unassigned => bug!(), + Assigned(v) if v == index => true, + Assigned(_) => bug!("assignment does not match variant"), + Ineligible(_) => false, + } + }) + .map(|local| subst_field(info.field_tys[*local])); let mut variant = univariant_uninterned( &variant_only_tys @@ -823,7 +830,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } variant.fields = FieldPlacement::Arbitrary { offsets: combined_offsets, - memory_index: renumber_indices(combined_memory_index), + memory_index: recompute_memory_index(&combined_memory_index), }; size = size.max(variant.size); diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index 
0c1a06b4a1f60..03a48a38c4a6d 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -431,7 +431,7 @@ fn for_each_block_location<'tcx, T: BitDenotation<'tcx>>( let term_loc = Location { block, statement_index: mir[block].statements.len() }; analysis.before_terminator_effect(&mut sets, term_loc); f(sets.gen_set, term_loc); - analysis.before_statement_effect(&mut sets, term_loc); + analysis.terminator_effect(&mut sets, term_loc); f(sets.gen_set, term_loc); } } diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index 073beafcf1fa1..1bcffcfb4d298 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -394,17 +394,33 @@ impl<'tcx> Visitor<'tcx> for StorageIgnored { } } +struct LivenessInfo { + /// Which locals are live across any suspension point. + /// + /// GeneratorSavedLocal is indexed in terms of the elements in this set; + /// i.e. GeneratorSavedLocal::new(1) corresponds to the second local + /// included in this set. + live_locals: liveness::LiveVarSet, + + /// The set of saved locals live at each suspension point. + live_locals_at_suspension_points: Vec>, + + /// For every saved local, the set of other saved locals that are + /// storage-live at the same time as this local. We cannot overlap locals in + /// the layout which have conflicting storage. + storage_conflicts: IndexVec>, + + /// For every suspending block, the locals which are storage-live across + /// that suspension point. 
+ storage_liveness: FxHashMap, +} + fn locals_live_across_suspend_points( tcx: TyCtxt<'a, 'tcx, 'tcx>, body: &Body<'tcx>, source: MirSource<'tcx>, movable: bool, -) -> ( - liveness::LiveVarSet, - Vec>, - IndexVec>, - FxHashMap, -) { +) -> LivenessInfo { let dead_unwinds = BitSet::new_empty(body.basic_blocks().len()); let def_id = source.def_id(); @@ -434,7 +450,7 @@ fn locals_live_across_suspend_points( }; // Calculate the liveness of MIR locals ignoring borrows. - let mut set = liveness::LiveVarSet::new_empty(body.local_decls.len()); + let mut live_locals = liveness::LiveVarSet::new_empty(body.local_decls.len()); let mut liveness = liveness::liveness_of_locals( body, ); @@ -489,18 +505,17 @@ fn locals_live_across_suspend_points( // Locals live are live at this point only if they are used across // suspension points (the `liveness` variable) // and their storage is live (the `storage_liveness` variable) - storage_liveness.intersect(&liveness.outs[block]); + let mut live_locals_here = storage_liveness; + live_locals_here.intersect(&liveness.outs[block]); // The generator argument is ignored - storage_liveness.remove(self_arg()); - - let live_locals = storage_liveness; + live_locals_here.remove(self_arg()); // Add the locals live at this suspension point to the set of locals which live across // any suspension points - set.union(&live_locals); + live_locals.union(&live_locals_here); - live_locals_at_suspension_points.push(live_locals); + live_locals_at_suspension_points.push(live_locals_here); } } @@ -508,17 +523,22 @@ fn locals_live_across_suspend_points( // saving. 
let live_locals_at_suspension_points = live_locals_at_suspension_points .iter() - .map(|live_locals| renumber_bitset(&live_locals, &set)) + .map(|live_here| renumber_bitset(&live_here, &live_locals)) .collect(); let storage_conflicts = compute_storage_conflicts( body, - &set, + &live_locals, &ignored, storage_live, storage_live_analysis); - (set, live_locals_at_suspension_points, storage_conflicts, storage_liveness_map) + LivenessInfo { + live_locals, + live_locals_at_suspension_points, + storage_conflicts, + storage_liveness: storage_liveness_map, + } } /// For every saved local, looks for which locals are StorageLive at the same @@ -555,14 +575,11 @@ fn compute_storage_conflicts( eligible_storage_live.intersect(&stored_locals); for local in eligible_storage_live.iter() { - let mut overlaps = eligible_storage_live.clone(); - overlaps.remove(local); - local_conflicts[local].union(&overlaps); + local_conflicts[local].union(&eligible_storage_live); + } - if !overlaps.is_empty() { - trace!("at {:?}, local {:?} conflicts with {:?}", - loc, local, overlaps); - } + if eligible_storage_live.count() > 1 { + trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live); } }); @@ -617,8 +634,9 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, FxHashMap) { // Use a liveness analysis to compute locals which are live across a suspension point - let (live_locals, live_locals_at_suspension_points, storage_conflicts, storage_liveness) = - locals_live_across_suspend_points(tcx, body, source, movable); + let LivenessInfo { + live_locals, live_locals_at_suspension_points, storage_conflicts, storage_liveness + } = locals_live_across_suspend_points(tcx, body, source, movable); // Erase regions from the types passed in from typeck so we can compare them with // MIR types @@ -673,11 +691,11 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut fields = IndexVec::new(); for (idx, saved_local) in live_locals.iter().enumerate() { fields.push(saved_local); 
- // Note that if a field is included in multiple variants, it will be - // added overwritten here. That's fine; fields do not move around - // inside generators, so it doesn't matter which variant index we - // access them by. - remap.insert(locals[saved_local], (tys[saved_local], variant_index, idx)); + // Note that if a field is included in multiple variants, we will + // just use the first one here. That's fine; fields do not move + // around inside generators, so it doesn't matter which variant + // index we access them by. + remap.entry(locals[saved_local]).or_insert((tys[saved_local], variant_index, idx)); } variant_fields.push(fields); } From 66e7493543bfef2bdd12454155670cc810de8ea9 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Fri, 31 May 2019 15:59:56 -0700 Subject: [PATCH 24/32] Add more functionality to BitMatrix --- src/librustc_data_structures/bit_set.rs | 84 ++++++++++++++++++- src/librustc_data_structures/stable_hasher.rs | 10 +++ 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/src/librustc_data_structures/bit_set.rs b/src/librustc_data_structures/bit_set.rs index ec7ff3bd81397..7a11ca070071b 100644 --- a/src/librustc_data_structures/bit_set.rs +++ b/src/librustc_data_structures/bit_set.rs @@ -636,7 +636,7 @@ impl GrowableBitSet { /// /// All operations that involve a row and/or column index will panic if the /// index exceeds the relevant bound. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq, RustcDecodable, RustcEncodable)] pub struct BitMatrix { num_rows: usize, num_columns: usize, @@ -658,6 +658,23 @@ impl BitMatrix { } } + /// Creates a new matrix, with `row` used as the value for every row. 
+ pub fn from_row_n(row: &BitSet, num_rows: usize) -> BitMatrix { + let num_columns = row.domain_size(); + let words_per_row = num_words(num_columns); + assert_eq!(words_per_row, row.words().len()); + BitMatrix { + num_rows, + num_columns, + words: iter::repeat(row.words()).take(num_rows).flatten().cloned().collect(), + marker: PhantomData, + } + } + + pub fn rows(&self) -> impl Iterator { + (0..self.num_rows).map(R::new) + } + /// The range of bits for a given row. fn range(&self, row: R) -> (usize, usize) { let words_per_row = num_words(self.num_columns); @@ -737,6 +754,49 @@ impl BitMatrix { changed } + /// Adds the bits from `with` to the bits from row `write`, and + /// returns `true` if anything changed. + pub fn union_row_with(&mut self, with: &BitSet, write: R) -> bool { + assert!(write.index() < self.num_rows); + assert_eq!(with.domain_size(), self.num_columns); + let (write_start, write_end) = self.range(write); + let mut changed = false; + for (read_index, write_index) in (0..with.words().len()).zip(write_start..write_end) { + let word = self.words[write_index]; + let new_word = word | with.words()[read_index]; + self.words[write_index] = new_word; + changed |= word != new_word; + } + changed + } + + /// Sets every cell in `row` to true. + pub fn insert_all_into_row(&mut self, row: R) { + assert!(row.index() < self.num_rows); + let (start, end) = self.range(row); + let words = &mut self.words[..]; + for index in start..end { + words[index] = !0; + } + self.clear_excess_bits(row); + } + + /// Clear excess bits in the final word of the row. + fn clear_excess_bits(&mut self, row: R) { + let num_bits_in_final_word = self.num_columns % WORD_BITS; + if num_bits_in_final_word > 0 { + let mask = (1 << num_bits_in_final_word) - 1; + let (_, end) = self.range(row); + let final_word_idx = end - 1; + self.words[final_word_idx] &= mask; + } + } + + /// Gets a slice of the underlying words. 
+ pub fn words(&self) -> &[Word] { + &self.words + } + /// Iterates through all the columns set to true in a given row of /// the matrix. pub fn iter<'a>(&'a self, row: R) -> BitIter<'a, C> { @@ -748,6 +808,12 @@ impl BitMatrix { marker: PhantomData, } } + + /// Returns the number of elements in `row`. + pub fn count(&self, row: R) -> usize { + let (start, end) = self.range(row); + self.words[start..end].iter().map(|e| e.count_ones() as usize).sum() + } } /// A fixed-column-size, variable-row-size 2D bit matrix with a moderately @@ -1057,6 +1123,7 @@ fn matrix_iter() { matrix.insert(2, 99); matrix.insert(4, 0); matrix.union_rows(3, 5); + matrix.insert_all_into_row(6); let expected = [99]; let mut iter = expected.iter(); @@ -1068,6 +1135,7 @@ fn matrix_iter() { let expected = [22, 75]; let mut iter = expected.iter(); + assert_eq!(matrix.count(3), expected.len()); for i in matrix.iter(3) { let j = *iter.next().unwrap(); assert_eq!(i, j); @@ -1076,6 +1144,7 @@ fn matrix_iter() { let expected = [0]; let mut iter = expected.iter(); + assert_eq!(matrix.count(4), expected.len()); for i in matrix.iter(4) { let j = *iter.next().unwrap(); assert_eq!(i, j); @@ -1084,11 +1153,24 @@ fn matrix_iter() { let expected = [22, 75]; let mut iter = expected.iter(); + assert_eq!(matrix.count(5), expected.len()); for i in matrix.iter(5) { let j = *iter.next().unwrap(); assert_eq!(i, j); } assert!(iter.next().is_none()); + + assert_eq!(matrix.count(6), 100); + let mut count = 0; + for (idx, i) in matrix.iter(6).enumerate() { + assert_eq!(idx, i); + count += 1; + } + assert_eq!(count, 100); + + if let Some(i) = matrix.iter(7).next() { + panic!("expected no elements in row, but contains element {:?}", i); + } } #[test] diff --git a/src/librustc_data_structures/stable_hasher.rs b/src/librustc_data_structures/stable_hasher.rs index 270d952062764..0c81c27a96ee5 100644 --- a/src/librustc_data_structures/stable_hasher.rs +++ b/src/librustc_data_structures/stable_hasher.rs @@ -503,6 +503,16 @@ 
impl HashStable for bit_set::BitSet } } +impl HashStable +for bit_set::BitMatrix +{ + fn hash_stable(&self, + ctx: &mut CTX, + hasher: &mut StableHasher) { + self.words().hash_stable(ctx, hasher); + } +} + impl_stable_hash_via_hash!(::std::path::Path); impl_stable_hash_via_hash!(::std::path::PathBuf); From 6680d03d14c60e7e7d4b7e0347b8829151855854 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Fri, 31 May 2019 16:53:27 -0700 Subject: [PATCH 25/32] Use BitMatrix for storage conflicts --- src/librustc/mir/mod.rs | 6 ++-- src/librustc/ty/layout.rs | 9 ++--- src/librustc_mir/transform/generator.rs | 46 +++++++++++++------------ 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 149d067204fa2..3e9f7b51cb1c5 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -9,7 +9,7 @@ use crate::hir::def_id::DefId; use crate::hir::{self, InlineAsm as HirInlineAsm}; use crate::mir::interpret::{ConstValue, InterpError, Scalar}; use crate::mir::visit::MirVisitable; -use rustc_data_structures::bit_set::BitSet; +use rustc_data_structures::bit_set::BitMatrix; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::graph::dominators::{dominators, Dominators}; use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors}; @@ -3008,7 +3008,7 @@ pub struct GeneratorLayout<'tcx> { /// Which saved locals are storage-live at the same time. Locals that do not /// have conflicts with each other are allowed to overlap in the computed /// layout. - pub storage_conflicts: IndexVec>, + pub storage_conflicts: BitMatrix, /// Names and scopes of all the stored generator locals. 
/// NOTE(tmandry) This is *strictly* a temporary hack for codegen @@ -3586,7 +3586,7 @@ impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal { } } -impl<'tcx, T: Idx> TypeFoldable<'tcx> for BitSet { +impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { self.clone() } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 74f352eb3be6f..279784ef701b1 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -678,12 +678,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // Next, check every pair of eligible locals to see if they // conflict. - for (local_a, conflicts_a) in info.storage_conflicts.iter_enumerated() { + for local_a in info.storage_conflicts.rows() { + let conflicts_a = info.storage_conflicts.count(local_a); if ineligible_locals.contains(local_a) { continue; } - for local_b in conflicts_a.iter() { + for local_b in info.storage_conflicts.iter(local_a) { // local_a and local_b are storage live at the same time, therefore they // cannot overlap in the generator layout. The only way to guarantee // this is if they are in the same variant, or one is ineligible @@ -697,8 +698,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // If they conflict, we will choose one to make ineligible. // This is not always optimal; it's just a greedy heuristic // that seems to produce good results most of the time. 
- let conflicts_b = &info.storage_conflicts[local_b]; - let (remove, other) = if conflicts_a.count() > conflicts_b.count() { + let conflicts_b = info.storage_conflicts.count(local_b); + let (remove, other) = if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index 1bcffcfb4d298..e9adb976c2892 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -59,7 +59,7 @@ use rustc::ty::layout::VariantIdx; use rustc::ty::subst::SubstsRef; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use rustc_data_structures::bit_set::BitSet; +use rustc_data_structures::bit_set::{BitSet, BitMatrix}; use std::borrow::Cow; use std::iter; use std::mem; @@ -408,7 +408,7 @@ struct LivenessInfo { /// For every saved local, the set of other saved locals that are /// storage-live at the same time as this local. We cannot overlap locals in /// the layout which have conflicting storage. - storage_conflicts: IndexVec>, + storage_conflicts: BitMatrix, /// For every suspending block, the locals which are storage-live across /// that suspension point. @@ -551,7 +551,9 @@ fn compute_storage_conflicts( ignored: &StorageIgnored, storage_live: DataflowResults<'tcx, MaybeStorageLive<'mir, 'tcx>>, storage_live_analysis: MaybeStorageLive<'mir, 'tcx>, -) -> IndexVec> { +) -> BitMatrix { + assert_eq!(body.local_decls.len(), ignored.0.domain_size()); + assert_eq!(body.local_decls.len(), stored_locals.domain_size()); debug!("compute_storage_conflicts({:?})", body.span); debug!("ignored = {:?}", ignored.0); @@ -564,18 +566,15 @@ fn compute_storage_conflicts( // liveness. Those that do must be in the same variant to remain candidates. // FIXME(tmandry): Consider using sparse bitsets here once we have good // benchmarks for generators. 
- let mut local_conflicts: IndexVec = - // Add conflicts for every ineligible local. - iter::repeat(ineligible_locals.clone()) - .take(body.local_decls.len()) - .collect(); + let mut local_conflicts: BitMatrix = + BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()); for_each_location(body, &storage_live_analysis, &storage_live, |state, loc| { let mut eligible_storage_live = state.clone().to_dense(); eligible_storage_live.intersect(&stored_locals); for local in eligible_storage_live.iter() { - local_conflicts[local].union(&eligible_storage_live); + local_conflicts.union_row_with(&eligible_storage_live, local); } if eligible_storage_live.count() > 1 { @@ -588,19 +587,22 @@ fn compute_storage_conflicts( // However, in practice these bitsets are not usually large. The layout code // also needs to keep track of how many conflicts each local has, so it's // simpler to keep it this way for now. - let storage_conflicts: IndexVec = stored_locals - .iter() - .map(|local_a| { - if ineligible_locals.contains(local_a) { - // Conflicts with everything. - BitSet::new_filled(stored_locals.count()) - } else { - // Keep overlap information only for stored locals. - renumber_bitset(&local_conflicts[local_a], stored_locals) + let mut storage_conflicts = BitMatrix::new(stored_locals.count(), stored_locals.count()); + for (idx_a, local_a) in stored_locals.iter().enumerate() { + let saved_local_a = GeneratorSavedLocal::new(idx_a); + if ineligible_locals.contains(local_a) { + // Conflicts with everything. + storage_conflicts.insert_all_into_row(saved_local_a); + } else { + // Keep overlap information only for stored locals. 
+ for (idx_b, local_b) in stored_locals.iter().enumerate() { + let saved_local_b = GeneratorSavedLocal::new(idx_b); + if local_conflicts.contains(local_a, local_b) { + storage_conflicts.insert(saved_local_a, saved_local_b); + } } - }) - .collect(); - + } + } storage_conflicts } @@ -700,7 +702,7 @@ fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, variant_fields.push(fields); } debug!("generator variant_fields = {:?}", variant_fields); - debug!("generator storage_conflicts = {:?}", storage_conflicts); + debug!("generator storage_conflicts = {:#?}", storage_conflicts); let layout = GeneratorLayout { field_tys: tys, From fbdff56f4ba2383c9d4bea58531dea66f5b2afa6 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Fri, 31 May 2019 20:51:07 -0700 Subject: [PATCH 26/32] Use DataflowResultsConsumer and remove dataflow::for_each_location --- src/librustc_mir/dataflow/at_location.rs | 5 + src/librustc_mir/dataflow/mod.rs | 56 ---------- src/librustc_mir/transform/generator.rs | 126 ++++++++++++++++------- 3 files changed, 94 insertions(+), 93 deletions(-) diff --git a/src/librustc_mir/dataflow/at_location.rs b/src/librustc_mir/dataflow/at_location.rs index d43fa4257e06c..9cba34b425350 100644 --- a/src/librustc_mir/dataflow/at_location.rs +++ b/src/librustc_mir/dataflow/at_location.rs @@ -131,6 +131,11 @@ where curr_state.subtract(&self.stmt_kill); f(curr_state.iter()); } + + /// Returns a bitset of the elements present in the current state. + pub fn as_dense(&self) -> &BitSet { + &self.curr_state + } } impl<'tcx, BD> FlowsAtLocation for FlowAtLocation<'tcx, BD> diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index 03a48a38c4a6d..8e2068269ceaa 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -380,62 +380,6 @@ pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location, gen_set.to_dense() } -/// Calls `f` with the dataflow state at every location in `mir`. 
-/// Ignores blocks that terminate in `unreachable`. -pub fn for_each_location<'tcx, T: BitDenotation<'tcx>>( - mir: &Body<'tcx>, - analysis: &T, - result: &DataflowResults<'tcx, T>, - mut f: impl FnMut(&HybridBitSet, Location) -) { - for (block, bb_data) in mir.basic_blocks().iter_enumerated() { - if bb_data.is_unreachable() { - continue; - } - for_each_block_location(mir, block, bb_data, analysis, result, &mut f); - } -} - -fn for_each_block_location<'tcx, T: BitDenotation<'tcx>>( - mir: &Body<'tcx>, - block: BasicBlock, - bb_data: &BasicBlockData<'tcx>, - analysis: &T, - result: &DataflowResults<'tcx, T>, - f: &mut impl FnMut(&HybridBitSet, Location) -) { - let statements = &bb_data.statements; - - let mut on_entry = result.sets().on_entry_set_for(block.index()).to_owned(); - let mut kill_set = on_entry.to_hybrid(); - let mut gen_set = kill_set.clone(); - - { - let mut sets = BlockSets { - on_entry: &mut on_entry, - kill_set: &mut kill_set, - gen_set: &mut gen_set, - }; - // FIXME: This location is technically wrong, but there isn't a way to - // denote the start of a block. 
- f(sets.gen_set, Location { block, statement_index: 0 }); - - for statement_index in 0..statements.len() { - let loc = Location { block, statement_index }; - analysis.before_statement_effect(&mut sets, loc); - f(sets.gen_set, loc); - analysis.statement_effect(&mut sets, loc); - f(sets.gen_set, loc); - } - - let term_loc = Location { block, statement_index: mir[block].statements.len() }; - analysis.before_terminator_effect(&mut sets, term_loc); - f(sets.gen_set, term_loc); - analysis.terminator_effect(&mut sets, term_loc); - f(sets.gen_set, term_loc); - } -} - pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation<'tcx> { flow_state: DataflowState<'tcx, O>, diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index e9adb976c2892..9dc9f8fdf72a9 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -66,8 +66,8 @@ use std::mem; use crate::transform::{MirPass, MirSource}; use crate::transform::simplify; use crate::transform::no_landing_pads::no_landing_pads; -use crate::dataflow::{DataflowResults}; -use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location, for_each_location}; +use crate::dataflow::{DataflowResults, DataflowResultsConsumer, FlowAtLocation}; +use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location}; use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals}; use crate::util::dump_mir; use crate::util::liveness; @@ -541,6 +541,25 @@ fn locals_live_across_suspend_points( } } +/// Renumbers the items present in `stored_locals` and applies the renumbering +/// to 'input`. +/// +/// For example, if `stored_locals = [1, 3, 5]`, this would be renumbered to +/// `[0, 1, 2]`. Thus, if `input = [3, 5]` we would return `[1, 2]`. 
+fn renumber_bitset(input: &BitSet, stored_locals: &liveness::LiveVarSet) +-> BitSet { + assert!(stored_locals.superset(&input), "{:?} not a superset of {:?}", stored_locals, input); + let mut out = BitSet::new_empty(stored_locals.count()); + for (idx, local) in stored_locals.iter().enumerate() { + let saved_local = GeneratorSavedLocal::from(idx); + if input.contains(local) { + out.insert(saved_local); + } + } + debug!("renumber_bitset({:?}, {:?}) => {:?}", input, stored_locals, out); + out +} + /// For every saved local, looks for which locals are StorageLive at the same /// time. Generates a bitset for every local of all the other locals that may be /// StorageLive simultaneously with that local. This is used in the layout @@ -550,7 +569,7 @@ fn compute_storage_conflicts( stored_locals: &liveness::LiveVarSet, ignored: &StorageIgnored, storage_live: DataflowResults<'tcx, MaybeStorageLive<'mir, 'tcx>>, - storage_live_analysis: MaybeStorageLive<'mir, 'tcx>, + _storage_live_analysis: MaybeStorageLive<'mir, 'tcx>, ) -> BitMatrix { assert_eq!(body.local_decls.len(), ignored.0.domain_size()); assert_eq!(body.local_decls.len(), stored_locals.domain_size()); @@ -562,26 +581,18 @@ fn compute_storage_conflicts( let mut ineligible_locals = ignored.0.clone(); ineligible_locals.intersect(&stored_locals); - // Of our remaining candidates, find out if any have overlapping storage - // liveness. Those that do must be in the same variant to remain candidates. - // FIXME(tmandry): Consider using sparse bitsets here once we have good - // benchmarks for generators. 
- let mut local_conflicts: BitMatrix = - BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()); - - for_each_location(body, &storage_live_analysis, &storage_live, |state, loc| { - let mut eligible_storage_live = state.clone().to_dense(); - eligible_storage_live.intersect(&stored_locals); - - for local in eligible_storage_live.iter() { - local_conflicts.union_row_with(&eligible_storage_live, local); - } - - if eligible_storage_live.count() > 1 { - trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live); - } - }); + // Compute the storage conflicts for all eligible locals. + let mut visitor = StorageConflictVisitor { + body, + stored_locals: &stored_locals, + local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()) + }; + let mut state = FlowAtLocation::new(storage_live); + visitor.analyze_results(&mut state); + let local_conflicts = visitor.local_conflicts; + // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal). + // // NOTE: Today we store a full conflict bitset for every local. Technically // this is twice as many bits as we need, since the relation is symmetric. // However, in practice these bitsets are not usually large. The layout code @@ -606,23 +617,64 @@ fn compute_storage_conflicts( storage_conflicts } -/// Renumbers the items present in `stored_locals` and applies the renumbering -/// to 'input`. -/// -/// For example, if `stored_locals = [1, 3, 5]`, this would be renumbered to -/// `[0, 1, 2]`. Thus, if `input = [3, 5]` we would return `[1, 2]`. 
-fn renumber_bitset(input: &BitSet, stored_locals: &liveness::LiveVarSet) --> BitSet { - assert!(stored_locals.superset(&input), "{:?} not a superset of {:?}", stored_locals, input); - let mut out = BitSet::new_empty(stored_locals.count()); - for (idx, local) in stored_locals.iter().enumerate() { - let saved_local = GeneratorSavedLocal::from(idx); - if input.contains(local) { - out.insert(saved_local); +struct StorageConflictVisitor<'body, 'tcx: 'body, 's> { + body: &'body Body<'tcx>, + stored_locals: &'s liveness::LiveVarSet, + // FIXME(tmandry): Consider using sparse bitsets here once we have good + // benchmarks for generators. + local_conflicts: BitMatrix, +} + +impl<'body, 'tcx: 'body, 's> DataflowResultsConsumer<'body, 'tcx> +for StorageConflictVisitor<'body, 'tcx, 's> { + type FlowState = FlowAtLocation<'tcx, MaybeStorageLive<'body, 'tcx>>; + + fn body(&self) -> &'body Body<'tcx> { + self.body + } + + fn visit_block_entry(&mut self, + block: BasicBlock, + flow_state: &Self::FlowState) { + // statement_index is only used for logging, so this is fine. + self.apply_state(flow_state, Location { block, statement_index: 0 }); + } + + fn visit_statement_entry(&mut self, + loc: Location, + _stmt: &Statement<'tcx>, + flow_state: &Self::FlowState) { + self.apply_state(flow_state, loc); + } + + fn visit_terminator_entry(&mut self, + loc: Location, + _term: &Terminator<'tcx>, + flow_state: &Self::FlowState) { + self.apply_state(flow_state, loc); + } +} + +impl<'body, 'tcx: 'body, 's> StorageConflictVisitor<'body, 'tcx, 's> { + fn apply_state(&mut self, + flow_state: &FlowAtLocation<'tcx, MaybeStorageLive<'body, 'tcx>>, + loc: Location) { + // Ignore unreachable blocks. 
+ if self.body.basic_blocks()[loc.block].is_unreachable() { + return; + } + + let mut eligible_storage_live = flow_state.as_dense().clone(); + eligible_storage_live.intersect(&self.stored_locals); + + for local in eligible_storage_live.iter() { + self.local_conflicts.union_row_with(&eligible_storage_live, local); + } + + if eligible_storage_live.count() > 1 { + trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live); } } - debug!("renumber_bitset({:?}, {:?}) => {:?}", input, stored_locals, out); - out } fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, From c158d1ca0c9cf8c7caab7b5d1d52fbf970ce9c90 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Fri, 31 May 2019 21:30:08 -0700 Subject: [PATCH 27/32] Extract univariant_uninterned as method --- src/librustc/ty/layout.rs | 458 +++++++++++++++++++------------------- 1 file changed, 234 insertions(+), 224 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 279784ef701b1..d3caedb50ae2c 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -215,260 +215,268 @@ pub struct LayoutCx<'tcx, C> { pub param_env: ty::ParamEnv<'tcx>, } +#[derive(Copy, Clone, Debug)] +enum StructKind { + /// A tuple, closure, or univariant which cannot be coerced to unsized. + AlwaysSized, + /// A univariant, the last field of which may be coerced to unsized. + MaybeUnsized, + /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag). 
+ Prefixed(Size, Align), +} + impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { - fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { - let tcx = self.tcx; - let param_env = self.param_env; + fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails { let dl = self.data_layout(); - let scalar_unit = |value: Primitive| { - let bits = value.size(dl).bits(); - assert!(bits <= 128); - Scalar { - value, - valid_range: 0..=(!0 >> (128 - bits)) - } - }; - let scalar = |value: Primitive| { - tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) - }; - let scalar_pair = |a: Scalar, b: Scalar| { - let b_align = b.value.align(dl); - let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); - let b_offset = a.value.size(dl).align_to(b_align.abi); - let size = (b_offset + b.value.size(dl)).align_to(align.abi); - LayoutDetails { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldPlacement::Arbitrary { - offsets: vec![Size::ZERO, b_offset], - memory_index: vec![0, 1] - }, - abi: Abi::ScalarPair(a, b), - align, - size - } - }; - - #[derive(Copy, Clone, Debug)] - enum StructKind { - /// A tuple, closure, or univariant which cannot be coerced to unsized. - AlwaysSized, - /// A univariant, the last field of which may be coerced to unsized. - MaybeUnsized, - /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag). 
- Prefixed(Size, Align), + let b_align = b.value.align(dl); + let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); + let b_offset = a.value.size(dl).align_to(b_align.abi); + let size = (b_offset + b.value.size(dl)).align_to(align.abi); + LayoutDetails { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldPlacement::Arbitrary { + offsets: vec![Size::ZERO, b_offset], + memory_index: vec![0, 1] + }, + abi: Abi::ScalarPair(a, b), + align, + size } + } - let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| { - let packed = repr.packed(); - if packed && repr.align > 0 { - bug!("struct cannot be packed and aligned"); - } + fn univariant_uninterned(&self, + ty: Ty<'tcx>, + fields: &[TyLayout<'_>], + repr: &ReprOptions, + kind: StructKind) -> Result> { + let dl = self.data_layout(); + let packed = repr.packed(); + if packed && repr.align > 0 { + bug!("struct cannot be packed and aligned"); + } - let pack = Align::from_bytes(repr.pack as u64).unwrap(); + let pack = Align::from_bytes(repr.pack as u64).unwrap(); - let mut align = if packed { - dl.i8_align - } else { - dl.aggregate_align - }; + let mut align = if packed { + dl.i8_align + } else { + dl.aggregate_align + }; - let mut sized = true; - let mut offsets = vec![Size::ZERO; fields.len()]; - let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); + let mut sized = true; + let mut offsets = vec![Size::ZERO; fields.len()]; + let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - let mut optimize = !repr.inhibit_struct_field_reordering_opt(); - if let StructKind::Prefixed(_, align) = kind { - optimize &= align.bytes() == 1; - } + let mut optimize = !repr.inhibit_struct_field_reordering_opt(); + if let StructKind::Prefixed(_, align) = kind { + optimize &= align.bytes() == 1; + } - if optimize { - let end = if let StructKind::MaybeUnsized = kind { - fields.len() - 1 - } else { - fields.len() - }; - let optimizing = &mut 
inverse_memory_index[..end]; - let field_align = |f: &TyLayout<'_>| { - if packed { f.align.abi.min(pack) } else { f.align.abi } - }; - match kind { - StructKind::AlwaysSized | - StructKind::MaybeUnsized => { - optimizing.sort_by_key(|&x| { - // Place ZSTs first to avoid "interesting offsets", - // especially with only one or two non-ZST fields. - let f = &fields[x as usize]; - (!f.is_zst(), cmp::Reverse(field_align(f))) - }); - } - StructKind::Prefixed(..) => { - optimizing.sort_by_key(|&x| field_align(&fields[x as usize])); - } + if optimize { + let end = if let StructKind::MaybeUnsized = kind { + fields.len() - 1 + } else { + fields.len() + }; + let optimizing = &mut inverse_memory_index[..end]; + let field_align = |f: &TyLayout<'_>| { + if packed { f.align.abi.min(pack) } else { f.align.abi } + }; + match kind { + StructKind::AlwaysSized | + StructKind::MaybeUnsized => { + optimizing.sort_by_key(|&x| { + // Place ZSTs first to avoid "interesting offsets", + // especially with only one or two non-ZST fields. + let f = &fields[x as usize]; + (!f.is_zst(), cmp::Reverse(field_align(f))) + }); + } + StructKind::Prefixed(..) => { + optimizing.sort_by_key(|&x| field_align(&fields[x as usize])); } } + } - // inverse_memory_index holds field indices by increasing memory offset. - // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. - // We now write field offsets to the corresponding offset slot; - // field 5 with offset 0 puts 0 in offsets[5]. - // At the bottom of this function, we use inverse_memory_index to produce memory_index. + // inverse_memory_index holds field indices by increasing memory offset. + // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. + // We now write field offsets to the corresponding offset slot; + // field 5 with offset 0 puts 0 in offsets[5]. + // At the bottom of this function, we use inverse_memory_index to produce memory_index. 
- let mut offset = Size::ZERO; + let mut offset = Size::ZERO; - if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - let prefix_align = if packed { - prefix_align.min(pack) - } else { - prefix_align - }; - align = align.max(AbiAndPrefAlign::new(prefix_align)); - offset = prefix_size.align_to(prefix_align); + if let StructKind::Prefixed(prefix_size, prefix_align) = kind { + let prefix_align = if packed { + prefix_align.min(pack) + } else { + prefix_align + }; + align = align.max(AbiAndPrefAlign::new(prefix_align)); + offset = prefix_size.align_to(prefix_align); + } + + for &i in &inverse_memory_index { + let field = fields[i as usize]; + if !sized { + bug!("univariant: field #{} of `{}` comes after unsized field", + offsets.len(), ty); } - for &i in &inverse_memory_index { - let field = fields[i as usize]; - if !sized { - bug!("univariant: field #{} of `{}` comes after unsized field", - offsets.len(), ty); - } + if field.is_unsized() { + sized = false; + } - if field.is_unsized() { - sized = false; - } + // Invariant: offset < dl.obj_size_bound() <= 1<<61 + let field_align = if packed { + field.align.min(AbiAndPrefAlign::new(pack)) + } else { + field.align + }; + offset = offset.align_to(field_align.abi); + align = align.max(field_align); - // Invariant: offset < dl.obj_size_bound() <= 1<<61 - let field_align = if packed { - field.align.min(AbiAndPrefAlign::new(pack)) - } else { - field.align - }; - offset = offset.align_to(field_align.abi); - align = align.max(field_align); + debug!("univariant offset: {:?} field: {:#?}", offset, field); + offsets[i as usize] = offset; - debug!("univariant offset: {:?} field: {:#?}", offset, field); - offsets[i as usize] = offset; + offset = offset.checked_add(field.size, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + } - offset = offset.checked_add(field.size, dl) - .ok_or(LayoutError::SizeOverflow(ty))?; - } + if repr.align > 0 { + let repr_align = repr.align as u64; + align = 
align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); + debug!("univariant repr_align: {:?}", repr_align); + } - if repr.align > 0 { - let repr_align = repr.align as u64; - align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); - debug!("univariant repr_align: {:?}", repr_align); - } + debug!("univariant min_size: {:?}", offset); + let min_size = offset; - debug!("univariant min_size: {:?}", offset); - let min_size = offset; + // As stated above, inverse_memory_index holds field indices by increasing offset. + // This makes it an already-sorted view of the offsets vec. + // To invert it, consider: + // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. + // Field 5 would be the first element, so memory_index is i: + // Note: if we didn't optimize, it's already right. - // As stated above, inverse_memory_index holds field indices by increasing offset. - // This makes it an already-sorted view of the offsets vec. - // To invert it, consider: - // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. - // Field 5 would be the first element, so memory_index is i: - // Note: if we didn't optimize, it's already right. + let mut memory_index; + if optimize { + memory_index = vec![0; inverse_memory_index.len()]; - let mut memory_index; - if optimize { - memory_index = vec![0; inverse_memory_index.len()]; + for i in 0..inverse_memory_index.len() { + memory_index[inverse_memory_index[i] as usize] = i as u32; + } + } else { + memory_index = inverse_memory_index; + } - for i in 0..inverse_memory_index.len() { - memory_index[inverse_memory_index[i] as usize] = i as u32; - } - } else { - memory_index = inverse_memory_index; - } - - let size = min_size.align_to(align.abi); - let mut abi = Abi::Aggregate { sized }; - - // Unpack newtype ABIs and find scalar pairs. - if sized && size.bytes() > 0 { - // All other fields must be ZSTs, and we need them to all start at 0. 
- let mut zst_offsets = - offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst()); - if zst_offsets.all(|(_, o)| o.bytes() == 0) { - let mut non_zst_fields = - fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); - - match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { - // We have exactly one non-ZST field. - (Some((i, field)), None, None) => { - // Field fills the struct and it has a scalar or scalar pair ABI. - if offsets[i].bytes() == 0 && - align.abi == field.align.abi && - size == field.size { - match field.abi { - // For plain scalars, or vectors of them, we can't unpack - // newtypes for `#[repr(C)]`, as that affects C ABIs. - Abi::Scalar(_) | Abi::Vector { .. } if optimize => { - abi = field.abi.clone(); - } - // But scalar pairs are Rust-specific and get - // treated as aggregates by C ABIs anyway. - Abi::ScalarPair(..) => { - abi = field.abi.clone(); - } - _ => {} + let size = min_size.align_to(align.abi); + let mut abi = Abi::Aggregate { sized }; + + // Unpack newtype ABIs and find scalar pairs. + if sized && size.bytes() > 0 { + // All other fields must be ZSTs, and we need them to all start at 0. + let mut zst_offsets = + offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst()); + if zst_offsets.all(|(_, o)| o.bytes() == 0) { + let mut non_zst_fields = + fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); + + match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { + // We have exactly one non-ZST field. + (Some((i, field)), None, None) => { + // Field fills the struct and it has a scalar or scalar pair ABI. + if offsets[i].bytes() == 0 && + align.abi == field.align.abi && + size == field.size { + match field.abi { + // For plain scalars, or vectors of them, we can't unpack + // newtypes for `#[repr(C)]`, as that affects C ABIs. + Abi::Scalar(_) | Abi::Vector { .. 
} if optimize => { + abi = field.abi.clone(); + } + // But scalar pairs are Rust-specific and get + // treated as aggregates by C ABIs anyway. + Abi::ScalarPair(..) => { + abi = field.abi.clone(); } + _ => {} } } + } - // Two non-ZST fields, and they're both scalars. - (Some((i, &TyLayout { - details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, .. - })), Some((j, &TyLayout { - details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, .. - })), None) => { - // Order by the memory placement, not source order. - let ((i, a), (j, b)) = if offsets[i] < offsets[j] { - ((i, a), (j, b)) - } else { - ((j, b), (i, a)) - }; - let pair = scalar_pair(a.clone(), b.clone()); - let pair_offsets = match pair.fields { - FieldPlacement::Arbitrary { - ref offsets, - ref memory_index - } => { - assert_eq!(memory_index, &[0, 1]); - offsets - } - _ => bug!() - }; - if offsets[i] == pair_offsets[0] && - offsets[j] == pair_offsets[1] && - align == pair.align && - size == pair.size { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). - abi = pair.abi; + // Two non-ZST fields, and they're both scalars. + (Some((i, &TyLayout { + details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, .. + })), Some((j, &TyLayout { + details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, .. + })), None) => { + // Order by the memory placement, not source order. + let ((i, a), (j, b)) = if offsets[i] < offsets[j] { + ((i, a), (j, b)) + } else { + ((j, b), (i, a)) + }; + let pair = self.scalar_pair(a.clone(), b.clone()); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets } + _ => bug!() + }; + if offsets[i] == pair_offsets[0] && + offsets[j] == pair_offsets[1] && + align == pair.align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). 
+ abi = pair.abi; } - - _ => {} } + + _ => {} } } + } - if sized && fields.iter().any(|f| f.abi.is_uninhabited()) { - abi = Abi::Uninhabited; - } + if sized && fields.iter().any(|f| f.abi.is_uninhabited()) { + abi = Abi::Uninhabited; + } - Ok(LayoutDetails { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldPlacement::Arbitrary { - offsets, - memory_index - }, - abi, - align, - size - }) + Ok(LayoutDetails { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldPlacement::Arbitrary { + offsets, + memory_index + }, + abi, + align, + size + }) + } + + fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { + let tcx = self.tcx; + let param_env = self.param_env; + let dl = self.data_layout(); + let scalar_unit = |value: Primitive| { + let bits = value.size(dl).bits(); + assert!(bits <= 128); + Scalar { + value, + valid_range: 0..=(!0 >> (128 - bits)) + } }; + let scalar = |value: Primitive| { + tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) + }; + let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| { - Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?)) + Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?)) }; debug_assert!(!ty.has_infer_types()); @@ -540,7 +548,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }; // Effectively a (ptr, meta) tuple. - tcx.intern_layout(scalar_pair(data_ptr, metadata)) + tcx.intern_layout(self.scalar_pair(data_ptr, metadata)) } // Arrays and slices. @@ -605,7 +613,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)? } ty::Dynamic(..) | ty::Foreign(..) 
=> { - let mut unit = univariant_uninterned(&[], &ReprOptions::default(), + let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(), StructKind::AlwaysSized)?; match unit.abi { Abi::Aggregate { ref mut sized } => *sized = false, @@ -730,7 +738,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let prefix_tys = substs.prefix_tys(def_id, tcx) .chain(iter::once(substs.discr_ty(tcx))) .chain(promoted_tys); - let prefix = univariant_uninterned( + let prefix = self.univariant_uninterned( + ty, &prefix_tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), StructKind::AlwaysSized)?; @@ -787,7 +796,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }) .map(|local| subst_field(info.field_tys[*local])); - let mut variant = univariant_uninterned( + let mut variant = self.univariant_uninterned( + ty, &variant_only_tys .map(|ty| self.layout_of(ty)) .collect::, _>>()?, @@ -1049,7 +1059,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { else { StructKind::AlwaysSized } }; - let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?; + let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; st.variants = Variants::Single { index: v }; let (start, end) = self.tcx.layout_scalar_valid_range(def.did); match st.abi { @@ -1128,7 +1138,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut align = dl.aggregate_align; let st = variants.iter_enumerated().map(|(j, v)| { - let mut st = univariant_uninterned(v, + let mut st = self.univariant_uninterned(ty, v, &def.repr, StructKind::AlwaysSized)?; st.variants = Variants::Single { index: j }; @@ -1236,7 +1246,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // Create the set of structs that represent each variant. 
let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| { - let mut st = univariant_uninterned(&field_layouts, + let mut st = self.univariant_uninterned(ty, &field_layouts, &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?; st.variants = Variants::Single { index: i }; // Find the first field we can't move later @@ -1368,7 +1378,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } } if let Some((prim, offset)) = common_prim { - let pair = scalar_pair(tag.clone(), scalar_unit(prim)); + let pair = self.scalar_pair(tag.clone(), scalar_unit(prim)); let pair_offsets = match pair.fields { FieldPlacement::Arbitrary { ref offsets, From 9f3ad881dfd28ae918a29ee3c9501e97f8e60e21 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Fri, 31 May 2019 21:59:37 -0700 Subject: [PATCH 28/32] Extract generator_layout as a method --- src/librustc/ty/layout.rs | 525 ++++++++++++++++++++------------------ 1 file changed, 271 insertions(+), 254 deletions(-) diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d3caedb50ae2c..caff75a36cd5e 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -14,7 +14,8 @@ use std::ops::Bound; use crate::hir; use crate::ich::StableHashingContext; -use crate::mir::GeneratorSavedLocal; +use crate::mir::{GeneratorLayout, GeneratorSavedLocal}; +use crate::ty::GeneratorSubsts; use crate::ty::subst::Subst; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -622,259 +623,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { tcx.intern_layout(unit) } - ty::Generator(def_id, ref substs, _) => { - // When laying out generators, we divide our saved local fields - // into two categories: overlap-eligible and overlap-ineligible. - // - // Those fields which are ineligible for overlap go in a - // "prefix" at the beginning of the layout, and always have - // space reserved for them. 
- // - // Overlap-eligible fields are only assigned to one variant, so - // we lay those fields out for each variant and put them right - // after the prefix. - // - // Finally, in the layout details, we point to the fields - // from the variants they are assigned to. It is possible for - // some fields to be included in multiple variants. No field - // ever "moves around" in the layout; its offset is always the - // same. - // - // Also included in the layout are the upvars and the - // discriminant. These are included as fields on the "outer" - // layout; they are not part of any variant. - - let info = tcx.generator_layout(def_id); - let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) }; - - #[derive(Clone, Debug, PartialEq)] - enum SavedLocalEligibility { - Unassigned, - Assigned(VariantIdx), - // FIXME: Use newtype_index so we aren't wasting bytes - Ineligible(Option), - } - use SavedLocalEligibility::*; - - let mut assignments: IndexVec = - IndexVec::from_elem_n(Unassigned, info.field_tys.len()); - - // The saved locals not eligible for overlap. These will get - // "promoted" to the prefix of our generator. - let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); - - // Figure out which of our saved locals are fields in only - // one variant. The rest are deemed ineligible for overlap. - for (variant_index, fields) in info.variant_fields.iter_enumerated() { - for local in fields { - match assignments[*local] { - Unassigned => { - assignments[*local] = Assigned(variant_index); - } - Assigned(idx) => { - // We've already seen this local at another suspension - // point, so it is no longer a candidate. - trace!("removing local {:?} in >1 variant ({:?}, {:?})", - local, variant_index, idx); - ineligible_locals.insert(*local); - assignments[*local] = Ineligible(None); - } - Ineligible(_) => {}, - } - } - } - - // Next, check every pair of eligible locals to see if they - // conflict. 
- for local_a in info.storage_conflicts.rows() { - let conflicts_a = info.storage_conflicts.count(local_a); - if ineligible_locals.contains(local_a) { - continue; - } - - for local_b in info.storage_conflicts.iter(local_a) { - // local_a and local_b are storage live at the same time, therefore they - // cannot overlap in the generator layout. The only way to guarantee - // this is if they are in the same variant, or one is ineligible - // (which means it is stored in every variant). - if ineligible_locals.contains(local_b) || - assignments[local_a] == assignments[local_b] - { - continue; - } - - // If they conflict, we will choose one to make ineligible. - // This is not always optimal; it's just a greedy heuristic - // that seems to produce good results most of the time. - let conflicts_b = info.storage_conflicts.count(local_b); - let (remove, other) = if conflicts_a > conflicts_b { - (local_a, local_b) - } else { - (local_b, local_a) - }; - ineligible_locals.insert(remove); - assignments[remove] = Ineligible(None); - trace!("removing local {:?} due to conflict with {:?}", remove, other); - } - } - - // Write down the order of our locals that will be promoted to - // the prefix. - { - let mut idx = 0u32; - for local in ineligible_locals.iter() { - assignments[local] = Ineligible(Some(idx)); - idx += 1; - } - } - debug!("generator saved local assignments: {:?}", assignments); - - // Build a prefix layout, including "promoting" all ineligible - // locals as part of the prefix. We compute the layout of all of - // these fields at once to get optimal packing. 
- let discr_index = substs.prefix_tys(def_id, tcx).count(); - let promoted_tys = - ineligible_locals.iter().map(|local| subst_field(info.field_tys[local])); - let prefix_tys = substs.prefix_tys(def_id, tcx) - .chain(iter::once(substs.discr_ty(tcx))) - .chain(promoted_tys); - let prefix = self.univariant_uninterned( - ty, - &prefix_tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, - &ReprOptions::default(), - StructKind::AlwaysSized)?; - let (prefix_size, prefix_align) = (prefix.size, prefix.align); - - let recompute_memory_index = |offsets: &Vec| -> Vec { - debug!("recompute_memory_index({:?})", offsets); - let mut inverse_index = (0..offsets.len() as u32).collect::>(); - inverse_index.sort_unstable_by_key(|i| offsets[*i as usize]); - - let mut index = vec![0; offsets.len()]; - for i in 0..index.len() { - index[inverse_index[i] as usize] = i as u32; - } - debug!("recompute_memory_index() => {:?}", index); - index - }; - - // Split the prefix layout into the "outer" fields (upvars and - // discriminant) and the "promoted" fields. Promoted fields will - // get included in each variant that requested them in - // GeneratorLayout. - debug!("prefix = {:#?}", prefix); - let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { - FieldPlacement::Arbitrary { offsets, memory_index } => { - let (offsets_a, offsets_b) = - offsets.split_at(discr_index + 1); - let (memory_index_a, memory_index_b) = - memory_index.split_at(discr_index + 1); - let outer_fields = FieldPlacement::Arbitrary { - offsets: offsets_a.to_vec(), - memory_index: recompute_memory_index(&memory_index_a.to_vec()) - }; - (outer_fields, - offsets_b.to_vec(), - recompute_memory_index(&memory_index_b.to_vec())) - } - _ => bug!(), - }; - - let mut size = prefix.size; - let mut align = prefix.align; - let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| { - // Only include overlap-eligible fields when we compute our variant layout. 
- let variant_only_tys = variant_fields - .iter() - .filter(|local| { - match assignments[**local] { - Unassigned => bug!(), - Assigned(v) if v == index => true, - Assigned(_) => bug!("assignment does not match variant"), - Ineligible(_) => false, - } - }) - .map(|local| subst_field(info.field_tys[*local])); - - let mut variant = self.univariant_uninterned( - ty, - &variant_only_tys - .map(|ty| self.layout_of(ty)) - .collect::, _>>()?, - &ReprOptions::default(), - StructKind::Prefixed(prefix_size, prefix_align.abi))?; - variant.variants = Variants::Single { index }; - - let (offsets, memory_index) = match variant.fields { - FieldPlacement::Arbitrary { offsets, memory_index } => - (offsets, memory_index), - _ => bug!(), - }; - - // Now, stitch the promoted and variant-only fields back - // together in the order they are mentioned by our - // GeneratorLayout. - let mut next_variant_field = 0; - let mut combined_offsets = Vec::new(); - let mut combined_memory_index = Vec::new(); - for local in variant_fields.iter() { - match assignments[*local] { - Unassigned => bug!(), - Assigned(_) => { - combined_offsets.push(offsets[next_variant_field]); - // Shift memory indices by the number of - // promoted fields, which all come first. We - // may not use all promoted fields in our - // variant but that's okay; we'll renumber them - // below. 
- combined_memory_index.push( - promoted_memory_index.len() as u32 + - memory_index[next_variant_field]); - next_variant_field += 1; - } - Ineligible(field_idx) => { - let field_idx = field_idx.unwrap() as usize; - combined_offsets.push(promoted_offsets[field_idx]); - combined_memory_index.push(promoted_memory_index[field_idx]); - } - } - } - variant.fields = FieldPlacement::Arbitrary { - offsets: combined_offsets, - memory_index: recompute_memory_index(&combined_memory_index), - }; - - size = size.max(variant.size); - align = align.max(variant.align); - Ok(variant) - }).collect::, _>>()?; - - let abi = if prefix.abi.is_uninhabited() || - variants.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited - } else { - Abi::Aggregate { sized: true } - }; - let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi { - Abi::Scalar(s) => s.clone(), - _ => bug!(), - }; - - let layout = tcx.intern_layout(LayoutDetails { - variants: Variants::Multiple { - discr, - discr_kind: DiscriminantKind::Tag, - discr_index, - variants, - }, - fields: outer_fields, - abi, - size, - align, - }); - debug!("generator layout ({:?}): {:#?}", ty, layout); - layout - } + ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?, ty::Closure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); @@ -1443,7 +1192,275 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } }) } +} + +/// Overlap eligibility and variant assignment for each GeneratorSavedLocal. +#[derive(Clone, Debug, PartialEq)] +enum SavedLocalEligibility { + Unassigned, + Assigned(VariantIdx), + // FIXME: Use newtype_index so we aren't wasting bytes + Ineligible(Option), +} + +// When laying out generators, we divide our saved local fields into two +// categories: overlap-eligible and overlap-ineligible. +// +// Those fields which are ineligible for overlap go in a "prefix" at the +// beginning of the layout, and always have space reserved for them. 
+// +// Overlap-eligible fields are only assigned to one variant, so we lay +// those fields out for each variant and put them right after the +// prefix. +// +// Finally, in the layout details, we point to the fields from the +// variants they are assigned to. It is possible for some fields to be +// included in multiple variants. No field ever "moves around" in the +// layout; its offset is always the same. +// +// Also included in the layout are the upvars and the discriminant. +// These are included as fields on the "outer" layout; they are not part +// of any variant. +impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { + /// Compute the eligibility and assignment of each local. + fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>) + -> (BitSet, IndexVec) { + use SavedLocalEligibility::*; + + let mut assignments: IndexVec = + IndexVec::from_elem_n(Unassigned, info.field_tys.len()); + + // The saved locals not eligible for overlap. These will get + // "promoted" to the prefix of our generator. + let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); + + // Figure out which of our saved locals are fields in only + // one variant. The rest are deemed ineligible for overlap. + for (variant_index, fields) in info.variant_fields.iter_enumerated() { + for local in fields { + match assignments[*local] { + Unassigned => { + assignments[*local] = Assigned(variant_index); + } + Assigned(idx) => { + // We've already seen this local at another suspension + // point, so it is no longer a candidate. + trace!("removing local {:?} in >1 variant ({:?}, {:?})", + local, variant_index, idx); + ineligible_locals.insert(*local); + assignments[*local] = Ineligible(None); + } + Ineligible(_) => {}, + } + } + } + + // Next, check every pair of eligible locals to see if they + // conflict. 
+ for local_a in info.storage_conflicts.rows() { + let conflicts_a = info.storage_conflicts.count(local_a); + if ineligible_locals.contains(local_a) { + continue; + } + + for local_b in info.storage_conflicts.iter(local_a) { + // local_a and local_b are storage live at the same time, therefore they + // cannot overlap in the generator layout. The only way to guarantee + // this is if they are in the same variant, or one is ineligible + // (which means it is stored in every variant). + if ineligible_locals.contains(local_b) || + assignments[local_a] == assignments[local_b] + { + continue; + } + + // If they conflict, we will choose one to make ineligible. + // This is not always optimal; it's just a greedy heuristic that + // seems to produce good results most of the time. + let conflicts_b = info.storage_conflicts.count(local_b); + let (remove, other) = if conflicts_a > conflicts_b { + (local_a, local_b) + } else { + (local_b, local_a) + }; + ineligible_locals.insert(remove); + assignments[remove] = Ineligible(None); + trace!("removing local {:?} due to conflict with {:?}", remove, other); + } + } + + // Write down the order of our locals that will be promoted to the prefix. + { + let mut idx = 0u32; + for local in ineligible_locals.iter() { + assignments[local] = Ineligible(Some(idx)); + idx += 1; + } + } + debug!("generator saved local assignments: {:?}", assignments); + + (ineligible_locals, assignments) + } + + /// Compute the full generator layout. 
+ fn generator_layout( + &self, + ty: Ty<'tcx>, + def_id: hir::def_id::DefId, + substs: &GeneratorSubsts<'tcx>, + ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { + use SavedLocalEligibility::*; + let tcx = self.tcx; + let recompute_memory_index = |offsets: &Vec| -> Vec { + debug!("recompute_memory_index({:?})", offsets); + let mut inverse_index = (0..offsets.len() as u32).collect::>(); + inverse_index.sort_unstable_by_key(|i| offsets[*i as usize]); + + let mut index = vec![0; offsets.len()]; + for i in 0..index.len() { + index[inverse_index[i] as usize] = i as u32; + } + debug!("recompute_memory_index() => {:?}", index); + index + }; + let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) }; + + let info = tcx.generator_layout(def_id); + let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info); + + // Build a prefix layout, including "promoting" all ineligible + // locals as part of the prefix. We compute the layout of all of + // these fields at once to get optimal packing. + let discr_index = substs.prefix_tys(def_id, tcx).count(); + let promoted_tys = + ineligible_locals.iter().map(|local| subst_field(info.field_tys[local])); + let prefix_tys = substs.prefix_tys(def_id, tcx) + .chain(iter::once(substs.discr_ty(tcx))) + .chain(promoted_tys); + let prefix = self.univariant_uninterned( + ty, + &prefix_tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, + &ReprOptions::default(), + StructKind::AlwaysSized)?; + let (prefix_size, prefix_align) = (prefix.size, prefix.align); + + // Split the prefix layout into the "outer" fields (upvars and + // discriminant) and the "promoted" fields. Promoted fields will + // get included in each variant that requested them in + // GeneratorLayout. 
+ debug!("prefix = {:#?}", prefix); + let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { + FieldPlacement::Arbitrary { offsets, memory_index } => { + let (offsets_a, offsets_b) = + offsets.split_at(discr_index + 1); + let (memory_index_a, memory_index_b) = + memory_index.split_at(discr_index + 1); + let outer_fields = FieldPlacement::Arbitrary { + offsets: offsets_a.to_vec(), + memory_index: recompute_memory_index(&memory_index_a.to_vec()) + }; + (outer_fields, + offsets_b.to_vec(), + recompute_memory_index(&memory_index_b.to_vec())) + } + _ => bug!(), + }; + + let mut size = prefix.size; + let mut align = prefix.align; + let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| { + // Only include overlap-eligible fields when we compute our variant layout. + let variant_only_tys = variant_fields + .iter() + .filter(|local| { + match assignments[**local] { + Unassigned => bug!(), + Assigned(v) if v == index => true, + Assigned(_) => bug!("assignment does not match variant"), + Ineligible(_) => false, + } + }) + .map(|local| subst_field(info.field_tys[*local])); + + let mut variant = self.univariant_uninterned( + ty, + &variant_only_tys + .map(|ty| self.layout_of(ty)) + .collect::, _>>()?, + &ReprOptions::default(), + StructKind::Prefixed(prefix_size, prefix_align.abi))?; + variant.variants = Variants::Single { index }; + + let (offsets, memory_index) = match variant.fields { + FieldPlacement::Arbitrary { offsets, memory_index } => (offsets, memory_index), + _ => bug!(), + }; + + // Now, stitch the promoted and variant-only fields back together in + // the order they are mentioned by our GeneratorLayout. 
+ let mut next_variant_field = 0; + let mut combined_offsets = Vec::new(); + let mut combined_memory_index = Vec::new(); + for local in variant_fields.iter() { + match assignments[*local] { + Unassigned => bug!(), + Assigned(_) => { + combined_offsets.push(offsets[next_variant_field]); + // Shift memory indices by the number of promoted + // fields, which all come first. We may not use all + // promoted fields in our variant but that's okay; we'll + // renumber them below. + combined_memory_index.push( + promoted_memory_index.len() as u32 + + memory_index[next_variant_field]); + next_variant_field += 1; + } + Ineligible(field_idx) => { + let field_idx = field_idx.unwrap() as usize; + combined_offsets.push(promoted_offsets[field_idx]); + combined_memory_index.push(promoted_memory_index[field_idx]); + } + } + } + variant.fields = FieldPlacement::Arbitrary { + offsets: combined_offsets, + memory_index: recompute_memory_index(&combined_memory_index), + }; + + size = size.max(variant.size); + align = align.max(variant.align); + Ok(variant) + }).collect::, _>>()?; + + let abi = if prefix.abi.is_uninhabited() || + variants.iter().all(|v| v.abi.is_uninhabited()) { + Abi::Uninhabited + } else { + Abi::Aggregate { sized: true } + }; + let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi { + Abi::Scalar(s) => s.clone(), + _ => bug!(), + }; + + let layout = tcx.intern_layout(LayoutDetails { + variants: Variants::Multiple { + discr, + discr_kind: DiscriminantKind::Tag, + discr_index, + variants, + }, + fields: outer_fields, + abi, + size, + align, + }); + debug!("generator layout ({:?}): {:#?}", ty, layout); + Ok(layout) + } +} +impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// This is invoked by the `layout_raw` query to record the final /// layout of each type. 
#[inline(always)] From 7d3211339b4b76cf7177cf6b48c9e2c97cfae003 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Miku=C5=82a?= Date: Tue, 11 Jun 2019 11:49:38 +0200 Subject: [PATCH 29/32] Migrate rust-by-example to MdBook2 --- src/bootstrap/doc.rs | 2 +- src/doc/rust-by-example | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index 2a3577a3d2044..278ae8a9addcc 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -64,7 +64,7 @@ book!( EmbeddedBook, "src/doc/embedded-book", "embedded-book", RustbookVersion::MdBook2; Nomicon, "src/doc/nomicon", "nomicon", RustbookVersion::MdBook2; Reference, "src/doc/reference", "reference", RustbookVersion::MdBook1; - RustByExample, "src/doc/rust-by-example", "rust-by-example", RustbookVersion::MdBook1; + RustByExample, "src/doc/rust-by-example", "rust-by-example", RustbookVersion::MdBook2; RustcBook, "src/doc/rustc", "rustc", RustbookVersion::MdBook1; RustdocBook, "src/doc/rustdoc", "rustdoc", RustbookVersion::MdBook2; ); diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example index 18566f4dedc3e..d8eec1dd65470 160000 --- a/src/doc/rust-by-example +++ b/src/doc/rust-by-example @@ -1 +1 @@ -Subproject commit 18566f4dedc3ef5bf61f5f85685d5966db99cc11 +Subproject commit d8eec1dd65470b9a68e80ac1cba8fad0daac4916 From f2c37a55a4456c4af8b3b3b53cdd4f703a910b29 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 7 Jun 2019 11:30:11 -0700 Subject: [PATCH 30/32] ci: Collect CPU usage statistics on Azure This commit adds a script which we'll execute on Azure Pipelines which is intended to run in the background and passively collect CPU usage statistics for our builders. The intention here is that we can use this information over time to diagnose issues with builders, see where we can optimize our build, fix parallelism issues, etc. 
This might not end up being too useful in the long run but it's data we've wanted to collect for quite some time now, so here's a stab at it! Comments about how this is intended to work can be found in the python script used here to collect CPU usage statistics. Closes #48828 --- .azure-pipelines/steps/run.yml | 16 +++ src/ci/cpu-usage-over-time.py | 175 +++++++++++++++++++++++++++++++++ 2 files changed, 191 insertions(+) create mode 100644 src/ci/cpu-usage-over-time.py diff --git a/.azure-pipelines/steps/run.yml b/.azure-pipelines/steps/run.yml index cfa28a6a1d70f..38aa022b624db 100644 --- a/.azure-pipelines/steps/run.yml +++ b/.azure-pipelines/steps/run.yml @@ -11,6 +11,12 @@ steps: - checkout: self fetchDepth: 2 +# Spawn a background process to collect CPU usage statistics which we'll upload +# at the end of the build. See the comments in the script here for more +# information. +- bash: python src/ci/cpu-usage-over-time.py &> cpu-usage.csv & + displayName: "Collect CPU-usage statistics in the background" + - bash: printenv | sort displayName: Show environment variables @@ -136,3 +142,13 @@ steps: AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY) condition: and(succeeded(), or(eq(variables.DEPLOY, '1'), eq(variables.DEPLOY_ALT, '1'))) displayName: Upload artifacts + +# Upload CPU usage statistics that we've been gathering this whole time. Always +# execute this step in case we want to inspect failed builds, but don't let +# errors here ever fail the build since this is just informational. 
+- bash: aws s3 cp --acl public-read cpu-usage.csv s3://$DEPLOY_BUCKET/rustc-builds/$BUILD_SOURCEVERSION/cpu-$SYSTEM_JOBNAME.csv + env: + AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY) + condition: contains(variables, 'AWS_SECRET_ACCESS_KEY') + continueOnError: true + displayName: Upload CPU usage statistics diff --git a/src/ci/cpu-usage-over-time.py b/src/ci/cpu-usage-over-time.py new file mode 100644 index 0000000000000..78427a6360a9f --- /dev/null +++ b/src/ci/cpu-usage-over-time.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +# ignore-tidy-linelength + +# This is a small script that we use on CI to collect CPU usage statistics of +# our builders. By seeing graphs of CPU usage over time we hope to correlate +# that with possible improvements to Rust's own build system, ideally diagnosing +# that either builders are always fully using their CPU resources or they're +# idle for long stretches of time. +# +# This script is relatively simple, but it's platform specific. Each platform +# (OSX/Windows/Linux) has a different way of calculating the current state of +# CPU at a point in time. We then compare two captured states to determine the +# percentage of time spent in one state versus another. The state capturing is +# all platform-specific but the loop at the bottom is the cross platform part +# that executes everywhere. +# +# # Viewing statistics +# +# All builders will upload their CPU statistics as CSV files to our S3 buckets. +# These URLS look like: +# +# https://$bucket.s3.amazonaws.com/rustc-builds/$commit/cpu-$builder.csv +# +# for example +# +# https://rust-lang-ci2.s3.amazonaws.com/rustc-builds/68baada19cd5340f05f0db15a3e16d6671609bcc/cpu-x86_64-apple.csv +# +# Each CSV file has two columns. The first is the timestamp of the measurement +# and the second column is the % of idle cpu time in that time slice. Ideally +# the second column is always zero. +# +# Once you've downloaded a file there's various ways to plot it and visualize +# it. 
For command line usage you can use a script like so: +# +# set timefmt '%Y-%m-%dT%H:%M:%S' +# set xdata time +# set ylabel "Idle CPU %" +# set xlabel "Time" +# set datafile sep ',' +# set term png +# set output "printme.png" +# set grid +# builder = "i686-apple" +# plot "cpu-".builder.".csv" using 1:2 with lines title builder +# +# Executed as `gnuplot < ./foo.plot` it will generate a graph called +# `printme.png` which you can then open up. If you know how to improve this +# script or the viewing process that would be much appreciated :) (or even if +# you know how to automate it!) + +import datetime +import sys +import time + +if sys.platform == 'linux2': + class State: + def __init__(self): + with open('/proc/stat', 'r') as file: + data = file.readline().split() + if data[0] != 'cpu': + raise Exception('did not start with "cpu"') + self.user = int(data[1]) + self.nice = int(data[2]) + self.system = int(data[3]) + self.idle = int(data[4]) + self.iowait = int(data[5]) + self.irq = int(data[6]) + self.softirq = int(data[7]) + self.steal = int(data[8]) + self.guest = int(data[9]) + self.guest_nice = int(data[10]) + + def idle_since(self, prev): + user = self.user - prev.user + nice = self.nice - prev.nice + system = self.system - prev.system + idle = self.idle - prev.idle + iowait = self.iowait - prev.iowait + irq = self.irq - prev.irq + softirq = self.softirq - prev.softirq + steal = self.steal - prev.steal + guest = self.guest - prev.guest + guest_nice = self.guest_nice - prev.guest_nice + total = user + nice + system + idle + iowait + irq + softirq + steal + guest + guest_nice + return float(idle) / float(total) * 100 + +elif sys.platform == 'win32': + from ctypes.wintypes import DWORD + from ctypes import Structure, windll, WinError, GetLastError, byref + + class FILETIME(Structure): + _fields_ = [ + ("dwLowDateTime", DWORD), + ("dwHighDateTime", DWORD), + ] + + class State: + def __init__(self): + idle, kernel, user = FILETIME(), FILETIME(), FILETIME() + + 
success = windll.kernel32.GetSystemTimes( + byref(idle), + byref(kernel), + byref(user), + ) + + assert success, WinError(GetLastError())[1] + + self.idle = (idle.dwHighDateTime << 32) | idle.dwLowDateTime + self.kernel = (kernel.dwHighDateTime << 32) | kernel.dwLowDateTime + self.user = (user.dwHighDateTime << 32) | user.dwLowDateTime + + def idle_since(self, prev): + idle = self.idle - prev.idle + user = self.user - prev.user + kernel = self.kernel - prev.kernel + return float(idle) / float(user + kernel) * 100 + +elif sys.platform == 'darwin': + from ctypes import * + libc = cdll.LoadLibrary('/usr/lib/libc.dylib') + + PROCESSOR_CPU_LOAD_INFO = c_int(2) + CPU_STATE_USER = 0 + CPU_STATE_SYSTEM = 1 + CPU_STATE_IDLE = 2 + CPU_STATE_NICE = 3 + c_int_p = POINTER(c_int) + + class State: + def __init__(self): + num_cpus_u = c_uint(0) + cpu_info = c_int_p() + cpu_info_cnt = c_int(0) + err = libc.host_processor_info( + libc.mach_host_self(), + PROCESSOR_CPU_LOAD_INFO, + byref(num_cpus_u), + byref(cpu_info), + byref(cpu_info_cnt), + ) + assert err == 0 + self.user = 0 + self.system = 0 + self.idle = 0 + self.nice = 0 + cur = 0 + while cur < cpu_info_cnt.value: + self.user += cpu_info[cur + CPU_STATE_USER] + self.system += cpu_info[cur + CPU_STATE_SYSTEM] + self.idle += cpu_info[cur + CPU_STATE_IDLE] + self.nice += cpu_info[cur + CPU_STATE_NICE] + cur += num_cpus_u.value + + def idle_since(self, prev): + user = self.user - prev.user + system = self.system - prev.system + idle = self.idle - prev.idle + nice = self.nice - prev.nice + return float(idle) / float(user + system + idle + nice) * 100.0 + +else: + print('unknown platform', sys.platform) + sys.exit(1) + +cur_state = State(); +print("Time,Idle") +while True: + time.sleep(1); + next_state = State(); + now = datetime.datetime.utcnow().isoformat() + idle = next_state.idle_since(cur_state) + print("%s,%s" % (now, idle)) + sys.stdout.flush() + cur_state = next_state From 87d5fe0e5361df9f0e198dad807c8201b6cb4ea3 Mon Sep 17 
00:00:00 2001 From: Ralf Jung Date: Sun, 9 Jun 2019 12:53:47 +0200 Subject: [PATCH 31/32] is_fp and is_floating_point do the same thing, remove the former also consistently mark all these is_* methods for inlining --- src/librustc/ty/sty.rs | 39 ++++++++++++++++++++------ src/librustc_codegen_ssa/mir/rvalue.rs | 4 +-- src/librustc_typeck/check/mod.rs | 2 +- 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 5cc46b21fca1f..6a1f603f22206 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -1665,6 +1665,7 @@ impl RegionKind { /// Type utilities impl<'a, 'gcx, 'tcx> TyS<'tcx> { + #[inline] pub fn is_unit(&self) -> bool { match self.sty { Tuple(ref tys) => tys.is_empty(), @@ -1672,6 +1673,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_never(&self) -> bool { match self.sty { Never => true, @@ -1726,6 +1728,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_primitive(&self) -> bool { match self.sty { Bool | Char | Int(_) | Uint(_) | Float(_) => true, @@ -1741,6 +1744,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_ty_infer(&self) -> bool { match self.sty { Infer(_) => true, @@ -1748,6 +1752,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_phantom_data(&self) -> bool { if let Adt(def, _) = self.sty { def.is_phantom_data() @@ -1756,8 +1761,10 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_bool(&self) -> bool { self.sty == Bool } + #[inline] pub fn is_param(&self, index: u32) -> bool { match self.sty { ty::Param(ref data) => data.index == index, @@ -1765,6 +1772,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_self(&self) -> bool { match self.sty { Param(ref p) => p.is_self(), @@ -1772,6 +1780,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_slice(&self) -> bool { match self.sty { RawPtr(TypeAndMut { ty, .. 
}) | Ref(_, ty, _) => match ty.sty { @@ -1814,6 +1823,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_region_ptr(&self) -> bool { match self.sty { Ref(..) => true, @@ -1821,6 +1831,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_mutable_pointer(&self) -> bool { match self.sty { RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) | @@ -1829,6 +1840,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_unsafe_ptr(&self) -> bool { match self.sty { RawPtr(_) => return true, @@ -1837,6 +1849,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } /// Returns `true` if this type is an `Arc`. + #[inline] pub fn is_arc(&self) -> bool { match self.sty { Adt(def, _) => def.is_arc(), @@ -1845,6 +1858,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } /// Returns `true` if this type is an `Rc`. + #[inline] pub fn is_rc(&self) -> bool { match self.sty { Adt(def, _) => def.is_rc(), @@ -1852,6 +1866,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_box(&self) -> bool { match self.sty { Adt(def, _) => def.is_box(), @@ -1870,6 +1885,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// A scalar type is one that denotes an atomic datum, with no sub-components. /// (A RawPtr is scalar because it represents a non-managed pointer, so its /// contents are abstract to rustc.) + #[inline] pub fn is_scalar(&self) -> bool { match self.sty { Bool | Char | Int(_) | Float(_) | Uint(_) | @@ -1880,6 +1896,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } /// Returns `true` if this type is a floating point type. + #[inline] pub fn is_floating_point(&self) -> bool { match self.sty { Float(_) | @@ -1888,6 +1905,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_trait(&self) -> bool { match self.sty { Dynamic(..) 
=> true, @@ -1895,6 +1913,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_enum(&self) -> bool { match self.sty { Adt(adt_def, _) => { @@ -1904,6 +1923,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_closure(&self) -> bool { match self.sty { Closure(..) => true, @@ -1911,6 +1931,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_generator(&self) -> bool { match self.sty { Generator(..) => true, @@ -1926,6 +1947,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_fresh_ty(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, @@ -1933,6 +1955,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_fresh(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, @@ -1942,6 +1965,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_char(&self) -> bool { match self.sty { Char => true, @@ -1950,17 +1974,11 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } #[inline] - pub fn is_fp(&self) -> bool { - match self.sty { - Infer(FloatVar(_)) | Float(_) => true, - _ => false - } - } - pub fn is_numeric(&self) -> bool { - self.is_integral() || self.is_fp() + self.is_integral() || self.is_floating_point() } + #[inline] pub fn is_signed(&self) -> bool { match self.sty { Int(_) => true, @@ -1968,6 +1986,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_pointer_sized(&self) -> bool { match self.sty { Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => true, @@ -1975,6 +1994,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_machine(&self) -> bool { match self.sty { Int(..) | Uint(..) | Float(..) => true, @@ -1982,6 +2002,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn has_concrete_skeleton(&self) -> bool { match self.sty { Param(_) | Infer(_) | Error => false, @@ -2028,6 +2049,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_fn(&self) -> bool { match self.sty { FnDef(..) 
| FnPtr(_) => true, @@ -2043,6 +2065,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_impl_trait(&self) -> bool { match self.sty { Opaque(..) => true, diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 7a2bd18df4ec9..87e15ba6aac5e 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -429,7 +429,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::UnaryOp(op, ref operand) => { let operand = self.codegen_operand(&mut bx, operand); let lloperand = operand.immediate(); - let is_float = operand.layout.ty.is_fp(); + let is_float = operand.layout.ty.is_floating_point(); let llval = match op { mir::UnOp::Not => bx.not(lloperand), mir::UnOp::Neg => if is_float { @@ -536,7 +536,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { rhs: Bx::Value, input_ty: Ty<'tcx>, ) -> Bx::Value { - let is_float = input_ty.is_fp(); + let is_float = input_ty.is_floating_point(); let is_signed = input_ty.is_signed(); let is_unit = input_ty.is_unit(); match op { diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 2e53b380cb71a..7f4234c5f262e 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -4089,7 +4089,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { hir::UnNeg => { let result = self.check_user_unop(expr, oprnd_t, unop); // If it's builtin, we can reuse the type, this helps inference. 
- if !(oprnd_t.is_integral() || oprnd_t.is_fp()) { + if !oprnd_t.is_numeric() { oprnd_t = result; } } From aeefbc4cbad4461f4f846e897a04e0b3ce8cdb22 Mon Sep 17 00:00:00 2001 From: Tyler Mandry Date: Mon, 10 Jun 2019 15:54:00 -0700 Subject: [PATCH 32/32] More review fixes --- src/librustc/mir/mod.rs | 7 ----- src/librustc/ty/layout.rs | 42 ++++++++----------------- src/librustc_mir/transform/generator.rs | 7 +++-- 3 files changed, 17 insertions(+), 39 deletions(-) diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 3e9f7b51cb1c5..310228838e0ad 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -1501,13 +1501,6 @@ impl<'tcx> BasicBlockData<'tcx> { self.terminator.as_mut().expect("invalid terminator state") } - pub fn is_unreachable(&self) -> bool { - match self.terminator().kind { - TerminatorKind::Unreachable => true, - _ => false, - } - } - pub fn retain_statements(&mut self, mut f: F) where F: FnMut(&mut Statement<'_>) -> bool, diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index caff75a36cd5e..2cf61d7b3f529 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1311,7 +1311,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { use SavedLocalEligibility::*; let tcx = self.tcx; - let recompute_memory_index = |offsets: &Vec| -> Vec { + let recompute_memory_index = |offsets: &[Size]| -> Vec { debug!("recompute_memory_index({:?})", offsets); let mut inverse_index = (0..offsets.len() as u32).collect::>(); inverse_index.sort_unstable_by_key(|i| offsets[*i as usize]); @@ -1349,19 +1349,14 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // get included in each variant that requested them in // GeneratorLayout. 
debug!("prefix = {:#?}", prefix); - let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { - FieldPlacement::Arbitrary { offsets, memory_index } => { - let (offsets_a, offsets_b) = - offsets.split_at(discr_index + 1); - let (memory_index_a, memory_index_b) = - memory_index.split_at(discr_index + 1); - let outer_fields = FieldPlacement::Arbitrary { - offsets: offsets_a.to_vec(), - memory_index: recompute_memory_index(&memory_index_a.to_vec()) - }; - (outer_fields, - offsets_b.to_vec(), - recompute_memory_index(&memory_index_b.to_vec())) + let (outer_fields, promoted_offsets) = match prefix.fields { + FieldPlacement::Arbitrary { mut offsets, .. } => { + let offsets_b = offsets.split_off(discr_index + 1); + let offsets_a = offsets; + + let memory_index = recompute_memory_index(&offsets_a); + let outer_fields = FieldPlacement::Arbitrary { offsets: offsets_a, memory_index }; + (outer_fields, offsets_b) } _ => bug!(), }; @@ -1391,8 +1386,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { StructKind::Prefixed(prefix_size, prefix_align.abi))?; variant.variants = Variants::Single { index }; - let (offsets, memory_index) = match variant.fields { - FieldPlacement::Arbitrary { offsets, memory_index } => (offsets, memory_index), + let offsets = match variant.fields { + FieldPlacement::Arbitrary { offsets, .. } => offsets, _ => bug!(), }; @@ -1400,32 +1395,21 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // the order they are mentioned by our GeneratorLayout. let mut next_variant_field = 0; let mut combined_offsets = Vec::new(); - let mut combined_memory_index = Vec::new(); for local in variant_fields.iter() { match assignments[*local] { Unassigned => bug!(), Assigned(_) => { combined_offsets.push(offsets[next_variant_field]); - // Shift memory indices by the number of promoted - // fields, which all come first. We may not use all - // promoted fields in our variant but that's okay; we'll - // renumber them below. 
- combined_memory_index.push( - promoted_memory_index.len() as u32 + - memory_index[next_variant_field]); next_variant_field += 1; } Ineligible(field_idx) => { let field_idx = field_idx.unwrap() as usize; combined_offsets.push(promoted_offsets[field_idx]); - combined_memory_index.push(promoted_memory_index[field_idx]); } } } - variant.fields = FieldPlacement::Arbitrary { - offsets: combined_offsets, - memory_index: recompute_memory_index(&combined_memory_index), - }; + let memory_index = recompute_memory_index(&combined_offsets); + variant.fields = FieldPlacement::Arbitrary { offsets: combined_offsets, memory_index }; size = size.max(variant.size); align = align.max(variant.align); diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index 9dc9f8fdf72a9..e3c3506454601 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -660,9 +660,10 @@ impl<'body, 'tcx: 'body, 's> StorageConflictVisitor<'body, 'tcx, 's> { flow_state: &FlowAtLocation<'tcx, MaybeStorageLive<'body, 'tcx>>, loc: Location) { // Ignore unreachable blocks. - if self.body.basic_blocks()[loc.block].is_unreachable() { - return; - } + match self.body.basic_blocks()[loc.block].terminator().kind { + TerminatorKind::Unreachable => return, + _ => (), + }; let mut eligible_storage_live = flow_state.as_dense().clone(); eligible_storage_live.intersect(&self.stored_locals);