From 86b0f149d8051d13fea515d687a7514c20f11c10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Esteban=20K=C3=BCber?=
Date: Thu, 18 Jun 2020 14:01:25 -0700
Subject: [PATCH 1/4] Perform obligation deduplication to avoid buggy
 `ExistentialMismatch`

Fix #59326.
---
 src/librustc_middle/ty/relate.rs  | 16 +++++++++++++---
 src/test/ui/issues/issue-59326.rs | 26 ++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 3 deletions(-)
 create mode 100644 src/test/ui/issues/issue-59326.rs

diff --git a/src/librustc_middle/ty/relate.rs b/src/librustc_middle/ty/relate.rs
index aeb3a0716fb42..7fb41a1dc5ac2 100644
--- a/src/librustc_middle/ty/relate.rs
+++ b/src/librustc_middle/ty/relate.rs
@@ -617,12 +617,22 @@ impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
         a: &Self,
         b: &Self,
     ) -> RelateResult<'tcx, Self> {
-        if a.len() != b.len() {
+        let tcx = relation.tcx();
+
+        // FIXME: this is wasteful, but want to do a perf run to see how slow it is.
+        // We need to perform this deduplication as we sometimes generate duplicate projections
+        // in `a`.
+        let mut a_v: Vec<_> = a.into_iter().collect();
+        let mut b_v: Vec<_> = b.into_iter().collect();
+        a_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        a_v.dedup();
+        b_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        b_v.dedup();
+        if a_v.len() != b_v.len() {
             return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b)));
         }
 
-        let tcx = relation.tcx();
-        let v = a.iter().zip(b.iter()).map(|(ep_a, ep_b)| {
+        let v = a_v.into_iter().zip(b_v.into_iter()).map(|(ep_a, ep_b)| {
             use crate::ty::ExistentialPredicate::*;
             match (ep_a, ep_b) {
                 (Trait(ref a), Trait(ref b)) => Ok(Trait(relation.relate(a, b)?)),
diff --git a/src/test/ui/issues/issue-59326.rs b/src/test/ui/issues/issue-59326.rs
new file mode 100644
index 0000000000000..c0e8837749eb4
--- /dev/null
+++ b/src/test/ui/issues/issue-59326.rs
@@ -0,0 +1,26 @@
+// check-pass
+trait Service {
+    type S;
+}
+
+trait Framing {
+    type F;
+}
+
+impl Framing for () {
+    type F = ();
+}
+
+trait HttpService<F: Framing>: Service<S = F::F> {}
+
+type BoxService = Box<dyn HttpService<(), S = ()>>;
+
+fn build_server<F: FnOnce() -> BoxService>(_: F) {}
+
+fn make_server<F: Framing>() -> Box<dyn HttpService<F, S = F::F>> {
+    unimplemented!()
+}
+
+fn main() {
+    build_server(|| make_server())
+}
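
The fix above relates the two existential-predicate lists only after sorting
them with a stable order and removing duplicates, so a list that merely
repeats a projection no longer trips `ExistentialMismatch`. The shape of that
comparison, as a minimal standalone sketch (not compiler code: plain `Ord`
stands in for `stable_cmp`, and `lists_match` is a hypothetical helper):

    // Sort and dedup both sides, then compare element-wise.
    fn lists_match<T: Ord + Clone>(a: &[T], b: &[T]) -> bool {
        let (mut a_v, mut b_v) = (a.to_vec(), b.to_vec());
        a_v.sort();
        a_v.dedup();
        b_v.sort();
        b_v.dedup();
        a_v.len() == b_v.len() && a_v.iter().zip(&b_v).all(|(x, y)| x == y)
    }

    fn main() {
        // A duplicated entry, like the duplicate projections in `a`,
        // no longer causes a spurious length mismatch.
        assert!(lists_match(&["Send", "Send", "Sync"], &["Sync", "Send"]));
        assert!(!lists_match(&["Send"], &["Sync"]));
    }
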
From 2804236b465806acd9ac72d33679e88d3af6f360 Mon Sep 17 00:00:00 2001
From: Vadim Petrochenkov
Date: Mon, 22 Jun 2020 00:40:11 +0300
Subject: [PATCH 2/4] rustc_lexer: Simplify shebang parsing once more

---
 src/librustc_lexer/src/lib.rs               | 177 +++++++-------------
 src/test/ui/parser/shebang/shebang-empty.rs |   4 +
 src/test/ui/parser/shebang/shebang-space.rs |   5 +
 src/test/ui/shebang.rs                      |   5 -
 4 files changed, 66 insertions(+), 125 deletions(-)
 create mode 100644 src/test/ui/parser/shebang/shebang-empty.rs
 create mode 100644 src/test/ui/parser/shebang/shebang-space.rs
 delete mode 100644 src/test/ui/shebang.rs

diff --git a/src/librustc_lexer/src/lib.rs b/src/librustc_lexer/src/lib.rs
index c2139d07f378a..77b3d26463dfe 100644
--- a/src/librustc_lexer/src/lib.rs
+++ b/src/librustc_lexer/src/lib.rs
@@ -29,7 +29,7 @@ mod tests;
 use self::LiteralKind::*;
 use self::TokenKind::*;
 use crate::cursor::{Cursor, EOF_CHAR};
-use std::convert::TryInto;
+use std::convert::TryFrom;
 
 /// Parsed token.
 /// It doesn't contain information about data that has been parsed,
@@ -142,84 +142,24 @@ pub enum LiteralKind {
     /// "b"abc"", "b"abc"
     ByteStr { terminated: bool },
     /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
-    RawStr(UnvalidatedRawStr),
+    RawStr { n_hashes: u16, err: Option<RawStrError> },
     /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
-    RawByteStr(UnvalidatedRawStr),
-}
-
-/// Represents something that looks like a raw string, but may have some
-/// problems. Use `.validate()` to convert it into something
-/// usable.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct UnvalidatedRawStr {
-    /// The prefix (`r###"`) is valid
-    valid_start: bool,
-
-    /// The postfix (`"###`) is valid
-    valid_end: bool,
-
-    /// The number of leading `#`
-    n_start_hashes: usize,
-    /// The number of trailing `#`. `n_end_hashes` <= `n_start_hashes`
-    n_end_hashes: usize,
-    /// The offset starting at `r` or `br` where the user may have intended to end the string.
-    /// Currently, it is the longest sequence of pattern `"#+"`.
-    possible_terminator_offset: Option<usize>,
+    RawByteStr { n_hashes: u16, err: Option<RawStrError> },
 }
 
 /// Error produced validating a raw string. Represents cases like:
-/// - `r##~"abcde"##`: `LexRawStrError::InvalidStarter`
-/// - `r###"abcde"##`: `LexRawStrError::NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11)`
-/// - Too many `#`s (>65536): `TooManyDelimiters`
+/// - `r##~"abcde"##`: `InvalidStarter`
+/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11)`
+/// - Too many `#`s (>65535): `TooManyDelimiters`
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub enum LexRawStrError {
+pub enum RawStrError {
     /// Non `#` characters exist between `r` and `"` eg. `r#~"..`
-    InvalidStarter,
+    InvalidStarter { bad_char: char },
     /// The string was never terminated. `possible_terminator_offset` is the number of characters after `r` or `br` where they
     /// may have intended to terminate it.
     NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
-    /// More than 65536 `#`s exist.
-    TooManyDelimiters,
-}
-
-/// Raw String that contains a valid prefix (`#+"`) and postfix (`"#+`) where
-/// there are a matching number of `#` characters in both. Note that this will
-/// not consume extra trailing `#` characters: `r###"abcde"####` is lexed as a
-/// `ValidatedRawString { n_hashes: 3 }` followed by a `#` token.
-#[derive(Debug, Eq, PartialEq, Copy, Clone)]
-pub struct ValidatedRawStr {
-    n_hashes: u16,
-}
-
-impl ValidatedRawStr {
-    pub fn num_hashes(&self) -> u16 {
-        self.n_hashes
-    }
-}
-
-impl UnvalidatedRawStr {
-    pub fn validate(self) -> Result<ValidatedRawStr, LexRawStrError> {
-        if !self.valid_start {
-            return Err(LexRawStrError::InvalidStarter);
-        }
-
-        // Only up to 65535 `#`s are allowed in raw strings
-        let n_start_safe: u16 =
-            self.n_start_hashes.try_into().map_err(|_| LexRawStrError::TooManyDelimiters)?;
-
-        if self.n_start_hashes > self.n_end_hashes || !self.valid_end {
-            Err(LexRawStrError::NoTerminator {
-                expected: self.n_start_hashes,
-                found: self.n_end_hashes,
-                possible_terminator_offset: self.possible_terminator_offset,
-            })
-        } else {
-            // Since the lexer should never produce a literal with n_end > n_start, if n_start <= n_end,
-            // they must be equal.
-            debug_assert_eq!(self.n_start_hashes, self.n_end_hashes);
-            Ok(ValidatedRawStr { n_hashes: n_start_safe })
-        }
-    }
+    /// More than 65535 `#`s exist.
+    TooManyDelimiters { found: usize },
 }
 
 /// Base of numeric literal encoding according to its prefix.
@@ -239,21 +179,18 @@ pub enum Base {
 /// but shebang isn't a part of rust syntax.
 pub fn strip_shebang(input: &str) -> Option<usize> {
     // Shebang must start with `#!` literally, without any preceding whitespace.
-    if input.starts_with("#!") {
-        let input_tail = &input[2..];
-        // Shebang must have something non-whitespace after `#!` on the first line.
-        let first_line_tail = input_tail.lines().next()?;
-        if first_line_tail.contains(|c| !is_whitespace(c)) {
-            // Ok, this is a shebang but if the next non-whitespace token is `[` or maybe
-            // a doc comment (due to `TokenKind::(Line,Block)Comment` ambiguity at lexer level),
-            // then it may be valid Rust code, so consider it Rust code.
-            let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).filter(|tok|
-                !matches!(tok, TokenKind::Whitespace | TokenKind::LineComment | TokenKind::BlockComment { .. })
-            ).next();
-            if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
-                // No other choice than to consider this a shebang.
-                return Some(2 + first_line_tail.len());
-            }
+    // For simplicity we consider any line starting with `#!` a shebang,
+    // regardless of restrictions put on shebangs by specific platforms.
+    if let Some(input_tail) = input.strip_prefix("#!") {
+        // Ok, this is a shebang but if the next non-whitespace token is `[` or maybe
+        // a doc comment (due to `TokenKind::(Line,Block)Comment` ambiguity at lexer level),
+        // then it may be valid Rust code, so consider it Rust code.
+        let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok|
+            !matches!(tok, TokenKind::Whitespace | TokenKind::LineComment | TokenKind::BlockComment { .. })
+        );
+        if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
+            // No other choice than to consider this a shebang.
+            return Some(2 + input_tail.lines().next().unwrap_or_default().len());
         }
     }
     None
@@ -354,12 +291,12 @@ impl Cursor<'_> {
             'r' => match (self.first(), self.second()) {
                 ('#', c1) if is_id_start(c1) => self.raw_ident(),
                 ('#', _) | ('"', _) => {
-                    let raw_str_i = self.raw_double_quoted_string(1);
+                    let (n_hashes, err) = self.raw_double_quoted_string(1);
                     let suffix_start = self.len_consumed();
-                    if raw_str_i.n_end_hashes == raw_str_i.n_start_hashes {
+                    if err.is_none() {
                         self.eat_literal_suffix();
                     }
-                    let kind = RawStr(raw_str_i);
+                    let kind = RawStr { n_hashes, err };
                     Literal { kind, suffix_start }
                 }
                 _ => self.ident(),
@@ -389,14 +326,12 @@ impl Cursor<'_> {
                 }
                 ('r', '"') | ('r', '#') => {
                     self.bump();
-                    let raw_str_i = self.raw_double_quoted_string(2);
+                    let (n_hashes, err) = self.raw_double_quoted_string(2);
                     let suffix_start = self.len_consumed();
-                    let terminated = raw_str_i.n_start_hashes == raw_str_i.n_end_hashes;
-                    if terminated {
+                    if err.is_none() {
                         self.eat_literal_suffix();
                     }
-
-                    let kind = RawByteStr(raw_str_i);
+                    let kind = RawByteStr { n_hashes, err };
                     Literal { kind, suffix_start }
                 }
                 _ => self.ident(),
@@ -692,27 +627,34 @@ impl Cursor<'_> {
         false
     }
 
-    /// Eats the double-quoted string and returns an `UnvalidatedRawStr`.
-    fn raw_double_quoted_string(&mut self, prefix_len: usize) -> UnvalidatedRawStr {
+    /// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
+    fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) {
+        // Wrap the actual function to handle the error with too many hashes.
+        // This way, it eats the whole raw string.
+        let (n_hashes, err) = self.raw_string_unvalidated(prefix_len);
+        // Only up to 65535 `#`s are allowed in raw strings
+        match u16::try_from(n_hashes) {
+            Ok(num) => (num, err),
+            // We lie about the number of hashes here :P
+            Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })),
+        }
+    }
+
+    fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) {
         debug_assert!(self.prev() == 'r');
-        let mut valid_start: bool = false;
         let start_pos = self.len_consumed();
-        let (mut possible_terminator_offset, mut max_hashes) = (None, 0);
+        let mut possible_terminator_offset = None;
+        let mut max_hashes = 0;
 
         // Count opening '#' symbols.
         let n_start_hashes = self.eat_while(|c| c == '#');
 
         // Check that string is started.
         match self.bump() {
-            Some('"') => valid_start = true,
-            _ => {
-                return UnvalidatedRawStr {
-                    valid_start,
-                    valid_end: false,
-                    n_start_hashes,
-                    n_end_hashes: 0,
-                    possible_terminator_offset,
-                };
+            Some('"') => (),
+            c => {
+                let c = c.unwrap_or(EOF_CHAR);
+                return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c }));
             }
         }
 
@@ -722,13 +664,14 @@ impl Cursor<'_> {
         self.eat_while(|c| c != '"');
 
         if self.is_eof() {
-            return UnvalidatedRawStr {
-                valid_start,
-                valid_end: false,
+            return (
                 n_start_hashes,
-                n_end_hashes: max_hashes,
-                possible_terminator_offset,
-            };
+                Some(RawStrError::NoTerminator {
+                    expected: n_start_hashes,
+                    found: max_hashes,
+                    possible_terminator_offset,
+                }),
+            );
         }
 
         // Eat closing double quote.
         self.bump();
@@ -737,7 +680,7 @@ impl Cursor<'_> {
         // Check that amount of closing '#' symbols
         // is equal to the amount of opening ones.
         // Note that this will not consume extra trailing `#` characters:
-        // `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }`
+        // `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
         // followed by a `#` token.
         let mut hashes_left = n_start_hashes;
         let is_closing_hash = |c| {
@@ -751,13 +694,7 @@ impl Cursor<'_> {
         let n_end_hashes = self.eat_while(is_closing_hash);
 
         if n_end_hashes == n_start_hashes {
-            return UnvalidatedRawStr {
-                valid_start,
-                valid_end: true,
-                n_start_hashes,
-                n_end_hashes,
-                possible_terminator_offset: None,
-            };
+            return (n_start_hashes, None);
         } else if n_end_hashes > max_hashes {
             // Keep track of possible terminators to give a hint about
             // where there might be a missing terminator
diff --git a/src/test/ui/parser/shebang/shebang-empty.rs b/src/test/ui/parser/shebang/shebang-empty.rs
new file mode 100644
index 0000000000000..e38cc637e945e
--- /dev/null
+++ b/src/test/ui/parser/shebang/shebang-empty.rs
@@ -0,0 +1,4 @@
+#!
+
+// check-pass
+fn main() {}
diff --git a/src/test/ui/parser/shebang/shebang-space.rs b/src/test/ui/parser/shebang/shebang-space.rs
new file mode 100644
index 0000000000000..0978b759d2a6e
--- /dev/null
+++ b/src/test/ui/parser/shebang/shebang-space.rs
@@ -0,0 +1,5 @@
+#! 
+
+// check-pass
+// ignore-tidy-end-whitespace
+fn main() {}
diff --git a/src/test/ui/shebang.rs b/src/test/ui/shebang.rs
deleted file mode 100644
index 3d3ba468be955..0000000000000
--- a/src/test/ui/shebang.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env rustx
-
-// run-pass
-
-pub fn main() { println!("Hello World"); }
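
With this patch the shebang rule reduces to: any `#!` prefix starts a shebang
unless the tokens that follow begin an inner attribute (`#![...]`), and the
shebang line may now be empty or whitespace-only, as the two new tests
exercise. Ignoring the attribute check, the length computation can be sketched
standalone (hypothetical `shebang_len`, not the rustc function):

    fn shebang_len(input: &str) -> Option<usize> {
        // `strip_prefix` yields the tail only when `input` starts with `#!`.
        let tail = input.strip_prefix("#!")?;
        // An empty first line after `#!` is fine: `lines().next()` is then
        // either `Some("")` or `None`, and both count as length 0.
        Some(2 + tail.lines().next().unwrap_or_default().len())
    }

    fn main() {
        assert_eq!(shebang_len("#!/usr/bin/env rustx\nfn main() {}"), Some(20));
        assert_eq!(shebang_len("#!\nfn main() {}"), Some(2));
        assert_eq!(shebang_len("fn main() {}"), None);
    }
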
From 9785a60290a037da077f15fd5a8a49aacde35cbb Mon Sep 17 00:00:00 2001
From: Oliver Middleton
Date: Tue, 23 Jun 2020 09:18:51 +0100
Subject: [PATCH 3/4] rustdoc: Fix doc aliases with crate filtering

Fix a crash when searching for an alias contained in the currently
selected filter crate. Also remove alias search results for crates that
should be filtered out.

The test suite also needed fixing so that it actually applies the crate
filter and checks that no results are returned when none are expected.
---
 src/librustdoc/html/static/main.js          | 13 +++++++------
 src/test/rustdoc-js/doc-alias-filter-out.js |  9 +++++++++
 src/test/rustdoc-js/doc-alias-filter-out.rs |  4 ++++
 src/test/rustdoc-js/doc-alias-filter.js     | 17 +++++++++++++++++
 src/test/rustdoc-js/doc-alias-filter.rs     |  7 +++++++
 src/tools/rustdoc-js/tester.js              | 13 +++++++++++--
 6 files changed, 55 insertions(+), 8 deletions(-)
 create mode 100644 src/test/rustdoc-js/doc-alias-filter-out.js
 create mode 100644 src/test/rustdoc-js/doc-alias-filter-out.rs
 create mode 100644 src/test/rustdoc-js/doc-alias-filter.js
 create mode 100644 src/test/rustdoc-js/doc-alias-filter.rs

diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js
index ac5a2f96b26c6..596c19fb0a057 100644
--- a/src/librustdoc/html/static/main.js
+++ b/src/librustdoc/html/static/main.js
@@ -1006,12 +1006,13 @@ function defocusSearchBar() {
         var aliases = [];
         var crateAliases = [];
         var i;
-        if (filterCrates !== undefined &&
-                ALIASES[filterCrates] &&
-                ALIASES[filterCrates][query.search]) {
-            for (i = 0; i < ALIASES[crate][query.search].length; ++i) {
-                aliases.push(
-                    createAliasFromItem(searchIndex[ALIASES[filterCrates][query.search]]));
+        if (filterCrates !== undefined) {
+            if (ALIASES[filterCrates] && ALIASES[filterCrates][query.search]) {
+                for (i = 0; i < ALIASES[filterCrates][query.search].length; ++i) {
+                    aliases.push(
+                        createAliasFromItem(
+                            searchIndex[ALIASES[filterCrates][query.search][i]]));
+                }
             }
         } else {
             Object.keys(ALIASES).forEach(function(crate) {
diff --git a/src/test/rustdoc-js/doc-alias-filter-out.js b/src/test/rustdoc-js/doc-alias-filter-out.js
new file mode 100644
index 0000000000000..46a089d06ebef
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter-out.js
@@ -0,0 +1,9 @@
+// exact-check
+
+const QUERY = 'true';
+
+const FILTER_CRATE = 'some_other_crate';
+
+const EXPECTED = {
+    'others': [],
+};
diff --git a/src/test/rustdoc-js/doc-alias-filter-out.rs b/src/test/rustdoc-js/doc-alias-filter-out.rs
new file mode 100644
index 0000000000000..815e8cedd16da
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter-out.rs
@@ -0,0 +1,4 @@
+#![feature(doc_alias)]
+
+#[doc(alias = "true")]
+pub struct Foo;
diff --git a/src/test/rustdoc-js/doc-alias-filter.js b/src/test/rustdoc-js/doc-alias-filter.js
new file mode 100644
index 0000000000000..4b1e2e2970479
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter.js
@@ -0,0 +1,17 @@
+// exact-check
+
+const QUERY = 'true';
+
+const FILTER_CRATE = 'doc_alias_filter';
+
+const EXPECTED = {
+    'others': [
+        {
+            'path': 'doc_alias_filter',
+            'name': 'Foo',
+            'alias': 'true',
+            'href': '../doc_alias_filter/struct.Foo.html',
+            'is_alias': true
+        },
+    ],
+};
diff --git a/src/test/rustdoc-js/doc-alias-filter.rs b/src/test/rustdoc-js/doc-alias-filter.rs
new file mode 100644
index 0000000000000..8887f8c2b0149
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter.rs
@@ -0,0 +1,7 @@
+#![feature(doc_alias)]
+
+#[doc(alias = "true")]
+pub struct Foo;
+
+#[doc(alias = "false")]
+pub struct Bar;
diff --git a/src/tools/rustdoc-js/tester.js b/src/tools/rustdoc-js/tester.js
index 163571bc5b988..139e6f73f4216 100644
--- a/src/tools/rustdoc-js/tester.js
+++ b/src/tools/rustdoc-js/tester.js
@@ -269,6 +269,12 @@ function runSearch(query, expected, index, loaded, loadedFile, queryName) {
             break;
         }
         var entry = expected[key];
+
+        if (exact_check == true && entry.length !== results[key].length) {
+            error_text.push(queryName + "==> Expected exactly " + entry.length +
+                            " results but found " + results[key].length + " in '" + key + "'");
+        }
+
        var prev_pos = -1;
         for (var i = 0; i < entry.length; ++i) {
             var entry_pos = lookForEntry(entry[i], results[key]);
@@ -307,8 +313,11 @@ function checkResult(error_text, loadedFile, displaySuccess) {
 }
 
 function runChecks(testFile, loaded, index) {
-    var loadedFile = loadContent(
-        readFile(testFile) + 'exports.QUERY = QUERY;exports.EXPECTED = EXPECTED;');
+    var testFileContent = readFile(testFile) + 'exports.QUERY = QUERY;exports.EXPECTED = EXPECTED;';
+    if (testFileContent.indexOf("FILTER_CRATE") !== -1) {
+        testFileContent += "exports.FILTER_CRATE = FILTER_CRATE;";
+    }
+    var loadedFile = loadContent(testFileContent);
 
     const expected = loadedFile.EXPECTED;
     const query = loadedFile.QUERY;
From 250af0a589c1faa8acbe70076ccc5d34a552d4b3 Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Mon, 8 Jun 2020 09:02:57 -0700
Subject: [PATCH 4/4] Change how compiler-builtins gets many CGUs

This commit intends to fix an accidental regression from #70846. The
goal of #70846 was to build compiler-builtins with a maximal number of
CGUs to ensure that each module in the source corresponds to an object
file. This high degree of control for compiler-builtins is desirable to
ensure that there's at most one exported symbol per CGU, ideally
enabling compiler-builtins to not conflict with the system libgcc as
often.

With #70846, however, only part of the compiler understands that
compiler-builtins is built with many CGUs; the rest of the compiler
thinks it's building with `sess.codegen_units()`. Notably, the
calculation of `sess.lto()` consults `sess.codegen_units()`, and when
there's only one CGU it disables ThinLTO. This means that
compiler-builtins is built without ThinLTO, which is quite harmful to
performance! This is the root cause of #73135, where intrinsics were
found not to inline trivial functions.

The fix applied in this commit is to remove the special-casing of
compiler-builtins in the compiler. Instead the build system is now
responsible for special-casing compiler-builtins. It doesn't know
exactly how many CGUs will be needed, but it passes a large number that
is assumed to be much greater than the number of source-level modules
needed. After reading the various locations in the compiler source,
this seemed like a better solution than adding more and more
special-casing for compiler-builtins in the compiler.

Closes #73135
---
 Cargo.toml                                    | 13 ++++++
 src/librustc_mir/monomorphize/partitioning.rs |  9 +----
 .../partitioning/compiler-builtins.rs         | 40 -------------------
 3 files changed, 14 insertions(+), 48 deletions(-)
 delete mode 100644 src/test/codegen-units/partitioning/compiler-builtins.rs

diff --git a/Cargo.toml b/Cargo.toml
index f2177a99a9b88..f10d539d8296b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -42,6 +42,19 @@ debug-assertions = false
 debug = false
 debug-assertions = false
 
+[profile.release.package.compiler_builtins]
+# For compiler-builtins we always use a high number of codegen units.
+# The goal here is to place every single intrinsic into its own object
+# file to avoid symbol clashes with the system libgcc if possible. Note
+# that this number doesn't actually produce this many object files, we
+# just don't create more than this number of object files.
+#
+# It's a bit of a bummer that we have to pass this here, unfortunately.
+# Ideally this would be specified through an env var to Cargo so Cargo
+# knows how many CGUs are for this specific crate, but for now
+# per-crate configuration isn't specifiable in the environment.
+codegen-units = 10000
+
 # We want the RLS to use the version of Cargo that we've got vendored in this
 # repository to ensure that the same exact version of Cargo is used by both the
 # RLS and the Cargo binary itself. The RLS depends on Cargo as a git repository
diff --git a/src/librustc_mir/monomorphize/partitioning.rs b/src/librustc_mir/monomorphize/partitioning.rs
index db1ea72c0a531..a945c1d626a9a 100644
--- a/src/librustc_mir/monomorphize/partitioning.rs
+++ b/src/librustc_mir/monomorphize/partitioning.rs
@@ -454,18 +454,11 @@ fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibilit
 fn merge_codegen_units<'tcx>(
     tcx: TyCtxt<'tcx>,
     initial_partitioning: &mut PreInliningPartitioning<'tcx>,
-    mut target_cgu_count: usize,
+    target_cgu_count: usize,
 ) {
     assert!(target_cgu_count >= 1);
     let codegen_units = &mut initial_partitioning.codegen_units;
 
-    if tcx.is_compiler_builtins(LOCAL_CRATE) {
-        // Compiler builtins require some degree of control over how mono items
-        // are partitioned into compilation units. Provide it by keeping the
-        // original partitioning when compiling the compiler builtins crate.
-        target_cgu_count = codegen_units.len();
-    }
-
     // Note that at this point in time the `codegen_units` here may not be in a
     // deterministic order (but we know they're deterministically the same set).
     // We want this merging to produce a deterministic ordering of codegen units
diff --git a/src/test/codegen-units/partitioning/compiler-builtins.rs b/src/test/codegen-units/partitioning/compiler-builtins.rs
deleted file mode 100644
index 25195743b0400..0000000000000
--- a/src/test/codegen-units/partitioning/compiler-builtins.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-// Verifies that during compiler_builtins compilation the codegen units are kept
-// unmerged. Even when only a single codegen unit is requested with -Ccodegen-units=1.
-//
-// compile-flags: -Zprint-mono-items=eager -Ccodegen-units=1
-
-#![compiler_builtins]
-#![crate_type="lib"]
-#![feature(compiler_builtins)]
-
-mod atomics {
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_1[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_1() {}
-
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_2[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_2() {}
-
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_3[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_3() {}
-}
-
-mod x {
-    //~ MONO_ITEM fn compiler_builtins::x[0]::x[0] @@ compiler_builtins-cgu.1[External]
-    #[no_mangle]
-    pub extern "C" fn x() {}
-}
-
-mod y {
-    //~ MONO_ITEM fn compiler_builtins::y[0]::y[0] @@ compiler_builtins-cgu.2[External]
-    #[no_mangle]
-    pub extern "C" fn y() {}
-}
-
-mod z {
-    //~ MONO_ITEM fn compiler_builtins::z[0]::z[0] @@ compiler_builtins-cgu.3[External]
-    #[no_mangle]
-    pub extern "C" fn z() {}
-}
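
For intuition on why the large `codegen-units` value behaves well:
`merge_codegen_units` only merges until at most `target_cgu_count` units
remain, so a target far above the number of source-level CGUs leaves the
original partitioning untouched, while `-Ccodegen-units=1` collapses
everything into one unit. A standalone sketch of count-driven merging under
that assumption (hypothetical `merge_until` over plain sizes, not the rustc
implementation):

    // Repeatedly fold the smallest unit into the second-smallest
    // until at most `target` units remain.
    fn merge_until(mut units: Vec<u64>, target: usize) -> Vec<u64> {
        assert!(target >= 1);
        while units.len() > target {
            units.sort_unstable_by(|a, b| b.cmp(a)); // largest first
            let smallest = units.pop().unwrap();
            *units.last_mut().unwrap() += smallest;
        }
        units
    }

    fn main() {
        // A huge target (the new compiler-builtins setup): nothing merges.
        assert_eq!(merge_until(vec![3, 1, 2], 10000), vec![3, 1, 2]);
        // -Ccodegen-units=1: everything collapses into a single unit.
        assert_eq!(merge_until(vec![3, 1, 2], 1), vec![6]);
    }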