diff --git a/src/bin/cargo/commands/owner.rs b/src/bin/cargo/commands/owner.rs index e2c672667b4..e9d5d85213b 100644 --- a/src/bin/cargo/commands/owner.rs +++ b/src/bin/cargo/commands/owner.rs @@ -7,7 +7,14 @@ pub fn cli() -> App { .about("Manage the owners of a crate on the registry") .arg(opt("quiet", "No output printed to stdout").short("q")) .arg(Arg::with_name("crate")) - .arg(multi_opt("add", "LOGIN", "Name of a user or team to invite as an owner").short("a")) + .arg( + multi_opt( + "add", + "LOGIN", + "Name of a user or team to invite as an owner", + ) + .short("a"), + ) .arg( multi_opt( "remove", diff --git a/src/bin/cargo/commands/test.rs b/src/bin/cargo/commands/test.rs index 84d26502428..06ef20d849c 100644 --- a/src/bin/cargo/commands/test.rs +++ b/src/bin/cargo/commands/test.rs @@ -19,8 +19,11 @@ pub fn cli() -> App { .last(true), ) .arg( - opt("quiet", "Display one character per test instead of one line") - .short("q") + opt( + "quiet", + "Display one character per test instead of one line", + ) + .short("q"), ) .arg_targets_all( "Test only this package's library unit tests", @@ -131,9 +134,9 @@ pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { } else if test_name.is_some() { if let CompileFilter::Default { .. } = compile_opts.filter { compile_opts.filter = ops::CompileFilter::new( - LibRule::Default, // compile the library, so the unit tests can be run filtered - FilterRule::All, // compile the binaries, so the unit tests in binaries can be run filtered - FilterRule::All, // compile the tests, so the integration tests can be run filtered + LibRule::Default, // compile the library, so the unit tests can be run filtered + FilterRule::All, // compile the binaries, so the unit tests in binaries can be run filtered + FilterRule::All, // compile the tests, so the integration tests can be run filtered FilterRule::none(), // specify --examples to unit test binaries filtered FilterRule::none(), // specify --benches to unit test benchmarks filtered ); // also, specify --doc to run doc tests filtered diff --git a/src/bin/cargo/commands/version.rs b/src/bin/cargo/commands/version.rs index 2e55ab83687..81c6838e7ab 100644 --- a/src/bin/cargo/commands/version.rs +++ b/src/bin/cargo/commands/version.rs @@ -3,7 +3,8 @@ use crate::command_prelude::*; use crate::cli; pub fn cli() -> App { - subcommand("version").about("Show version information") + subcommand("version") + .about("Show version information") .arg(opt("quiet", "No output printed to stdout").short("q")) } diff --git a/src/cargo/core/compiler/build_context/mod.rs b/src/cargo/core/compiler/build_context/mod.rs index e216bfa800d..febdc65f4b5 100644 --- a/src/cargo/core/compiler/build_context/mod.rs +++ b/src/cargo/core/compiler/build_context/mod.rs @@ -265,9 +265,7 @@ impl TargetConfig { } "rustc-cdylib-link-arg" => { let args = value.list(k)?; - output - .linker_args - .extend(args.iter().map(|v| v.0.clone())); + output.linker_args.extend(args.iter().map(|v| v.0.clone())); } "rustc-cfg" => { let list = value.list(k)?; diff --git a/src/cargo/core/compiler/context/compilation_files.rs b/src/cargo/core/compiler/context/compilation_files.rs index 309c35e439b..a8bc75d40bb 100644 --- a/src/cargo/core/compiler/context/compilation_files.rs +++ b/src/cargo/core/compiler/context/compilation_files.rs @@ -166,8 +166,13 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { } } - /// Returns the root of the build output tree. 
+    /// Returns the root of the build output tree for the target.
     pub fn target_root(&self) -> &Path {
+        self.target.as_ref().unwrap_or(&self.host).dest()
+    }
+
+    /// Returns the root of the build output tree for the host.
+    pub fn host_root(&self) -> &Path {
         self.host.dest()
     }
 
diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs
index 67e22d5ec83..b0ae71e299a 100644
--- a/src/cargo/core/compiler/context/mod.rs
+++ b/src/cargo/core/compiler/context/mod.rs
@@ -430,9 +430,10 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
                 path.display()
             )
         };
-        let suggestion = "Consider changing their names to be unique or compiling them separately.\n\
-                          This may become a hard error in the future; see \
-                          <https://github.com/rust-lang/cargo/issues/6313>.";
+        let suggestion =
+            "Consider changing their names to be unique or compiling them separately.\n\
+             This may become a hard error in the future; see \
+             <https://github.com/rust-lang/cargo/issues/6313>.";
         let report_collision = |unit: &Unit<'_>,
                                 other_unit: &Unit<'_>,
                                 path: &PathBuf|
diff --git a/src/cargo/core/compiler/custom_build.rs b/src/cargo/core/compiler/custom_build.rs
index 7516812842c..8f04085491f 100644
--- a/src/cargo/core/compiler/custom_build.rs
+++ b/src/cargo/core/compiler/custom_build.rs
@@ -8,10 +8,10 @@ use std::sync::{Arc, Mutex};
 use crate::core::PackageId;
 use crate::util::errors::{CargoResult, CargoResultExt};
 use crate::util::machine_message;
+use crate::util::Cfg;
 use crate::util::{self, internal, paths, profile};
-use crate::util::{Cfg, Freshness};
 
-use super::job::Work;
+use super::job::{Freshness, Job, Work};
 use super::{fingerprint, Context, Kind, TargetConfig, Unit};
 
 /// Contains the parsed output of a custom build script.
@@ -80,10 +80,7 @@ pub struct BuildDeps {
 /// prepare work for. If the requirement is specified as both the target and the
 /// host platforms it is assumed that the two are equal and the build script is
 /// only run once (not twice).
-pub fn prepare<'a, 'cfg>(
-    cx: &mut Context<'a, 'cfg>,
-    unit: &Unit<'a>,
-) -> CargoResult<(Work, Work, Freshness)> {
+pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> {
     let _p = profile::start(format!(
         "build script prepare: {}/{}",
         unit.pkg,
@@ -91,21 +88,11 @@
     let key = (unit.pkg.package_id(), unit.kind);
-    let overridden = cx.build_script_overridden.contains(&key);
-    let (work_dirty, work_fresh) = if overridden {
-        (Work::noop(), Work::noop())
-    } else {
-        build_work(cx, unit)?
-    };
 
-    if cx.bcx.build_config.build_plan {
-        Ok((work_dirty, work_fresh, Freshness::Dirty))
+    if cx.build_script_overridden.contains(&key) {
+        fingerprint::prepare_target(cx, unit, false)
     } else {
-        // Now that we've prep'd our work, build the work needed to manage the
-        // fingerprint and then start returning that upwards.
-        let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
-
-        Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
+        build_work(cx, unit)
     }
 }
 
@@ -125,7 +112,7 @@ fn emit_build_output(output: &BuildOutput, package_id: PackageId) {
     });
 }
 
-fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
+fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> {
     assert!(unit.mode.is_run_custom_build());
     let bcx = &cx.bcx;
     let dependencies = cx.dep_targets(unit);
@@ -248,7 +235,7 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes
     let output_file = script_run_dir.join("output");
     let err_file = script_run_dir.join("stderr");
     let root_output_file = script_run_dir.join("root-output");
-    let host_target_root = cx.files().target_root().to_path_buf();
+    let host_target_root = cx.files().host_root().to_path_buf();
     let all = (
         id,
         pkg_name.clone(),
@@ -260,22 +247,7 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes
     let kind = unit.kind;
     let json_messages = bcx.build_config.json_messages();
     let extra_verbose = bcx.config.extra_verbose();
-
-    // Check to see if the build script has already run, and if it has, keep
-    // track of whether it has told us about some explicit dependencies.
-    let prev_script_out_dir = paths::read_bytes(&root_output_file)
-        .and_then(|bytes| util::bytes2path(&bytes))
-        .unwrap_or_else(|_| script_out_dir.clone());
-
-    let prev_output = BuildOutput::parse_file(
-        &output_file,
-        &pkg_name,
-        &prev_script_out_dir,
-        &script_out_dir,
-    )
-    .ok();
-    let deps = BuildDeps::new(&output_file, prev_output.as_ref());
-    cx.build_explicit_deps.insert(*unit, deps);
+    let (prev_output, prev_script_out_dir) = prev_build_output(cx, unit);
 
     fs::create_dir_all(&script_dir)?;
     fs::create_dir_all(&script_out_dir)?;
@@ -392,7 +364,17 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes
         Ok(())
     });
 
-    Ok((dirty, fresh))
+    let mut job = if cx.bcx.build_config.build_plan {
+        Job::new(Work::noop(), Freshness::Dirty)
+    } else {
+        fingerprint::prepare_target(cx, unit, false)?
+    };
+    if job.freshness() == Freshness::Dirty {
+        job.before(dirty);
+    } else {
+        job.before(fresh);
+    }
+    Ok(job)
 }
 
 impl BuildState {
@@ -637,22 +619,20 @@ pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> Ca
         return Ok(&out[unit]);
     }
 
-        {
-            let key = unit
-                .pkg
-                .manifest()
-                .links()
-                .map(|l| (l.to_string(), unit.kind));
-            let build_state = &cx.build_state;
-            if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
-                let key = (unit.pkg.package_id(), unit.kind);
-                cx.build_script_overridden.insert(key);
-                build_state
-                    .outputs
-                    .lock()
-                    .unwrap()
-                    .insert(key, output.clone());
-            }
+        let key = unit
+            .pkg
+            .manifest()
+            .links()
+            .map(|l| (l.to_string(), unit.kind));
+        let build_state = &cx.build_state;
+        if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
+            let key = (unit.pkg.package_id(), unit.kind);
+            cx.build_script_overridden.insert(key);
+            build_state
+                .outputs
+                .lock()
+                .unwrap()
+                .insert(key, output.clone());
         }
 
         let mut ret = BuildScripts::default();
@@ -661,6 +641,10 @@ pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> Ca
             add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
         }
 
+        if unit.mode.is_run_custom_build() {
+            parse_previous_explicit_deps(cx, unit)?;
+        }
+
         // We want to invoke the compiler deterministically to be cache-friendly
         // to rustc invocation caching schemes, so be sure to generate the same
         // set of build script dependency orderings via sorting the targets that
@@ -694,4 +678,46 @@ pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> Ca
             scripts.to_link.push((pkg, kind));
         }
     }
+
+    fn parse_previous_explicit_deps<'a, 'cfg>(
+        cx: &mut Context<'a, 'cfg>,
+        unit: &Unit<'a>,
+    ) -> CargoResult<()> {
+        let script_run_dir = cx.files().build_script_run_dir(unit);
+        let output_file = script_run_dir.join("output");
+        let (prev_output, _) = prev_build_output(cx, unit);
+        let deps = BuildDeps::new(&output_file, prev_output.as_ref());
+        cx.build_explicit_deps.insert(*unit, deps);
+        Ok(())
+    }
+}
+
+/// Returns the parsed `BuildOutput`, if any, from a previous execution.
+///
+/// Also returns the directory containing the output, typically used later in
+/// processing.
+fn prev_build_output<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> (Option<BuildOutput>, PathBuf) {
+    let script_out_dir = cx.files().build_script_out_dir(unit);
+    let script_run_dir = cx.files().build_script_run_dir(unit);
+    let root_output_file = script_run_dir.join("root-output");
+    let output_file = script_run_dir.join("output");
+
+    let prev_script_out_dir = paths::read_bytes(&root_output_file)
+        .and_then(|bytes| util::bytes2path(&bytes))
+        .unwrap_or_else(|_| script_out_dir.clone());
+
+    (
+        BuildOutput::parse_file(
+            &output_file,
+            &unit.pkg.to_string(),
+            &prev_script_out_dir,
+            &script_out_dir,
+        )
+        .ok(),
+        prev_script_out_dir,
+    )
+}
diff --git a/src/cargo/core/compiler/fingerprint.rs b/src/cargo/core/compiler/fingerprint.rs
index a01723c4498..2586a06a295 100644
--- a/src/cargo/core/compiler/fingerprint.rs
+++ b/src/cargo/core/compiler/fingerprint.rs
@@ -14,13 +14,14 @@
 //!   Unit. If any of the inputs changes from the last compilation, then the
 //!   Unit is considered dirty. A missing fingerprint (such as during the
 //!   first build) is also considered dirty.
-//! - Dirty propagation is done in the `JobQueue`. When a Unit is dirty, the
-//!   `JobQueue` automatically treats anything that depends on it as dirty.
-//!   Anything that relies on this is probably a bug. The fingerprint should
-//!   always be complete (but there are some known limitations). This is a
-//!   problem because not all Units are built all at once. If two separate
-//!   `cargo` commands are run that build different Units, this dirty
-//!   propagation will not work across commands.
+//! - Whether or not input files are actually present. For example a build
+//!   script which says it depends on a nonexistent file `foo` is always rerun.
+//! - Propagation throughout the dependency graph of file modification time
+//!   information, used to detect changes on the filesystem. Each `Fingerprint`
+//!   keeps track of what files it'll be processing, and when necessary it will
+//!   check the `mtime` of each file (last modification time) and compare it to
+//!   dependencies and output to see if files have been changed or if a change
+//!   needs to force recompiles of downstream dependencies.
 //!
 //! Note: Fingerprinting is not a perfect solution. Filesystem mtime tracking
 //! is notoriously imprecise and problematic. Only a small part of the
@@ -97,10 +98,10 @@
 //!
 //! After the list of Units has been calculated, the Units are added to the
 //! `JobQueue`. As each one is added, the fingerprint is calculated, and the
-//! dirty/fresh status is recorded in the `JobQueue`. A closure is used to
-//! update the fingerprint on-disk when the Unit successfully finishes. The
-//! closure will recompute the Fingerprint based on the updated information.
-//! If the Unit fails to compile, the fingerprint is not updated.
+//! dirty/fresh status is recorded. A closure is used to update the fingerprint
+//! on-disk when the Unit successfully finishes. The closure will recompute the
+//! Fingerprint based on the updated information. If the Unit fails to compile,
+//! the fingerprint is not updated.
 //!
 //! Fingerprints are cached in the `Context`. This makes computing
 //! Fingerprints faster, but also is necessary for properly updating
@@ -108,13 +109,41 @@
 //! all dependencies, when it is updated, by using `Arc` clones, it
 //! automatically picks up the updates to its dependencies.
 //!
+//! ## Considerations for inclusion in a fingerprint
+//!
+//! Over time we've realized a few items which historically were included in
+//! fingerprint hashings should not actually be included. Examples are:
+//!
+//! * Modification time values. We strive to never include a modification time
+//!   inside a `Fingerprint` to get hashed into an actual value. While
+//!   theoretically fine to do, in practice this causes issues with common
+//!   applications like Docker. Docker, after a layer is built, will zero out
+//!   the nanosecond part of all filesystem modification times. This means that
+//!   the actual modification time is different for all build artifacts, which
+//!   if we tracked the actual values of modification times would cause
+//!   unnecessary recompiles. To fix this we instead only track paths which are
+//!   relevant. These paths are checked dynamically to see if they're up to
+//!   date, and the modification time doesn't make its way into the fingerprint
+//!   hash.
+//!
+//! * Absolute path names. We strive to maintain a property where if you rename
+//!   a project directory Cargo will continue to preserve all build artifacts
+//!   and reuse the cache. This means that we can't ever hash an absolute path
+//!   name. Instead we always hash relative path names and the "root" is passed
+//!   in at runtime dynamically. Some of this is best effort, but the general
+//!   idea is that we assume all accesses within a crate stay within that
+//!   crate.
+//!
+//! These are pretty tricky to test for unfortunately, but we should have a good
+//! test suite nowadays and lord knows Cargo gets enough testing in the wild!
+//!
 //! ## Build scripts
 //!
 //! The *running* of a build script (`CompileMode::RunCustomBuild`) is treated
 //! significantly different than all other Unit kinds. It has its own function
-//! for calculating the Fingerprint (`prepare_build_cmd`) and has some unique
-//! considerations. It does not track the same information as a normal Unit.
-//! The information tracked depends on the `rerun-if-changed` and
+//! for calculating the Fingerprint (`calculate_run_custom_build`) and has some
+//! unique considerations. It does not track the same information as a normal
+//! Unit. The information tracked depends on the `rerun-if-changed` and
 //! `rerun-if-env-changed` statements produced by the build script. If the
 //! script does not emit either of these statements, the Fingerprint runs in
 //! "old style" mode where an mtime change of *any* file in the package will
@@ -165,7 +194,7 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
 
-use failure::bail;
+use failure::{bail, format_err};
 use filetime::FileTime;
 use log::{debug, info};
 use serde::de;
@@ -176,44 +205,34 @@ use crate::core::Package;
 use crate::util;
 use crate::util::errors::{CargoResult, CargoResultExt};
 use crate::util::paths;
-use crate::util::{internal, profile, Dirty, Fresh, Freshness};
+use crate::util::{internal, profile};
 
 use super::custom_build::BuildDeps;
-use super::job::Work;
-use super::{BuildContext, Context, FileFlavor, Unit};
-
-/// A tuple result of the `prepare_foo` functions in this module.
+use super::job::{
+    Freshness::{Dirty, Fresh},
+    Job, Work,
+};
+use super::{BuildContext, Context, FileFlavor, Kind, Unit};
+
+/// Determines if a `unit` is up-to-date, and if not prepares necessary work to
+/// update the persisted fingerprint.
 ///
-/// The first element of the triple is whether the target in question is
-/// currently fresh or not, and the second two elements are work to perform when
-/// the target is dirty or fresh, respectively.
+/// This function will inspect `unit`, calculate a fingerprint for it, and then
+/// return an appropriate `Job` to run. The returned `Job` will be a noop if
+/// `unit` is considered "fresh", or if it was previously built and cached.
+/// Otherwise the `Job` returned will write out the true fingerprint to the
+/// filesystem, to be executed after the unit's work has completed.
 ///
-/// Both units of work are always generated because a fresh package may still be
-/// rebuilt if some upstream dependency changes.
-pub type Preparation = (Freshness, Work, Work);
-
-/// Prepare the necessary work for the fingerprint for a specific target.
-///
-/// When dealing with fingerprints, cargo gets to choose what granularity
-/// "freshness" is considered at. One option is considering freshness at the
-/// package level. This means that if anything in a package changes, the entire
-/// package is rebuilt, unconditionally. This simplicity comes at a cost,
-/// however, in that test-only changes will cause libraries to be rebuilt, which
-/// is quite unfortunate!
-///
-/// The cost was deemed high enough that fingerprints are now calculated at the
-/// layer of a target rather than a package. Each target can then be kept track
-/// of separately and only rebuilt as necessary. This requires cargo to
-/// understand what the inputs are to a target, so we drive rustc with the
-/// --dep-info flag to learn about all input files to a unit of compilation.
-///
-/// This function will calculate the fingerprint for a target and prepare the
-/// work necessary to either write the fingerprint or copy over all fresh files
-/// from the old directories to their new locations.
+/// The `force` flag is a way to force the `Job` to be "dirty", or always
+/// update the fingerprint. **Beware using this flag** because it does not
+/// transitively propagate throughout the dependency graph; it only forces this
+/// one unit, which is very unlikely to be what you want unless you're
+/// exclusively talking about top-level units.
 pub fn prepare_target<'a, 'cfg>(
     cx: &mut Context<'a, 'cfg>,
     unit: &Unit<'a>,
-) -> CargoResult<Preparation> {
+    force: bool,
+) -> CargoResult<Job> {
     let _p = profile::start(format!(
         "fingerprint: {} / {}",
         unit.pkg.package_id(),
@@ -225,6 +244,9 @@ pub fn prepare_target<'a, 'cfg>(
 
     debug!("fingerprint at: {}", loc.display());
 
+    // Figure out if this unit is up to date. After calculating the fingerprint
+    // compare it to an old version, if any, and attempt to print diagnostic
+    // information about failed comparisons to aid in debugging.
     let fingerprint = calculate(cx, unit)?;
     let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
     let compare = compare_old_fingerprint(&loc, &*fingerprint, mtime_on_use);
@@ -249,64 +271,54 @@ pub fn prepare_target<'a, 'cfg>(
         source.verify(unit.pkg.package_id())?;
     }
 
-    let root = cx.files().out_dir(unit);
-    let missing_outputs = {
-        let t = FileTime::from_system_time(SystemTime::now());
-        if unit.mode.is_doc() {
-            !root
-                .join(unit.target.crate_name())
-                .join("index.html")
-                .exists()
-        } else {
-            match cx
-                .outputs(unit)?
-                .iter()
-                .filter(|output| output.flavor != FileFlavor::DebugInfo)
-                .find(|output| {
-                    if output.path.exists() {
-                        if mtime_on_use {
-                            // update the mtime so other cleaners know we used it
-                            let _ = filetime::set_file_times(&output.path, t, t);
-                        }
-                        false
-                    } else {
-                        true
-                    }
-                }) {
-                None => false,
-                Some(output) => {
-                    info!("missing output path {:?}", output.path);
-                    true
-                }
+    if compare.is_ok() && !force {
+        return Ok(Job::new(Work::noop(), Fresh));
+    }
+
+    let write_fingerprint = if unit.mode.is_run_custom_build() {
+        // For build scripts the `local` field of the fingerprint may change
+        // while we're executing it. For example it could be in the legacy
+        // "consider everything a dependency" mode, and then we switch to "deps
+        // are explicitly specified" mode.
+        //
+        // To handle this movement we need to regenerate the `local` field of a
+        // build script's fingerprint after it's executed. We do this by
+        // using the `build_script_local_fingerprints` function which returns a
+        // thunk we can invoke on a foreign thread to calculate this.
+        let state = Arc::clone(&cx.build_state);
+        let key = (unit.pkg.package_id(), unit.kind);
+        let (gen_local, _overridden) = build_script_local_fingerprints(cx, unit);
+        let output_path = cx.build_explicit_deps[unit].build_script_output.clone();
+        Work::new(move |_| {
+            let outputs = state.outputs.lock().unwrap();
+            let outputs = &outputs[&key];
+            let deps = BuildDeps::new(&output_path, Some(outputs));
+
+            // FIXME: it's basically buggy that we pass `None` to `call_box`
+            // here. See documentation on `build_script_local_fingerprints`
+            // below for more information. Despite this just try to proceed and
+            // hobble along if it happens to return `Some`.
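+            // (Editor's sketch, not upstream code: per the interface assumed
+            // below, `call_box` returns
+            // `CargoResult<Option<Vec<LocalFingerprint>>>`. Passing `None`
+            // here means a script with no `rerun-if-*` directives comes back
+            // as `Ok(None)`, so the stale `local` value is silently kept;
+            // that is exactly the latent bug the FIXME above describes.)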
+            if let Some(new_local) = gen_local.call_box(&deps, None)? {
+                *fingerprint.local.lock().unwrap() = new_local;
+                *fingerprint.memoized_hash.lock().unwrap() = None;
             }
-        }
+
+            write_fingerprint(&loc, &fingerprint)
+        })
+    } else {
+        Work::new(move |_| write_fingerprint(&loc, &fingerprint))
     };
 
-    let allow_failure = bcx.extra_args_for(unit).is_some();
-    let target_root = cx.files().target_root().to_path_buf();
-    let write_fingerprint = Work::new(move |_| {
-        match fingerprint.update_local(&target_root) {
-            Ok(()) => {}
-            Err(..) if allow_failure => return Ok(()),
-            Err(e) => return Err(e),
-        }
-        write_fingerprint(&loc, &*fingerprint)
-    });
-
-    let fresh = compare.is_ok() && !missing_outputs;
-    Ok((
-        if fresh { Fresh } else { Dirty },
-        write_fingerprint,
-        Work::noop(),
-    ))
+    Ok(Job::new(write_fingerprint, Dirty))
 }
 
 /// A compilation unit dependency has a fingerprint that is comprised of:
 /// * its package ID
 /// * its extern crate name
 /// * its calculated fingerprint for the dependency
+#[derive(Clone)]
 struct DepFingerprint {
-    pkg_id: String,
+    pkg_id: u64,
     name: String,
     fingerprint: Arc<Fingerprint>,
 }
@@ -351,10 +363,10 @@ pub struct Fingerprint {
     deps: Vec<DepFingerprint>,
     /// Information about the inputs that affect this Unit (such as source
     /// file mtimes or build script environment variables).
-    local: Vec<LocalFingerprint>,
+    local: Mutex<Vec<LocalFingerprint>>,
     /// Cached hash of the `Fingerprint` struct. Used to improve performance
     /// for hashing.
-    #[serde(skip_serializing, skip_deserializing)]
+    #[serde(skip)]
    memoized_hash: Mutex<Option<u64>>,
    /// RUSTFLAGS/RUSTDOCFLAGS environment variable value (or config value).
    rustflags: Vec<String>,
@@ -362,6 +374,45 @@ pub struct Fingerprint {
     /// "description", which are exposed as environment variables during
     /// compilation.
     metadata: u64,
+    /// Description of whether the filesystem status for this unit is up to date
+    /// or should be considered stale.
+    #[serde(skip)]
+    fs_status: FsStatus,
+    /// Files, relative to `target_root`, that are produced by the step that
+    /// this `Fingerprint` represents. This is used to detect when the whole
+    /// fingerprint is out of date if this is missing, or if previous
+    /// fingerprints' output files are regenerated and look newer than this one.
+    #[serde(skip)]
+    outputs: Vec<PathBuf>,
+}
+
+/// Indication of the status on the filesystem for a particular unit.
+enum FsStatus {
+    /// This unit is to be considered stale, even if hash information all
+    /// matches. The filesystem inputs have changed (or are missing) and the
+    /// unit needs to subsequently be recompiled.
+    Stale,
+
+    /// This unit is up-to-date; it does not need to be recompiled. If there are
+    /// any outputs then the `FileTime` listed here is the minimum of all their
+    /// mtimes. This is then later used to see if a unit is newer than one of
+    /// its dependents, causing the dependent to be recompiled.
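+    ///
+    /// Editor's illustration (assumed timestamps, not from this change): if
+    /// this unit records `UpToDate(Some(t))` with `t = 10s` and a dependent's
+    /// own oldest output has mtime `9s`, then `check_filesystem` below sees
+    /// `dep_mtime > mtime` for the dependent and leaves it `Stale`.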
+    UpToDate(Option<FileTime>),
+}
+
+impl FsStatus {
+    fn up_to_date(&self) -> bool {
+        match self {
+            FsStatus::UpToDate(_) => true,
+            FsStatus::Stale => false,
+        }
+    }
+}
+
+impl Default for FsStatus {
+    fn default() -> FsStatus {
+        FsStatus::Stale
+    }
 }
 
 impl Serialize for DepFingerprint {
@@ -378,12 +429,11 @@ impl<'de> Deserialize<'de> for DepFingerprint {
     where
         D: de::Deserializer<'de>,
     {
-        let (pkg_id, name, hash) = <(String, String, u64)>::deserialize(d)?;
+        let (pkg_id, name, hash) = <(u64, String, u64)>::deserialize(d)?;
         Ok(DepFingerprint {
             pkg_id,
             name,
             fingerprint: Arc::new(Fingerprint {
-                local: vec![LocalFingerprint::Precalculated(String::new())],
                 memoized_hash: Mutex::new(Some(hash)),
                 ..Fingerprint::new()
             }),
@@ -391,19 +441,128 @@ impl<'de> Deserialize<'de> for DepFingerprint {
     }
 }
 
+/// A `LocalFingerprint` represents something that we use to detect direct
+/// changes to a `Fingerprint`.
+///
+/// This is where we track file information, env vars, etc. This
+/// `LocalFingerprint` struct is hashed and if the hash changes will force a
+/// recompile of any fingerprint it's included into. Note that the "local"
+/// terminology comes from the fact that it only has to do with one crate, and
+/// `Fingerprint` tracks the transitive propagation of fingerprint changes.
+///
+/// Note that because this is hashed its contents are carefully managed. Like
+/// mentioned in the above module docs, we don't want to hash absolute paths or
+/// mtime information.
+///
+/// Also note that a `LocalFingerprint` is used in `check_filesystem` to detect
+/// when the filesystem contains stale information (based on mtime currently).
+/// The paths here don't change much between compilations but they're used as
+/// inputs when we probe the filesystem looking at information.
 #[derive(Debug, Serialize, Deserialize, Hash)]
 enum LocalFingerprint {
+    /// This is a precalculated fingerprint which has an opaque string we just
+    /// hash as usual. This variant is primarily used for git/crates.io
+    /// dependencies where the source never changes so we can quickly conclude
+    /// that there's some string we can hash and it won't really change much.
+    ///
+    /// This is also used for build scripts with no `rerun-if-*` statements, but
+    /// that's overall a mistake and causes bugs in Cargo. We shouldn't use this
+    /// for build scripts.
     Precalculated(String),
-    MtimeBased(MtimeSlot, PathBuf),
-    EnvBased(String, Option<String>),
+
+    /// This is used for crate compilations. The `dep_info` file is a relative
+    /// path anchored at `target_root(...)` to the dep-info file that Cargo
+    /// generates (which is a custom serialization after parsing rustc's own
+    /// `dep-info` output).
+    ///
+    /// The `dep_info` file, when present, also lists a number of other files
+    /// for us to look at. If any of those files are newer than this file then
+    /// we need to recompile.
+    CheckDepInfo {
+        dep_info: PathBuf,
+    },
+
+    /// This represents a nonempty set of `rerun-if-changed` annotations printed
+    /// out by a build script. The `output` file is a relative file anchored at
+    /// `target_root(...)` which is the actual output of the build script. That
+    /// output has already been parsed and the paths printed out via
+    /// `rerun-if-changed` are listed in `paths`. The `paths` field is relative
+    /// to `pkg.root()`.
+    ///
+    /// This is considered up-to-date if all of the `paths` are older than
+    /// `output`, otherwise we need to recompile.
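+    ///
+    /// Editor's illustration (hypothetical values): a build script printing
+    /// `cargo:rerun-if-changed=src/foo.c` is recorded roughly as
+    /// `RerunIfChanged { output: "output", paths: ["src/foo.c"] }`, since the
+    /// script's own `output` file sits directly in `target_root(...)` for a
+    /// build-script unit and `src/foo.c` is taken relative to `pkg.root()`;
+    /// `find_stale_file` then compares each path's mtime against `output`.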
+    RerunIfChanged {
+        output: PathBuf,
+        paths: Vec<PathBuf>,
+    },
+
+    /// This represents a single `rerun-if-env-changed` annotation printed by a
+    /// build script. The exact env var and value are hashed here. There's no
+    /// filesystem dependence here, and if the values are changed the hash will
+    /// change forcing a recompile.
+    RerunIfEnvChanged {
+        var: String,
+        val: Option<String>,
+    },
+}
+
+enum StaleFile {
+    Missing(PathBuf),
+    Changed {
+        reference: PathBuf,
+        reference_mtime: FileTime,
+        stale: PathBuf,
+        stale_mtime: FileTime,
+    },
 }
 
 impl LocalFingerprint {
-    fn mtime(root: &Path, mtime: Option<FileTime>, path: &Path) -> LocalFingerprint {
-        let mtime = MtimeSlot(Mutex::new(mtime));
-        assert!(path.is_absolute());
-        let path = path.strip_prefix(root).unwrap_or(path);
-        LocalFingerprint::MtimeBased(mtime, path.to_path_buf())
+    /// Checks dynamically at runtime if this `LocalFingerprint` has a stale
+    /// file.
+    ///
+    /// This will use the absolute root paths passed in if necessary to guide
+    /// file accesses.
+    fn find_stale_file(
+        &self,
+        pkg_root: &Path,
+        target_root: &Path,
+    ) -> CargoResult<Option<StaleFile>> {
+        match self {
+            // We need to parse `dep_info`, learn about all the files the crate
+            // depends on, and then see if any of them are newer than the
+            // dep_info file itself. If the `dep_info` file is missing then this
+            // unit has never been compiled!
+            LocalFingerprint::CheckDepInfo { dep_info } => {
+                let dep_info = target_root.join(dep_info);
+                if let Some(paths) = parse_dep_info(pkg_root, &dep_info)? {
+                    Ok(find_stale_file(&dep_info, paths.iter()))
+                } else {
+                    Ok(Some(StaleFile::Missing(dep_info)))
+                }
+            }
+
+            // We need to verify that no paths listed in `paths` are newer than
+            // the `output` path itself, or the last time the build script ran.
+            LocalFingerprint::RerunIfChanged { output, paths } => Ok(find_stale_file(
+                &target_root.join(output),
+                paths.iter().map(|p| pkg_root.join(p)),
+            )),
+
+            // These have no dependencies on the filesystem, and their values
+            // are included natively in the `Fingerprint` hash so nothing to
+            // check for here.
+            LocalFingerprint::RerunIfEnvChanged { .. } => Ok(None),
+            LocalFingerprint::Precalculated(..) => Ok(None),
+        }
+    }
+
+    fn kind(&self) -> &'static str {
+        match self {
+            LocalFingerprint::Precalculated(..) => "precalculated",
+            LocalFingerprint::CheckDepInfo { .. } => "dep-info",
+            LocalFingerprint::RerunIfChanged { .. } => "rerun-if-changed",
+            LocalFingerprint::RerunIfEnvChanged { .. } => "rerun-if-env-changed",
        }
    }
 }
@@ -419,29 +578,15 @@ impl Fingerprint {
             path: 0,
             features: String::new(),
             deps: Vec::new(),
-            local: Vec::new(),
+            local: Mutex::new(Vec::new()),
             memoized_hash: Mutex::new(None),
             rustflags: Vec::new(),
             metadata: 0,
+            fs_status: FsStatus::Stale,
+            outputs: Vec::new(),
         }
     }
 
-    fn update_local(&self, root: &Path) -> CargoResult<()> {
-        for local in self.local.iter() {
-            match *local {
-                LocalFingerprint::MtimeBased(ref slot, ref path) => {
-                    let path = root.join(path);
-                    let mtime = paths::mtime(&path)?;
-                    *slot.0.lock().unwrap() = Some(mtime);
-                }
-                LocalFingerprint::EnvBased(..) | LocalFingerprint::Precalculated(..) => continue,
-            }
-        }
-
-        *self.memoized_hash.lock().unwrap() = None;
-        Ok(())
-    }
-
     fn hash(&self) -> u64 {
         if let Some(s) = *self.memoized_hash.lock().unwrap() {
             return s;
@@ -451,6 +596,12 @@ impl Fingerprint {
         ret
     }
 
+    /// Compares this fingerprint with an old version which was previously
+    /// serialized to filesystem.
+    ///
+    /// The purpose of this is exclusively to produce a diagnostic message
+    /// indicating why we're recompiling something. This function always returns
+    /// an error; it will never return success.
     fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
         if self.rustc != old.rustc {
             bail!("rust compiler has changed")
@@ -474,49 +625,59 @@ impl Fingerprint {
         if self.rustflags != old.rustflags {
             bail!("RUSTFLAGS has changed")
         }
-        if self.local.len() != old.local.len() {
-            bail!("local lens changed");
-        }
         if self.metadata != old.metadata {
             bail!("metadata changed")
         }
-        for (new, old) in self.local.iter().zip(&old.local) {
+        let my_local = self.local.lock().unwrap();
+        let old_local = old.local.lock().unwrap();
+        if my_local.len() != old_local.len() {
+            bail!("local lens changed");
+        }
+        for (new, old) in my_local.iter().zip(old_local.iter()) {
             match (new, old) {
-                (
-                    &LocalFingerprint::Precalculated(ref a),
-                    &LocalFingerprint::Precalculated(ref b),
-                ) => {
+                (LocalFingerprint::Precalculated(a), LocalFingerprint::Precalculated(b)) => {
                     if a != b {
                         bail!("precalculated components have changed: {} != {}", a, b)
                     }
                 }
                 (
-                    &LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap),
-                    &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp),
+                    LocalFingerprint::CheckDepInfo { dep_info: adep },
+                    LocalFingerprint::CheckDepInfo { dep_info: bdep },
                 ) => {
-                    let on_disk_mtime = on_disk_mtime.0.lock().unwrap();
-                    let previously_built_mtime = previously_built_mtime.0.lock().unwrap();
-
-                    let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) {
-                        (None, None) => false,
-                        (Some(_), None) | (None, Some(_)) => true,
-                        (Some(on_disk), Some(previously_built)) => on_disk > previously_built,
-                    };
-
-                    if should_rebuild {
+                    if adep != bdep {
+                        bail!("dep info output changed: {:?} != {:?}", adep, bdep)
+                    }
+                }
+                (
+                    LocalFingerprint::RerunIfChanged {
+                        output: aout,
+                        paths: apaths,
+                    },
+                    LocalFingerprint::RerunIfChanged {
+                        output: bout,
+                        paths: bpaths,
+                    },
+                ) => {
+                    if aout != bout {
+                        bail!("rerun-if-changed output changed: {:?} != {:?}", aout, bout)
+                    }
+                    if apaths != bpaths {
                         bail!(
-                            "mtime based components have changed: previously {:?} now {:?}, \
-                             paths are {:?} and {:?}",
-                            *previously_built_mtime,
-                            *on_disk_mtime,
-                            ap,
-                            bp
+                            "rerun-if-changed paths changed: {:?} != {:?}",
+                            apaths,
+                            bpaths,
                        )
                     }
                 }
                 (
-                    &LocalFingerprint::EnvBased(ref akey, ref avalue),
-                    &LocalFingerprint::EnvBased(ref bkey, ref bvalue),
+                    LocalFingerprint::RerunIfEnvChanged {
+                        var: akey,
+                        val: avalue,
+                    },
+                    LocalFingerprint::RerunIfEnvChanged {
+                        var: bkey,
+                        val: bvalue,
+                    },
                 ) => {
                     if *akey != *bkey {
                         bail!("env vars changed: {} != {}", akey, bkey);
@@ -530,7 +691,11 @@ impl Fingerprint {
                         )
                     }
                 }
-                _ => bail!("local fingerprint type has changed"),
+                (a, b) => bail!(
+                    "local fingerprint type has changed ({} => {})",
+                    b.kind(),
+                    a.kind()
+                ),
             }
         }
 
@@ -538,14 +703,125 @@ impl Fingerprint {
             bail!("number of dependencies has changed")
         }
         for (a, b) in self.deps.iter().zip(old.deps.iter()) {
-            if a.name != b.name || a.fingerprint.hash() != b.fingerprint.hash() {
-                bail!("new ({}) != old ({})", a.pkg_id, b.pkg_id)
+            if a.name != b.name {
+                let e = format_err!("`{}` != `{}`", a.name, b.name)
+                    .context("unit dependency name changed");
+                return Err(e.into());
+            }
+
+            if a.fingerprint.hash() != b.fingerprint.hash() {
+                let e = format_err!(
+                    "new ({}/{:x}) != old ({}/{:x})",
+                    a.name,
+                    a.fingerprint.hash(),
+                    b.name,
+                    b.fingerprint.hash()
+                )
+                .context("unit dependency information changed");
+                return Err(e.into());
+            }
         }
-        // Two fingerprints may have different hash values, but still succeed
-        // in this compare function if the difference is due to a
-        // LocalFingerprint value that changes in a compatible way. For
-        // example, moving the mtime of a file backwards in time,
+
+        if !self.fs_status.up_to_date() {
+            bail!("current filesystem status shows we're outdated");
+        }
+
+        // This typically means some filesystem modifications happened or
+        // something transitive was odd. In general we should strive to provide
+        // a better error message than this, so if you see this message a lot it
+        // likely means this method needs to be updated!
+        bail!("two fingerprint comparison turned up nothing obvious");
+    }
+
+    /// Dynamically inspect the local filesystem to update the `fs_status` field
+    /// of this `Fingerprint`.
+    ///
+    /// This function is used just after a `Fingerprint` is constructed to check
+    /// the local state of the filesystem and propagate any dirtiness from
+    /// dependencies up to this unit as well. This function assumes that the
+    /// unit starts out as `FsStatus::Stale` and then it will optionally switch
+    /// it to `UpToDate` if it can.
+    fn check_filesystem(
+        &mut self,
+        pkg_root: &Path,
+        target_root: &Path,
+        mtime_on_use: bool,
+    ) -> CargoResult<()> {
+        assert!(!self.fs_status.up_to_date());
+
+        // Get the `mtime` of all outputs. Optionally update their mtime
+        // afterwards based on the `mtime_on_use` flag. Afterwards we want the
+        // minimum mtime as it's the one we'll be comparing to inputs and
+        // dependencies.
+        let status = self
+            .outputs
+            .iter()
+            .map(|f| {
+                let mtime = paths::mtime(f).ok();
+                if mtime_on_use {
+                    let t = FileTime::from_system_time(SystemTime::now());
+                    drop(filetime::set_file_times(f, t, t));
+                }
+                return mtime;
+            })
+            .min();
+
+        let mtime = match status {
+            // We had no output files. This means we're an overridden build
+            // script and we're just always up to date because we aren't
+            // watching the filesystem.
+            None => {
+                self.fs_status = FsStatus::UpToDate(None);
+                return Ok(());
+            }
+
+            // At least one path failed to report its `mtime`. It probably
+            // doesn't exist, so leave ourselves as stale and bail out.
+            Some(None) => return Ok(()),
+
+            // All files successfully reported an `mtime`, and we've got the
+            // minimum one, so let's keep going with that.
+            Some(Some(mtime)) => mtime,
+        };
+
+        for dep in self.deps.iter() {
+            let dep_mtime = match dep.fingerprint.fs_status {
+                // If our dependency is stale, so are we, so bail out.
+                FsStatus::Stale => return Ok(()),
+
+                // If our dependency is up to date and has no filesystem
+                // interactions, then we can move on to the next dependency.
+                FsStatus::UpToDate(None) => continue,
+
+                FsStatus::UpToDate(Some(mtime)) => mtime,
+            };
+
+            // If the dependency is newer than our own output then it was
+            // recompiled previously. We transitively become stale ourselves in
+            // that case, so bail out.
+            //
+            // Note that this comparison should probably be `>=`, not `>`, but
+            // for a discussion of why it's `>` see the discussion about #5918
+            // below in `find_stale`.
+            if dep_mtime > mtime {
+                return Ok(());
+            }
+        }
+
+        // If we reached this far then all dependencies are up to date. Check
+        // all our `LocalFingerprint` information to see if we have any stale
+        // files for this package itself. If we do find something, log a helpful
+        // message and bail out so we stay stale.
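+        // (Editor's note: the `StaleFile::log` calls triggered below emit at
+        // `info` level via the `log` crate; running Cargo with an assumed
+        // `RUST_LOG=cargo::core::compiler::fingerprint=info` filter is one way
+        // to surface which file caused the staleness.)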
+        for local in self.local.get_mut().unwrap().iter() {
+            if let Some(file) = local.find_stale_file(pkg_root, target_root)? {
+                file.log();
+                return Ok(());
+            }
+        }
+
+        // Everything was up to date! Record such.
+        self.fs_status = FsStatus::UpToDate(Some(mtime));
+        Ok(())
     }
 }
 
@@ -564,8 +840,9 @@ impl hash::Hash for Fingerprint {
             ref rustflags,
             ..
         } = *self;
+        let local = local.lock().unwrap();
         (
-            rustc, features, target, path, profile, local, metadata, rustflags,
+            rustc, features, target, path, profile, &*local, metadata, rustflags,
         )
             .hash(h);
 
@@ -615,7 +892,65 @@ impl<'de> de::Deserialize<'de> for MtimeSlot {
     }
 }
 
-/// Calculates the fingerprint for a package/target pair.
+impl DepFingerprint {
+    fn new<'a, 'cfg>(
+        cx: &mut Context<'a, 'cfg>,
+        parent: &Unit<'a>,
+        dep: &Unit<'a>,
+    ) -> CargoResult<DepFingerprint> {
+        let fingerprint = calculate(cx, dep)?;
+        let name = cx.bcx.extern_crate_name(parent, dep)?;
+
+        // We need to be careful about what we hash here. We have a goal of
+        // supporting renaming a project directory and not rebuilding
+        // everything. To do that, however, we need to make sure that the cwd
+        // doesn't make its way into any hashes, and one source of that is the
+        // `SourceId` for `path` packages.
+        //
+        // We already have a requirement that `path` packages all have unique
+        // names (sort of for this same reason), so if the package source is a
+        // `path` then we just hash the name, but otherwise we hash the full
+        // id as it won't change when the directory is renamed.
+        let pkg_id = if dep.pkg.package_id().source_id().is_path() {
+            util::hash_u64(dep.pkg.package_id().name())
+        } else {
+            util::hash_u64(dep.pkg.package_id())
+        };
+
+        Ok(DepFingerprint {
+            pkg_id,
+            name,
+            fingerprint,
+        })
+    }
+}
+
+impl StaleFile {
+    /// Use the `log` crate to log a hopefully helpful message in diagnosing
+    /// what file is considered stale and why. This is intended to be used in
+    /// conjunction with `RUST_LOG` to determine why Cargo is recompiling
+    /// something. Currently there's no user-facing usage of this other than
+    /// that.
+    fn log(&self) {
+        match self {
+            StaleFile::Missing(path) => {
+                log::info!("stale: missing {:?}", path);
+            }
+            StaleFile::Changed {
+                reference,
+                reference_mtime,
+                stale,
+                stale_mtime,
+            } => {
+                log::info!("stale: changed {:?}", stale);
+                log::info!("          (vs) {:?}", reference);
+                log::info!("               {:?} != {:?}", reference_mtime, stale_mtime);
+            }
+        }
+    }
+}
+
+/// Calculates the fingerprint for a `unit`.
 ///
 /// This fingerprint is used by Cargo to learn about when information such as:
 ///
@@ -632,85 +967,108 @@ fn calculate<'a, 'cfg>(
     cx: &mut Context<'a, 'cfg>,
     unit: &Unit<'a>,
 ) -> CargoResult<Arc<Fingerprint>> {
-    let bcx = cx.bcx;
+    // This function is slammed quite a lot, so the result is memoized.
     if let Some(s) = cx.fingerprints.get(unit) {
         return Ok(Arc::clone(s));
     }
+    let mut fingerprint = if unit.mode.is_run_custom_build() {
+        calculate_run_custom_build(cx, unit)?
+    } else {
+        calculate_normal(cx, unit)?
+    };
+
+    // After we built the initial `Fingerprint` be sure to update the
+    // `fs_status` field of it.
+    let target_root = target_root(cx, unit);
+    let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
+    fingerprint.check_filesystem(unit.pkg.root(), &target_root, mtime_on_use)?;
 
-    // Next, recursively calculate the fingerprint for all of our dependencies.
+    let fingerprint = Arc::new(fingerprint);
+    cx.fingerprints.insert(*unit, Arc::clone(&fingerprint));
+    Ok(fingerprint)
+}
+
+/// Calculate a fingerprint for a "normal" unit, or anything that's not a build
+/// script. This is an internal helper of `calculate`, don't call directly.
+fn calculate_normal<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<Fingerprint> {
+    // Recursively calculate the fingerprint for all of our dependencies.
     //
-    // Skip the fingerprints of build scripts, they are included below in the
-    // `local` vec. Also skip fingerprints of binaries because they don't
-    // actually induce a recompile, they're just dependencies in the sense
-    // that they need to be built.
+    // Skip fingerprints of binaries because they don't actually induce a
+    // recompile, they're just dependencies in the sense that they need to be
+    // built.
     let mut deps = cx
         .dep_targets(unit)
         .iter()
-        .filter(|u| !u.target.is_custom_build() && !u.target.is_bin())
-        .map(|dep| {
-            calculate(cx, dep).and_then(|fingerprint| {
-                let name = cx.bcx.extern_crate_name(unit, dep)?;
-                Ok(DepFingerprint {
-                    pkg_id: dep.pkg.package_id().to_string(),
-                    name,
-                    fingerprint,
-                })
-            })
-        })
+        .filter(|u| !u.target.is_bin())
+        .map(|dep| DepFingerprint::new(cx, unit, dep))
        .collect::<CargoResult<Vec<_>>>()?;
     deps.sort_by(|a, b| a.pkg_id.cmp(&b.pkg_id));
 
-    // And finally, calculate what our own local fingerprint is.
+    // Afterwards calculate our own fingerprint information. We specially
+    // handle `path` packages to ensure we track files on the filesystem
+    // correctly, but otherwise upstream packages like from crates.io or git
+    // get bland fingerprints because they don't change without their
+    // `PackageId` changing.
+    let target_root = target_root(cx, unit);
     let local = if use_dep_info(unit) {
         let dep_info = dep_info_loc(cx, unit);
-        let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?;
-        let mut local = vec![LocalFingerprint::mtime(
-            cx.files().target_root(),
-            mtime,
-            &dep_info,
-        )];
-        // Include the fingerprint of the build script.
-        //
-        // This is not included for dependencies (Precalculated below) because
-        // Docker zeros the nanosecond part of the mtime when the image is
-        // saved, which prevents built dependencies from being cached.
-        // This has the consequence that if a dependency needs to be rebuilt
-        // (such as an environment variable tracked via rerun-if-env-changed),
-        // and you run two separate commands (`build` then `test`), the second
-        // command will erroneously think it is fresh.
-        // See: https://github.com/rust-lang/cargo/issues/6733
-        local.extend(local_fingerprint_run_custom_build_deps(cx, unit));
-        local
+        let dep_info = dep_info.strip_prefix(&target_root).unwrap().to_path_buf();
+        vec![LocalFingerprint::CheckDepInfo { dep_info }]
     } else {
         let fingerprint = pkg_fingerprint(cx.bcx, unit.pkg)?;
         vec![LocalFingerprint::Precalculated(fingerprint)]
     };
 
+    // Figure out what the outputs of our unit are, and we'll be storing them
+    // into the fingerprint as well.
+    let outputs = if unit.mode.is_doc() {
+        vec![cx
+            .files()
+            .out_dir(unit)
+            .join(unit.target.crate_name())
+            .join("index.html")]
+    } else {
+        cx.outputs(unit)?
+            .iter()
+            .filter(|output| output.flavor != FileFlavor::DebugInfo)
+            .map(|output| output.path.clone())
+            .collect()
+    };
+
+    // Fill out a bunch more information that we'll be tracking, typically
+    // hashed to take up less space on disk as we just need to know when things
+    // change.
     let extra_flags = if unit.mode.is_doc() {
-        bcx.rustdocflags_args(unit)?
+        cx.bcx.rustdocflags_args(unit)?
     } else {
-        bcx.rustflags_args(unit)?
+        cx.bcx.rustflags_args(unit)?
     };
-    let profile_hash = util::hash_u64(&(&unit.profile, unit.mode, bcx.extra_args_for(unit)));
+    let profile_hash = util::hash_u64((&unit.profile, unit.mode, cx.bcx.extra_args_for(unit)));
     // Include metadata since it is exposed as environment variables.
     let m = unit.pkg.manifest().metadata();
-    let metadata = util::hash_u64(&(&m.authors, &m.description, &m.homepage, &m.repository));
-    let fingerprint = Arc::new(Fingerprint {
-        rustc: util::hash_u64(&bcx.rustc.verbose_version),
+    let metadata = util::hash_u64((&m.authors, &m.description, &m.homepage, &m.repository));
+    Ok(Fingerprint {
+        rustc: util::hash_u64(&cx.bcx.rustc.verbose_version),
         target: util::hash_u64(&unit.target),
         profile: profile_hash,
         // Note that .0 is hashed here, not .1 which is the cwd. That doesn't
         // actually affect the output artifact so there's no need to hash it.
-        path: util::hash_u64(&super::path_args(cx.bcx, unit).0),
-        features: format!("{:?}", bcx.resolve.features_sorted(unit.pkg.package_id())),
+        path: util::hash_u64(super::path_args(cx.bcx, unit).0),
+        features: format!(
+            "{:?}",
+            cx.bcx.resolve.features_sorted(unit.pkg.package_id())
+        ),
         deps,
-        local,
+        local: Mutex::new(local),
         memoized_hash: Mutex::new(None),
         metadata,
         rustflags: extra_flags,
-    });
-    cx.fingerprints.insert(*unit, Arc::clone(&fingerprint));
-    Ok(fingerprint)
+        fs_status: FsStatus::Stale,
+        outputs,
+    })
 }
 
 // We want to use the mtime for files if we're a path source, but if we're a
@@ -722,150 +1080,145 @@ fn use_dep_info(unit: &Unit<'_>) -> bool {
     !unit.mode.is_doc() && path
 }
 
-/// Prepare the necessary work for the fingerprint of a build command.
-///
-/// The fingerprint for the execution of a build script can be in one of two
-/// modes:
-///
-/// - "old style": The fingerprint tracks the mtimes for all files in the
-///   package.
-/// - "new style": If the build script emits a "rerun-if" statement, then
-///   Cargo only tracks the files an environment variables explicitly listed
-///   by the script.
-///
-/// Overridden build scripts are special; only the simulated output is
-/// tracked.
-pub fn prepare_build_cmd<'a, 'cfg>(
+/// Calculate a fingerprint for an "execute a build script" unit. This is an
+/// internal helper of `calculate`, don't call directly.
+fn calculate_run_custom_build<'a, 'cfg>(
     cx: &mut Context<'a, 'cfg>,
     unit: &Unit<'a>,
-) -> CargoResult<Preparation> {
-    let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id()));
-    let new = cx.files().fingerprint_dir(unit);
-    let loc = new.join("build");
-
-    debug!("fingerprint at: {}", loc.display());
-
-    let (local, output_path) = build_script_local_fingerprints(cx, unit)?;
+) -> CargoResult<Fingerprint> {
+    // Using the `BuildDeps` information we'll have previously parsed and
+    // inserted into `build_explicit_deps`, build an initial snapshot of the
+    // `LocalFingerprint` list for this build script. If we previously executed
+    // the build script this means we'll be watching files and env vars.
+    // Otherwise if we haven't previously executed it we'll just start watching
+    // the whole crate.
+    let (gen_local, overridden) = build_script_local_fingerprints(cx, unit);
+    let deps = &cx.build_explicit_deps[unit];
+    let local = gen_local
+        .call_box(deps, Some(&|| pkg_fingerprint(cx.bcx, unit.pkg)))?
+        .unwrap();
+    let output = deps.build_script_output.clone();
 
-    // Include the compilation of the build script itself in the fingerprint.
-    // If the build script is rebuilt, then it definitely needs to be run
-    // again. This should only find 1 dependency (for the build script) or 0
-    // (if it is overridden).
-    //
-    // FIXME: This filters out `RunCustomBuild` units. These are `links`
-    // build scripts. Unfortunately, for many reasons, those would be very
-    // difficult to include, so for now this is slightly wrong. Reasons:
-    // Fingerprint::locals has to be rebuilt in the closure, LocalFingerprint
-    // isn't cloneable, Context is required to recompute them, build script
-    // fingerprints aren't shared in Context::fingerprints, etc.
-    // Ideally this would call local_fingerprint_run_custom_build_deps.
-    // See https://github.com/rust-lang/cargo/issues/6780
-    let deps = if output_path.is_none() {
+    // Include any dependencies of our execution, which is typically just the
+    // compilation of the build script itself (if the build script changes we
+    // should be rerun!). Note though that if we're an overridden build script
+    // we have no dependencies so no need to recurse in that case.
+    let deps = if overridden {
         // Overridden build scripts don't need to track deps.
         vec![]
     } else {
         cx.dep_targets(unit)
             .iter()
-            .filter(|u| !u.mode.is_run_custom_build())
-            .map(|dep| {
-                calculate(cx, dep).and_then(|fingerprint| {
-                    let name = cx.bcx.extern_crate_name(unit, dep)?;
-                    Ok(DepFingerprint {
-                        pkg_id: dep.pkg.package_id().to_string(),
-                        name,
-                        fingerprint,
-                    })
-                })
-            })
+            .map(|dep| DepFingerprint::new(cx, unit, dep))
            .collect::<CargoResult<Vec<_>>>()?
     };
 
-    let mut fingerprint = Fingerprint {
-        local,
+    Ok(Fingerprint {
+        local: Mutex::new(local),
         rustc: util::hash_u64(&cx.bcx.rustc.verbose_version),
         deps,
-        ..Fingerprint::new()
-    };
-    let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
-    let compare = compare_old_fingerprint(&loc, &fingerprint, mtime_on_use);
-    log_compare(unit, &compare);
+        outputs: if overridden { Vec::new() } else { vec![output] },
 
-    // When we write out the fingerprint, we may want to actually change the
-    // kind of fingerprint being recorded. If we started out, then the previous
-    // run of the build script (or if it had never run before) may indicate to
-    // use the `Precalculated` variant with the `pkg_fingerprint`. If the build
-    // script then prints `rerun-if-changed`, however, we need to record what's
-    // necessary for that fingerprint.
-    //
-    // Hence, if there were some `rerun-if-changed` directives forcibly change
-    // the kind of fingerprint by reinterpreting the dependencies output by the
-    // build script.
-    let state = Arc::clone(&cx.build_state);
-    let key = (unit.pkg.package_id(), unit.kind);
-    let pkg_root = unit.pkg.root().to_path_buf();
-    let target_root = cx.files().target_root().to_path_buf();
-    let write_fingerprint = Work::new(move |_| {
-        if let Some(output_path) = output_path {
-            let outputs = state.outputs.lock().unwrap();
-            let outputs = &outputs[&key];
-            if !outputs.rerun_if_changed.is_empty() || !outputs.rerun_if_env_changed.is_empty() {
-                let deps = BuildDeps::new(&output_path, Some(outputs));
-                fingerprint.local = local_fingerprints_deps(&deps, &target_root, &pkg_root);
-                fingerprint.update_local(&target_root)?;
-            }
-            // FIXME: If a build script switches from new style to old style,
-            // this is bugged. It should recompute Fingerprint::local, but
-            // requires access to Context which we don't have here.
-            // See https://github.com/rust-lang/cargo/issues/6779
-        }
-        write_fingerprint(&loc, &fingerprint)
-    });
-
-    Ok((
-        if compare.is_ok() { Fresh } else { Dirty },
-        write_fingerprint,
-        Work::noop(),
-    ))
+
+        // Most of the other info is blank here as we don't really include it
+        // in the execution of the build script, but... this may be a latent
+        // bug in Cargo.
+        ..Fingerprint::new()
+    })
 }
 
-/// Compute the `LocalFingerprint` values for a `RunCustomBuild` unit.
+/// Get ready to compute the `LocalFingerprint` values for a `RunCustomBuild`
+/// unit.
 ///
-/// The second element of the return value is the path to the build script
-/// `output` file. This is `None` for overridden build scripts.
+/// This function has, what's on the surface, a seriously wonky interface.
+/// You'll call this function and it'll return a closure and a boolean. The
+/// boolean is pretty simple in that it indicates whether the `unit` has been
+/// overridden via `.cargo/config`. The closure is much more complicated.
+///
+/// This closure is intended to capture any local state necessary to compute
+/// the `LocalFingerprint` values for this unit. It is `Send` and `'static` to
+/// be sent to other threads as well (such as when we're executing build
+/// scripts). That deduplication is the rationale for the closure at least.
+///
+/// The arguments to the closure are a bit weirder, though, and I'll apologize
+/// in advance for the weirdness too. The first argument to the closure (see
+/// `MyFnOnce` below) is a `&BuildDeps`. This is the parsed version of a build
+/// script, and when Cargo starts up this is cached from previous runs of a
+/// build script. After a build script executes the output file is reparsed and
+/// passed in here.
+///
+/// The second argument is the weirdest: it's *optionally* a closure to
+/// call `pkg_fingerprint` below. The `pkg_fingerprint` below requires access
+/// to the "source map" located in `Context`. That's very non-`'static` and
+/// non-`Send`, so it can't be used on other threads, such as when we invoke
+/// this after a build script has finished. The `Option` allows us to for sure
+/// calculate it on the main thread at the beginning, and then swallow the bug
+/// for now where a worker thread after a build script has finished doesn't
+/// have access. Ideally there would be no second argument or it would be more
+/// "first class" and not an `Option` but something that can be sent between
+/// threads. In any case, it's a bug for now.
+///
+/// This isn't the greatest of interfaces, and if there's suggestions to
+/// improve please do so!
+///
+/// FIXME(#6779) - see all the words above
 fn build_script_local_fingerprints<'a, 'cfg>(
     cx: &mut Context<'a, 'cfg>,
     unit: &Unit<'a>,
-) -> CargoResult<(Vec<LocalFingerprint>, Option<PathBuf>)> {
+) -> (Box<dyn MyFnOnce + Send>, bool) {
     // First up, if this build script is entirely overridden, then we just
-    // return the hash of what we overrode it with.
+    // return the hash of what we overrode it with. This is the easy case!
     if let Some(fingerprint) = build_script_override_fingerprint(cx, unit) {
         debug!("override local fingerprints deps");
-        // Note that the `None` here means that we don't want to update the local
-        // fingerprint afterwards because this is all just overridden.
-        return Ok((vec![fingerprint], None));
+        return (
+            Box::new(
+                move |_: &BuildDeps, _: Option<&dyn Fn() -> CargoResult<String>>| {
+                    Ok(Some(vec![fingerprint]))
+                },
+            ),
+            true, // this is an overridden build script
+        );
     }
 
-    // Next up we look at the previously listed dependencies for the build
-    // script. If there are none then we're in the "old mode" where we just
-    // assume that we're changed if anything in the packaged changed. The
-    // `Some` here though means that we want to update our local fingerprints
-    // after we're done as running this build script may have created more
-    // dependencies.
-    let deps = &cx.build_explicit_deps[unit];
-    let output = deps.build_script_output.clone();
-    if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
-        debug!("old local fingerprints deps");
-        let s = pkg_fingerprint(cx.bcx, unit.pkg)?;
-        return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output)));
-    }
+    // ... Otherwise this is a "real" build script and we need to return a real
+    // closure. Our returned closure classifies the build script based on
+    // whether it prints `rerun-if-*`. If it *doesn't* print this it's where the
+    // magical second argument comes into play, which fingerprints a whole
+    // package. Remember that the fact that this is an `Option` is a bug, but a
+    // longstanding bug, in Cargo. Recent refactorings just made it painfully
+    // obvious.
+    let script_root = cx.files().build_script_run_dir(unit);
+    let pkg_root = unit.pkg.root().to_path_buf();
+    let calculate =
+        move |deps: &BuildDeps, pkg_fingerprint: Option<&dyn Fn() -> CargoResult<String>>| {
+            if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
+                match pkg_fingerprint {
+                    // FIXME: this is somewhat buggy with respect to docker and
+                    // weird filesystems. The `Precalculated` variant
+                    // constructed below will, for `path` dependencies, contain
+                    // a stringified version of the mtime for the local crate.
+                    // This violates one of the things we describe in this
+                    // module's doc comment, never hashing mtimes. We should
+                    // figure out a better scheme where a package fingerprint
+                    // may be a string (like for a registry) or a list of files
+                    // (like for a path dependency). That list of files would
+                    // be stored here rather than the mtime of them.
+                    Some(f) => {
+                        debug!("old local fingerprints deps");
+                        let s = f()?;
+                        return Ok(Some(vec![LocalFingerprint::Precalculated(s)]));
+                    }
+                    None => return Ok(None),
+                }
+            }
 
-    // Ok so now we're in "new mode" where we can have files listed as
-    // dependencies as well as env vars listed as dependencies. Process them all
-    // here.
-    Ok((
-        local_fingerprints_deps(deps, cx.files().target_root(), unit.pkg.root()),
-        Some(output),
-    ))
+            // Ok so now we're in "new mode" where we can have files listed as
+            // dependencies as well as env vars listed as dependencies. Process
+            // them all here.
+            Ok(Some(local_fingerprints_deps(deps, &script_root, &pkg_root)))
+        };
+
+    // Note that `false` == "not overridden"
+    (Box::new(calculate), false)
 }
 
 /// Create a `LocalFingerprint` for an overridden build script.
@@ -875,19 +1228,17 @@ fn build_script_override_fingerprint<'a, 'cfg>(
     unit: &Unit<'a>,
 ) -> Option<LocalFingerprint> {
     let state = cx.build_state.outputs.lock().unwrap();
-    state
-        .get(&(unit.pkg.package_id(), unit.kind))
-        .map(|output| {
-            let s = format!(
-                "overridden build state with hash: {}",
-                util::hash_u64(output)
-            );
-            LocalFingerprint::Precalculated(s)
-        })
+    let output = state.get(&(unit.pkg.package_id(), unit.kind))?;
+    let s = format!(
+        "overridden build state with hash: {}",
+        util::hash_u64(output)
+    );
+    Some(LocalFingerprint::Precalculated(s))
 }
 
 /// Compute the `LocalFingerprint` values for a `RunCustomBuild` unit for
-/// non-overridden new-style build scripts only.
+/// is already known to have a nonempty `rerun-if-*` somewhere.
fn local_fingerprints_deps(
 deps: &BuildDeps,
 target_root: &Path,
@@ -895,68 +1246,50 @@ fn local_fingerprints_deps(
 pkg_root: &Path,
) -> Vec<LocalFingerprint> {
 debug!("new local fingerprints deps");
 let mut local = Vec::new();
+
 if !deps.rerun_if_changed.is_empty() {
- let output = &deps.build_script_output;
- let deps = deps.rerun_if_changed.iter().map(|p| pkg_root.join(p));
- let mtime = mtime_if_fresh(output, deps);
- local.push(LocalFingerprint::mtime(target_root, mtime, output));
+ // Note that, as the module comment above says, we are careful to never
+ // store an absolute path in `LocalFingerprint`, so ensure that we strip
+ // absolute prefixes from them.
+ let output = deps
+ .build_script_output
+ .strip_prefix(target_root)
+ .unwrap()
+ .to_path_buf();
+ let paths = deps
+ .rerun_if_changed
+ .iter()
+ .map(|p| p.strip_prefix(pkg_root).unwrap_or(p).to_path_buf())
+ .collect();
+ local.push(LocalFingerprint::RerunIfChanged { output, paths });
 }
 for var in deps.rerun_if_env_changed.iter() {
 let val = env::var(var).ok();
- local.push(LocalFingerprint::EnvBased(var.clone(), val));
+ local.push(LocalFingerprint::RerunIfEnvChanged {
+ var: var.clone(),
+ val,
+ });
 }
 local
}
-/// Compute `LocalFingerprint` values for the `RunCustomBuild` dependencies of
-/// the given unit.
-fn local_fingerprint_run_custom_build_deps<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> Vec<LocalFingerprint> {
- cx.dep_targets(unit)
- .iter()
- .filter(|u| u.mode.is_run_custom_build())
- .map(|dep| {
- // If the build script is overridden, use the override info as
- // the override. Otherwise, use the last invocation time of
- // the build script. If the build script re-runs during this
- // run, dirty propagation within the JobQueue will ensure that
- // this gets invalidated. This is only here to catch the
- // situation when cargo is run a second time for another
- // target that wasn't built previously (such as `cargo build`
- // then `cargo test`).
- //
- // I suspect there is some edge case where this is incorrect,
- // because the invoked timestamp is updated even if the build
- // script fails to finish. However, I can't find any examples
- // where it doesn't work.
- build_script_override_fingerprint(cx, unit).unwrap_or_else(|| {
- let ts_path = cx
- .files()
- .build_script_run_dir(dep)
- .join("invoked.timestamp");
- let ts_path_mtime = paths::mtime(&ts_path).ok();
- LocalFingerprint::mtime(cx.files().target_root(), ts_path_mtime, &ts_path)
- })
- })
- .collect()
-}
-
fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
 debug_assert_ne!(fingerprint.rustc, 0);
 // Fingerprint::new().rustc == 0, make sure it doesn't make it to the file system.
 // This is mostly so outside tools can reliably find out what rust version this file is for,
 // as we can use the full hash.
 let hash = fingerprint.hash();
- debug!("write fingerprint: {}", loc.display());
+ debug!("write fingerprint ({:x}) : {}", hash, loc.display());
 paths::write(loc, util::to_hex(hash).as_bytes())?;
- paths::write(
- &loc.with_extension("json"),
- &serde_json::to_vec(&fingerprint).unwrap(),
- )?;
+
+ let json = serde_json::to_string(fingerprint).unwrap();
+ if cfg!(debug_assertions) {
+ let f: Fingerprint = serde_json::from_str(&json).unwrap();
+ assert_eq!(f.hash(), hash);
+ }
+ paths::write(&loc.with_extension("json"), json.as_bytes())?;
 Ok(())
}
@@ -971,12 +1304,27 @@ pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> Ca
 Ok(())
}
+/// Returns the location that the dep-info file will show up at for the `unit`
+/// specified.
pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
 cx.files()
 .fingerprint_dir(unit)
 .join(&format!("dep-{}", filename(cx, unit)))
}
+/// Returns an absolute path that the `unit`'s outputs should always be relative
+/// to. This `target_root` variable is used to store relative path names in
+/// `Fingerprint` instead of absolute pathnames (see module comment).
+fn target_root<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
+ if unit.mode.is_run_custom_build() {
+ cx.files().build_script_run_dir(unit)
+ } else if unit.kind == Kind::Host {
+ cx.files().host_root().to_path_buf()
+ } else {
+ cx.files().target_root().to_path_buf()
+ }
+}
+
fn compare_old_fingerprint(
 loc: &Path,
 new_fingerprint: &Fingerprint,
@@ -992,22 +1340,29 @@ fn compare_old_fingerprint(
 let new_hash = new_fingerprint.hash();
- if util::to_hex(new_hash) == old_fingerprint_short {
+ if util::to_hex(new_hash) == old_fingerprint_short && new_fingerprint.fs_status.up_to_date() {
 return Ok(());
 }
 let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
- let old_fingerprint = serde_json::from_str(&old_fingerprint_json)
+ let old_fingerprint: Fingerprint = serde_json::from_str(&old_fingerprint_json)
 .chain_err(|| internal("failed to deserialize json"))?;
- new_fingerprint.compare(&old_fingerprint)
+ debug_assert_eq!(util::to_hex(old_fingerprint.hash()), old_fingerprint_short);
+ let result = new_fingerprint.compare(&old_fingerprint);
+ assert!(result.is_err());
+ return result;
}
fn log_compare(unit: &Unit<'_>, compare: &CargoResult<()>) {
- let ce = match *compare {
+ let ce = match compare {
 Ok(..) => return,
- Err(ref e) => e,
+ Err(e) => e,
 };
- info!("fingerprint error for {}: {}", unit.pkg, ce);
+ info!(
+ "fingerprint error for {}/{:?}/{:?}",
+ unit.pkg, unit.mode, unit.target,
+ );
+ info!(" err: {}", ce);
 for cause in ce.iter_causes() {
 info!(" cause: {}", cause);
@@ -1015,7 +1370,7 @@ fn log_compare(unit: &Unit<'_>, compare: &CargoResult<()>) {
 }
 // Parse the dep-info into a list of paths
-pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
+pub fn parse_dep_info(pkg_root: &Path, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
 let data = match paths::read_bytes(dep_info) {
 Ok(data) => data,
 Err(_) => return Ok(None),
@@ -1023,7 +1378,7 @@ pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
 let paths = data
 .split(|&x| x == 0)
 .filter(|x| !x.is_empty())
- .map(|p| util::bytes2path(p).map(|p| pkg.root().join(p)))
+ .map(|p| util::bytes2path(p).map(|p| pkg_root.join(p)))
 .collect::<Result<Vec<_>, _>>()?;
 if paths.is_empty() {
 Ok(None)
@@ -1032,14 +1387,6 @@ pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
 }
}
-fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult<Option<FileTime>> {
- if let Some(paths) = parse_dep_info(pkg, dep_info)? {
- Ok(mtime_if_fresh(dep_info, paths.iter()))
- } else {
- Ok(None)
- }
-}
-
fn pkg_fingerprint(bcx: &BuildContext<'_, '_>, pkg: &Package) -> CargoResult<String> {
 let source_id = pkg.package_id().source_id();
 let sources = bcx.packages.sources();
@@ -1050,24 +1397,21 @@ fn pkg_fingerprint(bcx: &BuildContext<'_, '_>, pkg: &Package) -> CargoResult<String> {
-fn mtime_if_fresh<I>(output: &Path, paths: I) -> Option<FileTime>
+fn find_stale_file<I>(reference: &Path, paths: I) -> Option<StaleFile>
where
 I: IntoIterator,
 I::Item: AsRef<Path>,
{
- let mtime = match paths::mtime(output) {
+ let reference_mtime = match paths::mtime(reference) {
 Ok(mtime) => mtime,
- Err(..) => return None,
+ Err(..) => return Some(StaleFile::Missing(reference.to_path_buf())),
 };
- let any_stale = paths.into_iter().any(|path| {
+ for path in paths {
 let path = path.as_ref();
- let mtime2 = match paths::mtime(path) {
+ let path_mtime = match paths::mtime(path) {
 Ok(mtime) => mtime,
- Err(..) => {
- info!("stale: {} -- missing", path.display());
- return true;
- }
+ Err(..) => return Some(StaleFile::Missing(path.to_path_buf())),
 };
 // TODO: fix #5918.
@@ -1088,19 +1432,19 @@ where
 // if equal, files were changed just after a previous build finished.
 // Unfortunately this became problematic when (in #6484) cargo switched to more accurately
 // measuring the start time of builds.
- if mtime2 > mtime {
- info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime);
- true
- } else {
- false
+ if path_mtime <= reference_mtime {
+ continue;
 }
- });
- if any_stale {
- None
- } else {
- Some(mtime)
+ return Some(StaleFile::Changed {
+ reference: reference.to_path_buf(),
+ reference_mtime,
+ stale: path.to_path_buf(),
+ stale_mtime: path_mtime,
+ });
 }
+
+ return None;
}
fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
@@ -1113,6 +1457,8 @@ fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
 "test-"
 } else if unit.mode.is_doc() {
 "doc-"
+ } else if unit.mode.is_run_custom_build() {
+ "run-"
 } else {
 ""
 };
@@ -1183,3 +1529,30 @@ pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult<Vec<(String, Vec<String>)>> {
+
+// If we had `Box<dyn FnOnce()>` we wouldn't need this.
+trait MyFnOnce {
+ fn call_box(
+ self: Box<Self>,
+ f: &BuildDeps,
+ pkg_fingerprint: Option<&dyn Fn() -> CargoResult<String>>,
+ ) -> CargoResult<Option<Vec<LocalFingerprint>>>;
+}
+
+impl<F> MyFnOnce for F
+where
+ F: FnOnce(
+ &BuildDeps,
+ Option<&dyn Fn() -> CargoResult<String>>,
+ ) -> CargoResult<Option<Vec<LocalFingerprint>>>,
+{
+ fn call_box(
+ self: Box<Self>,
+ f: &BuildDeps,
+ pkg_fingerprint: Option<&dyn Fn() -> CargoResult<String>>,
+ ) -> CargoResult<Option<Vec<LocalFingerprint>>> {
+ (*self)(f, pkg_fingerprint)
+ }
+}
diff --git a/src/cargo/core/compiler/job.rs b/src/cargo/core/compiler/job.rs
index ca788b2ff2f..f056df38f5c 100644
--- a/src/cargo/core/compiler/job.rs
+++ b/src/cargo/core/compiler/job.rs
@@ -1,11 +1,12 @@
 use std::fmt;
+use std::mem;
 use super::job_queue::JobState;
-use crate::util::{CargoResult, Dirty, Fresh, Freshness};
+use crate::util::CargoResult;
 pub struct Job {
- dirty: Work,
- fresh: Work,
+ work: Work,
+ fresh: Freshness,
}
 /// Each proc should send its description before starting.
@@ -50,17 +51,26 @@ impl Work {
 impl Job {
 /// Creates a new job representing a unit of work.
- pub fn new(dirty: Work, fresh: Work) -> Job {
- Job { dirty, fresh }
+ pub fn new(work: Work, fresh: Freshness) -> Job {
+ Job { work, fresh }
 }
 /// Consumes this job by running it, returning the result of the
 /// computation.
- pub fn run(self, fresh: Freshness, state: &JobState<'_>) -> CargoResult<()> {
- match fresh {
- Fresh => self.fresh.call(state),
- Dirty => self.dirty.call(state),
- }
+ pub fn run(self, state: &JobState<'_>) -> CargoResult<()> {
+ self.work.call(state)
+ }
+
+ /// Returns whether this job was fresh/dirty, where "fresh" means we're
+ /// likely to perform just some small bookkeeping, and "dirty" means we'll
+ /// probably do something slow like invoke rustc.
+ pub fn freshness(&self) -> Freshness {
+ self.fresh
+ }
+
+ pub fn before(&mut self, next: Work) {
+ let prev = mem::replace(&mut self.work, Work::noop());
+ self.work = next.then(prev);
 }
}
@@ -69,3 +79,13 @@ impl fmt::Debug for Job {
 write!(f, "Job {{ ... }}")
 }
}
+
+/// Indication of the freshness of a package.
+///
+/// A fresh package does not necessarily need to be rebuilt (unless a dependency
+/// was also rebuilt), and a dirty package must always be rebuilt.
+#[derive(PartialEq, Eq, Debug, Clone, Copy)]
+pub enum Freshness {
+ Fresh,
+ Dirty,
+}
diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs
index 4fde1fbde73..1c23c0d2d7f 100644
--- a/src/cargo/core/compiler/job_queue.rs
+++ b/src/cargo/core/compiler/job_queue.rs
@@ -11,17 +11,20 @@ use crossbeam_utils::thread::Scope;
 use jobserver::{Acquired, HelperThread};
 use log::{debug, info, trace};
+use super::context::OutputFile;
+use super::job::{
+ Freshness::{self, Dirty, Fresh},
+ Job,
+};
+use super::{BuildContext, BuildPlan, CompileMode, Context, Kind, Unit};
 use crate::core::profiles::Profile;
 use crate::core::{PackageId, Target, TargetKind};
 use crate::handle_error;
 use crate::util;
 use crate::util::diagnostic_server::{self, DiagnosticPrinter};
 use crate::util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder};
-use crate::util::{Config, DependencyQueue, Dirty, Fresh, Freshness};
+use crate::util::{Config, DependencyQueue};
 use crate::util::{Progress, ProgressStyle};
-use super::context::OutputFile;
-use super::job::Job;
-use super::{BuildContext, BuildPlan, CompileMode, Context, Kind, Unit};
 /// A management structure of the entire dependency graph to compile.
 ///
@@ -29,7 +32,7 @@ use super::{BuildContext, BuildPlan, CompileMode, Context, Kind, Unit};
 /// actual compilation step of each package. Packages enqueue units of work and
 /// then later on the entire graph is processed and compiled.
 pub struct JobQueue<'a, 'cfg> {
- queue: DependencyQueue<Key<'a>, Vec<(Job, Freshness)>>,
+ queue: DependencyQueue<Key<'a>, Vec<Job>>,
 tx: Sender<Message<'a>>,
 rx: Receiver<Message<'a>>,
 active: Vec<Key<'a>>,
@@ -45,9 +48,6 @@ pub struct JobQueue<'a, 'cfg> {
 struct PendingBuild {
 /// The number of jobs currently active.
 amt: usize,
- /// The current freshness state of this package. Any dirty target within a
- /// package will cause the entire package to become dirty.
- fresh: Freshness,
}
 #[derive(Clone, Copy, Eq, PartialEq, Hash)]
@@ -154,13 +154,10 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
 cx: &Context<'a, 'cfg>,
 unit: &Unit<'a>,
 job: Job,
- fresh: Freshness,
 ) -> CargoResult<()> {
 let key = Key::new(unit);
 let deps = key.dependencies(cx)?;
- self.queue
- .queue(Fresh, &key, Vec::new(), &deps)
- .push((job, fresh));
+ self.queue.queue(&key, Vec::new(), &deps).push(job);
 *self.counts.entry(key.pkg).or_insert(0) += 1;
 Ok(())
 }
@@ -239,17 +236,10 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {
 // possible that can run. Note that this is also the point where we
 // start requesting job tokens. Each job after the first needs to
 // request a token.
- while let Some((fresh, key, jobs)) = self.queue.dequeue() { - let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| f.combine(fresh)); - self.pending.insert( - key, - PendingBuild { - amt: jobs.len(), - fresh: total_fresh, - }, - ); - for (job, f) in jobs { - queue.push((key, job, f.combine(fresh))); + while let Some((key, jobs)) = self.queue.dequeue() { + self.pending.insert(key, PendingBuild { amt: jobs.len() }); + for job in jobs { + queue.push((key, job)); if !self.active.is_empty() || !queue.is_empty() { jobserver_helper.request_token(); } @@ -260,8 +250,8 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { // try to spawn it so long as we've got a jobserver token which says // we're able to perform some parallel work. while error.is_none() && self.active.len() < tokens.len() + 1 && !queue.is_empty() { - let (key, job, fresh) = queue.remove(0); - self.run(key, fresh, job, cx.bcx.config, scope, build_plan)?; + let (key, job) = queue.remove(0); + self.run(key, job, cx.bcx.config, scope, build_plan)?; } // If after all that we're not actually running anything then we're @@ -409,7 +399,6 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { fn run( &mut self, key: Key<'a>, - fresh: Freshness, job: Job, config: &Config, scope: &Scope<'a>, @@ -421,8 +410,9 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { *self.counts.get_mut(&key.pkg).unwrap() -= 1; let my_tx = self.tx.clone(); + let fresh = job.freshness(); let doit = move || { - let res = job.run(fresh, &JobState { tx: my_tx.clone() }); + let res = job.run(&JobState { tx: my_tx.clone() }); my_tx.send(Message::Finish(key, res)).unwrap(); }; @@ -477,7 +467,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> { let state = self.pending.get_mut(&key).unwrap(); state.amt -= 1; if state.amt == 0 { - self.queue.finish(&key, state.fresh); + self.queue.finish(&key); } Ok(()) } diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index c58a78679d1..aea50fcb21b 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -29,6 +29,7 @@ pub use self::compilation::{Compilation, Doctest}; pub use self::context::{Context, Unit}; pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts}; use self::job::{Job, Work}; +pub use self::job::Freshness; use self::job_queue::JobQueue; pub use self::layout::is_bad_artifact_name; use self::output_depinfo::output_depinfo; @@ -37,7 +38,7 @@ use crate::core::profiles::{Lto, PanicStrategy, Profile}; use crate::core::{PackageId, Target}; use crate::util::errors::{CargoResult, CargoResultExt, Internal, ProcessError}; use crate::util::paths; -use crate::util::{self, machine_message, process, Freshness, ProcessBuilder}; +use crate::util::{self, machine_message, process, ProcessBuilder}; use crate::util::{internal, join_paths, profile}; /// Indicates whether an object is for the host architcture or the target architecture. @@ -141,35 +142,31 @@ fn compile<'a, 'cfg: 'a>( fingerprint::prepare_init(cx, unit)?; cx.links.validate(bcx.resolve, unit)?; - let (dirty, fresh, freshness) = if unit.mode.is_run_custom_build() { + let job = if unit.mode.is_run_custom_build() { custom_build::prepare(cx, unit)? } else if unit.mode == CompileMode::Doctest { // We run these targets later, so this is just a no-op for now. 
- (Work::noop(), Work::noop(), Freshness::Fresh) + Job::new(Work::noop(), Freshness::Fresh) } else if build_plan { - ( - rustc(cx, unit, &exec.clone())?, - Work::noop(), - Freshness::Dirty, - ) + Job::new(rustc(cx, unit, &exec.clone())?, Freshness::Dirty) } else { - let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?; - let work = if unit.mode.is_doc() { - rustdoc(cx, unit)? + let force = exec.force_rebuild(unit) || force_rebuild; + let mut job = fingerprint::prepare_target(cx, unit, force)?; + job.before(if job.freshness() == Freshness::Dirty { + let work = if unit.mode.is_doc() { + rustdoc(cx, unit)? + } else { + rustc(cx, unit, exec)? + }; + work.then(link_targets(cx, unit, false)?) } else { - rustc(cx, unit, exec)? - }; - // Need to link targets on both the dirty and fresh. - let dirty = work.then(link_targets(cx, unit, false)?).then(dirty); - let fresh = link_targets(cx, unit, true)?.then(fresh); - - if exec.force_rebuild(unit) || force_rebuild { - freshness = Freshness::Dirty; - } + // Need to link targets on both the dirty and fresh. + link_targets(cx, unit, true)? + }); - (dirty, fresh, freshness) + job }; - jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?; + jobs.enqueue(cx, unit, job)?; drop(p); // Be sure to compile all dependencies of this target as well. @@ -234,7 +231,7 @@ fn rustc<'a, 'cfg>( exec.init(cx, unit); let exec = exec.clone(); - let root_output = cx.files().target_root().to_path_buf(); + let root_output = cx.files().host_root().to_path_buf(); let pkg_root = unit.pkg.root().to_path_buf(); let cwd = rustc .get_cwd() diff --git a/src/cargo/core/compiler/output_depinfo.rs b/src/cargo/core/compiler/output_depinfo.rs index 1c192db8662..b09f5344c58 100644 --- a/src/cargo/core/compiler/output_depinfo.rs +++ b/src/cargo/core/compiler/output_depinfo.rs @@ -39,7 +39,7 @@ fn add_deps_for_unit<'a, 'b>( if !unit.mode.is_run_custom_build() { // Add dependencies from rustc dep-info output (stored in fingerprint directory) let dep_info_loc = fingerprint::dep_info_loc(context, unit); - if let Some(paths) = fingerprint::parse_dep_info(unit.pkg, &dep_info_loc)? { + if let Some(paths) = fingerprint::parse_dep_info(unit.pkg.root(), &dep_info_loc)? 
{ for path in paths { deps.insert(path); } diff --git a/src/cargo/core/source/mod.rs b/src/cargo/core/source/mod.rs index f19bdf311fc..7d9b36e3d6f 100644 --- a/src/cargo/core/source/mod.rs +++ b/src/cargo/core/source/mod.rs @@ -1,8 +1,8 @@ use std::collections::hash_map::HashMap; use std::fmt; -use crate::core::{Dependency, Package, PackageId, Summary}; use crate::core::package::PackageSet; +use crate::core::{Dependency, Package, PackageId, Summary}; use crate::util::{CargoResult, Config}; mod source_id; diff --git a/src/cargo/ops/cargo_compile.rs b/src/cargo/ops/cargo_compile.rs index 0a5905be052..28a7d659937 100644 --- a/src/cargo/ops/cargo_compile.rs +++ b/src/cargo/ops/cargo_compile.rs @@ -126,12 +126,16 @@ impl Packages { if !opt_out.is_empty() { ws.config().shell().warn(format!( "excluded package(s) {} not found in workspace `{}`", - opt_out.iter().map(|x| x.as_ref()).collect::>().join(", "), + opt_out + .iter() + .map(|x| x.as_ref()) + .collect::>() + .join(", "), ws.root().display(), ))?; } packages - }, + } Packages::Packages(packages) if packages.is_empty() => { vec![PackageIdSpec::from_package_id(ws.current()?.package_id())] } @@ -443,7 +447,11 @@ impl CompileFilter { all_bens: bool, all_targets: bool, ) -> CompileFilter { - let rule_lib = if lib_only { LibRule::True } else { LibRule::False }; + let rule_lib = if lib_only { + LibRule::True + } else { + LibRule::False + }; let rule_bins = FilterRule::new(bins, all_bins); let rule_tsts = FilterRule::new(tsts, all_tsts); let rule_exms = FilterRule::new(exms, all_exms); @@ -527,11 +535,13 @@ impl CompileFilter { TargetKind::Test => tests, TargetKind::Bench => benches, TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples, - TargetKind::Lib(..) => return match *lib { - LibRule::True => true, - LibRule::Default => true, - LibRule::False => false, - }, + TargetKind::Lib(..) 
=> { + return match *lib { + LibRule::True => true, + LibRule::Default => true, + LibRule::False => false, + }; + } TargetKind::CustomBuild => return false, }; rule.matches(target) diff --git a/src/cargo/ops/cargo_install.rs b/src/cargo/ops/cargo_install.rs index 7591b148a07..12ea145a13e 100644 --- a/src/cargo/ops/cargo_install.rs +++ b/src/cargo/ops/cargo_install.rs @@ -6,13 +6,14 @@ use std::{env, fs}; use failure::{bail, format_err}; use tempfile::Builder as TempFileBuilder; +use crate::core::compiler::Freshness; use crate::core::compiler::{DefaultExecutor, Executor}; use crate::core::{Edition, PackageId, Source, SourceId, Workspace}; use crate::ops; use crate::ops::common_for_install_and_uninstall::*; use crate::sources::{GitSource, SourceConfigMap}; use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{paths, Config, Filesystem, Freshness}; +use crate::util::{paths, Config, Filesystem}; struct Transaction { bins: Vec, diff --git a/src/cargo/ops/cargo_package.rs b/src/cargo/ops/cargo_package.rs index 0908056da63..6409d7780de 100644 --- a/src/cargo/ops/cargo_package.rs +++ b/src/cargo/ops/cargo_package.rs @@ -505,10 +505,7 @@ fn hash_all(path: &Path) -> CargoResult> { Ok(result) } -fn report_hash_difference( - orig: &HashMap, - after: &HashMap, -) -> String { +fn report_hash_difference(orig: &HashMap, after: &HashMap) -> String { let mut changed = Vec::new(); let mut removed = Vec::new(); for (key, value) in orig { diff --git a/src/cargo/ops/cargo_test.rs b/src/cargo/ops/cargo_test.rs index f2af2ca5508..74433005a30 100644 --- a/src/cargo/ops/cargo_test.rs +++ b/src/cargo/ops/cargo_test.rs @@ -1,8 +1,8 @@ use std::ffi::OsString; use crate::core::compiler::{Compilation, Doctest}; -use crate::core::Workspace; use crate::core::shell::Verbosity; +use crate::core::Workspace; use crate::ops; use crate::util::errors::CargoResult; use crate::util::{CargoTestError, ProcessError, Test}; diff --git a/src/cargo/ops/common_for_install_and_uninstall.rs b/src/cargo/ops/common_for_install_and_uninstall.rs index c581772d457..b3652cd169a 100644 --- a/src/cargo/ops/common_for_install_and_uninstall.rs +++ b/src/cargo/ops/common_for_install_and_uninstall.rs @@ -8,12 +8,13 @@ use failure::{bail, format_err}; use semver::VersionReq; use serde::{Deserialize, Serialize}; +use crate::core::compiler::Freshness; use crate::core::{Dependency, Package, PackageId, Source, SourceId}; use crate::ops::{self, CompileFilter, CompileOptions}; use crate::sources::PathSource; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::{Config, ToSemver}; -use crate::util::{FileLock, Filesystem, Freshness}; +use crate::util::{FileLock, Filesystem}; /// On-disk tracking for which package installed which binary. /// diff --git a/src/cargo/sources/git/utils.rs b/src/cargo/sources/git/utils.rs index 57606857899..bbaa32f9b25 100644 --- a/src/cargo/sources/git/utils.rs +++ b/src/cargo/sources/git/utils.rs @@ -508,7 +508,8 @@ where // callback asking for other authentication methods to try. Check // cred_helper_bad to make sure we only try the git credentail helper // once, to avoid looping forever. 
- if allowed.contains(git2::CredentialType::USER_PASS_PLAINTEXT) && cred_helper_bad.is_none() { + if allowed.contains(git2::CredentialType::USER_PASS_PLAINTEXT) && cred_helper_bad.is_none() + { let r = git2::Cred::credential_helper(cfg, url, username); cred_helper_bad = Some(r.is_err()); return r; diff --git a/src/cargo/sources/replaced.rs b/src/cargo/sources/replaced.rs index 465b0f08de4..1a8b1885530 100644 --- a/src/cargo/sources/replaced.rs +++ b/src/cargo/sources/replaced.rs @@ -115,7 +115,9 @@ impl<'cfg> Source for ReplacedSource<'cfg> { } fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]) { - let pkgs = pkgs.iter().map(|id| id.with_source_id(self.replace_with)) + let pkgs = pkgs + .iter() + .map(|id| id.with_source_id(self.replace_with)) .collect::>(); self.inner.add_to_yanked_whitelist(&pkgs); } diff --git a/src/cargo/util/dependency_queue.rs b/src/cargo/util/dependency_queue.rs index e8b23e6263d..d86dd7c3671 100644 --- a/src/cargo/util/dependency_queue.rs +++ b/src/cargo/util/dependency_queue.rs @@ -8,8 +8,6 @@ use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::collections::{HashMap, HashSet}; use std::hash::Hash; -pub use self::Freshness::{Dirty, Fresh}; - #[derive(Debug)] pub struct DependencyQueue { /// A list of all known keys to build. @@ -26,11 +24,6 @@ pub struct DependencyQueue { /// lifecycle of the DependencyQueue. reverse_dep_map: HashMap>, - /// A set of dirty packages. - /// - /// Packages may become dirty over time if their dependencies are rebuilt. - dirty: HashSet, - /// The packages which are currently being built, waiting for a call to /// `finish`. pending: HashSet, @@ -39,25 +32,6 @@ pub struct DependencyQueue { depth: HashMap, } -/// Indication of the freshness of a package. -/// -/// A fresh package does not necessarily need to be rebuilt (unless a dependency -/// was also rebuilt), and a dirty package must always be rebuilt. -#[derive(PartialEq, Eq, Debug, Clone, Copy)] -pub enum Freshness { - Fresh, - Dirty, -} - -impl Freshness { - pub fn combine(self, other: Freshness) -> Freshness { - match self { - Fresh => other, - Dirty => Dirty, - } - } -} - impl Default for DependencyQueue { fn default() -> DependencyQueue { DependencyQueue::new() @@ -70,7 +44,6 @@ impl DependencyQueue { DependencyQueue { dep_map: HashMap::new(), reverse_dep_map: HashMap::new(), - dirty: HashSet::new(), pending: HashSet::new(), depth: HashMap::new(), } @@ -80,16 +53,12 @@ impl DependencyQueue { /// /// It is assumed that any dependencies of this package will eventually also /// be added to the dependency queue. - pub fn queue(&mut self, fresh: Freshness, key: &K, value: V, dependencies: &[K]) -> &mut V { + pub fn queue(&mut self, key: &K, value: V, dependencies: &[K]) -> &mut V { let slot = match self.dep_map.entry(key.clone()) { Occupied(v) => return &mut v.into_mut().1, Vacant(v) => v, }; - if fresh == Dirty { - self.dirty.insert(key.clone()); - } - let mut my_dependencies = HashSet::new(); for dep in dependencies { my_dependencies.insert(dep.clone()); @@ -141,7 +110,7 @@ impl DependencyQueue { /// /// A package is ready to be built when it has 0 un-built dependencies. If /// `None` is returned then no packages are ready to be built. - pub fn dequeue(&mut self) -> Option<(Freshness, K, V)> { + pub fn dequeue(&mut self) -> Option<(K, V)> { // Look at all our crates and find everything that's ready to build (no // deps). After we've got that candidate set select the one which has // the maximum depth in the dependency graph. 
This way we should @@ -163,13 +132,8 @@ impl DependencyQueue { None => return None, }; let (_, data) = self.dep_map.remove(&key).unwrap(); - let fresh = if self.dirty.contains(&key) { - Dirty - } else { - Fresh - }; self.pending.insert(key.clone()); - Some((fresh, key, data)) + Some((key, data)) } /// Returns `true` if there are remaining packages to be built. @@ -186,16 +150,13 @@ impl DependencyQueue { /// /// This function will update the dependency queue with this information, /// possibly allowing the next invocation of `dequeue` to return a package. - pub fn finish(&mut self, key: &K, fresh: Freshness) { + pub fn finish(&mut self, key: &K) { assert!(self.pending.remove(key)); let reverse_deps = match self.reverse_dep_map.get(key) { Some(deps) => deps, None => return, }; for dep in reverse_deps.iter() { - if fresh == Dirty { - self.dirty.insert(dep.clone()); - } assert!(self.dep_map.get_mut(dep).unwrap().0.remove(key)); } } @@ -203,31 +164,31 @@ impl DependencyQueue { #[cfg(test)] mod test { - use super::{DependencyQueue, Freshness}; + use super::DependencyQueue; #[test] fn deep_first() { let mut q = DependencyQueue::new(); - q.queue(Freshness::Fresh, &1, (), &[]); - q.queue(Freshness::Fresh, &2, (), &[1]); - q.queue(Freshness::Fresh, &3, (), &[]); - q.queue(Freshness::Fresh, &4, (), &[2, 3]); - q.queue(Freshness::Fresh, &5, (), &[4, 3]); + q.queue(&1, (), &[]); + q.queue(&2, (), &[1]); + q.queue(&3, (), &[]); + q.queue(&4, (), &[2, 3]); + q.queue(&5, (), &[4, 3]); q.queue_finished(); - assert_eq!(q.dequeue(), Some((Freshness::Fresh, 1, ()))); - assert_eq!(q.dequeue(), Some((Freshness::Fresh, 3, ()))); + assert_eq!(q.dequeue(), Some((1, ()))); + assert_eq!(q.dequeue(), Some((3, ()))); assert_eq!(q.dequeue(), None); - q.finish(&3, Freshness::Fresh); + q.finish(&3); assert_eq!(q.dequeue(), None); - q.finish(&1, Freshness::Fresh); - assert_eq!(q.dequeue(), Some((Freshness::Fresh, 2, ()))); + q.finish(&1); + assert_eq!(q.dequeue(), Some((2, ()))); assert_eq!(q.dequeue(), None); - q.finish(&2, Freshness::Fresh); - assert_eq!(q.dequeue(), Some((Freshness::Fresh, 4, ()))); + q.finish(&2); + assert_eq!(q.dequeue(), Some((4, ()))); assert_eq!(q.dequeue(), None); - q.finish(&4, Freshness::Fresh); - assert_eq!(q.dequeue(), Some((Freshness::Fresh, 5, ()))); + q.finish(&4); + assert_eq!(q.dequeue(), Some((5, ()))); } } diff --git a/src/cargo/util/hex.rs b/src/cargo/util/hex.rs index 7e4dd00e9ae..ed60f9b8e5f 100644 --- a/src/cargo/util/hex.rs +++ b/src/cargo/util/hex.rs @@ -16,7 +16,7 @@ pub fn to_hex(num: u64) -> String { ]) } -pub fn hash_u64(hashable: &H) -> u64 { +pub fn hash_u64(hashable: H) -> u64 { let mut hasher = SipHasher::new_with_keys(0, 0); hashable.hash(&mut hasher); hasher.finish() diff --git a/src/cargo/util/mod.rs b/src/cargo/util/mod.rs index e46df6ce943..14f29c838d5 100644 --- a/src/cargo/util/mod.rs +++ b/src/cargo/util/mod.rs @@ -2,7 +2,7 @@ use std::time::Duration; pub use self::cfg::{Cfg, CfgExpr}; pub use self::config::{homedir, Config, ConfigValue}; -pub use self::dependency_queue::{DependencyQueue, Dirty, Fresh, Freshness}; +pub use self::dependency_queue::DependencyQueue; pub use self::diagnostic_server::RustfixDiagnosticServer; pub use self::errors::{internal, process_error}; pub use self::errors::{CargoResult, CargoResultExt, CliResult, Test}; diff --git a/tests/testsuite/alt_registry.rs b/tests/testsuite/alt_registry.rs index 80c47135c9c..1bae5739a2f 100644 --- a/tests/testsuite/alt_registry.rs +++ b/tests/testsuite/alt_registry.rs @@ -620,7 +620,13 @@ Caused by: 
.run(); for cmd in &[ - "init", "install foo", "login", "owner", "publish", "search", "yank", + "init", + "install foo", + "login", + "owner", + "publish", + "search", + "yank", ] { p.cargo(cmd) .arg("--registry") diff --git a/tests/testsuite/build_script.rs b/tests/testsuite/build_script.rs index 9f33402928a..106f1b1c8fa 100644 --- a/tests/testsuite/build_script.rs +++ b/tests/testsuite/build_script.rs @@ -2406,7 +2406,6 @@ fn fresh_builds_possible_with_link_libs() { .run(); p.cargo("build -v") - .env("RUST_LOG", "cargo::ops::cargo_rustc::fingerprint=info") .with_stderr( "\ [FRESH] foo v0.5.0 ([..]) @@ -3696,3 +3695,115 @@ fn optional_build_dep_and_required_normal_dep() { ) .run(); } + +#[test] +fn using_rerun_if_changed_does_not_rebuild() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + "#, + ) + .file( + "build.rs", + r#" + fn main() { + println!("cargo:rerun-if-changed=build.rs"); + } + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("build").run(); + p.cargo("build") + .with_stderr("[FINISHED] [..]") + .run(); +} + +#[test] +fn links_interrupted_can_restart() { + // Test for a `links` dependent build script getting canceled and then + // restarted. Steps: + // 1. Build to establish fingerprints. + // 2. Change something (an env var in this case) that triggers the + // dependent build script to run again. Kill the top-level build script + // while it is running (such as hitting Ctrl-C). + // 3. Run the build again, it should re-run the build script. + let bar = project() + .at("bar") + .file( + "Cargo.toml", + r#" + [package] + name = "bar" + version = "0.5.0" + authors = [] + links = "foo" + build = "build.rs" + "#, + ) + .file("src/lib.rs", "") + .file( + "build.rs", + r#" + fn main() { + println!("cargo:rerun-if-env-changed=SOMEVAR"); + } + "#, + ) + .build(); + + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "foo" + version = "0.5.0" + authors = [] + build = "build.rs" + + [dependencies.bar] + path = '{}' + "#, + bar.root().display() + ), + ) + .file("src/lib.rs", "") + .file( + "build.rs", + r#" + use std::env; + fn main() { + println!("cargo:rebuild-if-changed=build.rs"); + if std::path::Path::new("abort").exists() { + panic!("Crash!"); + } + } + "#, + ) + .build(); + + p.cargo("build").run(); + // Simulate the user hitting Ctrl-C during a build. + p.change_file("abort", ""); + // Set SOMEVAR to trigger a rebuild. + p.cargo("build") + .env("SOMEVAR", "1") + .with_stderr_contains("[..]Crash![..]") + .with_status(101) + .run(); + fs::remove_file(p.root().join("abort")).unwrap(); + // Try again without aborting the script. + // ***This is currently broken, the script does not re-run. 
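+ // The `with_stderr_contains` assertion below documents the intended
+ // behavior: with `SOMEVAR` still set and the `abort` file removed, foo's
+ // build script should be re-run rather than considered fresh.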
+ p.cargo("build -v") + .env("SOMEVAR", "1") + .with_stderr_contains("[RUNNING] [..]/foo-[..]/build-script-build[..]") + .run(); +} diff --git a/tests/testsuite/clean.rs b/tests/testsuite/clean.rs index 79c5a073729..a0ec3affcfb 100644 --- a/tests/testsuite/clean.rs +++ b/tests/testsuite/clean.rs @@ -28,10 +28,7 @@ fn different_dir() { p.cargo("build").run(); assert!(p.build_dir().is_dir()); - p.cargo("clean") - .cwd("src") - .with_stdout("") - .run(); + p.cargo("clean").cwd("src").with_stdout("").run(); assert!(!p.build_dir().is_dir()); } diff --git a/tests/testsuite/fix.rs b/tests/testsuite/fix.rs index a1c335f8cf4..ff38c21cdd7 100644 --- a/tests/testsuite/fix.rs +++ b/tests/testsuite/fix.rs @@ -685,7 +685,10 @@ fn fix_features() { #[test] fn shows_warnings() { let p = project() - .file("src/lib.rs", "#[deprecated] fn bar() {} pub fn foo() { let _ = bar(); }") + .file( + "src/lib.rs", + "#[deprecated] fn bar() {} pub fn foo() { let _ = bar(); }", + ) .build(); p.cargo("fix --allow-no-vcs") diff --git a/tests/testsuite/freshness.rs b/tests/testsuite/freshness.rs index a7c399414ea..76c10a71655 100644 --- a/tests/testsuite/freshness.rs +++ b/tests/testsuite/freshness.rs @@ -1851,6 +1851,7 @@ fn simulated_docker_deps_stay_cached() { zeropath(&paths::home()); if already_zero { + println!("already zero"); // If it was already truncated, then everything stays fresh. p.cargo("build -v") .with_stderr_unordered( @@ -1866,8 +1867,16 @@ fn simulated_docker_deps_stay_cached() { ) .run(); } else { + println!("not already zero"); // It is not ideal that `foo` gets recompiled, but that is the current // behavior. Currently mtimes are ignored for registry deps. + // + // Note that this behavior is due to the fact that `foo` has a build + // script in "old" mode where it doesn't print `rerun-if-*`. In this + // mode we use `Precalculated` to fingerprint a path dependency, where + // `Precalculated` is an opaque string which has the most recent mtime + // in it. It differs between builds because one has nsec=0 and the other + // likely has a nonzero nsec. Hence, the rebuild. p.cargo("build -v") .with_stderr_unordered( "\ @@ -1958,3 +1967,69 @@ fn edition_change_invalidates() { .run(); assert_eq!(p.glob("target/debug/deps/libfoo-*.rlib").count(), 1); } + +#[test] +fn rename_with_path_deps() { + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + + [dependencies] + a = { path = 'a' } + "#, + ) + .file( + "src/lib.rs", + "extern crate a; pub fn foo() { a::foo(); }", + ) + .file( + "a/Cargo.toml", + r#" + [project] + name = "a" + version = "0.5.0" + authors = [] + + [dependencies] + b = { path = 'b' } + "#, + ) + .file( + "a/src/lib.rs", + "extern crate b; pub fn foo() { b::foo() }", + ) + .file( + "a/b/Cargo.toml", + r#" + [project] + name = "b" + version = "0.5.0" + authors = [] + "#, + ) + .file( + "a/b/src/lib.rs", + "pub fn foo() { }", + ); + let p = p.build(); + + p.cargo("build").run(); + + // Now rename the root directory and rerun `cargo run`. Not only should we + // not build anything but we also shouldn't crash. 
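+ // (This should hold because fingerprints now record paths relative to the
+ // target/package roots rather than absolute paths; see `target_root` and
+ // the fingerprint module comments above.)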
+ let mut new = p.root(); + new.pop(); + new.push("foo2"); + + fs::rename(p.root(), &new).unwrap(); + + p.cargo("build") + .cwd(&new) + .with_stderr("[FINISHED] [..]") + .run(); +} diff --git a/tests/testsuite/package.rs b/tests/testsuite/package.rs index 3df9d603f6a..2b2a3555007 100644 --- a/tests/testsuite/package.rs +++ b/tests/testsuite/package.rs @@ -6,8 +6,7 @@ use std::path::Path; use crate::support::cargo_process; use crate::support::registry::Package; use crate::support::{ - basic_manifest, git, path2url, paths, project, publish::validate_crate_contents, - registry, + basic_manifest, git, path2url, paths, project, publish::validate_crate_contents, registry, }; use git2; @@ -880,9 +879,7 @@ fn ignore_workspace_specifier() { .file("bar/src/lib.rs", "") .build(); - p.cargo("package --no-verify") - .cwd("bar") - .run(); + p.cargo("package --no-verify").cwd("bar").run(); let f = File::open(&p.root().join("target/package/bar-0.1.0.crate")).unwrap(); let rewritten_toml = r#"# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO @@ -1256,7 +1253,9 @@ fn package_with_select_features() { ) .build(); - p.cargo("package --features required").masquerade_as_nightly_cargo().run(); + p.cargo("package --features required") + .masquerade_as_nightly_cargo() + .run(); } #[test] @@ -1285,7 +1284,9 @@ fn package_with_all_features() { ) .build(); - p.cargo("package --all-features").masquerade_as_nightly_cargo().run(); + p.cargo("package --all-features") + .masquerade_as_nightly_cargo() + .run(); } #[test] diff --git a/tests/testsuite/run.rs b/tests/testsuite/run.rs index a16613e984e..38ae8fe728c 100644 --- a/tests/testsuite/run.rs +++ b/tests/testsuite/run.rs @@ -1110,7 +1110,10 @@ fn default_run_workspace() { .file("b/src/main.rs", r#"fn main() {println!("run-b");}"#) .build(); - p.cargo("run").masquerade_as_nightly_cargo().with_stdout("run-a").run(); + p.cargo("run") + .masquerade_as_nightly_cargo() + .with_stdout("run-a") + .run(); } #[test] diff --git a/tests/testsuite/test.rs b/tests/testsuite/test.rs index f2a4e96f1a4..8e31513b03e 100644 --- a/tests/testsuite/test.rs +++ b/tests/testsuite/test.rs @@ -164,18 +164,20 @@ fn cargo_test_quiet_with_harness() { fn main() {} #[test] fn test_hello() {} "#, - ).build(); + ) + .build(); - p.cargo("test -q") - .with_stdout( - " + p.cargo("test -q") + .with_stdout( + " running 1 test . test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out -" - ).with_stderr("") - .run(); +", + ) + .with_stderr("") + .run(); } #[test] @@ -205,13 +207,10 @@ fn cargo_test_quiet_no_harness() { fn main() {} #[test] fn test_hello() {} "#, - ).build(); + ) + .build(); - p.cargo("test -q") - .with_stdout( - "" - ).with_stderr("") - .run(); + p.cargo("test -q").with_stdout("").with_stderr("").run(); } #[test] @@ -1553,7 +1552,8 @@ test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ", ) - .with_stderr("\ + .with_stderr( + "\ [COMPILING] foo v0.0.1 ([CWD]) [RUNNING] `rustc --crate-name foo src/lib.rs [..] --test [..]` [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] diff --git a/tests/testsuite/tool_paths.rs b/tests/testsuite/tool_paths.rs index 7cb91d2426f..9ba89670fe8 100644 --- a/tests/testsuite/tool_paths.rs +++ b/tests/testsuite/tool_paths.rs @@ -188,11 +188,13 @@ fn custom_runner_cfg() { p.cargo("run -- --param") .with_status(101) - .with_stderr_contains("\ + .with_stderr_contains( + "\ [COMPILING] foo v0.0.1 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
[RUNNING] `nonexistent-runner -r target/debug/foo[EXE] --param` -") +", + ) .run(); } @@ -220,11 +222,13 @@ fn custom_runner_cfg_precedence() { p.cargo("run -- --param") .with_status(101) - .with_stderr_contains("\ + .with_stderr_contains( + "\ [COMPILING] foo v0.0.1 ([CWD]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] [RUNNING] `nonexistent-runner -r target/debug/foo[EXE] --param` -") +", + ) .run(); } @@ -246,8 +250,10 @@ fn custom_runner_cfg_collision() { p.cargo("run -- --param") .with_status(101) - .with_stderr_contains("\ + .with_stderr_contains( + "\ [ERROR] several matching instances of `target.'cfg(..)'.runner` in `.cargo/config` -") +", + ) .run(); }
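
A note on the `MyFnOnce` trait added to fingerprint.rs above: at the time of
this change, `Box<dyn FnOnce>` could not be called directly on stable Rust, so
the boxed closure is consumed through a `call_box(self: Box<Self>, ...)` method
instead. Below is a minimal, self-contained sketch of that pattern; the names
(`CallOnce`, `job`) and the `u32` signature are illustrative only, not part of
Cargo.

// Sketch of the call_box pattern used by `MyFnOnce`.
trait CallOnce {
    fn call_box(self: Box<Self>, arg: u32) -> u32;
}

// Any closure with a matching signature implements the trait for free.
impl<F: FnOnce(u32) -> u32> CallOnce for F {
    fn call_box(self: Box<Self>, arg: u32) -> u32 {
        // `*self` moves the closure out of the box so it can be consumed.
        (*self)(arg)
    }
}

fn main() {
    let state = String::from("captured state");
    // Boxed as a trait object, the closure can be stored in a queue or sent
    // to a worker thread, then consumed exactly once via `call_box`.
    let job: Box<dyn CallOnce + Send> = Box::new(move |n| {
        drop(state); // consumes the capture: this is FnOnce, not Fn
        n + 1
    });
    assert_eq!(job.call_box(41), 42);
}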