
Implement the Cargo half of pipelined compilation (take 2) #6883


Merged
merged 12 commits on May 10, 2019
4 changes: 2 additions & 2 deletions src/cargo/core/compiler/build_context/target_info.rs
@@ -20,12 +20,12 @@ pub struct TargetInfo {
}

/// Type of each file generated by a Unit.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug)]
pub enum FileFlavor {
/// Not a special file type.
Normal,
/// Something you can link against (e.g., a library).
-Linkable,
+Linkable { rmeta: bool },
/// Piece of external debug information (e.g., `.dSYM`/`.pdb` file).
DebugInfo,
}
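The `rmeta: bool` field lets every consumer of `FileFlavor` tell a full linkable artifact apart from the metadata-only `.rmeta` file that pipelining introduces. A minimal sketch of how a consumer might branch on the new field (the `is_rmeta_output` helper is illustrative, not part of this PR):

```rust
// Minimal sketch (not Cargo's actual code): distinguishing a full linkable
// artifact from a metadata-only one via the new field.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum FileFlavor {
    Normal,
    Linkable { rmeta: bool },
    DebugInfo,
}

/// Hypothetical helper: is this output the `.rmeta` file that pipelined
/// dependents can consume before codegen finishes?
fn is_rmeta_output(flavor: &FileFlavor) -> bool {
    match flavor {
        // `rmeta: true` marks the metadata file emitted for pipelining.
        FileFlavor::Linkable { rmeta } => *rmeta,
        FileFlavor::Normal | FileFlavor::DebugInfo => false,
    }
}

fn main() {
    let rlib = FileFlavor::Linkable { rmeta: false };
    let rmeta = FileFlavor::Linkable { rmeta: true };
    assert!(!is_rmeta_output(&rlib));
    assert!(is_rmeta_output(&rmeta));
}
```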
13 changes: 11 additions & 2 deletions src/cargo/core/compiler/context/compilation_files.rs
@@ -308,7 +308,7 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> {
path,
hardlink: None,
export_path: None,
-flavor: FileFlavor::Linkable,
+flavor: FileFlavor::Linkable { rmeta: false },
});
} else {
let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> {
@@ -372,12 +372,21 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> {
add(
kind.crate_type(),
if kind.linkable() {
-FileFlavor::Linkable
+FileFlavor::Linkable { rmeta: false }
} else {
FileFlavor::Normal
},
)?;
}
+let path = out_dir.join(format!("lib{}.rmeta", file_stem));
+if !unit.target.requires_upstream_objects() {
+ret.push(OutputFile {
+path,
+hardlink: None,
+export_path: None,
+flavor: FileFlavor::Linkable { rmeta: true },
+});
+}
}
}
}
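For units that compile to an rlib (i.e., that don't require upstream objects), the hunk above registers one extra expected output: the metadata file sitting next to the rlib. A minimal sketch of the path computation, with assumed names (`rmeta_path` is not Cargo's API; the `format!` mirrors the hunk above):

```rust
use std::path::{Path, PathBuf};

// Minimal sketch: the expected `.rmeta` file lives in the same output
// directory as the rlib and shares its file stem.
fn rmeta_path(out_dir: &Path, file_stem: &str) -> PathBuf {
    out_dir.join(format!("lib{}.rmeta", file_stem))
}

fn main() {
    let path = rmeta_path(Path::new("target/debug/deps"), "foo-abc123");
    assert_eq!(path, PathBuf::from("target/debug/deps/libfoo-abc123.rmeta"));
}
```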
47 changes: 41 additions & 6 deletions src/cargo/core/compiler/context/mod.rs
@@ -42,6 +42,17 @@ pub struct Context<'a, 'cfg: 'a> {
unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
files: Option<CompilationFiles<'a, 'cfg>>,
package_cache: HashMap<PackageId, &'a Package>,

+/// A flag indicating whether pipelining is enabled for this compilation
+/// session. Pipelining largely affects only the edges of the dependency
+/// graph that we generate at the end; otherwise the build is unchanged.
+pipelining: bool,
+
+/// A set of units which are compiling rlibs and are expected to produce
+/// metadata files in addition to the rlib itself. This is only filled in
+/// when `pipelining` above is enabled.
+rmeta_required: HashSet<Unit<'a>>,
}

impl<'a, 'cfg> Context<'a, 'cfg> {
@@ -60,6 +71,12 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
.chain_err(|| "failed to create jobserver")?,
};

+let pipelining = bcx
+.config
+.get_bool("build.pipelining")?
+.map(|t| t.val)
+.unwrap_or(false);

Ok(Self {
bcx,
compilation: Compilation::new(bcx)?,
@@ -76,6 +93,8 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
unit_dependencies: HashMap::new(),
files: None,
package_cache: HashMap::new(),
+rmeta_required: HashSet::new(),
+pipelining,
})
}

@@ -261,12 +280,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
self.primary_packages
.extend(units.iter().map(|u| u.pkg.package_id()));

-build_unit_dependencies(
-units,
-self.bcx,
-&mut self.unit_dependencies,
-&mut self.package_cache,
-)?;
+build_unit_dependencies(self, units)?;
let files = CompilationFiles::new(
units,
host_layout,
@@ -453,6 +467,27 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
}
Ok(())
}

+/// Returns whether, when `parent` depends on `dep`, it only requires the
+/// metadata file from `dep`.
+pub fn only_requires_rmeta(&self, parent: &Unit<'a>, dep: &Unit<'a>) -> bool {
+// This is only enabled when pipelining is enabled.
+self.pipelining
+// We're only a candidate for requiring an `rmeta` file if we
+// ourselves are building an rlib,
+&& !parent.target.requires_upstream_objects()
+&& parent.mode == CompileMode::Build
+// Our dependency must also be built as an rlib, otherwise its
+// object code is needed in some fashion
+&& !dep.target.requires_upstream_objects()
+&& dep.mode == CompileMode::Build
+}
+
+/// Returns whether `unit` should also emit metadata when it is built,
+/// because some other compilations rely on that metadata.
+pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool {
+self.rmeta_required.contains(unit)
+}
}

#[derive(Default)]
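Two things worth noting from this file. First, the config lookup reads the `build.pipelining` key, so the feature is opt-in (a `pipelining = true` entry under `[build]` in `.cargo/config`, per the key name read above). Second, the `only_requires_rmeta` edge predicate can be read in isolation; here is a minimal sketch with a unit reduced to the two properties the predicate actually consults (`UnitInfo` and the standalone function are illustrative stand-ins, not this PR's types):

```rust
// Minimal sketch of the pipelining edge test: an edge parent -> dep only
// needs dep's metadata when both sides are ordinary rlib builds.
#[derive(Clone, Copy)]
struct UnitInfo {
    requires_upstream_objects: bool, // e.g., binaries and dylibs do
    is_build_mode: bool,             // CompileMode::Build
}

fn only_requires_rmeta(pipelining: bool, parent: UnitInfo, dep: UnitInfo) -> bool {
    pipelining
        // the parent must itself be building an rlib...
        && !parent.requires_upstream_objects
        && parent.is_build_mode
        // ...and so must the dependency, otherwise its object code is needed
        && !dep.requires_upstream_objects
        && dep.is_build_mode
}

fn main() {
    let rlib = UnitInfo { requires_upstream_objects: false, is_build_mode: true };
    let bin = UnitInfo { requires_upstream_objects: true, is_build_mode: true };
    assert!(only_requires_rmeta(true, rlib, rlib));
    assert!(!only_requires_rmeta(true, bin, rlib)); // a binary needs real objects
    assert!(!only_requires_rmeta(false, rlib, rlib)); // feature gated off
}
```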
88 changes: 52 additions & 36 deletions src/cargo/core/compiler/context/unit_dependencies.rs
@@ -15,41 +15,35 @@
//! (for example, with and without tests), so we actually build a dependency
//! graph of `Unit`s, which capture these properties.

-use std::cell::RefCell;
-use std::collections::{HashMap, HashSet};
-
-use log::trace;
-
-use super::{BuildContext, CompileMode, Kind};
use crate::core::compiler::Unit;
+use crate::core::compiler::{BuildContext, CompileMode, Context, Kind};
use crate::core::dependency::Kind as DepKind;
use crate::core::package::Downloads;
use crate::core::profiles::UnitFor;
use crate::core::{Package, PackageId, Target};
use crate::CargoResult;
+use log::trace;
+use std::collections::{HashMap, HashSet};

struct State<'a: 'tmp, 'cfg: 'a, 'tmp> {
-bcx: &'tmp BuildContext<'a, 'cfg>,
-deps: &'tmp mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
-pkgs: RefCell<&'tmp mut HashMap<PackageId, &'a Package>>,
+cx: &'tmp mut Context<'a, 'cfg>,
waiting_on_download: HashSet<PackageId>,
downloads: Downloads<'a, 'cfg>,
}

pub fn build_unit_dependencies<'a, 'cfg>(
+cx: &mut Context<'a, 'cfg>,
roots: &[Unit<'a>],
-bcx: &BuildContext<'a, 'cfg>,
-deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
-pkgs: &mut HashMap<PackageId, &'a Package>,
) -> CargoResult<()> {
-assert!(deps.is_empty(), "can only build unit deps once");
+assert!(
+cx.unit_dependencies.is_empty(),
+"can only build unit deps once"
+);

let mut state = State {
-bcx,
-deps,
-pkgs: RefCell::new(pkgs),
+downloads: cx.bcx.packages.enable_download()?,
+cx,
waiting_on_download: HashSet::new(),
-downloads: bcx.packages.enable_download()?,
};

loop {
Expand All @@ -62,7 +56,7 @@ pub fn build_unit_dependencies<'a, 'cfg>(
// cleared, and avoid building the lib thrice (once with `panic`, once
// without, once for `--test`). In particular, the lib included for
// Doc tests and examples are `Build` mode here.
-let unit_for = if unit.mode.is_any_test() || bcx.build_config.test() {
+let unit_for = if unit.mode.is_any_test() || state.cx.bcx.build_config.test() {
UnitFor::new_test()
} else if unit.target.is_custom_build() {
// This normally doesn't happen, except `clean` aggressively
Expand All @@ -79,20 +73,23 @@ pub fn build_unit_dependencies<'a, 'cfg>(

if !state.waiting_on_download.is_empty() {
state.finish_some_downloads()?;
-state.deps.clear();
+state.cx.unit_dependencies.clear();
} else {
break;
}
}
-trace!("ALL UNIT DEPENDENCIES {:#?}", state.deps);

connect_run_custom_build_deps(&mut state);

+trace!("ALL UNIT DEPENDENCIES {:#?}", state.cx.unit_dependencies);
+
+record_units_requiring_metadata(state.cx);
+
// Dependencies are used in tons of places throughout the backend, many of
// which affect the determinism of the build itself. As a result be sure
// that dependency lists are always sorted to ensure we've always got a
// deterministic output.
-for list in state.deps.values_mut() {
+for list in state.cx.unit_dependencies.values_mut() {
list.sort();
}

Expand All @@ -104,16 +101,16 @@ fn deps_of<'a, 'cfg, 'tmp>(
state: &mut State<'a, 'cfg, 'tmp>,
unit_for: UnitFor,
) -> CargoResult<()> {
-// Currently the `deps` map does not include `unit_for`. This should
+// Currently the `unit_dependencies` map does not include `unit_for`. This should
// be safe for now. `TestDependency` only exists to clear the `panic`
// flag, and you'll never ask for a `unit` with `panic` set as a
// `TestDependency`. `CustomBuild` should also be fine since if the
// requested unit's settings are the same as `Any`, `CustomBuild` can't
// affect anything else in the hierarchy.
-if !state.deps.contains_key(unit) {
+if !state.cx.unit_dependencies.contains_key(unit) {
let unit_deps = compute_deps(unit, state, unit_for)?;
let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect();
-state.deps.insert(*unit, to_insert);
+state.cx.unit_dependencies.insert(*unit, to_insert);
for (unit, unit_for) in unit_deps {
deps_of(&unit, state, unit_for)?;
}
@@ -131,13 +128,13 @@ fn compute_deps<'a, 'cfg, 'tmp>(
unit_for: UnitFor,
) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
if unit.mode.is_run_custom_build() {
-return compute_deps_custom_build(unit, state.bcx);
+return compute_deps_custom_build(unit, state.cx.bcx);
} else if unit.mode.is_doc() && !unit.mode.is_any_test() {
// Note: this does not include doc test.
return compute_deps_doc(unit, state);
}

-let bcx = state.bcx;
+let bcx = state.cx.bcx;
let id = unit.pkg.package_id();
let deps = bcx.resolve.deps(id).filter(|&(_id, deps)| {
assert!(!deps.is_empty());
@@ -295,7 +292,7 @@ fn compute_deps_doc<'a, 'cfg, 'tmp>(
unit: &Unit<'a>,
state: &mut State<'a, 'cfg, 'tmp>,
) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
-let bcx = state.bcx;
+let bcx = state.cx.bcx;
let deps = bcx
.resolve
.deps(unit.pkg.package_id())
@@ -448,7 +445,7 @@ fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) {
// have the build script as the key and the library would be in the
// value's set.
let mut reverse_deps = HashMap::new();
-for (unit, deps) in state.deps.iter() {
+for (unit, deps) in state.cx.unit_dependencies.iter() {
for dep in deps {
if dep.mode == CompileMode::RunCustomBuild {
reverse_deps
Expand All @@ -469,7 +466,8 @@ fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) {
// `dep_build_script` to manufacture an appropriate build script unit to
// depend on.
for unit in state
-.deps
+.cx
+.unit_dependencies
.keys()
.filter(|k| k.mode == CompileMode::RunCustomBuild)
{
Expand All @@ -480,13 +478,13 @@ fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) {

let to_add = reverse_deps
.iter()
-.flat_map(|reverse_dep| state.deps[reverse_dep].iter())
+.flat_map(|reverse_dep| state.cx.unit_dependencies[reverse_dep].iter())
.filter(|other| {
other.pkg != unit.pkg
&& other.target.linkable()
&& other.pkg.manifest().links().is_some()
})
-.filter_map(|other| dep_build_script(other, state.bcx).map(|p| p.0))
+.filter_map(|other| dep_build_script(other, state.cx.bcx).map(|p| p.0))
.collect::<HashSet<_>>();

if !to_add.is_empty() {
Expand All @@ -497,21 +495,39 @@ fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) {

// And finally, add in all the missing dependencies!
for (unit, new_deps) in new_deps {
-state.deps.get_mut(&unit).unwrap().extend(new_deps);
+state
+.cx
+.unit_dependencies
+.get_mut(&unit)
+.unwrap()
+.extend(new_deps);
}
}

+/// Records the list of units which are required to emit metadata.
+///
+/// Units which depend only on the metadata of others require the others to
+/// actually produce metadata, so we record that here.
+fn record_units_requiring_metadata(cx: &mut Context<'_, '_>) {
+for (key, deps) in cx.unit_dependencies.iter() {
+for dep in deps {
+if cx.only_requires_rmeta(key, dep) {
+cx.rmeta_required.insert(*dep);
+}
+}
+}
+}

impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> {
fn get(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
-let mut pkgs = self.pkgs.borrow_mut();
-if let Some(pkg) = pkgs.get(&id) {
+if let Some(pkg) = self.cx.package_cache.get(&id) {
return Ok(Some(pkg));
}
if !self.waiting_on_download.insert(id) {
return Ok(None);
}
if let Some(pkg) = self.downloads.start(id)? {
-pkgs.insert(id, pkg);
+self.cx.package_cache.insert(id, pkg);
self.waiting_on_download.remove(&id);
return Ok(Some(pkg));
}
Expand All @@ -531,7 +547,7 @@ impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> {
loop {
let pkg = self.downloads.wait()?;
self.waiting_on_download.remove(&pkg.package_id());
-self.pkgs.borrow_mut().insert(pkg.package_id(), pkg);
+self.cx.package_cache.insert(pkg.package_id(), pkg);

// Arbitrarily choose that 5 or more packages concurrently download
// is a good enough number to "fill the network pipe". If we have
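The new `record_units_requiring_metadata` pass runs after the unit graph is complete: it walks every edge and marks the dependencies whose `.rmeta` must be emitted so their dependents can start early. A minimal sketch of that pass with the unit type reduced to an id and the edge test abstracted as a closure (both illustrative, not this PR's types):

```rust
use std::collections::{HashMap, HashSet};

// Illustrative stand-in for `Unit`.
type Unit = u32;

// Walk every edge of the finished unit graph and remember the dependencies
// that only need to produce metadata (the closure stands in for
// `Context::only_requires_rmeta`).
fn record_units_requiring_metadata(
    unit_dependencies: &HashMap<Unit, Vec<Unit>>,
    only_requires_rmeta: impl Fn(Unit, Unit) -> bool,
) -> HashSet<Unit> {
    let mut rmeta_required = HashSet::new();
    for (&parent, deps) in unit_dependencies {
        for &dep in deps {
            if only_requires_rmeta(parent, dep) {
                // `dep` must emit an `.rmeta` so `parent` can start early.
                rmeta_required.insert(dep);
            }
        }
    }
    rmeta_required
}

fn main() {
    let mut graph = HashMap::new();
    graph.insert(1, vec![2, 3]); // unit 1 depends on units 2 and 3
    let required = record_units_requiring_metadata(&graph, |_parent, dep| dep == 2);
    assert!(required.contains(&2) && !required.contains(&3));
}
```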