Auto merge of #55871 - ljedrz:llvm_back_allocations, r=<try>
codegen_llvm_back: improve allocations

This commit was split out from #54864. Last time it caused an LLVM OOM, presumably due to an aggressive preallocation strategy in `thin_lto`.

This time the preallocations are more cautious, and there are a few additional memory-related improvements (the last 3 points in the list below).

- _gently_ preallocate vectors of known length
- `extend` instead of `append` where the argument is consumable
- turn 2 `push` loops into `extend`s
- take the vector returned by a function directly instead of copying it into another one with `extend_from_slice`
- consume `modules` when no longer needed
- return an `impl Iterator` from `generate_lto_work`
- don't `collect` `globals`, as they are iterated over and consumed right afterwards

While I'm hoping it won't cause an OOM anymore, I would still consider this a "high-risk" PR and not roll it up.
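
For illustration, here is a minimal, self-contained sketch of the patterns above: `Vec::with_capacity` for gentle preallocation, `extend` over a consuming iterator instead of `append`/`push` loops, and returning an `impl Iterator` instead of a collected `Vec`. The types and names below (`Module`, `serialize_all`, `generate_work`) are invented for the example and are not the actual rustc items.

```rust
// `Module` is a hypothetical stand-in for the real codegen module type.
struct Module {
    name: String,
}

// _Gently_ preallocate: reserve exactly one slot per input module rather
// than an aggressive upper bound, then fill the vector with `extend`,
// which consumes `modules` so its storage can be freed right away.
fn serialize_all(modules: Vec<Module>) -> Vec<String> {
    let mut serialized = Vec::with_capacity(modules.len());
    serialized.extend(modules.into_iter().map(|m| m.name));
    serialized
}

// Returning `impl Iterator` instead of a collected `Vec` lets the caller
// decide whether the items ever need to be materialized all at once.
fn generate_work(names: Vec<String>) -> impl Iterator<Item = (String, u64)> {
    names.into_iter().map(|name| (name, 0u64))
}

fn main() {
    let modules = vec![
        Module { name: "a".to_string() },
        Module { name: "b".to_string() },
    ];

    // `extend` with an owned `Vec` moves its elements; `append(&mut other)`
    // would force `other` to stay alive and mutable just to be drained.
    let mut names = Vec::with_capacity(modules.len());
    names.extend(serialize_all(modules));

    for (name, cost) in generate_work(names) {
        println!("{}: {}", name, cost);
    }
}
```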
bors committed Nov 11, 2018
2 parents b76ee83 + dc1b2c7 commit 684fb37
Showing 5 changed files with 33 additions and 27 deletions.
12 changes: 6 additions & 6 deletions src/librustc_codegen_llvm/back/link.rs
@@ -134,12 +134,12 @@ pub(crate) fn link_binary(sess: &Session,
bug!("invalid output type `{:?}` for target os `{}`",
crate_type, sess.opts.target_triple);
}
let mut out_files = link_binary_output(sess,
codegen_results,
crate_type,
outputs,
crate_name);
out_filenames.append(&mut out_files);
let out_files = link_binary_output(sess,
codegen_results,
crate_type,
outputs,
crate_name);
out_filenames.extend(out_files);
}

// Remove the temporary object file and metadata if we aren't saving temps
26 changes: 16 additions & 10 deletions src/librustc_codegen_llvm/back/lto.rs
@@ -279,17 +279,19 @@ fn fat_lto(cgcx: &CodegenContext,
// and we want to move everything to the same LLVM context. Currently the
// way we know of to do that is to serialize them to a string and then parse
// them later. Not great but hey, that's why it's "fat" LTO, right?
for module in modules {
serialized_modules.extend(modules.into_iter().map(|module| {
let buffer = ModuleBuffer::new(module.module_llvm.llmod());
let llmod_id = CString::new(&module.name[..]).unwrap();
serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
}

(SerializedModule::Local(buffer), llmod_id)
}));

// For all serialized bitcode files we parse them and link them in as we did
// above, this is all mostly handled in C++. Like above, though, we don't
// know much about the memory management here so we err on the side of being
// safe and persist everything with the original module.
let mut linker = Linker::new(llmod);
serialized_bitcode.reserve(serialized_modules.len());
for (bc_decoded, name) in serialized_modules {
info!("linking {:?}", name);
time_ext(cgcx.time_passes, None, &format!("ll link {:?}", name), || {
@@ -403,9 +405,10 @@ fn thin_lto(cgcx: &CodegenContext,
.map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone()))
.collect();

let mut thin_buffers = Vec::new();
let mut module_names = Vec::new();
let mut thin_modules = Vec::new();
// Reserve memory only partially in order to avoid OOM
let mut thin_buffers = Vec::with_capacity(modules.len());
let mut module_names = Vec::with_capacity(modules.len());
let mut thin_modules = Vec::with_capacity(modules.len());

// FIXME: right now, like with fat LTO, we serialize all in-memory
// modules before working with them and ThinLTO. We really
@@ -414,7 +417,7 @@
// into the global index. It turns out that this loop is by far
// the most expensive portion of this small bit of global
// analysis!
for (i, module) in modules.iter().enumerate() {
for (i, module) in modules.into_iter().enumerate() {
info!("local module: {} - {}", i, module.name);
let name = CString::new(module.name.clone()).unwrap();
let buffer = ThinBuffer::new(module.module_llvm.llmod());
@@ -460,12 +463,15 @@
// incremental ThinLTO first where we could actually avoid
// looking at upstream modules entirely sometimes (the contents,
// we must always unconditionally look at the index).
let mut serialized = Vec::new();

let cached_modules = cached_modules.into_iter().map(|(sm, wp)| {
(sm, CString::new(wp.cgu_name).unwrap())
});

let upstream_cached_len = serialized_modules.len() + cached_modules.len();
let mut serialized = Vec::with_capacity(upstream_cached_len);
thin_modules.reserve(upstream_cached_len);
module_names.reserve(upstream_cached_len);

for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
info!("upstream or cached module {:?}", name);
thin_modules.push(llvm::ThinLTOModule {
@@ -521,7 +527,7 @@ fn thin_lto(cgcx: &CodegenContext,
});

let mut copy_jobs = vec![];
let mut opt_jobs = vec![];
let mut opt_jobs = Vec::with_capacity(shared.module_names.len());

info!("checking which modules can be-reused and which have to be re-optimized.");
for (module_index, module_name) in shared.module_names.iter().enumerate() {
7 changes: 3 additions & 4 deletions src/librustc_codegen_llvm/back/rpath.rs
@@ -31,14 +31,12 @@ pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
return Vec::new();
}

let mut flags = Vec::new();

debug!("preparing the RPATH!");

let libs = config.used_crates.clone();
let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs);
flags.extend_from_slice(&rpaths_to_flags(&rpaths));
let mut flags = rpaths_to_flags(&rpaths);

// Use DT_RUNPATH instead of DT_RPATH if available
if config.linker_is_gnu {
@@ -49,7 +47,8 @@ pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec<String> {
}

fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
let mut ret = Vec::new();
let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity

for rpath in rpaths {
if rpath.contains(',') {
ret.push("-Wl,-rpath".into());
8 changes: 4 additions & 4 deletions src/librustc_codegen_llvm/back/write.rs
@@ -648,7 +648,7 @@ unsafe fn optimize(cgcx: &CodegenContext,
fn generate_lto_work(cgcx: &CodegenContext,
modules: Vec<ModuleCodegen>,
import_only_modules: Vec<(SerializedModule, WorkProduct)>)
-> Vec<(WorkItem, u64)>
-> impl Iterator<Item = (WorkItem, u64)>
{
let mut timeline = cgcx.time_graph.as_ref().map(|tg| {
tg.start(CODEGEN_WORKER_TIMELINE,
@@ -670,7 +670,7 @@ fn generate_lto_work(cgcx: &CodegenContext,
}), 0)
});

lto_modules.chain(copy_jobs).collect()
lto_modules.chain(copy_jobs)
}

unsafe fn codegen(cgcx: &CodegenContext,
@@ -2586,8 +2586,8 @@ fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::M
imp_name.extend(name.to_bytes());
let imp_name = CString::new(imp_name).unwrap();
(imp_name, val)
})
.collect::<Vec<_>>();
});

for (imp_name, val) in globals {
let imp = llvm::LLVMAddGlobal(llmod,
i8p_ty,
7 changes: 4 additions & 3 deletions src/librustc_codegen_utils/symbol_export.rs
@@ -229,10 +229,11 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
"__llvm_profile_raw_version",
"__llvm_profile_filename",
];
for sym in &PROFILER_WEAK_SYMBOLS {

symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| {
let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym));
symbols.push((exported_symbol, SymbolExportLevel::C));
}
(exported_symbol, SymbolExportLevel::C)
}));
}

if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) {
