Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add pinning for the runtime code based on GCChecker (caller pinning) #52

Draft
wants to merge 11 commits into
base: v1.9.2+RAI
Choose a base branch
from
63 changes: 47 additions & 16 deletions src/aotcompile.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -240,8 +240,16 @@ static void jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance
jl_method_t *def = codeinst->def->def.method;
if ((jl_value_t*)*src_out == jl_nothing)
*src_out = NULL;
if (*src_out && jl_is_method(def))
*src_out = jl_uncompress_ir(def, codeinst, (jl_array_t*)*src_out);
if (*src_out && jl_is_method(def)) {
PTR_PIN(def);
PTR_PIN(codeinst);
PTR_PIN(*src_out);
auto temp = jl_uncompress_ir(def, codeinst, (jl_array_t*)*src_out);
PTR_UNPIN(*src_out);
*src_out = temp;
PTR_UNPIN(codeinst);
PTR_UNPIN(def);
}
}
if (*src_out == NULL || !jl_is_code_info(*src_out)) {
if (cgparams.lookup != jl_rettype_inferred) {
Expand All @@ -250,7 +258,10 @@ static void jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance
else {
*src_out = jl_type_infer(mi, world, 0);
if (*src_out) {
codeinst = jl_get_method_inferred(mi, (*src_out)->rettype, (*src_out)->min_world, (*src_out)->max_world);
auto rettype = (*src_out)->rettype;
PTR_PIN(rettype);
codeinst = jl_get_method_inferred(mi, rettype, (*src_out)->min_world, (*src_out)->max_world);
PTR_UNPIN(rettype);
if ((*src_out)->inferred) {
jl_value_t *null = nullptr;
jl_atomic_cmpswap_relaxed(&codeinst->inferred, &null, jl_nothing);
Expand Down Expand Up @@ -321,11 +332,22 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
// to compile, or an svec(rettype, sig) describing a C-callable alias to create.
jl_value_t *item = jl_array_ptr_ref(methods, i);
if (jl_is_simplevector(item)) {
if (worlds == 1)
jl_compile_extern_c(wrap(&clone), &params, NULL, jl_svecref(item, 0), jl_svecref(item, 1));
if (worlds == 1) {
// wrap is not a safepoint, but it is a function defined in LLVM. We cannot persuade GCChecker that item won't be moved.
PTR_PIN(item);
auto el0 = jl_svecref(item, 0);
auto el1 = jl_svecref(item, 1);
PTR_PIN(el0);
PTR_PIN(el1);
jl_compile_extern_c(wrap(&clone), &params, NULL, el0, el1);
PTR_UNPIN(el1);
PTR_UNPIN(el0);
PTR_UNPIN(item);
}
continue;
}
mi = (jl_method_instance_t*)item;
PTR_PIN(mi);
src = NULL;
// if this method is generally visible to the current compilation world,
// and this is either the primary world, or not applicable in the primary world
Expand All @@ -337,15 +359,18 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
if (src && !emitted.count(codeinst)) {
// now add it to our compilation results
JL_GC_PROMISE_ROOTED(codeinst->rettype);
PTR_PIN(codeinst->rettype);
orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def),
params.tsctx, params.imaging,
clone.getModuleUnlocked()->getDataLayout(),
Triple(clone.getModuleUnlocked()->getTargetTriple()));
jl_llvm_functions_t decls = jl_emit_code(result_m, mi, src, codeinst->rettype, params);
PTR_UNPIN(codeinst->rettype);
if (result_m)
emitted[codeinst] = {std::move(result_m), std::move(decls)};
}
}
PTR_UNPIN(mi);
}

// finally, make sure all referenced methods also get compiled or fixed up
Expand Down Expand Up @@ -1041,8 +1066,11 @@ void jl_add_optimization_passes_impl(LLVMPassManagerRef PM, int opt_level, int l
extern "C" JL_DLLEXPORT
void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, size_t world, char getwrapper, char optimize, const jl_cgparams_t params)
{
if (jl_is_method(mi->def.method) && mi->def.method->source == NULL &&
mi->def.method->generator == NULL) {
// Extract this as a new variable; otherwise GCChecker won't work correctly.
auto method = mi->def.method;
PTR_PIN(method);
if (jl_is_method(method) && method->source == NULL &&
method->generator == NULL) {
// not a generic function
dump->F = NULL;
return;
Expand All @@ -1052,27 +1080,29 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
jl_value_t *jlrettype = (jl_value_t*)jl_any_type;
jl_code_info_t *src = NULL;
JL_GC_PUSH2(&src, &jlrettype);
if (jl_is_method(mi->def.method) && mi->def.method->source != NULL && jl_ir_flag_inferred((jl_array_t*)mi->def.method->source)) {
src = (jl_code_info_t*)mi->def.method->source;
if (jl_is_method(method) && method->source != NULL && jl_ir_flag_inferred((jl_array_t*)method->source)) {
src = (jl_code_info_t*)method->source;
if (src && !jl_is_code_info(src))
src = jl_uncompress_ir(mi->def.method, NULL, (jl_array_t*)src);
src = jl_uncompress_ir(method, NULL, (jl_array_t*)src);
} else {
jl_value_t *ci = jl_rettype_inferred(mi, world, world);
if (ci != jl_nothing) {
jl_code_instance_t *codeinst = (jl_code_instance_t*)ci;
PTR_PIN(codeinst);
src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method))
src = jl_uncompress_ir(mi->def.method, codeinst, (jl_array_t*)src);
if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(method))
src = jl_uncompress_ir(method, codeinst, (jl_array_t*)src);
jlrettype = codeinst->rettype;
PTR_UNPIN(codeinst);
}
if (!src || (jl_value_t*)src == jl_nothing) {
src = jl_type_infer(mi, world, 0);
if (src)
jlrettype = src->rettype;
else if (jl_is_method(mi->def.method)) {
src = mi->def.method->generator ? jl_code_for_staged(mi) : (jl_code_info_t*)mi->def.method->source;
if (src && !jl_is_code_info(src) && jl_is_method(mi->def.method))
src = jl_uncompress_ir(mi->def.method, NULL, (jl_array_t*)src);
else if (jl_is_method(method)) {
src = method->generator ? jl_code_for_staged(mi) : (jl_code_info_t*)method->source;
if (src && !jl_is_code_info(src) && jl_is_method(method))
src = jl_uncompress_ir(method, NULL, (jl_array_t*)src);
}
// TODO: use mi->uninferred
}
Expand Down Expand Up @@ -1124,6 +1154,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
fname = &decls.functionObject;
F = cast<Function>(m.getModuleUnlocked()->getNamedValue(*fname));
}
PTR_UNPIN(method);
JL_GC_POP();
if (measure_compile_time_enabled) {
auto end = jl_hrtime();
Expand Down
29 changes: 25 additions & 4 deletions src/jitlayers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -374,7 +374,9 @@ void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt)
if (!jl_is_tuple_type(sigt))
jl_type_error("@ccallable", (jl_value_t*)jl_anytuple_type_type, (jl_value_t*)sigt);
// check that f is a guaranteed singleton type
PTR_PIN(((jl_datatype_t*)(sigt))->parameters);
jl_datatype_t *ft = (jl_datatype_t*)jl_tparam0(sigt);
PTR_PIN(ft);
if (!jl_is_datatype(ft) || ft->instance == NULL)
jl_error("@ccallable: function object must be a singleton");

Expand All @@ -388,12 +390,18 @@ void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt)
size_t i, nargs = jl_nparams(sigt);
for (i = 1; i < nargs; i++) {
jl_value_t *ati = jl_tparam(sigt, i);
PTR_PIN(ati);
if (!jl_is_concrete_type(ati) || jl_is_kind(ati) || !jl_type_mappable_to_c(ati))
jl_error("@ccallable: argument types must be concrete");
PTR_UNPIN(ati);
}
PTR_UNPIN(ft);
PTR_UNPIN(((jl_datatype_t*)(sigt))->parameters);

// save a record of this so that the alias is generated when we write an object file
PTR_PIN(ft->name->mt);
jl_method_t *meth = (jl_method_t*)jl_methtable_lookup(ft->name->mt, (jl_value_t*)sigt, jl_atomic_load_acquire(&jl_world_counter));
PTR_UNPIN(ft->name->mt);
if (!jl_is_method(meth))
jl_error("@ccallable: could not find requested method");
JL_GC_PUSH1(&meth);
Expand All @@ -420,16 +428,20 @@ jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES
compiler_start_time = jl_hrtime();
// if we don't have any decls already, try to generate it now
jl_code_info_t *src = NULL;
JL_GC_PUSH1(&src);
jl_code_instance_t *codeinst = NULL;
JL_GC_PUSH2(&src, &codeinst); // There are many places below where we need to pin codeinst, and codeinst is assigned many times. We just transitively pin (T-pin) &codeinst to make things easier.
JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
jl_value_t *ci = jl_rettype_inferred(mi, world, world);
jl_code_instance_t *codeinst = (ci == jl_nothing ? NULL : (jl_code_instance_t*)ci);
codeinst = (ci == jl_nothing ? NULL : (jl_code_instance_t*)ci);
if (codeinst) {
src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred);
if ((jl_value_t*)src == jl_nothing)
src = NULL;
else if (jl_is_method(mi->def.method))
else if (jl_is_method(mi->def.method)) {
PTR_PIN(mi->def.method);
src = jl_uncompress_ir(mi->def.method, codeinst, (jl_array_t*)src);
PTR_UNPIN(mi->def.method);
}
}
else {
// identify whether this is an invalidated method that is being recompiled
Expand Down Expand Up @@ -493,20 +505,24 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
jl_code_info_t *src = NULL;
JL_GC_PUSH1(&src);
jl_method_t *def = unspec->def->def.method;
PTR_PIN(def);
if (jl_is_method(def)) {
src = (jl_code_info_t*)def->source;
if (src == NULL) {
// TODO: this is wrong
assert(def->generator);
// TODO: jl_code_for_staged can throw
PTR_PIN(unspec->def);
src = jl_code_for_staged(unspec->def);
PTR_UNPIN(unspec->def);
}
if (src && (jl_value_t*)src != jl_nothing)
src = jl_uncompress_ir(def, NULL, (jl_array_t*)src);
}
else {
src = (jl_code_info_t*)unspec->def->uninferred;
}
PTR_UNPIN(def);
assert(src && jl_is_code_info(src));
++UnspecFPtrCount;
_jl_compile_codeinst(unspec, src, unspec->min_world, *jl_ExecutionEngine->getContext());
Expand All @@ -531,6 +547,7 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
// printing via disassembly
jl_code_instance_t *codeinst = jl_generate_fptr(mi, world);
if (codeinst) {
PTR_PIN(codeinst);
uintptr_t fptr = (uintptr_t)jl_atomic_load_acquire(&codeinst->invoke);
if (getwrapper)
return jl_dump_fptr_asm(fptr, raw_mc, asm_variant, debuginfo, binary);
Expand All @@ -556,8 +573,11 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
// TODO: jl_code_for_staged can throw
src = def->generator ? jl_code_for_staged(mi) : (jl_code_info_t*)def->source;
}
if (src && (jl_value_t*)src != jl_nothing)
if (src && (jl_value_t*)src != jl_nothing) {
PTR_PIN(mi->def.method);
src = jl_uncompress_ir(mi->def.method, codeinst, (jl_array_t*)src);
PTR_UNPIN(mi->def.method);
}
}
fptr = (uintptr_t)jl_atomic_load_acquire(&codeinst->invoke);
specfptr = (uintptr_t)jl_atomic_load_relaxed(&codeinst->specptr.fptr);
Expand All @@ -575,6 +595,7 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world,
jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, end - compiler_start_time);
}
}
PTR_UNPIN(codeinst);
if (specfptr != 0)
return jl_dump_fptr_asm(specfptr, raw_mc, asm_variant, debuginfo, binary);
}
Expand Down