@@ -794,47 +794,6 @@ void jl_emit_codeinst_to_jit_impl(
     emittedmodules[codeinst] = std::move(result_m);
 }
 
-static void recursive_compile_graph(
-        jl_code_instance_t *codeinst,
-        jl_code_info_t *src)
-{
-    jl_emit_codeinst_to_jit(codeinst, src);
-    DenseSet<jl_code_instance_t*> Seen;
-    SmallVector<jl_code_instance_t*> workqueue;
-    workqueue.push_back(codeinst);
-    // if any edges were incomplete, try to complete them now
-    while (!workqueue.empty()) {
-        auto this_code = workqueue.pop_back_val();
-        if (Seen.insert(this_code).second) {
-            jl_code_instance_t *compiled_ci = jl_method_compiled_egal(codeinst);
-            if (!compiled_ci) {
-                if (this_code != codeinst) {
-                    JL_GC_PROMISE_ROOTED(this_code); // rooted transitively from following edges from original argument
-                    jl_emit_codeinst_to_jit(this_code, nullptr); // contains safepoints
-                }
-                jl_unique_gcsafe_lock lock(engine_lock);
-                auto edges = complete_graph.find(this_code);
-                if (edges != complete_graph.end()) {
-                    workqueue.append(edges->second);
-                }
-            }
-        }
-    }
-}
-
-// this generates llvm code for the lambda info
-// and adds the result to the jitlayers
-// (and the shadow module),
-// and generates code for it
-static void _jl_compile_codeinst(
-        jl_code_instance_t *codeinst,
-        jl_code_info_t *src)
-{
-    recursive_compile_graph(codeinst, src);
-    jl_compile_codeinst_now(codeinst);
-    assert(jl_is_compiled_codeinst(codeinst));
-}
-
 
 const char *jl_generate_ccallable(Module *llvmmod, void *sysimg_handle, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t &params);
 
@@ -858,7 +817,6 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
     orc::ThreadSafeModule backing;
     bool success = true;
     const char *name = "";
-    SmallVector<jl_code_instance_t*, 0> dependencies;
     if (into == NULL) {
         ctx = pparams ? pparams->tsctx : jl_ExecutionEngine->makeContext();
         backing = jl_create_ts_module("cextern", ctx, DL, TargetTriple);
@@ -886,11 +844,16 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
     }
     params.tsctx_lock = params.tsctx.getLock(); // re-acquire lock
     if (success && params.cache) {
-        for (auto &it : params.workqueue) {
+        size_t newest_world = jl_atomic_load_acquire(&jl_world_counter);
+        for (auto &it : params.workqueue) { // really just zero or one, and just the ABI not the rest of the metadata
             jl_code_instance_t *codeinst = it.first;
             JL_GC_PROMISE_ROOTED(codeinst);
-            dependencies.push_back(codeinst);
-            recursive_compile_graph(codeinst, nullptr);
+            jl_code_instance_t *newest_ci = jl_type_infer(jl_get_ci_mi(codeinst), newest_world, SOURCE_MODE_ABI);
+            if (newest_ci) {
+                if (jl_egal(codeinst->rettype, newest_ci->rettype))
+                    it.first = codeinst;
+                jl_compile_codeinst_now(newest_ci);
+            }
         }
         jl_analyze_workqueue(nullptr, params, true);
         assert(params.workqueue.empty());
@@ -903,8 +866,6 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void *
     { // lock scope
         jl_unique_gcsafe_lock lock(extern_c_lock);
         if (!jl_ExecutionEngine->getGlobalValueAddress(name)) {
-            for (auto dep : dependencies)
-                jl_compile_codeinst_now(dep);
             {
                 auto Lock = backing.getContext().getLock();
                 jl_ExecutionEngine->optimizeDLSyms(*backing.getModuleUnlocked()); // safepoint
@@ -975,7 +936,7 @@ int jl_compile_codeinst_impl(jl_code_instance_t *ci)
     if (!jl_is_compiled_codeinst(ci)) {
         ++SpecFPtrCount;
         uint64_t start = jl_typeinf_timing_begin();
-        _jl_compile_codeinst(ci, NULL);
+        jl_compile_codeinst_now(ci);
         jl_typeinf_timing_end(start, 0);
         newly_compiled = 1;
     }
@@ -1006,8 +967,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
     }
     else {
         jl_method_instance_t *mi = jl_get_ci_mi(unspec);
-        jl_code_instance_t *uninferred = jl_cached_uninferred(
-            jl_atomic_load_relaxed(&mi->cache), 1);
+        jl_code_instance_t *uninferred = jl_cached_uninferred(jl_atomic_load_relaxed(&mi->cache), 1);
         assert(uninferred);
         src = (jl_code_info_t*)jl_atomic_load_relaxed(&uninferred->inferred);
         assert(src);
@@ -1018,10 +978,16 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec)
     if (!jl_is_compiled_codeinst(unspec)) {
         assert(jl_is_code_info(src));
         ++UnspecFPtrCount;
+        jl_svec_t *edges = (jl_svec_t*)src->edges;
+        if (jl_is_svec(edges)) {
+            jl_atomic_store_release(&unspec->edges, edges); // n.b. this assumes the field was always empty svec(), which is not entirely true
+            jl_gc_wb(unspec, edges);
+        }
         jl_debuginfo_t *debuginfo = src->debuginfo;
         jl_atomic_store_release(&unspec->debuginfo, debuginfo); // n.b. this assumes the field was previously NULL, which is not entirely true
         jl_gc_wb(unspec, debuginfo);
-        _jl_compile_codeinst(unspec, src);
+        jl_emit_codeinst_to_jit(unspec, src);
+        jl_compile_codeinst_now(unspec);
     }
     JL_UNLOCK(&jitlock); // Might GC
 }
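
A minimal sketch of the flow the diff leaves behind (the helper name below is hypothetical and not part of the change; it only strings together the entry points visible above, which now replace the deleted recursive_compile_graph/_jl_compile_codeinst pair):

    // Hypothetical illustration: emit one code instance's IR to the JIT, then
    // force compilation of it (plus whatever work the emission queued up).
    static void compile_codeinst_directly(jl_code_instance_t *codeinst, jl_code_info_t *src)
    {
        jl_emit_codeinst_to_jit(codeinst, src);    // generate LLVM IR and hand it to the jitlayers
        jl_compile_codeinst_now(codeinst);         // compile and link it now
        assert(jl_is_compiled_codeinst(codeinst)); // same postcondition the old _jl_compile_codeinst asserted
    }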