From c933d758231cf79989682898c3b4baf601d7788e Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 12 Feb 2024 18:08:52 +0000 Subject: [PATCH 01/64] Share reserves between Young Collector and Old Collector --- .../gc/shenandoah/shenandoahGeneration.cpp | 5 +-- .../share/gc/shenandoah/shenandoahHeap.cpp | 34 +++++++++++++------ .../share/gc/shenandoah/shenandoahHeap.hpp | 2 +- 3 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index f31753bb2de..a1df73112ad 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -249,8 +249,9 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // First priority is to reclaim the easy garbage out of young-gen. - // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young - const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100; + // maximum_young_evacuation_reserve is upper bound on memory to be evacuated into young Collector Reserve. This is + // bounded at the end of previous GC cycle, based on available memory and balancing of evacuation to old and young. + const size_t maximum_young_evacuation_reserve = heap->get_young_evac_reserve(); const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 6bfaf2015ac..ce6bfe67b62 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1243,10 +1243,11 @@ void ShenandoahHeap::cancel_old_gc() { // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to -// xfer_limit, and any excess is transferred to the young generation. -// xfer_limit is the maximum we're able to transfer from young to old. +// xfer_limit_mutator and planned collector reserves. Any excess is transferred to the young generation. +// xfer_limit_mutator is the maximum we're able to transfer from the young mutator budget to old. We may +// also xfer memory from the young Collector Reserve to the Old Collector Reserve. void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( - size_t xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { + size_t mutator_xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { // We can limit the old reserve to the size of anticipated promotions: // max_old_reserve is an upper bound on memory evacuated from old and promoted to old, @@ -1268,7 +1269,7 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); const size_t old_available = old_generation()->available(); // The free set will reserve this amount of memory to hold young evacuations - const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? 
old_available : MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), old_available); @@ -1320,21 +1321,34 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); } else { - // We are running a deficit which we'd like to fill from young. + // We are running a deficit which we will try to fill from young. // Ignore that this will directly impact young_generation()->max_capacity(), // indirectly impacting young_reserve and old_reserve. These computations are conservative. const size_t old_need = old_reserve - max_old_available; // The old region deficit (rounded up) will come from young old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes; - // Round down the regions we can transfer from young to old. If we're running short - // on young-gen memory, we restrict the xfer. Old-gen collection activities will be - // curtailed if the budget is restricted. - const size_t max_old_region_xfer = xfer_limit / region_size_bytes; - old_region_deficit = MIN2(old_region_deficit, max_old_region_xfer); + const size_t max_mutator_xfer = mutator_xfer_limit / region_size_bytes; + if (max_mutator_xfer < old_region_deficit) { + const size_t collector_reserve_sum = young_reserve + max_old_reserve; + const size_t intended_memory_for_old = (collector_reserve_sum * ShenandoahOldEvacRatioPercent) / 100; + assert(intended_memory_for_old > max_old_reserve, "Sanity"); + const size_t old_shortfall = intended_memory_for_old - max_old_reserve; + // round down + size_t reserve_xfer_regions = old_shortfall / region_size_bytes; + if (max_mutator_xfer + reserve_xfer_regions > old_region_deficit) { + reserve_xfer_regions = old_region_deficit - max_mutator_xfer; + } + old_region_deficit = max_mutator_xfer + reserve_xfer_regions; + + // Shrink the young evac reserve for subsequent GC + young_reserve -= reserve_xfer_regions * region_size_bytes; + } + // else, max_mutator_transfer is large enough to support the known deficit } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); + set_young_evac_reserve(young_reserve); set_old_region_surplus(old_region_surplus); set_old_region_deficit(old_region_deficit); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 4c64992dae1..4ee2082ea93 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -853,7 +853,7 @@ class ShenandoahHeap : public CollectedHeap { void retire_plab(PLAB* plab, Thread* thread); void cancel_old_gc(); - void adjust_generation_sizes_for_next_cycle(size_t old_xfer_limit, size_t young_cset_regions, size_t old_cset_regions); + void adjust_generation_sizes_for_next_cycle(size_t mutator_xfer_limit, size_t young_cset_regions, size_t old_cset_regions); // ---------- Helper functions // From dd2a179c287ec00704b542d9f15d23c31e7d26c6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 19 Feb 2024 17:45:32 +0000 Subject: [PATCH 02/64] Refinements and instrumentation to diagnose misbehavior --- .../heuristics/shenandoahGlobalHeuristics.cpp | 7 +++ .../gc/shenandoah/shenandoahConcurrentGC.cpp | 28 ++++++++- .../gc/shenandoah/shenandoahDegeneratedGC.cpp | 12 ++-- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 21 +++++++ 
.../share/gc/shenandoah/shenandoahFullGC.cpp | 4 ++ .../gc/shenandoah/shenandoahGeneration.cpp | 34 ++++++++--- .../share/gc/shenandoah/shenandoahHeap.cpp | 58 +++++++++++++++---- 7 files changed, 135 insertions(+), 29 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index d8ae9bf84ae..af4954c10be 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -168,6 +168,13 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti if (regions_transferred_to_old > 0) { heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("choose_global_cset setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT, + heap->get_young_evac_reserve() - regions_transferred_to_old * region_size_bytes, + heap->get_old_evac_reserve() + regions_transferred_to_old * region_size_bytes); +#endif + heap->set_young_evac_reserve(heap->get_young_evac_reserve() - regions_transferred_to_old * region_size_bytes); heap->set_old_evac_reserve(heap->get_old_evac_reserve() + regions_transferred_to_old * region_size_bytes); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 40c59bdb4c6..50f29d17fdd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -53,6 +53,8 @@ #include "runtime/vmThread.hpp" #include "utilities/events.hpp" +#define KELVIN_DEBUG + // Breakpoint support class ShenandoahBreakpointGCScope : public StackObj { private: @@ -200,6 +202,10 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { } } +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN done with evacuation"); +#endif + if (heap->has_forwarded_objects()) { // Perform update-refs phase. vmop_entry_init_updaterefs(); @@ -225,6 +231,11 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // on its next iteration and run a degenerated young cycle. vmop_entry_final_roots(); _abbreviated = true; + +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN says we're no longer missing rebuild after abbreviated cycle"); +#endif + heap->rebuild_free_set(true /*concurrent*/); } // We defer generation resizing actions until after cset regions have been recycled. 
We do this even following an @@ -240,6 +251,9 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { size_t old_region_surplus = heap->get_old_region_surplus(); size_t old_region_deficit = heap->get_old_region_deficit(); +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN Fetching old surplus: " SIZE_FORMAT " and deficit: " SIZE_FORMAT, old_region_surplus, old_region_deficit); +#endif if (old_region_surplus) { success = heap->generation_sizer()->transfer_to_young(old_region_surplus); region_destination = "young"; @@ -258,9 +272,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { } heap->set_old_region_surplus(0); heap->set_old_region_deficit(0); - heap->set_young_evac_reserve(0); - heap->set_old_evac_reserve(0); - heap->set_promoted_reserve(0); } // Report outside the heap lock @@ -272,6 +283,14 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available), byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available)); } +#ifdef KELVIN_DEBUG + { + log_info(gc)("KELVIN finished resizing generations"); + ShenandoahHeapLocker locker(heap->lock()); + heap->free_set()->log_status(); + } +#endif + return true; } @@ -1267,6 +1286,9 @@ void ShenandoahConcurrentGC::op_final_updaterefs() { Universe::verify(); } +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN: op_final_updaterefs() invokes rebuild"); +#endif heap->rebuild_free_set(true /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index 7292bd55520..e479f9bbe27 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -328,14 +328,6 @@ void ShenandoahDegenGC::op_degenerated() { ShouldNotReachHere(); } - if (heap->mode()->is_generational()) { - // In case degeneration interrupted concurrent evacuation or update references, we need to clean up transient state. - // Otherwise, these actions have no effect. 
- heap->set_young_evac_reserve(0); - heap->set_old_evac_reserve(0); - heap->set_promoted_reserve(0); - } - if (ShenandoahVerify) { heap->verifier()->verify_after_degenerated(); } @@ -466,6 +458,10 @@ void ShenandoahDegenGC::op_update_roots() { Universe::verify(); } +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN: Degen:op_update_roots() invokes rebuild"); +#endif heap->rebuild_free_set(false /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index ffd22ca339b..e1f4048a909 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1181,6 +1181,11 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio size_t young_available = _heap->young_generation()->available(); size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions(); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN rebuild(" SIZE_FORMAT ", " SIZE_FORMAT ")", young_cset_regions, old_cset_regions); +#endif + old_unaffiliated_regions += old_cset_regions; old_available += old_cset_regions * region_size_bytes; young_unaffiliated_regions += young_cset_regions; @@ -1225,6 +1230,11 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. young_reserve = _heap->get_young_evac_reserve(); old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN rebuild has reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, + young_reserve, old_reserve); +#endif assert(old_reserve <= old_available, "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available); @@ -1235,6 +1245,17 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of // unaffiliated regions. old_reserve = old_available; +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN rebuild has no reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, + young_reserve, old_reserve); +#endif +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN rebuild setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT + ", promo_reserve: " SIZE_FORMAT, young_reserve, old_reserve, (size_t) 0); +#endif + _heap->set_young_evac_reserve(young_reserve); + _heap->set_old_evac_reserve(old_reserve); + _heap->set_promoted_reserve(0); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index a5aa3aa67e8..24ba9381c50 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -224,6 +224,10 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { if (heap->mode()->is_generational()) { // No need for old_gen->increase_used() as this was done when plabs were allocated. 
+#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN at start of full gc, clearing young_evac_reserve, old_evac_reserve, promoted_reserve"); +#endif heap->set_young_evac_reserve(0); heap->set_old_evac_reserve(0); heap->set_promoted_reserve(0); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index a1df73112ad..91877eed968 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -254,6 +254,13 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap const size_t maximum_young_evacuation_reserve = heap->get_young_evac_reserve(); const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN compute_evac_budgets: max_young_evac_reserve: " SIZE_FORMAT ", young avail: " SIZE_FORMAT + ", young_evac_reserve: " SIZE_FORMAT, maximum_young_evacuation_reserve, + young_generation->available_with_reserve(), young_evacuation_reserve); +#endif + // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), // clamped by the old generation space available. // @@ -337,6 +344,11 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood // of old evacuation failure. +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN compute_budgets sets young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT + ", promoted_reserve: " SIZE_FORMAT, young_evacuation_reserve, old_evacuation_reserve, + consumed_by_advance_promotion); +#endif heap->set_young_evac_reserve(young_evacuation_reserve); heap->set_old_evac_reserve(old_evacuation_reserve); heap->set_promoted_reserve(consumed_by_advance_promotion); @@ -347,7 +359,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note // that young_generation->available() now knows about recently discovered immediate garbage. -// void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) { // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may // be able to increase regions_available_to_loan @@ -382,6 +393,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, } else if (old_evacuated_committed < old_evacuation_reserve) { // This happens if the old-gen collection consumes less than full budget. 
old_evacuation_reserve = old_evacuated_committed; +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN adjust_evac_budgets sets old_evac_reserve: " SIZE_FORMAT, old_evacuation_reserve); +#endif heap->set_old_evac_reserve(old_evacuation_reserve); } @@ -393,6 +407,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, size_t total_young_available = young_generation->available_with_reserve(); assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young"); +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN adjust_evac_budgets sets young_evac_reserve: " SIZE_FORMAT, young_evacuated_reserve_used); +#endif heap->set_young_evac_reserve(young_evacuated_reserve_used); size_t old_available = old_generation->available(); @@ -458,6 +475,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated // promotions than fit in reserved memory, they will be deferred until a future GC pass. size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old; +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN adjust_evac_budgets sets promoted_reserve: " SIZE_FORMAT, total_promotion_reserve); +#endif heap->set_promoted_reserve(total_promotion_reserve); heap->reset_promoted_expended(); } @@ -701,16 +721,14 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // GC is evacuating and updating references. // Find the amount that will be promoted, regions that will be promoted in - // place, and preselect older regions that will be promoted by evacuation. + // place, and preselected older regions that will be promoted by evacuation. compute_evacuation_budgets(heap); - // Choose the collection set, including the regions preselected above for - // promotion into the old generation. + // Choose the collection set, including the regions preselected above for promotion into the old generation. _heuristics->choose_collection_set(collection_set); - if (!collection_set->is_empty()) { - // only make use of evacuation budgets when we are evacuating - adjust_evacuation_budgets(heap, collection_set); - } + + // Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator. + adjust_evacuation_budgets(heap, collection_set); if (is_global()) { // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index ce6bfe67b62..e2f09973c6a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1249,6 +1249,12 @@ void ShenandoahHeap::cancel_old_gc() { void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( size_t mutator_xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN adjust_generation_sizes_for_next_cycle(xfer_limit: " SIZE_FORMAT ", young_cset_regions: " SIZE_FORMAT + ", old_cset_regions: " SIZE_FORMAT ")", mutator_xfer_limit, young_cset_regions, old_cset_regions); +#endif + // We can limit the old reserve to the size of anticipated promotions: // max_old_reserve is an upper bound on memory evacuated from old and promoted to old, // clamped by the old generation space available. 
@@ -1325,32 +1331,55 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( // Ignore that this will directly impact young_generation()->max_capacity(), // indirectly impacting young_reserve and old_reserve. These computations are conservative. const size_t old_need = old_reserve - max_old_available; - // The old region deficit (rounded up) will come from young - old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes; - const size_t max_mutator_xfer = mutator_xfer_limit / region_size_bytes; - if (max_mutator_xfer < old_region_deficit) { + old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes; + size_t mutator_region_xfer = mutator_xfer_limit / region_size_bytes; + if (mutator_region_xfer < old_region_deficit) { const size_t collector_reserve_sum = young_reserve + max_old_reserve; const size_t intended_memory_for_old = (collector_reserve_sum * ShenandoahOldEvacRatioPercent) / 100; assert(intended_memory_for_old > max_old_reserve, "Sanity"); const size_t old_shortfall = intended_memory_for_old - max_old_reserve; // round down size_t reserve_xfer_regions = old_shortfall / region_size_bytes; - if (max_mutator_xfer + reserve_xfer_regions > old_region_deficit) { - reserve_xfer_regions = old_region_deficit - max_mutator_xfer; + if (mutator_region_xfer + reserve_xfer_regions > old_region_deficit) { + reserve_xfer_regions = old_region_deficit - mutator_region_xfer; } - old_region_deficit = max_mutator_xfer + reserve_xfer_regions; - - // Shrink the young evac reserve for subsequent GC + old_region_deficit = mutator_region_xfer + reserve_xfer_regions; + if (old_region_deficit > young_generation()->free_unaffiliated_regions() + young_cset_regions) { + size_t delta = old_region_deficit - (young_generation()->free_unaffiliated_regions() + young_cset_regions); + old_region_deficit -= delta; + if (delta > reserve_xfer_regions) { + delta -= reserve_xfer_regions; + reserve_xfer_regions = 0; + } + assert(delta <= mutator_region_xfer, "Sanity"); + mutator_region_xfer -= delta; + } +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Mutator to Old Collector reserve", mutator_region_xfer); + log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Collector reserve to Old Collector reserve", + reserve_xfer_regions); +#endif young_reserve -= reserve_xfer_regions * region_size_bytes; } // else, max_mutator_transfer is large enough to support the known deficit } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); - set_young_evac_reserve(young_reserve); +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN setting old surplus; " SIZE_FORMAT ", deficiit: " SIZE_FORMAT, old_region_surplus, old_region_deficit); + log_info(gc)("KELVIN setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT ", promoted_reserve: " SIZE_FORMAT, + young_reserve, reserve_for_mixed, reserve_for_promo); +#endif + set_old_region_surplus(old_region_surplus); set_old_region_deficit(old_region_deficit); + + // deficit/surplus adjustments to generation sizes will precede rebuild + set_young_evac_reserve(young_reserve); + set_old_evac_reserve(reserve_for_mixed); + set_promoted_reserve(reserve_for_promo); + set_evacuation_reserve_quantities(true); } // Called from stubs in JIT code or interpreter @@ -3170,6 +3199,12 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { ShenandoahHeapLocker locker(lock()); size_t young_cset_regions, old_cset_regions; size_t first_old_region, last_old_region, 
old_region_count; + +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN ShenHeap::rebuild_free_set(%s)", concurrent? "true": "false"); +#endif + + _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); // If there are no old regions, first_old_region will be greater than last_old_region assert((first_old_region > last_old_region) || @@ -3188,6 +3223,8 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this // available for transfer to old. Note that transfer of humongous regions does not impact available. size_t allocation_runway = young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); + + // adjust_generation_sizes_for_next_cycle sets evacuation_reserve_quantities adjust_generation_sizes_for_next_cycle(allocation_runway, young_cset_regions, old_cset_regions); // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available @@ -3200,6 +3237,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { } // Rebuild free set based on adjusted generation sizes. _free_set->rebuild(young_cset_regions, old_cset_regions); + set_evacuation_reserve_quantities(false); if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) { size_t old_region_span = (first_old_region <= last_old_region)? (last_old_region + 1 - first_old_region): 0; From d91bcefecdcaa7c4cd570e13ba8f54519e94273b Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 12 Feb 2024 14:16:51 +0000 Subject: [PATCH 03/64] Allow old-gen to expand when mutator memory is available --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index e2f09973c6a..4e1a6aafa86 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1275,11 +1275,11 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); const size_t old_available = old_generation()->available(); // The free set will reserve this amount of memory to hold young evacuations - size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; - const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? - old_available : MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), - old_available); + const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. + const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? 
+ old_available + xfer_limit: (young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); // Decide how much old space we should reserve for a mixed collection @@ -1307,6 +1307,7 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( const bool doing_promotions = promo_load > 0; if (doing_promotions) { // We're promoting and have a bound on the maximum amount that can be promoted + assert(max_old_reserve >= reserve_for_mixed, "Sanity"); const size_t available_for_promotions = max_old_reserve - reserve_for_mixed; reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions); } From c2cc3f69b60e5a5e41813658c02aed02bd975de6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 19 Feb 2024 17:52:08 +0000 Subject: [PATCH 04/64] Fix merge conflicts --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 4e1a6aafa86..0e00b02b846 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1275,11 +1275,11 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); const size_t old_available = old_generation()->available(); // The free set will reserve this amount of memory to hold young evacuations - const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? - old_available + xfer_limit: (young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent); + old_available + mutator_xfer_limit: (young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); // Decide how much old space we should reserve for a mixed collection From a794ca1e8aa24bf00e38250fad1b39c71197b13f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 20 Feb 2024 16:43:12 +0000 Subject: [PATCH 05/64] Refine calculation of max_old_reserve --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 0e00b02b846..8c79a21c30f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1277,9 +1277,12 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( // The free set will reserve this amount of memory to hold young evacuations size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; - // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. - const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100) ? 
- old_available + mutator_xfer_limit: (young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent); + // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit and young_reserve + const size_t bound_on_old_reserve = old_available + xfer_limit + young_reserve; + const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? + bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + bound_on_old_reserve); + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); // Decide how much old space we should reserve for a mixed collection From 6509bde10000af475692e794844aa2eec5252c64 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 21 Feb 2024 01:28:57 +0000 Subject: [PATCH 06/64] Reduce default value of ShenandoahOldEvacRatioPercent Given that mixed evacuation will sometimes borrow up to this amount of memory from the young evacuation reserve for old evacuations, we reduce the default value to reduce the impact on normal young GC. --- src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 7be8efd4121..1e430f5e1b9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -410,14 +410,14 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \ + product(uintx, ShenandoahOldEvacRatioPercent, 50, EXPERIMENTAL, \ "The maximum proportion of evacuation from old-gen memory, " \ - "expressed as a percentage. The default value 75 denotes that no" \ - "more than 75% of the collection set evacuation workload may be " \ + "expressed as a percentage. The default value 50 denotes that no" \ + "more than 50% of the collection set evacuation workload may be " \ "towards evacuation of old-gen heap regions. This limits both the"\ "promotion of aged regions and the compaction of existing old " \ - "regions. A value of 75 denotes that the total evacuation work" \ - "may increase to up to four times the young gen evacuation work." \ + "regions. A value of 50 denotes that the total evacuation work" \ + "may increase to up to two times the young gen evacuation work." 
\ "A larger value allows quicker promotion and allows" \ "a smaller number of mixed evacuations to process " \ "the entire list of old-gen collection candidates at the cost " \ From c8e45554496c3a5fae5863c564d5b67c30826861 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 21 Feb 2024 02:20:29 +0000 Subject: [PATCH 07/64] Turn off debug instrumentation --- .../shenandoah/heuristics/shenandoahGlobalHeuristics.cpp | 2 +- .../share/gc/shenandoah/shenandoahConcurrentGC.cpp | 2 +- .../share/gc/shenandoah/shenandoahDegeneratedGC.cpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 9 ++------- src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 4 ++-- 7 files changed, 9 insertions(+), 14 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index af4954c10be..b6bf2ea68aa 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -168,7 +168,7 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti if (regions_transferred_to_old > 0) { heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("choose_global_cset setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT, heap->get_young_evac_reserve() - regions_transferred_to_old * region_size_bytes, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 50f29d17fdd..21b7f372a18 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -53,7 +53,7 @@ #include "runtime/vmThread.hpp" #include "utilities/events.hpp" -#define KELVIN_DEBUG +#undef KELVIN_DEBUG // Breakpoint support class ShenandoahBreakpointGCScope : public StackObj { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index e479f9bbe27..27bd0b65e63 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -458,7 +458,7 @@ void ShenandoahDegenGC::op_update_roots() { Universe::verify(); } -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("KELVIN: Degen:op_update_roots() invokes rebuild"); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index e1f4048a909..68a4f8fb17a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1181,11 +1181,6 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio size_t young_available = _heap->young_generation()->available(); size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions(); -#define KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN rebuild(" SIZE_FORMAT ", " SIZE_FORMAT ")", young_cset_regions, old_cset_regions); -#endif - old_unaffiliated_regions += old_cset_regions; old_available += old_cset_regions * region_size_bytes; young_unaffiliated_regions += 
young_cset_regions; @@ -1230,7 +1225,7 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. young_reserve = _heap->get_young_evac_reserve(); old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("KELVIN rebuild has reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, young_reserve, old_reserve); @@ -1251,7 +1246,7 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio #endif #ifdef KELVIN_DEBUG log_info(gc)("KELVIN rebuild setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT - ", promo_reserve: " SIZE_FORMAT, young_reserve, old_reserve, (size_t) 0); + ", promo_reserve: " SIZE_FORMAT, young_reserve, old_reserve, 0); #endif _heap->set_young_evac_reserve(young_reserve); _heap->set_old_evac_reserve(old_reserve); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 24ba9381c50..fff164fd07d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -224,7 +224,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { if (heap->mode()->is_generational()) { // No need for old_gen->increase_used() as this was done when plabs were allocated. -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("KELVIN at start of full gc, clearing young_evac_reserve, old_evac_reserve, promoted_reserve"); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 91877eed968..d303d1e30c7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -254,7 +254,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap const size_t maximum_young_evacuation_reserve = heap->get_young_evac_reserve(); const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("KELVIN compute_evac_budgets: max_young_evac_reserve: " SIZE_FORMAT ", young avail: " SIZE_FORMAT ", young_evac_reserve: " SIZE_FORMAT, maximum_young_evacuation_reserve, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 8c79a21c30f..6c3b4899155 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1249,7 +1249,7 @@ void ShenandoahHeap::cancel_old_gc() { void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( size_t mutator_xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("KELVIN adjust_generation_sizes_for_next_cycle(xfer_limit: " SIZE_FORMAT ", young_cset_regions: " SIZE_FORMAT ", old_cset_regions: " SIZE_FORMAT ")", mutator_xfer_limit, young_cset_regions, old_cset_regions); @@ -1278,7 +1278,7 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by 
xfer_limit and young_reserve - const size_t bound_on_old_reserve = old_available + xfer_limit + young_reserve; + const size_t bound_on_old_reserve = old_available + mutator_xfer_limit + young_reserve; const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve); From ce5d335db43a24a3da9fba408a719d757dd4cca3 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 25 Feb 2024 10:05:39 -0700 Subject: [PATCH 08/64] Fix multiple errors in impelmentation of freeset rebuild 1. Always require that evacuation reserve quantities be established before rebuilding free set 2. Establish evacuation reserve quantities before initial rebuild of free set 3. Fix the calucation of old reserves during adjust generation sizes for next gc cycle --- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 10 +- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 48 +++---- .../share/gc/shenandoah/shenandoahHeap.cpp | 121 +++++++++++------- .../share/gc/shenandoah/shenandoahOldGC.cpp | 8 +- 4 files changed, 101 insertions(+), 86 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 21b7f372a18..ee8d918a015 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -231,11 +231,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // on its next iteration and run a degenerated young cycle. vmop_entry_final_roots(); _abbreviated = true; - -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN says we're no longer missing rebuild after abbreviated cycle"); -#endif - heap->rebuild_free_set(true /*concurrent*/); } // We defer generation resizing actions until after cset regions have been recycled. We do this even following an @@ -396,6 +391,11 @@ void ShenandoahConcurrentGC::entry_final_roots() { EventMark em("%s", msg); op_final_roots(); + +#ifdef KELVIN_DEBUG + log_info(gc)("KELVIN says we're no longer missing rebuild after abbreviated cycle"); +#endif + ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); } void ShenandoahConcurrentGC::entry_reset() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 68a4f8fb17a..61675349b6f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1221,37 +1221,29 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio // promotions and evacuations. The partition between which old memory is reserved for evacuation and // which is reserved for promotion is enforced using thread-local variables that prescribe intentons for // each PLAB's available memory. - if (_heap->has_evacuation_reserve_quantities()) { - // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. - young_reserve = _heap->get_young_evac_reserve(); - old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); + + // At end of final mark, evacuation reserve quantities are defined by choose_collection_set(). At end of update refs, + // evacuation reserve quantities are established by adjust_generation_sizes_for_next_cycle(). 
+ + assert(_heap->has_evacuation_reserve_quantities(), "Always prepare reserve quantities before rebuild"); + + young_reserve = _heap->get_young_evac_reserve(); + old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); #undef KELVIN_DEBUG #ifdef KELVIN_DEBUG - log_info(gc)("KELVIN rebuild has reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, - young_reserve, old_reserve); -#endif - assert(old_reserve <= old_available, - "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, - _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available); - } else { - // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) - young_reserve = (young_capacity * ShenandoahEvacReserve) / 100; - // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions. - // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of - // unaffiliated regions. - old_reserve = old_available; -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN rebuild has no reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, - young_reserve, old_reserve); -#endif -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN rebuild setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT - ", promo_reserve: " SIZE_FORMAT, young_reserve, old_reserve, 0); + size_t xfer_bytes = old_region_deficit * region_size_bytes; + log_info(gc)("KELVIN rebuild has reserve quantities for YOUNG: " SIZE_FORMAT ", OLD: " SIZE_FORMAT, + young_reserve, old_reserve); + log_info(gc)(" old_reserve is mixed_reserve: " SIZE_FORMAT " + evac_reserve: " SIZE_FORMAT, + _heap->get_promoted_reserve(), _heap->get_old_evac_reserve()); + log_info(gc)(" Old available is current: " SIZE_FORMAT " + old cset: " SIZE_FORMAT + " + xfer_bytes: " SIZE_FORMAT " equals: " SIZE_FORMAT, + _heap->old_generation()->available(), old_cset_regions * region_size_bytes, + xfer_bytes, _heap->old_generation()->available() + old_cset_regions*region_size_bytes + xfer_bytes); #endif - _heap->set_young_evac_reserve(young_reserve); - _heap->set_old_evac_reserve(old_reserve); - _heap->set_promoted_reserve(0); - } + assert(old_reserve <= old_available, + "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, + _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available); } // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 6c3b4899155..180614e83dd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -424,6 +424,11 @@ jint ShenandoahHeap::initialize() { // We are initializing free set. We ignore cset region tallies. 
size_t first_old, last_old, num_old; + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + set_young_evac_reserve(young_reserve); + set_old_evac_reserve((size_t) 0); + set_promoted_reserve((size_t) 0); + set_evacuation_reserve_quantities(true); _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); _free_set->rebuild(young_cset_regions, old_cset_regions); } @@ -1249,8 +1254,8 @@ void ShenandoahHeap::cancel_old_gc() { void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( size_t mutator_xfer_limit, size_t young_cset_regions, size_t old_cset_regions) { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG +#undef KELVIN_RESIZE +#ifdef KELVIN_RESIZE log_info(gc)("KELVIN adjust_generation_sizes_for_next_cycle(xfer_limit: " SIZE_FORMAT ", young_cset_regions: " SIZE_FORMAT ", old_cset_regions: " SIZE_FORMAT ")", mutator_xfer_limit, young_cset_regions, old_cset_regions); #endif @@ -1273,7 +1278,9 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( // We have to be careful in the event that SOEP is set to 100 by the user. assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); - const size_t old_available = old_generation()->available(); + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + const size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes; // The free set will reserve this amount of memory to hold young evacuations size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; @@ -1283,8 +1290,13 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve); - const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - +#ifdef KELVIN_RESIZE + log_info(gc)("young_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", max_old_reserve: " SIZE_FORMAT, + young_reserve, bound_on_old_reserve, max_old_reserve); + log_info(gc)("young_available: " SIZE_FORMAT ", young_cset: " SIZE_FORMAT ", total: " SIZE_FORMAT, + young_generation()->available(), young_cset_regions * region_size_bytes, + young_generation()->available() + young_cset_regions * region_size_bytes); +#endif // Decide how much old space we should reserve for a mixed collection size_t reserve_for_mixed = 0; const size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates(); @@ -1297,7 +1309,7 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes, "Unaffiliated available must be less than total available"); size_t old_fragmented_available = - old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes; + old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; reserve_for_mixed = max_evac_need + old_fragmented_available; if (reserve_for_mixed > max_old_reserve) { reserve_for_mixed = max_old_reserve; @@ -1316,62 +1328,79 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( } // This is the total old we want to ideally reserve - const size_t old_reserve = reserve_for_mixed + reserve_for_promo; + size_t old_reserve = reserve_for_mixed + reserve_for_promo; assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); // We now check if the old generation is running a 
surplus or a deficit. size_t old_region_deficit = 0; size_t old_region_surplus = 0; - const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes; - if (max_old_available >= old_reserve) { + size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes; + // align the mutator_xfer_limit on region size + mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes; + + if (old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young - const size_t old_surplus = max_old_available - old_reserve; + const size_t old_surplus = old_available - old_reserve; old_region_surplus = old_surplus / region_size_bytes; const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); + } else if (old_available + mutator_xfer_limit >= old_reserve) { + // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there + size_t old_deficit = old_reserve - old_available; + old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; } else { - // We are running a deficit which we will try to fill from young. - // Ignore that this will directly impact young_generation()->max_capacity(), - // indirectly impacting young_reserve and old_reserve. These computations are conservative. - const size_t old_need = old_reserve - max_old_available; - - old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes; - size_t mutator_region_xfer = mutator_xfer_limit / region_size_bytes; - if (mutator_region_xfer < old_region_deficit) { - const size_t collector_reserve_sum = young_reserve + max_old_reserve; - const size_t intended_memory_for_old = (collector_reserve_sum * ShenandoahOldEvacRatioPercent) / 100; - assert(intended_memory_for_old > max_old_reserve, "Sanity"); - const size_t old_shortfall = intended_memory_for_old - max_old_reserve; - // round down - size_t reserve_xfer_regions = old_shortfall / region_size_bytes; - if (mutator_region_xfer + reserve_xfer_regions > old_region_deficit) { - reserve_xfer_regions = old_region_deficit - mutator_region_xfer; - } - old_region_deficit = mutator_region_xfer + reserve_xfer_regions; - if (old_region_deficit > young_generation()->free_unaffiliated_regions() + young_cset_regions) { - size_t delta = old_region_deficit - (young_generation()->free_unaffiliated_regions() + young_cset_regions); - old_region_deficit -= delta; - if (delta > reserve_xfer_regions) { - delta -= reserve_xfer_regions; - reserve_xfer_regions = 0; - } - assert(delta <= mutator_region_xfer, "Sanity"); - mutator_region_xfer -= delta; + // We'll try to xfer from both mutator excess and from young collector reserve + size_t available_reserves = old_available + young_reserve + mutator_xfer_limit; + size_t old_entitlement = (available_reserves * ShenandoahOldEvacRatioPercent) / 100; + + // Round old_entitlement down to nearest multiple of regions to be transferred to old + size_t entitled_xfer = old_entitlement - old_available; + entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes); + old_entitlement = old_available + entitled_xfer; + + if (old_entitlement < old_reserve) { +#ifdef KELVIN_RESIZE + log_info(gc)("KELVIN entitlement (" SIZE_FORMAT ") < old reserve (" SIZE_FORMAT + "): before adjusting, promo reserve: " SIZE_FORMAT ", evac rserve: " SIZE_FORMAT, + old_entitlement, old_reserve, reserve_for_promo, reserve_for_mixed); 
+#endif + // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. + size_t budget_overrun = old_reserve - old_entitlement;; + if (reserve_for_promo > budget_overrun) { + reserve_for_promo -= budget_overrun; + old_reserve -= budget_overrun; + } else { + budget_overrun -= reserve_for_promo; + reserve_for_promo = 0; + reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0; + old_reserve = reserve_for_promo + reserve_for_mixed; } -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Mutator to Old Collector reserve", mutator_region_xfer); - log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Collector reserve to Old Collector reserve", - reserve_xfer_regions); +#ifdef KELVIN_RESIZE + log_info(gc)("KELVIN adter adjusments, old reserve (" SIZE_FORMAT + "), promo reserve: " SIZE_FORMAT ", evac rserve: " SIZE_FORMAT, + old_reserve, reserve_for_promo, reserve_for_mixed); #endif - young_reserve -= reserve_xfer_regions * region_size_bytes; } - // else, max_mutator_transfer is large enough to support the known deficit + + size_t old_deficit = old_reserve - old_available; + old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; + assert(old_region_deficit >= mutator_region_xfer_limit, "Handle this different conditional branch"); + + // Shrink young_reserve to account for loan to old reserve + const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit; + young_reserve -= reserve_xfer_regions * region_size_bytes; + +#ifdef KELVIN_RESIZE + log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Mutator to Old Collector reserve", mutator_region_xfer_limit); + log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Collector reserve to Old Collector reserve", + reserve_xfer_regions); +#endif } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN setting old surplus; " SIZE_FORMAT ", deficiit: " SIZE_FORMAT, old_region_surplus, old_region_deficit); +#ifdef KELVIN_RESIZE + log_info(gc)("KELVIN setting old surplus; " SIZE_FORMAT ", deficit: " SIZE_FORMAT, old_region_surplus, old_region_deficit); log_info(gc)("KELVIN setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT ", promoted_reserve: " SIZE_FORMAT, young_reserve, reserve_for_mixed, reserve_for_promo); #endif @@ -3208,7 +3237,6 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { log_info(gc)("KELVIN ShenHeap::rebuild_free_set(%s)", concurrent? "true": "false"); #endif - _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); // If there are no old regions, first_old_region will be greater than last_old_region assert((first_old_region > last_old_region) || @@ -3241,7 +3269,6 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { } // Rebuild free set based on adjusted generation sizes. _free_set->rebuild(young_cset_regions, old_cset_regions); - set_evacuation_reserve_quantities(false); if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) { size_t old_region_span = (first_old_region <= last_old_region)? 
(last_old_region + 1 - first_old_region): 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 57663b48f04..a9503a88a50 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -145,14 +145,10 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { // We must execute this vm operation if we completed final mark. We cannot // return from here with weak roots in progress. This is not a valid gc state // for any young collections (or allocation failures) that interrupt the old - // collection. + // collection. This will reclaim immediate garbage, adjust generation sizes, + // and rebuild free set. vmop_entry_final_roots(); - // We do not rebuild_free following increments of old marking because memory has not been reclaimed.. However, we may - // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. - size_t allocation_runway = heap->young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); - heap->adjust_generation_sizes_for_next_cycle(allocation_runway, 0, 0); - bool success; size_t region_xfer; const char* region_destination; From 7bb1d386e00c41bde3a279b8ab2fc3a05ec1bd7f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 26 Feb 2024 00:13:35 +0000 Subject: [PATCH 09/64] Remove dead code for inelastic plabs --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index ffd22ca339b..a8dfff7b0ad 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -785,14 +785,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size()); } } - } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) { - - // inelastic PLAB - size_t size = req.size(); - size_t usable_free = get_usable_free_words(r->free()); - if (size <= usable_free) { - result = allocate_aligned_plab(size, req, r); - } } else { size_t size = req.size(); result = r->allocate(size, req); From 8bc436704e90dfd9cb31b3bdb126a674708eca4e Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 26 Feb 2024 00:17:12 +0000 Subject: [PATCH 10/64] Revert "Remove dead code for inelastic plabs" This reverts commit 7bb1d386e00c41bde3a279b8ab2fc3a05ec1bd7f. 
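For context, the branch being restored here handles an "inelastic" PLAB request, which (as the surrounding code suggests) must be satisfied at its full requested size or not at all, in contrast to the elastic path that may shrink a request toward req.min_size(). A minimal sketch of the restored control flow, with illustrative comments added:

    // All-or-nothing: an inelastic PLAB is never resized to fit the region
    size_t size = req.size();
    size_t usable_free = get_usable_free_words(r->free());
    if (size <= usable_free) {
      result = allocate_aligned_plab(size, req, r);  // card-aligned placement
    }
    // otherwise result remains nullptr; presumably the allocator then tries elsewhere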
--- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index a8dfff7b0ad..ffd22ca339b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -785,6 +785,14 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size()); } } + } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) { + + // inelastic PLAB + size_t size = req.size(); + size_t usable_free = get_usable_free_words(r->free()); + if (size <= usable_free) { + result = allocate_aligned_plab(size, req, r); + } } else { size_t size = req.size(); result = r->allocate(size, req); From 99cce53b3e6e51cee2ed71e0b7c7caa016a8ed4f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 26 Feb 2024 21:18:32 +0000 Subject: [PATCH 11/64] Round LAB sizes down rather than up to force alignment When we round up, we introduce the risk that the new size exceeds the maximum LAB size, resulting in an assertion error. --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 1 - .../share/gc/shenandoah/shenandoahHeap.cpp | 16 +++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index ffd22ca339b..e80ebae6ec7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1532,7 +1532,6 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ case ShenandoahAllocRequest::_alloc_plab: case ShenandoahAllocRequest::_alloc_gclab: case ShenandoahAllocRequest::_alloc_tlab: - in_new_region = false; assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT, req.size(), ShenandoahHeapRegion::humongous_threshold_words()); return nullptr; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index bb571bc995e..425e78a24ed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1115,7 +1115,7 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b size_t unalignment = future_size % CardTable::card_size_in_words(); if (unalignment != 0) { - future_size = future_size - unalignment + CardTable::card_size_in_words(); + future_size -= unalignment; } // Record new heuristic value even if we take any shortcut. This captures @@ -1172,6 +1172,12 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b } return plab->allocate(size); } else { + // TODO: Add smarts here, or at minimum, a command-line option to reduce the need for shared allocations. For example, + // maybe we should go ahead and retire the PLAB if: + // + // (used within the PLAB) * MIN2(ShenandoahPromoEvacWaste, ShenandoahOldEvacWaste) >= PLAB size. + // + // If there's still at least min_size() words available within the current plab, don't retire it. Let's gnaw // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request // to be satisfied with a shared allocation. 
By packing more promotions into the previously allocated PLAB, we @@ -1372,8 +1378,12 @@ HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size, // Align requested sizes to card sized multiples size_t words_in_card = CardTable::card_size_in_words(); size_t align_mask = ~(words_in_card - 1); - min_size = (min_size + words_in_card - 1) & align_mask; - word_size = (word_size + words_in_card - 1) & align_mask; + + // Need to round down rather than rounding up. Otherwise, might overrun max size. + min_size = min_size & align_mask; + word_size = word_size & align_mask; + assert(word_size >= min_size, "Requested PLAB is too small"); + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size); // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread // if we are at risk of infringing on the old-gen evacuation budget. From 11b26bb8ba77374eca8327fee956b575b294cbf3 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 26 Feb 2024 21:34:54 +0000 Subject: [PATCH 12/64] Revert "Round LAB sizes down rather than up to force alignment" This reverts commit 99cce53b3e6e51cee2ed71e0b7c7caa016a8ed4f. --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 1 + .../share/gc/shenandoah/shenandoahHeap.cpp | 16 +++------------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index e80ebae6ec7..ffd22ca339b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1532,6 +1532,7 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ case ShenandoahAllocRequest::_alloc_plab: case ShenandoahAllocRequest::_alloc_gclab: case ShenandoahAllocRequest::_alloc_tlab: + in_new_region = false; assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT, req.size(), ShenandoahHeapRegion::humongous_threshold_words()); return nullptr; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 425e78a24ed..bb571bc995e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1115,7 +1115,7 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b size_t unalignment = future_size % CardTable::card_size_in_words(); if (unalignment != 0) { - future_size -= unalignment; + future_size = future_size - unalignment + CardTable::card_size_in_words(); } // Record new heuristic value even if we take any shortcut. This captures @@ -1172,12 +1172,6 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b } return plab->allocate(size); } else { - // TODO: Add smarts here, or at minimum, a command-line option to reduce the need for shared allocations. For example, - // maybe we should go ahead and retire the PLAB if: - // - // (used within the PLAB) * MIN2(ShenandoahPromoEvacWaste, ShenandoahOldEvacWaste) >= PLAB size. - // - // If there's still at least min_size() words available within the current plab, don't retire it. Let's gnaw // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request // to be satisfied with a shared allocation. 
By packing more promotions into the previously allocated PLAB, we @@ -1378,12 +1372,8 @@ HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size, // Align requested sizes to card sized multiples size_t words_in_card = CardTable::card_size_in_words(); size_t align_mask = ~(words_in_card - 1); - - // Need to round down rather than rounding up. Otherwise, might overrun max size. - min_size = min_size & align_mask; - word_size = word_size & align_mask; - assert(word_size >= min_size, "Requested PLAB is too small"); - + min_size = (min_size + words_in_card - 1) & align_mask; + word_size = (word_size + words_in_card - 1) & align_mask; ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size); // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread // if we are at risk of infringing on the old-gen evacuation budget. From f0b15acb1d8c936255a05536c2f9e6935f6a0347 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 27 Feb 2024 21:04:52 +0000 Subject: [PATCH 13/64] Make evacuation reserve quantities always valid In the previous implementation, reserve quantities were only sometimes established. --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 3 ++- src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp | 4 ++++ src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 8 ++++++++ src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp | 7 ++++++- src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp | 2 ++ 5 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 61675349b6f..4d213fe9efc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1225,8 +1225,9 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio // At end of final mark, evacuation reserve quantities are defined by choose_collection_set(). At end of update refs, // evacuation reserve quantities are established by adjust_generation_sizes_for_next_cycle(). +#ifdef KELVIN_DEPRECATE assert(_heap->has_evacuation_reserve_quantities(), "Always prepare reserve quantities before rebuild"); - +#endif young_reserve = _heap->get_young_evac_reserve(); old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve(); #undef KELVIN_DEBUG diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index d303d1e30c7..7c122680370 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -752,7 +752,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { } // Freeset construction uses reserve quantities if they are valid +#ifdef KELVIN_DEPRECATE heap->set_evacuation_reserve_quantities(true); +#endif { ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); @@ -764,7 +766,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); heap->free_set()->rebuild(young_cset_regions, old_cset_regions); } +#ifdef KELVIN_DEPRECATE heap->set_evacuation_reserve_quantities(false); +#endif } bool ShenandoahGeneration::is_bitmap_clear() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 180614e83dd..40ff799d5a8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -428,7 +428,9 @@ jint ShenandoahHeap::initialize() { set_young_evac_reserve(young_reserve); set_old_evac_reserve((size_t) 0); set_promoted_reserve((size_t) 0); +#ifdef KELVIN_DEPRECATE set_evacuation_reserve_quantities(true); +#endif _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); _free_set->rebuild(young_cset_regions, old_cset_regions); } @@ -605,7 +607,9 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : _old_evac_reserve(0), _young_evac_reserve(0), _age_census(nullptr), +#ifdef KELVIN_DEPRECATE _has_evacuation_reserve_quantities(false), +#endif _cancel_requested_time(0), _young_generation(nullptr), _global_generation(nullptr), @@ -1412,7 +1416,9 @@ void ShenandoahHeap::adjust_generation_sizes_for_next_cycle( set_young_evac_reserve(young_reserve); set_old_evac_reserve(reserve_for_mixed); set_promoted_reserve(reserve_for_promo); +#ifdef KELVIN_DEPRECATE set_evacuation_reserve_quantities(true); +#endif } // Called from stubs in JIT code or interpreter @@ -2566,9 +2572,11 @@ void ShenandoahHeap::set_gc_state(uint mask, bool value) { _gc_state_changed = true; } +#ifdef KELVIN_DEPRECATE void ShenandoahHeap::set_evacuation_reserve_quantities(bool is_valid) { _has_evacuation_reserve_quantities = is_valid; } +#endif void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) { uint mask; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 4ee2082ea93..83cc0ea58ba 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -386,6 +386,7 @@ class ShenandoahHeap : public CollectedHeap { ShenandoahAgeCensus* _age_census; // Age census used for adapting tenuring threshold in generational mode +#ifdef KELVIN_DEPRECATE // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to // hold the results of evacuating to young-gen and to old-gen. These quantitites, stored in _promoted_reserve, // _old_evac_reserve, and _young_evac_reserve, are consulted prior to rebuilding the free set (ShenandoahFreeSet) @@ -399,7 +400,7 @@ class ShenandoahHeap : public CollectedHeap { // for old_collector set when not _has_evacuation_reserve_quantities is based in part on anticipated promotion as // determined by analysis of live data found during the previous GC pass which is one less than the current tenure age. 
bool _has_evacuation_reserve_quantities; -+#endif public: char gc_state() const; @@ -417,7 +418,9 @@ class ShenandoahHeap : public CollectedHeap { return _heap_changed.try_unset(); } +#ifdef KELVIN_DEPRECATE void set_evacuation_reserve_quantities(bool is_valid); +#endif void set_concurrent_young_mark_in_progress(bool in_progress); void set_concurrent_old_mark_in_progress(bool in_progress); void set_evacuation_in_progress(bool in_progress); @@ -433,7 +436,9 @@ class ShenandoahHeap : public CollectedHeap { inline bool is_stable() const; inline bool is_idle() const; +#ifdef KELVIN_DEPRECATE inline bool has_evacuation_reserve_quantities() const; +#endif inline bool is_concurrent_mark_in_progress() const; inline bool is_concurrent_young_mark_in_progress() const; inline bool is_concurrent_old_mark_in_progress() const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index c27d7334072..61b43ea1f50 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -709,9 +709,11 @@ inline bool ShenandoahHeap::is_stable() const { return _gc_state.is_clear(); } +#ifdef KELVIN_DEPRECATE inline bool ShenandoahHeap::has_evacuation_reserve_quantities() const { return _has_evacuation_reserve_quantities; } +#endif inline bool ShenandoahHeap::is_idle() const { return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS); From 28a382bb90c4844e4899dd507a347619b6d74cba Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 13 Mar 2024 19:52:50 +0000 Subject: [PATCH 14/64] Make satb-mode Info logging less verbose --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 40c59bdb4c6..ba1261695d2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -819,7 +819,7 @@ void ShenandoahConcurrentGC::op_final_mark() { } else { // Not is_generational() if (!heap->collection_set()->is_empty()) { - LogTarget(Info, gc, ergo) lt; + LogTarget(Debug, gc, ergo) lt; if (lt.is_enabled()) { ResourceMark rm; LogStream ls(lt); From d88130000c764dacf08ec27723132dd2a3d968de Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 11 Apr 2024 13:21:59 +0000 Subject: [PATCH 15/64] Change behavior of max_old and min_old --- src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp index fe7feb68fb5..393b07d3bf0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp @@ -299,7 +299,9 @@ size_t ShenandoahGenerationSizer::max_size_for(ShenandoahGeneration* generation) case YOUNG: return max_young_size(); case OLD: - return min_young_size(); + // Officially, there is no limit on size of OLD, though the practical limit is heap size - min_young_size(). + // The practical limit is enforced when we try to shrink young in order to expand old.
+ return ShenandoahHeap::heap()->max_capacity(); default: ShouldNotReachHere(); return 0; @@ -311,7 +313,9 @@ size_t ShenandoahGenerationSizer::min_size_for(ShenandoahGeneration* generation) { case YOUNG: return min_young_size(); case OLD: - return ShenandoahHeap::heap()->max_capacity() - max_young_size(); + // Officially, there is no limit on size of OLD, though the practical limit is heap size - max_young_size(). + // The practical limit is enforced when we try to expand young in order to shrink old. + return 0; default: ShouldNotReachHere(); return 0; From c2cb1b768f18b6ec529dd63c5982b0e9f7e2c078 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 11 Apr 2024 15:03:17 +0000 Subject: [PATCH 16/64] Revert "Change behavior of max_old and min_old" This reverts commit d88130000c764dacf08ec27723132dd2a3d968de. --- src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp index 393b07d3bf0..fe7feb68fb5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp @@ -299,9 +299,7 @@ size_t ShenandoahGenerationSizer::max_size_for(ShenandoahGeneration* generation) { case YOUNG: return max_young_size(); case OLD: - // Officially, there is no limit on size of OLD, though the practical limit is heap size - min_young_size(). - // The practical limit is enforced when we try to shrink young in order to expand old. - return ShenandoahHeap::heap()->max_capacity(); + return min_young_size(); default: ShouldNotReachHere(); return 0; @@ -313,9 +311,7 @@ size_t ShenandoahGenerationSizer::min_size_for(ShenandoahGeneration* generation) { case YOUNG: return min_young_size(); case OLD: - // Officially, there is no limit on size of OLD, though the practical limit is heap size - max_young_size(). - // The practical limit is enforced when we try to expand young in order to shrink old.
- return 0; + return ShenandoahHeap::heap()->max_capacity() - max_young_size(); default: ShouldNotReachHere(); return 0; From fecd9a07c881a69b7232a1ca52b1fdc608d0cbad Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 31 May 2024 00:18:10 +0000 Subject: [PATCH 17/64] Fixup some conflicts introduced by merge from upstream --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 41 +++++++++---------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 13 ++---- .../gc/shenandoah/shenandoahGeneration.cpp | 3 +- .../shenandoah/shenandoahGenerationalHeap.cpp | 9 ++-- .../share/gc/shenandoah/shenandoahOldGC.cpp | 18 ++++---- 5 files changed, 36 insertions(+), 48 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 2b5869867cb..b8671aff193 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1162,7 +1162,7 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &o find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); } -void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves) { +void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions) { shenandoah_assert_heaplocked(); size_t young_reserve(0), old_reserve(0); @@ -1170,8 +1170,7 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve; old_reserve = 0; } else { - compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves, - young_reserve, old_reserve); + compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve); } reserve_regions(young_reserve, old_reserve); @@ -1180,7 +1179,7 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio log_status(); } -void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves, +void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, size_t& young_reserve_result, size_t& old_reserve_result) const { const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); ShenandoahOldGeneration* const old_generation = _heap->old_generation(); @@ -1217,24 +1216,21 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for // each PLAB's available memory. -#define KELVIN_CONFIRM_SUCCESSFUL_MERGE -#ifdef KELVIN_CONFIRM_SUCCESSFUL_MERGE - assert(have_evacuation_reserves, "Always prepare reserve quantities before rebuild"); - // After testing with this assert: - // 1. make the following code unconditional - // 2. toss the else arm - // 3. get rid of the have_evacuation_reserves argument - // 4. maybe even get rid of the state variable from which have_evacuation_reserves value is derived -#endif - if (have_evacuation_reserves) { - // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. 
- const size_t promoted_reserve = old_generation->get_promoted_reserve(); - const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); - young_reserve_result = young_generation->get_evacuation_reserve(); - old_reserve_result = promoted_reserve + old_evac_reserve; - assert(old_reserve_result <= old_available, - "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, - promoted_reserve, old_evac_reserve, old_available); + + // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. + const size_t promoted_reserve = old_generation->get_promoted_reserve(); + const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); + young_reserve_result = young_generation->get_evacuation_reserve(); + old_reserve_result = promoted_reserve + old_evac_reserve; + assert(old_reserve_result <= old_available, + "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, + promoted_reserve, old_evac_reserve, old_available); +#ifdef KELVIN_DEPRECATE + // This code corresponds to a previous incarnation of this method, + // in which we would sometimes not have precomputed evacuation + // reserves. It is now deprecated because we now always have + // precomputed evacuation reserves. + } else { // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100; @@ -1243,6 +1239,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // unaffiliated regions. old_reserve_result = old_available; } +#endif // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 15a3469651e..ff4e7d13fd1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -197,15 +197,8 @@ class ShenandoahFreeSet : public CHeapObj { // hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their, // respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for // evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and - // old_collector sets to hold evacuations, if have_evacuation_reserves is true. The other time we rebuild the free - // set is at the end of GC, as we prepare to idle GC until the next trigger. In this case, have_evacuation_reserves - // is false because we don't yet know how much memory will need to be evacuated in the next GC cycle. When - // have_evacuation_reserves is false, the free set rebuild operation reserves for the collector and old_collector sets - // based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and - // ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the - // evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data - // found during the previous GC pass which is one less than the current tenure age.
- void rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves = false); + // old_collector sets to hold evacuations. + void rebuild(size_t young_cset_regions, size_t old_cset_regions); void move_collector_sets_to_mutator(size_t cset_regions); @@ -236,7 +229,7 @@ class ShenandoahFreeSet : public CHeapObj { // Reserve space for evacuations, with regions reserved for old evacuations placed to the right // of regions reserved of young evacuations. - void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves, + void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, size_t &young_reserve_result, size_t &old_reserve_result) const; }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 5b2cbba3887..e373ab61616 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -775,8 +775,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // We are preparing for evacuation. At this time, we ignore cset region tallies. size_t first_old, last_old, num_old; heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - // Free set construction uses reserve quantities, because they are known to be valid here - heap->free_set()->rebuild(young_cset_regions, old_cset_regions, true); + heap->free_set()->rebuild(young_cset_regions, old_cset_regions); } #ifdef KELVIN_DEPRECATE heap->set_evacuation_reserve_quantities(false); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 6f29bbadd2e..50ff1087f4c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -567,8 +567,8 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_g // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to -// xfer_limit, and any surplus is transferred to the young generation. -// xfer_limit is the maximum we're able to transfer from young to old. +// mutator_xfer_limit, and any surplus is transferred to the young generation. +// mutator_xfer_limit is the maximum we're able to transfer from young to old. void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit, size_t old_cset_regions) { // We can limit the old reserve to the size of anticipated promotions: @@ -590,11 +590,8 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // We have to be careful in the event that SOEP is set to 100 by the user. 
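// As a worked illustration of that concern (hypothetical numbers): with a ratio of 75
// and a young_reserve of 30M, MIN2((30M * 75) / (100 - 75), bound_on_old_reserve)
// permits up to 90M of old reserve; at a ratio of exactly 100 the divisor (100 - ratio)
// would be zero, so the bound must be taken unconditionally rather than computed.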
assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); -#ifdef KELVIN_CHANGE_MISSING - const size_t old_available = old_generation()->available(); -#else const size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes; -#endif + // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve) size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 5aac891eb01..362654639ee 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -148,18 +148,20 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { // and rebuild free set. vmop_entry_final_roots(); - // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may - // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. - - // TODO: if we do transfer memory, shouldn't we rebuild the free set? - - size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); - heap->compute_old_generation_balance(allocation_runway, 0); - + // After concurrent old marking finishes, we may be able to reclaim immediate garbage from regions that are fully garbage. + // Furthermore, we may want to expand OLD in order to make room for the first mixed evacuation that immediately follows + // completion of OLD marking. This is why we rebuild free set here. ShenandoahGenerationalHeap::TransferResult result; { ShenandoahHeapLocker locker(heap->lock()); + size_t young_cset_regions, old_cset_regions; + size_t first_old, last_old, num_old; + size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); + heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation when concurrent mark ends"); + heap->compute_old_generation_balance(allocation_runway, 0); result = heap->balance_generations(); + heap->free_set()->rebuild(0, 0); } LogTarget(Info, gc, ergo) lt; From 669be0b3eb96cfee4f61e8b57e2edc125a0cee78 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 3 Jun 2024 16:15:03 -0600 Subject: [PATCH 18/64] Do not plan to xfer Collector reserves unless they are unaffiliated --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 5 +++++ .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index cbf80fd97a1..23419d0f975 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -221,6 +221,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { return false; } + // vmop_entry_final_updaterefs rebuilds free set in preparation for next GC. vmop_entry_final_updaterefs(); // Update references freed up collection set, kick the cleanup to reclaim the space. 
@@ -230,6 +231,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // do not check for cancellation here because, at this point, the cycle is effectively // complete. If the cycle has been cancelled here, the control thread will detect it // on its next iteration and run a degenerated young cycle. + + // vmop_entry_final_roots rebuilds free set in preparation for next GC. vmop_entry_final_roots(); _abbreviated = true; } @@ -410,6 +413,8 @@ void ShenandoahConcurrentGC::entry_final_roots() { #ifdef KELVIN_DEBUG log_info(gc)("KELVIN says we're no longer missing rebuild after abbreviated cycle"); + // But, debug tracebacks suggest we have not rebalanced generations + // following the rebuild that happened at start of evacuation. #endif ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 50ff1087f4c..19baf1e7b9c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -674,8 +674,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // Round old_entitlement down to nearest multiple of regions to be transferred to old size_t entitled_xfer = old_entitlement - old_available; entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes); + size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions(); + size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; + if (entitled_xfer > unaffiliated_young_memory) { + entitled_xfer = unaffiliated_young_memory; + } old_entitlement = old_available + entitled_xfer; - if (old_entitlement < old_reserve) { #ifdef KELVIN_RESIZE log_info(gc)("KELVIN entitlement (" SIZE_FORMAT ") < old reserve (" SIZE_FORMAT From fb259d30fe8f7ff6dac7992ab2f847f649b1548e Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 4 Jun 2024 18:50:08 +0000 Subject: [PATCH 19/64] Resolve regressions with TestThreadFailure --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 11 +++++++---- .../gc/shenandoah/shenandoahGenerationalFullGC.cpp | 2 +- .../gc/shenandoah/shenandoahGenerationalHeap.cpp | 12 +++++++++++- .../gc/shenandoah/shenandoahGenerationalHeap.hpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp | 2 +- .../jtreg/gc/shenandoah/oom/TestThreadFailure.java | 2 +- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index b8671aff193..1c2272cf4f8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1211,20 +1211,23 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions young_unaffiliated_regions += old_region_balance; } + size_t young_available = young_capacity - (young_generation->used() + young_generation->get_humongous_waste()); + young_available += young_cset_regions * region_size_bytes; + // All allocations taken from the old collector set are performed by GC, generally using PLABs for both // promotions and evacuations. The partition between which old memory is reserved for evacuation and // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for // each PLAB's available memory.
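// (Compare allocate_new_plab(): per the comment there, allocate_memory() sets a thread-local
// flag to prohibit further promotions by a thread when promotions risk infringing on the
// old-gen evacuation budget; presumably that flag is the kind of per-thread intention meant here.)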
- // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. const size_t promoted_reserve = old_generation->get_promoted_reserve(); const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); young_reserve_result = young_generation->get_evacuation_reserve(); old_reserve_result = promoted_reserve + old_evac_reserve; - assert(old_reserve_result <= old_available, - "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, - promoted_reserve, old_evac_reserve, old_available); + assert(old_reserve_result + young_reserve_result <= old_available + young_available, + "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT + ") more than is available: " SIZE_FORMAT " + " SIZE_FORMAT, + promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available); #ifdef KELVIN_DEPRECATE // This code corresponds to a previous encarnation of this method, // in which we would sometimes not have precomputed evacuation diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index d75f4d08902..550c97e528c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -180,7 +180,7 @@ void ShenandoahGenerationalFullGC::compute_balances() { // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion. heap->old_generation()->set_promotion_potential(0); // Invoke this in case we are able to transfer memory from OLD to YOUNG. - heap->compute_old_generation_balance(0, 0); + heap->compute_old_generation_balance(0, 0, 0); } ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 19baf1e7b9c..9bd48913f20 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -569,7 +569,8 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_g // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to // mutator_xfer_limit, and any surplus is transferred to the young generation. // mutator_xfer_limit is the maximum we're able to transfer from young to old. 
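// As an illustration (hypothetical numbers): if the old deficit comes to 8 regions but
// mutator_xfer_limit covers only 5 of them, the remaining 3 regions are loaned from the
// young Collector reserve, and young_reserve shrinks by those 3 regions' worth of bytes.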
-void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit, size_t old_cset_regions) { +void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit, + size_t old_cset_regions, size_t young_cset_regions) { // We can limit the old reserve to the size of anticipated promotions: // max_old_reserve is an upper bound on memory evacuated from old and promoted to old, @@ -591,6 +592,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); const size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes; + const size_t young_available = young_generation()->available() + young_cset_regions * region_size_bytes; // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve) size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; @@ -601,6 +603,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve); + if (young_reserve > young_available) { + young_reserve = young_available; + } + #ifdef KELVIN_RESIZE log_info(gc)("young_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", max_old_reserve: " SIZE_FORMAT, young_reserve, bound_on_old_reserve, max_old_reserve); @@ -730,6 +736,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x young_reserve, reserve_for_mixed, reserve_for_promo); #endif + assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available, + "Cannot reserve more memory than is available: " SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT " <= " + SIZE_FORMAT " + " SIZE_FORMAT, young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available); + // deficit/surplus adjustments to generation sizes will precede rebuild young_generation()->set_evacuation_reserve(young_reserve); old_generation()->set_evacuation_reserve(reserve_for_mixed); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index ff34e44fb71..a1a7fa14cc3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -86,7 +86,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { void reset_generation_reserves(); // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions - void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions); + void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions, size_t young_cset_regions); // Transfers surplus old regions to young, or takes regions from young to satisfy old region deficit TransferResult balance_generations(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 50cdc3897a7..4c8f3cb29c3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -2835,7 +2835,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { // The computation of bytes_of_allocation_runway_before_gc_trigger is 
quite conservative so consider all of this // available for transfer to old. Note that transfer of humongous regions does not impact available. size_t allocation_runway = young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); - ShenandoahGenerationalHeap::heap()->compute_old_generation_balance(allocation_runway, old_cset_regions); + ShenandoahGenerationalHeap::heap()->compute_old_generation_balance(allocation_runway, old_cset_regions, young_cset_regions); // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 362654639ee..398860370c1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -159,7 +159,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation when concurrent mark ends"); - heap->compute_old_generation_balance(allocation_runway, 0); + heap->compute_old_generation_balance(allocation_runway, 0, 0); result = heap->balance_generations(); heap->free_set()->rebuild(0, 0); } diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java index fcc9cf632f1..4042485437b 100644 --- a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java +++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java @@ -78,7 +78,7 @@ public static void main(String[] args) throws Exception { { ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( "-Xmx32m", - "-XX:+UnlockExperimentalVMOptions", "-XX:ShenandoahNoProgressThreshold=12", + "-XX:+UnlockExperimentalVMOptions", "-XX:ShenandoahNoProgressThreshold=24", "-XX:+UseShenandoahGC", "-XX:ShenandoahGCMode=generational", TestThreadFailure.class.getName(), "test"); From d32f4282f91cf6440a79bfcebde3d2ae410d7426 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 5 Jun 2024 14:30:49 +0000 Subject: [PATCH 20/64] Change default ratio of old vs young evacuation --- .../gc/shenandoah/shenandoah_globals.hpp | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index f298d3928fa..52d72bdc87d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,27 +409,28 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 50, EXPERIMENTAL, \ - "The maximum proportion of evacuation from old-gen memory, " \ - "expressed as a percentage. The default value 50 denotes that no" \ - "more than 50% of the collection set evacuation workload may be " \ - "towards evacuation of old-gen heap regions. This limits both the"\ - "promotion of aged regions and the compaction of existing old " \ - "regions. 
A value of 50 denotes that the total evacuation work" \ "may increase to up to two times the young gen evacuation work." \ "A larger value allows quicker promotion and allows" \ + product(uintx, ShenandoahOldEvacRatioPercent, 12, EXPERIMENTAL, \ + "The maximum proportion of evacuation to old-gen memory, " \ + "expressed as a percentage. The default value 12 denotes that no " \ + "more than 12% of the collection set evacuation workload may be " \ + "copied into old-gen regions. This limits both the promotion of "\ + "aged young regions and the compaction of existing old regions. " \ + "Given a total evacuation budget of X, the amount of memory " \ + "dedicated to hold objects evacuated to young generation is 88% " \ + "(100 - 12). A larger value allows for quicker promotion and " \ + "a smaller number of mixed evacuations to process " \ "the entire list of old-gen collection candidates at the cost " \ "of an increased disruption of the normal cadence of young-gen " \ "collections. A value of 100 allows a mixed evacuation to " \ "focus entirely on old-gen memory, allowing no young-gen " \ "regions to be collected, likely resulting in subsequent " \ - "allocation failures because the allocation pool is not " \ - "replenished. A value of 0 allows a mixed evacuation to" \ - "focus entirely on young-gen memory, allowing no old-gen " \ - "regions to be collected, likely resulting in subsequent " \ - "promotion failures and triggering of stop-the-world full GC " \ - "events.") \ + "allocation failures because the young-gen allocation pool is " \ + "not replenished. A value of 0 prevents mixed evacuations from " \ + "defragmenting old-gen memory, likely resulting in " \ + "subsequent promotion failures and triggering of stop-the-world " \ + "full GC events. 
Failure to defragment old-gen memory can also " \ "result in unconstrained expansion of old-gen, and shrinkage of " \ "young gen, causing an inefficiently high frequency of young-gen GC.") \ range(0,100) \ \ product(uintx, ShenandoahMinYoungPercentage, 20, EXPERIMENTAL, \ From a57805fd07f11202c4a2bca24b1d7a6fca10d606 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 6 Jun 2024 15:07:03 +0000 Subject: [PATCH 21/64] Remove debug instrumentation --- .../heuristics/shenandoahGlobalHeuristics.cpp | 6 --- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 25 ------------ .../gc/shenandoah/shenandoahDegeneratedGC.cpp | 4 -- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 15 ------- .../gc/shenandoah/shenandoahGeneration.cpp | 29 -------------- .../shenandoah/shenandoahGenerationalHeap.cpp | 40 ------------------- .../share/gc/shenandoah/shenandoahHeap.cpp | 11 ----- .../share/gc/shenandoah/shenandoahHeap.hpp | 24 ----------- .../gc/shenandoah/shenandoahHeap.inline.hpp | 6 --- 9 files changed, 160 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index 4d3d13496ff..4782b41b10c 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -170,12 +170,6 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti if (regions_transferred_to_old > 0) { heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("choose_global_cset setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT, - heap->get_young_evac_reserve() - regions_transferred_to_old * region_size_bytes, - heap->get_old_evac_reserve() + regions_transferred_to_old * region_size_bytes); -#endif heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 23419d0f975..2655e40ca85 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -54,8 +54,6 @@ #include "runtime/vmThread.hpp" #include "utilities/events.hpp" -#undef KELVIN_DEBUG - // Breakpoint support class ShenandoahBreakpointGCScope : public StackObj { private: @@ -203,10 +201,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { } } -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN done with evacuation"); -#endif - if (heap->has_forwarded_objects()) { // Perform update-refs phase.
vmop_entry_init_updaterefs(); @@ -297,14 +291,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { result.print_on("Concurrent GC", &ls); } } -#ifdef KELVIN_DEBUG - { - log_info(gc)("KELVIN finished resizing generations"); - ShenandoahHeapLocker locker(heap->lock()); - heap->free_set()->log_status(); - } -#endif - return true; } @@ -408,14 +394,7 @@ void ShenandoahConcurrentGC::entry_final_roots() { static const char* msg = "Pause Final Roots"; ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); EventMark em("%s", msg); - op_final_roots(); - -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN says we're no longer missing rebuild after abbreviated cycle"); - // But, debug tracebacks suggest we have not rebalanced generations - // following the rebuild that happened at start of evacuation. -#endif ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); } @@ -1304,10 +1283,6 @@ void ShenandoahConcurrentGC::op_final_updaterefs() { if (VerifyAfterGC) { Universe::verify(); } - -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN: op_final_updaterefs() invokes rebuild"); -#endif heap->rebuild_free_set(true /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index 0d18acb63f7..33406109e96 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -445,10 +445,6 @@ void ShenandoahDegenGC::op_update_roots() { Universe::verify(); } -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN: Degen:op_update_roots() invokes rebuild"); -#endif heap->rebuild_free_set(false /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 1c2272cf4f8..4606cb41152 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1228,21 +1228,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT ") more than is available: " SIZE_FORMAT " + " SIZE_FORMAT, promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available); -#ifdef KELVIN_DEPRECATE - // This code corresponds to a previous incarnation of this method, - // in which we would sometimes not have precomputed evacuation - // reserves. It is now deprecated because we now always have - // precomputed evacuation reserves. - - } else { - // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) - young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100; - // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions. - // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of - // unaffiliated regions. - old_reserve_result = old_available; - } -#endif // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector // free set. Because of this, old_available may not have enough memory to represent the intended reserve.
Adjust diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index e373ab61616..66899761bc7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -264,13 +264,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap const size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve(); const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN compute_evac_budgets: max_young_evac_reserve: " SIZE_FORMAT ", young avail: " SIZE_FORMAT - ", young_evac_reserve: " SIZE_FORMAT, maximum_young_evacuation_reserve, - young_generation->available_with_reserve(), young_evacuation_reserve); -#endif - // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), // clamped by the old generation space available. // @@ -352,12 +345,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood // of old evacuation failure. -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN compute_budgets sets young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT - ", promoted_reserve: " SIZE_FORMAT, young_evacuation_reserve, old_evacuation_reserve, - consumed_by_advance_promotion); -#endif - young_generation->set_evacuation_reserve(young_evacuation_reserve); old_generation->set_evacuation_reserve(old_evacuation_reserve); old_generation->set_promoted_reserve(consumed_by_advance_promotion); @@ -402,9 +389,6 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, } else if (old_evacuated_committed < old_evacuation_reserve) { // This happens if the old-gen collection consumes less than full budget. old_evacuation_reserve = old_evacuated_committed; -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN adjust_evac_budgets sets old_evac_reserve: " SIZE_FORMAT, old_evacuation_reserve); -#endif old_generation->set_evacuation_reserve(old_evacuation_reserve); } @@ -416,9 +400,6 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, size_t total_young_available = young_generation->available_with_reserve(); assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young"); -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN adjust_evac_budgets sets young_evac_reserve: " SIZE_FORMAT, young_evacuated_reserve_used); -#endif young_generation->set_evacuation_reserve(young_evacuated_reserve_used); size_t old_available = old_generation->available(); @@ -484,9 +465,6 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated // promotions than fit in reserved memory, they will be deferred until a future GC pass. 
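// (Illustrative arithmetic, hypothetical numbers: if young_advance_promoted_reserve_used
// is 6M and the excess_old identified above is 2M, total_promotion_reserve becomes 8M;
// promotions beyond that budget are deferred to a future GC pass.)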
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old; -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN adjust_evac_budgets sets promoted_reserve: " SIZE_FORMAT, total_promotion_reserve); -#endif old_generation->set_promoted_reserve(total_promotion_reserve); old_generation->reset_promoted_expended(); } @@ -762,10 +740,6 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { } } -#ifdef KELVIN_DEPRECATE - // Freeset construction uses reserve quantities if they are valid - heap->set_evacuation_reserve_quantities(true); -#endif { ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); @@ -777,9 +751,6 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); heap->free_set()->rebuild(young_cset_regions, old_cset_regions); } -#ifdef KELVIN_DEPRECATE - heap->set_evacuation_reserve_quantities(false); -#endif } bool ShenandoahGeneration::is_bitmap_clear() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 9bd48913f20..d5eaf7eac8e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -607,13 +607,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x young_reserve = young_available; } -#ifdef KELVIN_RESIZE - log_info(gc)("young_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", max_old_reserve: " SIZE_FORMAT, - young_reserve, bound_on_old_reserve, max_old_reserve); - log_info(gc)("young_available: " SIZE_FORMAT ", young_cset: " SIZE_FORMAT ", total: " SIZE_FORMAT, - young_generation()->available(), young_cset_regions * region_size_bytes, - young_generation()->available() + young_cset_regions * region_size_bytes); -#endif // Decide how much old space we should reserve for a mixed collection size_t reserve_for_mixed = 0; const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); @@ -661,17 +654,11 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); old_generation()->set_region_balance(checked_cast(old_region_surplus)); -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN setting old surplus; " SIZE_FORMAT, old_region_surplus); -#endif } else if (old_available + mutator_xfer_limit >= old_reserve) { // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there size_t old_deficit = old_reserve - old_available; old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN setting old deficit (from mutator xfer limit); " SIZE_FORMAT, old_region_deficit); -#endif } else { // We'll try to xfer from both mutator excess and from young collector reserve size_t available_reserves = old_available + young_reserve + mutator_xfer_limit; @@ -687,11 +674,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x } old_entitlement = old_available + entitled_xfer; if 
(old_entitlement < old_reserve) { -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN entitlement (" SIZE_FORMAT ") < old reserve (" SIZE_FORMAT - "): before adjusting, promo reserve: " SIZE_FORMAT ", evac rserve: " SIZE_FORMAT, - old_entitlement, old_reserve, reserve_for_promo, reserve_for_mixed); -#endif // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. size_t budget_overrun = old_reserve - old_entitlement;; if (reserve_for_promo > budget_overrun) { @@ -703,11 +685,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0; old_reserve = reserve_for_promo + reserve_for_mixed; } -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN after adjusments, old reserve (" SIZE_FORMAT - "), promo reserve: " SIZE_FORMAT ", evac rserve: " SIZE_FORMAT, - old_reserve, reserve_for_promo, reserve_for_mixed); -#endif } size_t old_deficit = old_reserve - old_available; @@ -718,24 +695,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit; young_reserve -= reserve_xfer_regions * region_size_bytes; -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Mutator to Old Collector reserve", mutator_region_xfer_limit); - log_info(gc)("KELVIN transferring " SIZE_FORMAT " regions from Collector reserve to Old Collector reserve", - reserve_xfer_regions); -#endif old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN setting old deficit; " SIZE_FORMAT, old_region_deficit); -#endif } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); - -#ifdef KELVIN_RESIZE - log_info(gc)("KELVIN setting young_evac_reserve: " SIZE_FORMAT ", old_evac_reserve: " SIZE_FORMAT ", promoted_reserve: " SIZE_FORMAT, - young_reserve, reserve_for_mixed, reserve_for_promo); -#endif - assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available, "Cannot reserve more memory than is available: " SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT " <= " SIZE_FORMAT " + " SIZE_FORMAT, young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available); @@ -744,9 +707,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x young_generation()->set_evacuation_reserve(young_reserve); old_generation()->set_evacuation_reserve(reserve_for_mixed); old_generation()->set_promoted_reserve(reserve_for_promo); -#ifdef KELVIN_DEPRECATE - set_evacuation_reserve_quantities(true); -#endif } void ShenandoahGenerationalHeap::reset_generation_reserves() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 4c8f3cb29c3..b4332699c52 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -430,9 +430,6 @@ jint ShenandoahHeap::initialize() { young_generation()->set_evacuation_reserve(young_reserve); old_generation()->set_evacuation_reserve((size_t) 0); old_generation()->set_promoted_reserve((size_t) 0); -#ifdef KELVIN_DEPRECATE - set_evacuation_reserve_quantities(true); -#endif _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); _free_set->rebuild(young_cset_regions, old_cset_regions); } @@ -578,10 +575,6 @@ 
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : _gc_state_changed(false), _gc_no_progress_count(0), _age_census(nullptr), -#ifdef KELVIN_DEPRECATE - // may be premature to deprecate - _has_evacuation_reserve_quantities(false), -#endif _cancel_requested_time(0), _young_generation(nullptr), _global_generation(nullptr), @@ -2813,10 +2806,6 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { size_t young_cset_regions, old_cset_regions; size_t first_old_region, last_old_region, old_region_count; -#ifdef KELVIN_DEBUG - log_info(gc)("KELVIN ShenHeap::rebuild_free_set(%s)", concurrent? "true": "false"); -#endif - _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); // If there are no old regions, first_old_region will be greater than last_old_region assert((first_old_region > last_old_region) || diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index eeb24ae5f84..16c939746c1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -351,22 +351,6 @@ class ShenandoahHeap : public CollectedHeap { ShenandoahAgeCensus* _age_census; // Age census used for adapting tenuring threshold in generational mode -#ifdef KELVIN_DEPRECATE - // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to - // hold the results of evacuating to young-gen and to old-gen. These quantitites, stored in _promoted_reserve, - // _old_evac_reserve, and _young_evac_reserve, are consulted prior to rebuilding the free set (ShenandoahFreeSet) - // in preparation for evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the - // collector and old_collector sets to hold if _has_evacuation_reserve_quantities is true. The other time we - // rebuild the freeset is at the end of GC, as we prepare to idle GC until the next trigger. In this case, - // _has_evacuation_reserve_quantities is false because we don't yet know how much memory will need to be evacuated - // in the next GC cycle. When _has_evacuation_reserve_quantities is false, the free set rebuild operation reserves - // for the collector and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, - // ShenandoahOldEvacReserve, and ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve - // for old_collector set when not _has_evacuation_reserve_quantities is based in part on anticipated promotion as - // determined by analysis of live data found during the previous GC pass which is one less than the current tenure age. 
- bool _has_evacuation_reserve_quantities; -#endif - public: char gc_state() const; @@ -384,10 +368,6 @@ class ShenandoahHeap : public CollectedHeap { return _heap_changed.try_unset(); } -#ifdef KELVIN_DEPRECATE - void set_evacuation_reserve_quantities(bool is_valid); -#endif - void set_concurrent_young_mark_in_progress(bool in_progress); void set_concurrent_old_mark_in_progress(bool in_progress); void set_evacuation_in_progress(bool in_progress); @@ -404,10 +384,6 @@ class ShenandoahHeap : public CollectedHeap { inline bool is_stable() const; inline bool is_idle() const; -#ifdef KELVIN_DEPRECATE - inline bool has_evacuation_reserve_quantities() const; -#endif - inline bool is_concurrent_mark_in_progress() const; inline bool is_concurrent_young_mark_in_progress() const; inline bool is_concurrent_old_mark_in_progress() const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 08feb9b01ad..ec1ff27debe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -459,12 +459,6 @@ inline bool ShenandoahHeap::is_stable() const { return _gc_state.is_clear(); } -#ifdef KELVIN_DEPRECATE -inline bool ShenandoahHeap::has_evacuation_reserve_quantities() const { - return _has_evacuation_reserve_quantities; -} -#endif - inline bool ShenandoahHeap::is_idle() const { return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS); } From 358d2f747f8aada30dea2a68eff2c2501885354c Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 10 Jun 2024 00:48:59 +0000 Subject: [PATCH 22/64] Top off old evacuation regions for mixed evacuations --- .../shenandoahGenerationalHeuristics.cpp | 40 ++- .../heuristics/shenandoahOldHeuristics.cpp | 283 ++++++++++++------ .../heuristics/shenandoahOldHeuristics.hpp | 45 ++- .../heuristics/shenandoahYoungHeuristics.cpp | 10 + .../gc/shenandoah/shenandoahGeneration.cpp | 28 ++ .../shenandoah/shenandoahGenerationalHeap.cpp | 39 ++- .../share/gc/shenandoah/shenandoahHeap.cpp | 2 + .../gc/shenandoah/shenandoahOldGeneration.cpp | 10 + .../gc/shenandoah/shenandoahOldGeneration.hpp | 4 +- .../test_shenandoahOldHeuristic.cpp | 220 +++++++++++++- 10 files changed, 571 insertions(+), 110 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index f5b0fdc0e26..f4432a9b7c4 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -170,12 +170,50 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0); if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) { // Only young collections need to prime the collection set. 
+ + bool need_to_finalize_piggyback = false; + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available; + uint included_old_regions; + if (_generation->is_young()) { - heap->old_generation()->heuristics()->prime_collection_set(collection_set); + heap->old_generation()->heuristics()->initialize_piggyback_evacs(collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + need_to_finalize_piggyback = heap->old_generation()->heuristics()->prime_collection_set(collection_set, + evacuated_old_bytes, + collected_old_bytes, + included_old_regions, + old_evacuation_reserve, + old_evacuation_budget, + unfragmented_available, + fragmented_available, + excess_fragmented_available); } // Call the subclasses to add young-gen regions into the collection set. choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); + + if (_generation->is_young()) { + // Especially when the young-gen trigger is expedited in order to finish mixed evacuations, there may not be + // enough consolidated garbage to make effective use of the young-gen evacuation reserve. If there is still + // young-gen reserve available following selection of the young-gen collection set, see if we can use + // this memory to expand the old-gen evacuation collection set. + need_to_finalize_piggyback |= + heap->old_generation()->heuristics()->top_off_collection_set(collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (need_to_finalize_piggyback) { + heap->old_generation()->heuristics()->finalize_piggyback_evacs(collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } + } } if (collection_set->has_old_regions()) { diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index cbe97b7b922..e1792094839 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -30,6 +30,7 @@ #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "logging/log.hpp" #include "utilities/quickSort.hpp" @@ -75,48 +76,33 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera { } -bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) { - if (unprocessed_old_collection_candidates() == 0) { - return false; +bool ShenandoahOldHeuristics::all_candidates_are_pinned() { +#ifdef ASSERT + if (uint(os::random()) % 100 < ShenandoahCoalesceChance) { + return true; } +#endif - _first_pinned_candidate = NOT_FOUND; - - uint included_old_regions = 0; - size_t evacuated_old_bytes = 0; - size_t collected_old_bytes = 0; - - // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer - "available" to hold the results of other evacuations.
This may cause a decrease in the remaining amount - // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount - // of live memory in that region and by the amount of unallocated memory in that region if the evacuation - // budget is constrained by availability of free memory. - const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve(); - const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste); - size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); - size_t fragmented_available; - size_t excess_fragmented_available; - - if (unfragmented_available > old_evacuation_budget) { - unfragmented_available = old_evacuation_budget; - fragmented_available = 0; - excess_fragmented_available = 0; - } else { - assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available"); - fragmented_available = _old_generation->available() - unfragmented_available; - assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up"); - if (fragmented_available + unfragmented_available > old_evacuation_budget) { - excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget; - fragmented_available -= excess_fragmented_available; + for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) { + ShenandoahHeapRegion* region = _region_data[i]._region; + if (!region->is_pinned()) { + return false; } } + return true; +} - size_t remaining_old_evacuation_budget = old_evacuation_budget; - log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", - byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), - unprocessed_old_collection_candidates()); - - size_t lost_evacuation_capacity = 0; +bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available) { + if (unprocessed_old_collection_candidates() == 0) { + return false; + } + _first_pinned_candidate = NOT_FOUND; // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates(). 
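For reviewers tracking the arithmetic above: the following minimal, standalone C++ sketch illustrates the budget accounting that add_old_regions_to_cset() performs when a candidate region enters the collection set. The waste factor and byte counts are illustrative assumptions, not values taken from this patch.

#include <cstddef>
#include <cstdio>

int main() {
  const double waste = 1.2;                  // stand-in for ShenandoahOldEvacWaste (assumed value)
  size_t reserve = 64u * 1024 * 1024;        // assumed old evacuation reserve: 64 MB
  // The usable budget is the reserve discounted by expected evacuation waste.
  size_t budget = (size_t) ((double) reserve / waste);

  // Candidate region: its live bytes must be copied out, and its free bytes
  // stop being a valid evacuation target once the region joins the cset.
  size_t live_data = 6u * 1024 * 1024;       // assumed live data in the region
  size_t lost_free = 2u * 1024 * 1024;       // assumed free bytes lost to the cset
  size_t scaled_loss = (size_t) ((double) lost_free / waste);

  if (live_data + scaled_loss <= budget) {
    budget -= live_data + scaled_loss;       // region fits: charge the budget
    printf("region added, remaining budget: %zu bytes\n", budget);
  } else {
    printf("region rejected: budget exhausted\n");
  }
  return 0;
}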
@@ -135,64 +121,156 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll size_t live_data_for_evacuation = r->get_live_data_bytes(); size_t lost_available = r->free(); - +#undef KELVIN_SHARE_RESERVES +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)("Trying to add_old_region_to_cset(" SIZE_FORMAT ") with live_data: " SIZE_FORMAT ", lost_available: " SIZE_FORMAT, + r->index(), live_data_for_evacuation, lost_available); +#endif if ((lost_available > 0) && (excess_fragmented_available > 0)) { if (lost_available < excess_fragmented_available) { excess_fragmented_available -= lost_available; - lost_evacuation_capacity -= lost_available; - lost_available = 0; + lost_available = 0; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Taking lost_available from excess_fragmented_available: " SIZE_FORMAT, excess_fragmented_available); +#endif } else { lost_available -= excess_fragmented_available; - lost_evacuation_capacity -= excess_fragmented_available; excess_fragmented_available = 0; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Reducing lost_available to " SIZE_FORMAT ", setting excess_fragmented_available to zero", + lost_available); +#endif } } + + ssize_t fragmented_delta = 0; + ssize_t unfragmented_delta = 0; + size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste); if ((lost_available > 0) && (fragmented_available > 0)) { - if (scaled_loss + live_data_for_evacuation < fragmented_available) { + if (scaled_loss < fragmented_available) { fragmented_available -= scaled_loss; + fragmented_delta = -scaled_loss; scaled_loss = 0; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Reducing fragmented_available to " SIZE_FORMAT ", scaled_loss to zero", + fragmented_available); +#endif } else { - // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother - // to decrement scaled_loss + scaled_loss -= fragmented_available; + fragmented_delta = -fragmented_available; + fragmented_available = 0; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Reducing fragmented_available to 0, scaled_loss to " SIZE_FORMAT, + scaled_loss); +#endif } } - if (scaled_loss > 0) { - // We were not able to account for the lost free memory within fragmented memory, so we need to take this - // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free. - if (live_data_for_evacuation > unfragmented_available) { - // There is not room to evacuate this region or any that come after it in within the candidates array. - break; - } else { - unfragmented_available -= live_data_for_evacuation; - } + // Allocate the replica from unfragmented memory if any exists + size_t evacuation_need = live_data_for_evacuation; + if (evacuation_need < unfragmented_available) { + unfragmented_available -= evacuation_need; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Satisfy allocation from unfragmented available: " SIZE_FORMAT, + unfragmented_available); +#endif } else { - // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either - // fragmented or unfragmented available memory. Use up the fragmented memory budget first.
- size_t evacuation_need = live_data_for_evacuation; - - if (evacuation_need > fragmented_available) { - evacuation_need -= fragmented_available; - fragmented_available = 0; - } else { - fragmented_available -= evacuation_need; - evacuation_need = 0; + if (unfragmented_available > 0) { +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Partially satisfy from unfragmented_available: " SIZE_FORMAT ", which becomes zero", + unfragmented_available); +#endif + evacuation_need -= unfragmented_available; + unfragmented_delta = -unfragmented_available; + unfragmented_available = 0; } - if (evacuation_need > unfragmented_available) { - // There is not room to evacuate this region or any that come after it in within the candidates array. - break; + // Take the remaining allocation out of fragmented available + if (fragmented_available > evacuation_need) { + fragmented_available -= evacuation_need; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Satisfied remnant " SIZE_FORMAT " from fragmented_available: " SIZE_FORMAT, + evacuation_need, fragmented_available); +#endif } else { - unfragmented_available -= evacuation_need; - // dead code: evacuation_need == 0; + // We cannot add this region into the collection set. We're done. Undo the adjustments to available. + fragmented_available -= fragmented_delta; + unfragmented_available -= unfragmented_delta; +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)(" Cannot collect, restored fragmented_available: " SIZE_FORMAT ", unfragmented_available: " SIZE_FORMAT, + fragmented_available, unfragmented_available); +#endif + break; } - } + } collection_set->add_region(r); included_old_regions++; evacuated_old_bytes += live_data_for_evacuation; collected_old_bytes += r->garbage(); consume_old_collection_candidate(); } + return true; +} +void ShenandoahOldHeuristics::initialize_piggyback_evacs(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available) { + included_old_regions = 0; + evacuated_old_bytes = 0; + collected_old_bytes = 0; + + // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer + // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount + // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount + // of live memory in that region and by the amount of unallocated memory in that region if the evacuation + // budget is constrained by availability of free memory. + old_evacuation_reserve = _old_generation->get_evacuation_reserve(); + old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste); + + // fragmented_available is the amount of memory within partially consumed old regions that may be required to + // hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available + // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need + // to evacuate into the existing partially consumed old regions. + + // If fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory + // that is available within old, but is not required to hold the results of old evacuation.
As old-gen regions + // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the + // excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is + // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this + // fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions + // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted + // to unfragmented regions. + + size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); + if (unaffiliated_available > old_evacuation_reserve) { + unfragmented_available = old_evacuation_budget; + fragmented_available = 0; + excess_fragmented_available = 0; + } else { + assert(_old_generation->available() >= old_evacuation_reserve, "Cannot reserve more than is available"); + size_t affiliated_available = _old_generation->available() - unaffiliated_available; + assert(affiliated_available + unaffiliated_available >= old_evacuation_reserve, "Budgets do not add up"); + if (affiliated_available + unaffiliated_available > old_evacuation_reserve) { + excess_fragmented_available = (affiliated_available + unaffiliated_available) - old_evacuation_reserve; + affiliated_available -= excess_fragmented_available; + } + fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste); + unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste); + } + log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", + byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), + unprocessed_old_collection_candidates()); +} + + +bool ShenandoahOldHeuristics::finalize_piggyback_evacs(ShenandoahCollectionSet* collection_set, + const size_t evacuated_old_bytes, size_t collected_old_bytes, + const uint included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + const size_t unfragmented_available) { if (_first_pinned_candidate != NOT_FOUND) { // Need to deal with pinned regions slide_pinned_regions_to_front(); @@ -222,12 +300,8 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll _old_generation->abandon_mixed_evacuations(); } else { log_info(gc)("No regions selected for mixed collection. 
" - "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT - ", Lost capacity: " PROPERFMT - ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, + "Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, PROPERFMTARGS(old_evacuation_reserve), - PROPERFMTARGS(remaining_old_evacuation_budget), - PROPERFMTARGS(lost_evacuation_capacity), _next_old_collection_candidate, _last_old_collection_candidate); } } @@ -235,20 +309,61 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll return (included_old_regions > 0); } -bool ShenandoahOldHeuristics::all_candidates_are_pinned() { -#ifdef ASSERT - if (uint(os::random()) % 100 < ShenandoahCoalesceChance) { - return true; - } -#endif +bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available) { + return add_old_regions_to_cset(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, + old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, + excess_fragmented_available); +} - for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; ++i) { - ShenandoahHeapRegion* region = _region_data[i]._region; - if (!region->is_pinned()) { +bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available) { + if (unprocessed_old_collection_candidates() == 0) { + return false; + } else { + ShenandoahYoungGeneration* young_generation = _heap->young_generation(); + size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions(); + + size_t max_young_cset = young_generation->get_evacuation_reserve(); + size_t planned_young_evac = collection_set->get_young_bytes_reserved_for_evacuation(); + size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste); + size_t available_to_loan_from_young_reserve = ((consumed_from_young_cset >= max_young_cset)? 
+ 0: max_young_cset - consumed_from_young_cset); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + if ((young_unaffiliated_regions == 0) || (available_to_loan_from_young_reserve < region_size_bytes)) { return false; + } else { + size_t regions_for_old_expansion = (available_to_loan_from_young_reserve / region_size_bytes); + log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by " SIZE_FORMAT " regions", + regions_for_old_expansion); + _heap->generation_sizer()->force_transfer_to_old(regions_for_old_expansion); + size_t budget_supplement = region_size_bytes * regions_for_old_expansion; + size_t supplement_after_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste); + old_evacuation_budget += supplement_after_waste; + unfragmented_available += supplement_after_waste; + + _old_generation->augment_evacuation_reserve(budget_supplement); + young_generation->set_evacuation_reserve(max_young_cset - budget_supplement); + +#ifdef KELVIN_SHARE_RESERVES + log_info(gc)("top_off_collection_set() transfers " SIZE_FORMAT " bytes from young_reserve to old_reserve", + budget_supplement); +#endif + return add_old_regions_to_cset(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, + old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, + excess_fragmented_available); } } - return true; } void ShenandoahOldHeuristics::slide_pinned_regions_to_front() { diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index 5fe0b877313..ff8fb0d982d 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -127,14 +127,55 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { protected: void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override; +// Return true iff we need to finalize piggyback evacs +bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available); + public: explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap); // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass. void prepare_for_old_collections(); - // Return true iff the collection set is primed with at least one old-gen region. 
- bool prime_collection_set(ShenandoahCollectionSet* set); + void initialize_piggyback_evacs(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available); + + // Return true iff we need to finalize piggyback evacs + bool prime_collection_set(ShenandoahCollectionSet* set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available); + + // Return true iff we need to finalize piggyback evacs + bool top_off_collection_set(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available); + + + // Return true iff the collection set holds at least one unpinned mixed evacuation candidate + bool finalize_piggyback_evacs(ShenandoahCollectionSet* collection_set, + const size_t evacuated_old_bytes, size_t collected_old_bytes, + const uint included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + const size_t unfragmented_available); + // How many old-collection candidates have not yet been processed? uint unprocessed_old_collection_candidates() const; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp index 6770d5d09e2..a199fa8ea18 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -67,8 +67,15 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah // Better select garbage-first regions QuickSort::sort(data, (int) size, compare_by_garbage, false); +#ifdef KELVIN_RESERVES + log_info(gc)("YoungHeuristics::add_preselected() with size: " SIZE_FORMAT, size); +#endif size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size); +#ifdef KELVIN_RESERVES + log_info(gc)("YoungHeuristics::choose_young_cset() with size: " SIZE_FORMAT ", actual_free: " SIZE_FORMAT + ", cur_young_garbage: " SIZE_FORMAT, size, actual_free, cur_young_garbage); +#endif choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage); log_cset_composition(cset); @@ -92,6 +99,9 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection size_t cur_cset = 0; size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; size_t min_garbage = (free_target > actual_free) ? 
(free_target - actual_free) : 0; +#ifdef KELVIN_RESERVES + log_info(gc)("YoungHeuristics::choose_young_collection_set with max_cset: " SIZE_FORMAT, max_cset); +#endif log_info(gc, ergo)( diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 1d97b5028f6..cef90fe7aa3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -150,15 +150,31 @@ void ShenandoahGeneration::increase_allocated(size_t bytes) { } void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { +#undef KELVIN_RESERVES +#ifdef KELVIN_RESERVES + if (is_old()) { + log_info(gc)("set_evacuation_reserve(" SIZE_FORMAT ")", new_val); + } +#endif _evacuation_reserve = new_val; } size_t ShenandoahGeneration::get_evacuation_reserve() const { +#ifdef KELVIN_RESERVES + if (is_old()) { + log_info(gc)("get_evacuation_reserve() yields: " SIZE_FORMAT, _evacuation_reserve); + } +#endif return _evacuation_reserve; } void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) { _evacuation_reserve += increment; +#ifdef KELVIN_RESERVES + if (is_old()) { + log_info(gc)("augment_evacuation_reserve(" SIZE_FORMAT ") yields: " SIZE_FORMAT, increment, _evacuation_reserve); + } +#endif } void ShenandoahGeneration::log_status(const char *msg) const { @@ -605,6 +621,11 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) { if (r->garbage() >= old_garbage_threshold) { promo_potential += r->get_live_data_bytes(); +#undef KELVIN_RESERVES +#ifdef KELVIN_RESERVES + log_info(gc)("Adding " SIZE_FORMAT " to promo potential for region " SIZE_FORMAT " of age %u vs threshold %u", + r->get_live_data_bytes(), r->index(), r->age(), tenuring_threshold); +#endif } } } @@ -630,6 +651,10 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // We rejected this promotable region from the collection set because we had no room to hold its copy. // Add this region to promo potential for next GC. 
promo_potential += region_live_data; +#ifdef KELVIN_RESERVES + log_info(gc)("Adding " SIZE_FORMAT " to promo potential for rejected region " SIZE_FORMAT " of age %u vs threshold %u", + region_live_data, region->index(), region->age(), tenuring_threshold); +#endif assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected"); } // We keep going even if one region is excluded from selection because we need to accumulate all eligible @@ -642,6 +667,9 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad); heap->old_generation()->set_promotion_potential(promo_potential); +#ifdef KELVIN_RESERVES + log_info(gc)("Establishing promo_potential as " SIZE_FORMAT, promo_potential); +#endif return old_consumed; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index a2abca1add2..51b953d2c3a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -603,11 +603,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve) size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; - // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit and young_reserve + // If ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve const size_t bound_on_old_reserve = old_available + mutator_xfer_limit + young_reserve; - const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? - bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), - bound_on_old_reserve); + const size_t max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? 
+ bound_on_old_reserve: + MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + bound_on_old_reserve)); if (young_reserve > young_available) { young_reserve = young_available; @@ -634,6 +635,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // Decide how much space we should reserve for promotions from young size_t reserve_for_promo = 0; const size_t promo_load = old_generation()->get_promotion_potential(); +#undef KELVIN_RESERVES +#ifdef KELVIN_RESERVES + log_info(gc)("promo_load fetched from old-gen is: " SIZE_FORMAT ", times PromoEvacWaste: " SIZE_FORMAT + ", available_for_promotions: " SIZE_FORMAT, + promo_load, (size_t) (promo_load * ShenandoahPromoEvacWaste), max_old_reserve - reserve_for_mixed); +#endif const bool doing_promotions = promo_load > 0; if (doing_promotions) { // We're promoting and have a bound on the maximum amount that can be promoted @@ -653,6 +660,13 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes; // align the mutator_xfer_limit on region size mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes; +#ifdef KELVIN_RESERVES + log_info(gc)("compute_old_generation_balance(), old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, + old_reserve, reserve_for_promo, reserve_for_mixed); + log_info(gc)(" old_available: " SIZE_FORMAT, old_available); + log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); + log_info(gc)("mutator_xfer_limit: " SIZE_FORMAT, mutator_xfer_limit); +#endif if (old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young const size_t old_surplus = old_available - old_reserve; @@ -702,6 +716,14 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x young_reserve -= reserve_xfer_regions * region_size_bytes; old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); + +#ifdef KELVIN_RESERVES + log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, + old_reserve, reserve_for_promo, reserve_for_mixed); + log_info(gc)(" old_available: " SIZE_FORMAT, old_available); + log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); + log_info(gc)(" balance: " SSIZE_FORMAT, 0 - checked_cast(old_region_deficit)); +#endif } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); @@ -716,6 +738,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x } void ShenandoahGenerationalHeap::reset_generation_reserves() { +#undef KELVIN_RESERVES +#ifdef KELVIN_RESERVES + log_info(gc)("reset_generation_reserves() clears everything"); +#endif young_generation()->set_evacuation_reserve(0); old_generation()->set_evacuation_reserve(0); old_generation()->set_promoted_reserve(0); @@ -1023,10 +1049,6 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { result.print_on("Degenerated GC", &ls); } - // In case degeneration interrupted concurrent evacuation or update references, we need to clean up - // transient state. Otherwise, these actions have no effect. 
- reset_generation_reserves(); - if (!old_generation()->is_parseable()) { ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill); coalesce_and_fill_old_regions(false); @@ -1050,7 +1072,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { ShenandoahHeapLocker locker(lock()); result = balance_generations(); - reset_generation_reserves(); } LogTarget(Info, gc, ergo) lt; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 234b167e5bc..e420b426a8c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -2485,6 +2485,8 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { size_t allocation_runway = young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); ShenandoahGenerationalHeap::heap()->compute_old_generation_balance(allocation_runway, old_cset_regions, young_cset_regions); + // TODO: this comment seems out of place, and I am not confident it is entirely correct: + + // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 3d2d2b0e7de..a2a704598eb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -200,16 +200,26 @@ ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_cap void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) { shenandoah_assert_heaplocked_or_safepoint(); +#undef KELVIN_RESERVES +#ifdef KELVIN_RESERVES + log_info(gc)("set_promoted_reserve(" SIZE_FORMAT ")", new_val); +#endif _promoted_reserve = new_val; } size_t ShenandoahOldGeneration::get_promoted_reserve() const { +#ifdef KELVIN_RESERVES + log_info(gc)("get_promoted_reserve() yields: " SIZE_FORMAT, _promoted_reserve); +#endif return _promoted_reserve; } void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) { shenandoah_assert_heaplocked_or_safepoint(); _promoted_reserve += increment; +#ifdef KELVIN_RESERVES + log_info(gc)("augment_promoted_reserve(" SIZE_FORMAT ") yields " SIZE_FORMAT, increment, _promoted_reserve); +#endif } void ShenandoahOldGeneration::reset_promoted_expended() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index f20e470d9d3..cbd24062b4d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -63,8 +63,8 @@ class ShenandoahOldGeneration : public ShenandoahGeneration { // remaining in a PLAB when it is retired. size_t _promoted_expended; - // Represents the quantity of live bytes we expect to promote in place during the next - // evacuation cycle. This value is used by the young heuristic to trigger mixed collections. + // Represents the quantity of live bytes we expect to promote during the next GC cycle, either by + // evacuation or by promote-in-place. This value is used by the young heuristic to trigger mixed collections.
// It is also used when computing the optimum size for the old generation. size_t _promotion_potential; diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp index 7bcbbf909d9..b58a3768aad 100644 --- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp @@ -198,8 +198,26 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) { size_t garbage = make_garbage_above_collection_threshold(10); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(10UL)); EXPECT_EQ(garbage, _collection_set->get_old_garbage()); EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); @@ -211,8 +229,25 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) { size_t g1 = make_garbage_above_collection_threshold(100); size_t g2 = make_garbage_above_collection_threshold(101); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(100UL, 101UL)); EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage()); EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates()); @@ -223,8 +258,25 @@ TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) { size_t garbage = create_too_much_garbage_for_one_mixed_evacuation(); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + 
fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_LT(_collection_set->get_old_garbage(), garbage); EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL); } @@ -245,7 +297,24 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) { ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates()); // Here the region is still pinned, so it cannot be added to the collection set. - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } // The two unpinned regions should be added to the collection set and the pinned // region should be retained at the front of the list of candidates as it would be @@ -258,8 +327,22 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) { // the now unpinned region should be added to the collection set. 
make_unpinned(1); _collection_set->clear(); - _heuristics->prime_collection_set(_collection_set); + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_EQ(_collection_set->get_old_garbage(), g2); EXPECT_TRUE(collection_set_is(1UL)); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); @@ -275,14 +358,47 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) { make_pinned(0); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(1UL, 2UL)); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL); make_unpinned(0); _collection_set->clear(); - _heuristics->prime_collection_set(_collection_set); + + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(0UL)); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); @@ -298,16 +414,47 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) { make_pinned(2); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + 
_heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(0UL, 1UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL); make_unpinned(2); _collection_set->clear(); - _heuristics->prime_collection_set(_collection_set); + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(2UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g3); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); @@ -324,8 +471,25 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) { make_pinned(0); make_pinned(2); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(1UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g2); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 2UL); @@ -333,8 +497,22 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) { make_unpinned(0); make_unpinned(2); _collection_set->clear(); - _heuristics->prime_collection_set(_collection_set); + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + 
fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } EXPECT_TRUE(collection_set_is(0UL, 2UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3); EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL); @@ -351,8 +529,26 @@ TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) { make_pinned(1); make_pinned(2); _heuristics->prepare_for_old_collections(); - _heuristics->prime_collection_set(_collection_set); + + size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, + fragmented_available,excess_fragmented_available; + uint included_old_regions; + _heuristics->initialize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + if (_heuristics->prime_collection_set(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available)) { + _heuristics->finalize_piggyback_evacs(_collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); + } // In the case when all candidates are pinned, we want to abandon // this set of mixed collection candidates so that another old collection // can run. This is meant to defend against "bad" JNI code that permanently From 8fbb0f5e6936b8cbae42bcb11400ff54810dd679 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 10 Jun 2024 01:16:13 +0000 Subject: [PATCH 23/64] Change default percentage of old-gen evacuation --- src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 52d72bdc87d..329269d0605 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,10 +409,10 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 12, EXPERIMENTAL, \ + product(uintx, ShenandoahOldEvacRatioPercent, 16, EXPERIMENTAL, \ "The maximum proportion of evacuation to old-gen memory, " \ - "expressed as a percentage. The default value 12 denotes that no" \ - "more than 12% of the collection set evacuation workload may be " \ + "expressed as a percentage. The default value 16 denotes that no" \ + "more than 16% of the collection set evacuation workload may be " \ "copied into old-gen regions. This limits both the promotion of "\ "aged young regions and the compaction of existing old regions. 
" \ "Given a total evacuation budget of X, the amount of memory " \ From f90ea26e17ddc365711f0f8a82b5913fc02feb0f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 10 Jun 2024 13:57:46 +0000 Subject: [PATCH 24/64] Change default old-gen ratio and comment --- .../gc/shenandoah/shenandoah_globals.hpp | 47 ++++++++++++------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 329269d0605..de7aab07611 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,24 +409,35 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 16, EXPERIMENTAL, \ - "The maximum proportion of evacuation to old-gen memory, " \ - "expressed as a percentage. The default value 16 denotes that no" \ - "more than 16% of the collection set evacuation workload may be " \ - "copied into old-gen regions. This limits both the promotion of "\ - "aged young regions and the compaction of existing old regions. " \ - "Given a total evacuation budget of X, the amount of memory " \ - "dedicated to hold objects evacuated to young generation is 89.3%"\ - "(100/112). A larger value allows for quicker promotion and " \ - "a smaller number of mixed evacuations to process " \ - "the entire list of old-gen collection candidates at the cost " \ - "of an increased disruption of the normal cadence of young-gen " \ - "collections. A value of 100 allows a mixed evacuation to " \ - "focus entirely on old-gen memory, allowing no young-gen " \ - "regions to be collected, likely resulting in subsequent " \ - "allocation failures because the young-gen allocation pool is " \ - "not replenished. A value of 0 prevents mixed evacuations from " \ - "defragmenting old-gen memory, likely resulting in " \ + product(uintx, ShenandoahOldEvacRatioPercent, 35, EXPERIMENTAL, \ + "The maximum percentage by which the young evacuation reserve " \ + "can be adjusted in order to make room for old-generation " \ + "evacuations. If there is an abundance of free memory, this " \ + "will result in a larger total evacuation effort. If free " \ + "memory is in short supply, this may result in shrinking the " \ + "amount of young memory that can be evacuated by this amount. " \ + "The default value 35 denotes that no more than 35% of the " \ + "collection set evacuation workload may initially dedicated " \ + "to holding objects evacuated into old-gen memory. This limits " \ + "both the promotion of aged young regions and the compaction of " \ + "existing old regions. It does not restrict the collector from " \ + "copying more objects into old-generation memory if the " \ + "young-generation collection set does not consume all of the " \ + "memory originally set aside for young-generation evacuation. " \ + "It also does not restrict the amount of memory that can be " \ + "promoted in place, by simply changing the affiliation of the " \ + "region from young to old. Given a total evacuation budget of " \ + "X, the amount of memory initially dedicated to hold objects " \ + "evacuated to young generation is 65%. A larger value allows " \ + "for quicker promotion and a smaller number of mixed evacuations "\ + "to process the entire list of old-gen collection candidates at " \ + "the cost of an increased disruption of the normal cadence of " \ + "young-gen collections. 
A value of 100 allows a mixed " \ "evacuation to focus entirely on old-gen memory, allowing no " \ "young-gen regions to be collected, likely resulting in " \ "subsequent allocation failures because the young-gen allocation pool "\ "is not replenished. A value of 0 prevents mixed evacuations " \ "from defragmenting old-gen memory, likely resulting in " \ "subsequent promotion failures and triggering of stop-the-world " \ "full GC events. Failure to defragment old-gen memory can also " \ "result in unconstrained expansion of old-gen, and shrinkage of " \ From 1cd110576881adffb670d7c881923f31fa856ff6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 10 Jun 2024 20:45:38 +0000 Subject: [PATCH 25/64] Remove over-zealous assert and replace with comment --- .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 51b953d2c3a..bb20434ae47 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -707,9 +707,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x } } + // Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch + // condition above: "(old_available + mutator_xfer_limit >= old_reserve)" + // Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available + size_t old_deficit = old_reserve - old_available; old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; - assert(old_region_deficit >= mutator_region_xfer_limit, "Handle this in different conditional branch"); // Shrink young_reserve to account for loan to old reserve const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit; From 786b27f0d681aec7d6dcc9bcf1fc10ec44c2b563 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 11 Jun 2024 14:24:42 +0000 Subject: [PATCH 26/64] Update default value and comment --- .../gc/shenandoah/shenandoah_globals.hpp | 55 ++++++++++--------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index de7aab07611..7f78405d905 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,34 +409,37 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 35, EXPERIMENTAL, \ + product(uintx, ShenandoahOldEvacRatioPercent, 50, EXPERIMENTAL, \ "The maximum percentage by which the young evacuation reserve " \ "can be adjusted in order to make room for old-generation " \ - "evacuations. If there is an abundance of free memory, this " \ - "will result in a larger total evacuation effort. If free " \ - "memory is in short supply, this may result in shrinking the " \ - "amount of young memory that can be evacuated by this amount. " \ - "The default value 35 denotes that no more than 35% of the " \ - "collection set evacuation workload may initially be dedicated " \ - "to holding objects evacuated into old-gen memory. This limits " \ - "both the promotion of aged young regions and the compaction of " \ - "existing old regions. 
It does not restrict the collector from " \ "copying more objects into old-generation memory if the " \ "young-generation collection set does not consume all of the " \ "memory originally set aside for young-generation evacuation. " \ "It also does not restrict the amount of memory that can be " \ "promoted in place, by simply changing the affiliation of the " \ "region from young to old. Given a total evacuation budget of " \ "X, the amount of memory initially dedicated to hold objects " \ "evacuated to young generation is 65%. A larger value allows " \ "for quicker promotion and a smaller number of mixed evacuations "\ "to process the entire list of old-gen collection candidates at " \ "the cost of an increased disruption of the normal cadence of " \ "young-gen collections. A value of 100 allows a mixed " \ "evacuation to focus entirely on old-gen memory, allowing no " \ "young-gen regions to be collected, likely resulting in " \ "subsequent allocation failures because the young-gen allocation pool "\ "is not replenished. A value of 0 prevents mixed evacuations " \ + "evacuations. With the default setting, given a total " \ + "evacuation budget of X, the amount of memory initially " \ + "dedicated to holding objects evacuated to old generation is " \ + "50%. This limits both the promotion of aged young regions and " \ + "the compaction of existing old regions. It does not restrict " \ + "the collector from copying more objects into old-generation " \ + "memory if the young-generation collection set does not consume " \ + "all of the memory originally set aside for young-generation " \ + "evacuation. It also does not restrict the amount of memory " \ + "that can be promoted in place, by simply changing the " \ + "affiliation of the region from young to old. If there is an " \ + "abundance of free memory, this will result in a larger total " \ + "evacuation effort, doubling the amount of memory normally " \ + "evacuated during young evacuations (so that old evacuations " \ + "are 50% of the total evacuation, and young evacuates its " \ + "normal amount). If free memory is in short supply, this may " \ + "result in paring back both young-gen and old-gen evacuations, " \ + "such that the fraction of old is 50% (in the default " \ + "configuration) of the total available evacuation reserve. " \ + "Setting a larger value allows for quicker promotion and a " \ + "smaller number of mixed evacuations to process the entire list " \ + "of old-gen collection candidates at the cost of increased " \ + "disruption of the normal young-gen collection cadence. A " \ + "value of 100 allows a mixed " \ + "evacuation to focus entirely on old-gen memory, allowing no " \ + "young-gen regions to be collected. This would likely result in " \ + "subsequent allocation failures because the young-gen allocation pool "\ + "is not replenished. A value of 0 prevents mixed evacuations " \ "from defragmenting old-gen memory, likely resulting in " \ "subsequent promotion failures and triggering of stop-the-world " \ "full GC events. 
Failure to defragment old-gen memory can also " \ From 10d992dd3947077dc65b1df415027f9db0f8cf97 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 12 Jun 2024 13:44:12 +0000 Subject: [PATCH 27/64] Further adjustments to default Old/Young Ratio --- .../shenandoah/shenandoahGenerationalHeap.cpp | 38 +++++++++++++---- .../gc/shenandoah/shenandoah_globals.hpp | 15 ++++---- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index bb20434ae47..84f80f85e63 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -609,6 +609,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); +#define KELVIN_RESERVES +#ifdef KELVIN_RESERVES + log_info(gc)("max_old_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT + ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, + max_old_reserve, bound_on_old_reserve, old_available, young_reserve, mutator_xfer_limit); +#endif if (young_reserve > young_available) { young_reserve = young_available; @@ -626,16 +632,23 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x "Unaffiliated available must be less than total available"); const size_t old_fragmented_available = old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; + + // max_old_reserve is very conservative. Assumes we evacuate the entirety of mixed-evac candidates into + // unfragmented memory. reserve_for_mixed = max_evac_need + old_fragmented_available; +#ifdef KELVIN_RESERVES + log_info(gc)("max_evac_need: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", old_fragmented_available: " SIZE_FORMAT, + max_evac_need, old_available, old_fragmented_available); +#endif if (reserve_for_mixed > max_old_reserve) { reserve_for_mixed = max_old_reserve; } } - // Decide how much space we should reserve for promotions from young + // Decide how much space we should reserve for promotions from young. We give priority to mixed evacuations + // over promotions. 
size_t reserve_for_promo = 0; const size_t promo_load = old_generation()->get_promotion_potential(); -#undef KELVIN_RESERVES #ifdef KELVIN_RESERVES log_info(gc)("promo_load fetched from old-gen is: " SIZE_FORMAT ", times PromoEvacWaste: " SIZE_FORMAT ", available_for_promotions: " SIZE_FORMAT, @@ -674,11 +687,18 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); old_generation()->set_region_balance(checked_cast(old_region_surplus)); +#ifdef KELVIN_RESERVES + log_info(gc)("old_avail > old_reserve, relinquishing " SIZE_FORMAT " regions from old to young", old_region_surplus); +#endif } else if (old_available + mutator_xfer_limit >= old_reserve) { // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there size_t old_deficit = old_reserve - old_available; old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); +#ifdef KELVIN_RESERVES + log_info(gc)("old_avail + mutator_xfer_limit > old_reserve, consuming " SIZE_FORMAT " regions from mutator_xfer_limit", + old_region_deficit); +#endif } else { // We'll try to xfer from both mutator excess and from young collector reserve size_t available_reserves = old_available + young_reserve + mutator_xfer_limit; @@ -693,6 +713,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x entitled_xfer = unaffiliated_young_memory; } old_entitlement = old_available + entitled_xfer; +#ifdef KELVIN_RESERVES + log_info(gc)("working with old_entitlement: " SIZE_FORMAT " based on unaffiliated_young_memory: " SIZE_FORMAT, + old_entitlement, unaffiliated_young_memory); +#endif if (old_entitlement < old_reserve) { // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. 
size_t budget_overrun = old_reserve - old_entitlement; @@ -721,11 +745,11 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); #ifdef KELVIN_RESERVES - log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, - old_reserve, reserve_for_promo, reserve_for_mixed); - log_info(gc)(" old_available: " SIZE_FORMAT, old_available); - log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); - log_info(gc)(" balance: " SSIZE_FORMAT, 0 - checked_cast(old_region_deficit)); + log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, + old_reserve, reserve_for_promo, reserve_for_mixed); + log_info(gc)(" old_available: " SIZE_FORMAT, old_available); + log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); + log_info(gc)(" balance: " SSIZE_FORMAT, 0 - checked_cast(old_region_deficit)); #endif } diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 7f78405d905..68c436be7d3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,13 +409,13 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 50, EXPERIMENTAL, \ + product(uintx, ShenandoahOldEvacRatioPercent, 67, EXPERIMENTAL, \ "The maximum percentage by which the young evacuation reserve " \ "can be adjusted in order to make room for old-generation " \ "evacuations. With the default setting, given a total " \ "evacuation budget of X, the amount of memory initially " \ "dedicated to holding objects evacuated to old generation is " \ - "50%. This limits both the promotion of aged young regions and " \ + "67%. This limits both the promotion of aged young regions and " \ "the compaction of existing old regions. It does not restrict " \ "the collector from copying more objects into old-generation " \ "memory if the young-generation collection set does not consume " \ @@ -424,13 +424,14 @@ "that can be promoted in place, by simply changing the " \ "affiliation of the region from young to old. If there is an " \ "abundance of free memory, this will result in a larger total " \ - "evacuation effort, doubling the amount of memory normally " \ - "evacuated during young evacuations (so that old evacuations " \ - "are 50% of the total evacuation, and young evacuates its " \ + "evacuation effort, roughly tripling the amount of memory " \ + "normally evacuated during young evacuations (so that old " \ + "evacuates two times as much as young, and young evacuates its " \ "normal amount). If free memory is in short supply, this may " \ "result in paring back both young-gen and old-gen evacuations, " \ - "such that the fraction of old is 50% (in the default " \ + "such that the fraction of old is 67% (in the default " \ - "configuration) of the total available evacuation reserve. " \ + "configuration) of the total available evacuation reserve and " \ + "young evacuates one third of its normal amount. 
" \ "Setting a larger value allows for quicker promotion and a " \ "smaller number of mixed evacuations to process the entire list " \ "of old-gen collection candidates at the cost of increased " \ From f3c6e09d5848309046e66086e6b2ae1ff5dd5592 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 17 Jun 2024 01:19:00 +0000 Subject: [PATCH 28/64] Performance improvements --- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 7 +++- .../shenandoah/shenandoahGenerationalHeap.cpp | 39 +++++++++++++------ .../gc/shenandoah/shenandoah_globals.hpp | 12 +++--- 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 820e7f8cee7..9ae33fa4441 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -238,8 +238,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // on its next iteration and run a degenerated young cycle. // vmop_entry_final_updaterefs rebuilds free set in preparation for next GC. - vmop_entry_final_roots(); _abbreviated = true; + vmop_entry_final_roots(); } // We defer generation resizing actions until after cset regions have been recycled. We do this even following an @@ -351,7 +351,10 @@ void ShenandoahConcurrentGC::entry_final_roots() { ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); EventMark em("%s", msg); op_final_roots(); - ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); + if (_abbreviated) { + ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); + } + // else, this is the end of old marking } void ShenandoahConcurrentGC::entry_reset() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 84f80f85e63..5ff5b620a1a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -609,7 +609,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#define KELVIN_RESERVES +#undef KELVIN_RESERVES #ifdef KELVIN_RESERVES log_info(gc)("max_old_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, @@ -622,6 +622,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // Decide how much old space we should reserve for a mixed collection size_t reserve_for_mixed = 0; + size_t reserve_for_promo = 0; const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); const bool doing_mixed = (mixed_candidate_live_memory > 0); if (doing_mixed) { @@ -635,19 +636,26 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // max_old_reserve is very conservative. Assumes we evacuate the entirety of mixed-evac candidates into // unfragmented memory. 
- reserve_for_mixed = max_evac_need + old_fragmented_available; + reserve_for_promo = old_fragmented_available; + reserve_for_mixed = max_evac_need; #ifdef KELVIN_RESERVES log_info(gc)("max_evac_need: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", old_fragmented_available: " SIZE_FORMAT, max_evac_need, old_available, old_fragmented_available); #endif - if (reserve_for_mixed > max_old_reserve) { - reserve_for_mixed = max_old_reserve; - } + if (reserve_for_mixed + reserve_for_promo > max_old_reserve) { + size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve; + if (reserve_for_promo > excess_reserves) { + reserve_for_promo -= excess_reserves; + } else { + excess_reserves -= reserve_for_promo; + reserve_for_promo = 0; + reserve_for_mixed -= excess_reserves; + } + } } - // Decide how much space we should reserve for promotions from young. We give priority to mixed evacuations + // Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations // over promotions. - size_t reserve_for_promo = 0; const size_t promo_load = old_generation()->get_promotion_potential(); #ifdef KELVIN_RESERVES log_info(gc)("promo_load fetched from old-gen is: " SIZE_FORMAT ", times PromoEvacWaste: " SIZE_FORMAT ", available_for_promotions: " SIZE_FORMAT, #endif const bool doing_promotions = promo_load > 0; if (doing_promotions) { - // We're promoting and have a bound on the maximum amount that can be promoted - assert(max_old_reserve >= reserve_for_mixed, "Sanity"); - const size_t available_for_promotions = max_old_reserve - reserve_for_mixed; - reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions); + // We've already set aside all of the fragmented available memory within old-gen to represent old objects + // to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted + // from regions that have reached tenure age. We find that several workloads (e.g. Extremem-phased and + // specjbb2015 perform better when we reserve additional promotion memory to hold aged objects that might + // be scattered throughout the young-gen collection set. In the ideal, we will always use the fragmented + // old-gen memory to hold these objects, and will use unfragmented old-gen memory to represent the old-gen + // evacuation workload and the promo_load. 
+ + // We're promoting and have an estimate of memory to be promoted from aged regions + assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity"); + const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo); + size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste); + reserve_for_promo += MIN2(promo_need, available_for_additional_promotions); } // This is the total old we want to reserve (initialized to the ideal reserve) diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 68c436be7d3..702d2a5d3b1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -409,13 +409,13 @@ "reserve/waste is incorrect, at the risk that application " \ "runs out of memory too early.") \ \ - product(uintx, ShenandoahOldEvacRatioPercent, 67, EXPERIMENTAL, \ + product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \ "The maximum percentage by which the young evacuation reserve " \ "can be adjusted in order to make room for old-generation " \ "evacuations. With the default setting, given a total " \ "evacuation budget of X, the amount of memory initially " \ "dedicated to holding objects evacuated to old generation is " \ - "67%. This limits both the promotion of aged young regions and " \ + "75%. This limits both the promotion of aged young regions and " \ "the compaction of existing old regions. It does not restrict " \ "the collector from copying more objects into old-generation " \ "memory if the young-generation collection set does not consume " \ @@ -424,14 +424,14 @@ "that can be promoted in place, by simply changing the " \ "affiliation of the region from young to old. If there is an " \ "abundance of free memory, this will result in a larger total " \ - "evacuation effort, roughly tripling the amount of memory " \ + "evacuation effort, roughly quadrupling the amount of memory " \ "normally evacuated during young evacuations (so that old " \ - "evacuates two times as much as young, and young evacuates its " \ + "evacuates three times as much as young, and young evacuates its "\ "normal amount). If free memory is in short supply, this may " \ "result in paring back both young-gen and old-gen evacuations, " \ - "such that the fraction of old is 67% (in the default " \ + "such that the fraction of old is 75% (in the default " \ "configuration) of the total available evacuation reserve and " \ - "young evacuates one third of its normal amount. " \ + "young evacuates one fourth of its normal amount. " \ "Setting a larger value allows for quicker promotion and a " \ "smaller number of mixed evacuations to process the entire list " \ "of old-gen collection candidates at the cost of increased " \ From 85a0d90f605ee1fafe496e2c06e046714a9409a3 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 17 Jun 2024 14:31:39 +0000 Subject: [PATCH 29/64] Always allow promotions into fragmented old-gen By accident, the previous implementation only allowed this if we happened to be doing a mixed evacuation. 
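For reference, the reserve-splitting policy that this change completes (together with the two preceding patches) can be sketched as a small standalone model. This is an editor's illustration under stated assumptions, not the heap code itself: the flat parameter list and the clamping order are simplifications of compute_old_generation_balance().

    #include <cstddef>
    // Sketch: split the old-gen reserve between promotions and mixed evacuations.
    // Fragmented old memory (leftover space inside affiliated old regions) is
    // always usable for promotions; unfragmented memory is preferred for mixed
    // evacuations, and mixed evacuations keep priority when the two requests
    // together exceed the cap.
    static size_t old_reserve_split(size_t old_fragmented_available,
                                    size_t mixed_candidate_live,  // live bytes in mixed-evac candidates
                                    double old_evac_waste,        // e.g. ShenandoahOldEvacWaste
                                    size_t max_old_reserve,
                                    size_t& reserve_for_promo,
                                    size_t& reserve_for_mixed) {
      reserve_for_promo = old_fragmented_available;
      reserve_for_mixed = (size_t) (mixed_candidate_live * old_evac_waste);
      if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
        size_t excess = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
        if (reserve_for_promo > excess) {
          reserve_for_promo -= excess;      // pare promotions first
        } else {
          excess -= reserve_for_promo;
          reserve_for_promo = 0;
          reserve_for_mixed -= excess;      // then pare mixed evacuations
        }
      }
      return reserve_for_promo + reserve_for_mixed;
    }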
--- .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 5ff5b620a1a..9f1af017a0e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -622,7 +622,9 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // Decide how much old space we should reserve for a mixed collection size_t reserve_for_mixed = 0; - size_t reserve_for_promo = 0; + const size_t old_fragmented_available = + old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; + size_t reserve_for_promo = old_fragmented_available; const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); const bool doing_mixed = (mixed_candidate_live_memory > 0); if (doing_mixed) { @@ -631,12 +633,9 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste); assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes, "Unaffiliated available must be less than total available"); - const size_t old_fragmented_available = - old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; // max_old_reserve is very conservative. Assumes we evacuate the entirety of mixed-evac candidates into // unfragmented memory. - reserve_for_promo = old_fragmented_available; reserve_for_mixed = max_evac_need; #ifdef KELVIN_RESERVES log_info(gc)("max_evac_need: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", old_fragmented_available: " SIZE_FORMAT, From f87a5496f293b1f731a3a5a0eaec8e1314b30618 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 18 Jun 2024 01:20:22 +0000 Subject: [PATCH 30/64] Set promo_reserve to max of existing fragmented old-gen and promo need Promo need is amount of live data in regions that have reached tenure age. --- .../gc/shenandoah/shenandoahGenerationalHeap.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 9f1af017a0e..ef5f4349e2f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -665,17 +665,19 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x if (doing_promotions) { // We've already set aside all of the fragmented available memory within old-gen to represent old objects // to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted - // from regions that have reached tenure age. We find that several workloads (e.g. Extremem-phased and - // specjbb2015 perform better when we reserve additional promotion memory to hold aged objects that might - // be scattered throughout the young-gen collection set. In the ideal, we will always use the fragmented - // old-gen memory to hold these objects, and will use unfragmented old-gen memory to represent the old-gen - // evacuation workload and the promo_load. + // from regions that have reached tenure age. 
In the ideal, we will always use fragmented old-gen memory + // to hold individually promoted objects and will use unfragmented old-gen memory to represent the old-gen + // evacuation workload. // We're promoting and have an estimate of memory to be promoted from aged regions assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity"); const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo); size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste); - reserve_for_promo += MIN2(promo_need, available_for_additional_promotions); + if (promo_need > reserve_for_promo) { + reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions); + } + // We've already reserved all the memory required for the promo_load, and possibly more. The excess + // can be consumed by objects promoted from regions that have not yet reached tenure age. } // This is the total old we want to reserve (initialized to the ideal reserve) From eb0ec643ccbc941d0ca7d32236ac187c98b6075a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 18 Jun 2024 21:00:46 +0000 Subject: [PATCH 31/64] Fail faster with OOME if alloc fails following full gc Otherwise, we can find ourselves in an infinite retry loop, where full gc continues to "make progress", but the progress is not sufficient to allow the allocation request to be successful. --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index e420b426a8c..c5c35397571 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1028,8 +1028,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { // strategy is to try again, as long as GC makes progress (or until at least // one full GC has completed). 
size_t original_count = shenandoah_policy()->full_gc_count(); - while (result == nullptr - && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) { + while ((result == nullptr) && (original_count == shenandoah_policy()->full_gc_count())) { control_thread()->handle_alloc_failure(req, true); result = allocate_memory_under_lock(req, in_new_region); } From 1c593941ea6a6f0efb31c5dcf1a267ea251e3ae4 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 19 Jun 2024 01:48:10 +0000 Subject: [PATCH 32/64] Fix over-zealous assertion and broken code surrounding it --- .../gc/shenandoah/shenandoahGenerationalHeap.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index ef5f4349e2f..570bbdb3e14 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -604,12 +604,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; // If ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve - const size_t bound_on_old_reserve = old_available + mutator_xfer_limit + young_reserve; + const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacRatioPercent) / 100; const size_t max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#undef KELVIN_RESERVES +#define KELVIN_RESERVES #ifdef KELVIN_RESERVES log_info(gc)("max_old_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, @@ -682,7 +682,12 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // This is the total old we want to reserve (initialized to the ideal reserve) size_t old_reserve = reserve_for_mixed + reserve_for_promo; - assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); + if (old_reserve > max_old_reserve) { + // This may happen if fragmented old is larger than max_old_reserve + size_t old_overrun = old_reserve - max_old_reserve; + assert (old_overrun <= young_reserve, "sanity"); + young_reserve -= old_overrun; + } // We now check if the old generation is running a surplus or a deficit. 
size_t old_region_deficit = 0; From 4dcbdcebb154a2a535d465bcb95f6ebf85541cb7 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 19 Jun 2024 14:15:15 +0000 Subject: [PATCH 33/64] A few more fixes to computation of old-gen sizes at end of gc --- .../shenandoah/shenandoahGenerationalHeap.cpp | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 570bbdb3e14..677bdc7b300 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -605,15 +605,15 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // If ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacRatioPercent) / 100; - const size_t max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? - bound_on_old_reserve: - MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), - bound_on_old_reserve)); + size_t proposed_max_old = ((ShenandoahOldEvacRatioPercent == 100)? + bound_on_old_reserve: + MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + bound_on_old_reserve)); #define KELVIN_RESERVES #ifdef KELVIN_RESERVES - log_info(gc)("max_old_reserve: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT + log_info(gc)("proposed_max_old: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, - max_old_reserve, bound_on_old_reserve, old_available, young_reserve, mutator_xfer_limit); + proposed_max_old, bound_on_old_reserve, old_available, young_reserve, mutator_xfer_limit); #endif if (young_reserve > young_available) { @@ -624,9 +624,22 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x size_t reserve_for_mixed = 0; const size_t old_fragmented_available = old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; + + if (old_fragmented_available > proposed_max_old) { + // After we've promoted regions in place, there may be an abundance of old-fragmented available memory, + // even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back + // to young. Instead we make the best of the situation by using this fragmented memory for both promotions + // and evacuations. + proposed_max_old = old_fragmented_available; + } size_t reserve_for_promo = old_fragmented_available; + const size_t max_old_reserve = proposed_max_old; const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); const bool doing_mixed = (mixed_candidate_live_memory > 0); +#ifdef KELVIN_RESERVES + log_info(gc)("adjusted_max_old: " SIZE_FORMAT ", mixed_candidate_live_memory: " SIZE_FORMAT, + max_old_reserve, mixed_candidate_live_memory); +#endif if (doing_mixed) { // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation. 
@@ -634,14 +647,15 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes, "Unaffiliated available must be less than total available"); - // max_old_reserve is very conservative. Assumes we evacuate the entirety of mixed-evac candidates into - // unfragmented memory. + // We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless + // we already have too much fragmented available memory in old. reserve_for_mixed = max_evac_need; #ifdef KELVIN_RESERVES log_info(gc)("max_evac_need: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", old_fragmented_available: " SIZE_FORMAT, max_evac_need, old_available, old_fragmented_available); #endif if (reserve_for_mixed + reserve_for_promo > max_old_reserve) { + // In this case, we'll allow old-evac to target some of the fragmented old memory. size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve; if (reserve_for_promo > excess_reserves) { reserve_for_promo -= excess_reserves; @@ -682,12 +696,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // This is the total old we want to reserve (initialized to the ideal reserve) size_t old_reserve = reserve_for_mixed + reserve_for_promo; - if (old_reserve > max_old_reserve) { - // This may happen if fragmented old is larger than max_old_reserve - size_t old_overrun = old_reserve - max_old_reserve; - assert (old_overrun <= young_reserve, "sanity"); - young_reserve -= old_overrun; - } // We now check if the old generation is running a surplus or a deficit. size_t old_region_deficit = 0; From c407dbd47e22f0376a7f6c3f56c8a80019d4b9c0 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 21 Jun 2024 15:08:50 +0000 Subject: [PATCH 34/64] Adjust collector reserves downward when resources are insufficient Previously, we reserved less than the request without updating the request size. This resulted in assertion failures at the start of next evacuation, because reserved did not equal requested. --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 50 +++++++++++++++---- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 3 ++ 2 files changed, 42 insertions(+), 11 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 463b2a5fee2..1fc79458090 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1179,6 +1179,34 @@ void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regio log_status(); } +// Reduce old reserve (when there are insufficient resources to satisfy the original request). 
+void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) { + ShenandoahOldGeneration* const old_generation = _heap->old_generation(); + size_t requested_promoted_reserve = old_generation->get_promoted_reserve(); + size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve(); + assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction"); + assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity"); + size_t delta = requested_old_reserve - adjusted_old_reserve; + + if (requested_promoted_reserve >= delta) { + requested_promoted_reserve -= delta; + old_generation->set_promoted_reserve(requested_promoted_reserve); + } else { + delta -= requested_promoted_reserve; + requested_promoted_reserve = 0; + requested_old_evac_reserve -= delta; + old_generation->set_promoted_reserve(requested_promoted_reserve); + old_generation->set_evacuation_reserve(requested_old_evac_reserve); + } +} + +// Reduce young reserve (when there are insufficient resources to satisfy the original request). +void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) { + ShenandoahYoungGeneration* const young_generation = _heap->young_generation(); + assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction"); + young_generation->set_evacuation_reserve(adjusted_young_reserve); +} + void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, size_t& young_reserve_result, size_t& old_reserve_result) const { const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); @@ -1286,17 +1314,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old } } - if (LogTarget(Info, gc, free)::is_enabled()) { - size_t old_reserve = _free_sets.capacity_of(OldCollector); - if (old_reserve < to_reserve_old) { - log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT, - PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve)); - } - size_t young_reserve = _free_sets.capacity_of(Collector); - if (young_reserve < to_reserve) { - log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, - PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); - } + size_t old_reserve = _free_sets.capacity_of(OldCollector); + if (old_reserve < to_reserve_old) { + reduce_old_reserve(old_reserve, to_reserve_old); + log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT, + PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve)); + } + size_t young_reserve = _free_sets.capacity_of(Collector); + if (young_reserve < to_reserve) { + reduce_young_reserve(young_reserve, to_reserve); + log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, + PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index ff4e7d13fd1..b5b36e60898 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -183,6 +183,9 @@ class ShenandoahFreeSet : public CHeapObj { bool can_allocate_from(size_t idx) const; bool has_alloc_capacity(ShenandoahHeapRegion *r) const; + void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve); + void reduce_old_reserve(size_t 
adjusted_old_reserve, size_t requested_old_reserve); + public: ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions); From 39e02f17d5a629a928e78538d40f98e351beb0cd Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 24 Jun 2024 21:36:23 +0000 Subject: [PATCH 35/64] Fix white space --- .../heuristics/shenandoahGenerationalHeuristics.cpp | 2 +- .../gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp | 2 +- .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index f4432a9b7c4..11024bf09c8 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -170,7 +170,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0); if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) { // Only young collections need to prime the collection set. - + bool need_to_finalize_piggyback = false; size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available,excess_fragmented_available; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index e1792094839..1f161ed84b9 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -201,7 +201,7 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c #endif break; } - } + } collection_set->add_region(r); included_old_regions++; evacuated_old_bytes += live_data_for_evacuation; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 677bdc7b300..0ead262c2a6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -664,7 +664,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x reserve_for_promo = 0; reserve_for_mixed -= excess_reserves; } - } + } } // Decide how much additional space we should reserve for promotions from young. 
We give priority to mixed evacuations @@ -786,7 +786,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available, - "Cannot reserve more memory than is available: " SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT " <= " + "Cannot reserve more memory than is available: " SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT " <= " SIZE_FORMAT " + " SIZE_FORMAT, young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available); // deficit/surplus adjustments to generation sizes will precede rebuild From e1aa848b477ac7c0b872c691b432e30e5e8249c3 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 24 Jun 2024 16:17:54 -0600 Subject: [PATCH 36/64] Fix argument list after manual merge conflict resolution --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index fd9a5b7e77d..128548adc67 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -2434,7 +2434,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { // available for transfer to old. Note that transfer of humongous regions does not impact available. ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); - gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions); + gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions, young_cset_regions); } // Rebuild free set based on adjusted generation sizes. _free_set->rebuild(young_cset_regions, old_cset_regions); From 54df079971d58d909ae67ccfb6cacac564aedf0d Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 27 Jun 2024 13:51:54 +0000 Subject: [PATCH 37/64] Do not access young_gen or old_gen in non-generational mode After merge from openjdk/shenandoah, some code on this branch was no longer valid, resulting in assertion errors or SIGSEGV in release builds. 
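The shape of the fix is to guard every generation-specific reserve update behind the mode check, as in this abbreviated excerpt of the shenandoahHeap.cpp hunk that follows (the surrounding initialization code is elided):

    // Only generational mode maintains separate young/old generations;
    // calling young_generation()/old_generation() in single-generation
    // mode trips asserts in debug builds and can crash release builds.
    if (mode()->is_generational()) {
      size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
      young_generation()->set_evacuation_reserve(young_reserve);
      old_generation()->set_evacuation_reserve((size_t) 0);
      old_generation()->set_promoted_reserve((size_t) 0);
    }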
--- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 5 ++++- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 11 +++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 33be457842c..99f35c7f561 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1315,13 +1315,16 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t old_reserve = _free_sets.capacity_of(OldCollector); if (old_reserve < to_reserve_old) { + assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode"); reduce_old_reserve(old_reserve, to_reserve_old); log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT, PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve)); } size_t young_reserve = _free_sets.capacity_of(Collector); if (young_reserve < to_reserve) { - reduce_young_reserve(young_reserve, to_reserve); + if (_heap->mode()->is_generational()) { + reduce_young_reserve(young_reserve, to_reserve); + } log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 128548adc67..2bcca387cde 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -414,12 +414,15 @@ jint ShenandoahHeap::initialize() { // We are initializing free set. We ignore cset region tallies. size_t first_old, last_old, num_old; - size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; - young_generation()->set_evacuation_reserve(young_reserve); - old_generation()->set_evacuation_reserve((size_t) 0); - old_generation()->set_promoted_reserve((size_t) 0); _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); _free_set->rebuild(young_cset_regions, old_cset_regions); + + if (mode()->is_generational()) { + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + young_generation()->set_evacuation_reserve(young_reserve); + old_generation()->set_evacuation_reserve((size_t) 0); + old_generation()->set_promoted_reserve((size_t) 0); + } } if (AlwaysPreTouch) { From 2b3afe7d046cc76216055c2c77fa3b843385af3c Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 30 Jun 2024 20:57:34 -0600 Subject: [PATCH 38/64] Fix TestGCOldWithShenandoah#generational regression --- src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 367de442fc4..02af01c158b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -167,7 +167,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation when concurrent mark ends"); heap->compute_old_generation_balance(allocation_runway, 0, 0); result = heap->balance_generations(); - heap->free_set()->finish_rebuild(0, 0, 0); + heap->free_set()->finish_rebuild(0, 0, num_old); } LogTarget(Info, gc, ergo) lt; From 
52a3b36982da83c74f436ef5038ab377d5f75d37 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 3 Jul 2024 11:44:23 -0600 Subject: [PATCH 39/64] Remove reset_evacuation_reserves --- .../gc/shenandoah/shenandoahGenerationalFullGC.cpp | 3 --- .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 10 ---------- .../share/gc/shenandoah/shenandoahGenerationalHeap.hpp | 3 --- 3 files changed, 16 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index 2870bdab321..1b6bc1bbe17 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -55,9 +55,6 @@ void ShenandoahGenerationalFullGC::prepare() { // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL. heap->set_gc_generation(heap->global_generation()); - // No need for old_gen->increase_used() as this was done when plabs were allocated. - heap->reset_generation_reserves(); - // Full GC supersedes any marking or coalescing in old generation. heap->old_generation()->cancel_gc(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 078c6917563..02d05d6cf48 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -817,16 +817,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x old_generation()->set_promoted_reserve(reserve_for_promo); } -void ShenandoahGenerationalHeap::reset_generation_reserves() { -#undef KELVIN_RESERVES -#ifdef KELVIN_RESERVES - log_info(gc)("reset_generation_reserves() clears everything"); -#endif - young_generation()->set_evacuation_reserve(0); - old_generation()->set_evacuation_reserve(0); - old_generation()->set_promoted_reserve(0); -} - void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const { auto heap = ShenandoahGenerationalHeap::heap(); ShenandoahYoungGeneration* const young_gen = heap->young_generation(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index 946eae62e9e..b5aefaf1081 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -118,9 +118,6 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; } - // Zeros out the evacuation and promotion reserves - void reset_generation_reserves(); - // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions, size_t young_cset_regions); From 3f514af759848ff9df4f0812d1f5660eebd4e7af Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 3 Jul 2024 11:45:44 -0600 Subject: [PATCH 40/64] reserve until available in partition is sufficient This fixes a bug. Previously, we had reserved only until max_capacity exceeded the requested reserve. If there is memory in use within the partition, max_capacity will be greater than available. 
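The capacity-versus-available distinction behind this fix is easy to demonstrate in isolation. A minimal sketch (the Partition struct is an illustrative stand-in, not the real ShenandoahRegionPartitions API): a partition whose member regions total 8 units of capacity but already hold 4 units of data passes a 6-unit reserve test only if the test mistakenly compares against capacity:

#include <cstdio>
#include <cstddef>

struct Partition {
  size_t capacity;  // total bytes of all member regions
  size_t used;      // bytes already allocated within those regions
  size_t available() const { return capacity - used; }
};

int main() {
  const size_t to_reserve = 6;
  Partition p{ /*capacity*/ 8, /*used*/ 4 };

  // Buggy test: stops reserving because capacity (8) >= 6 ...
  bool buggy_satisfied = p.capacity >= to_reserve;
  // ... but only 4 units are actually allocatable, so reserving must continue.
  bool fixed_satisfied = p.available() >= to_reserve;

  printf("capacity test satisfied: %d, available test satisfied: %d\n",
         (int) buggy_satisfied, (int) fixed_satisfied);  // prints 1, 0
  return 0;
}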
--- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 190ce8c1b03..6cd4c5ed8c7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1670,8 +1670,8 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old assert (ac > 0, "Membership in free set implies has capacity"); assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD"); - bool move_to_old_collector = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old; - bool move_to_collector = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector) < to_reserve; + bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old; + bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve; if (!move_to_collector && !move_to_old_collector) { // We've satisfied both to_reserve and to_reserved_old From 1378ef6b80bfcc83d023839063bc404c47ff6c4a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 3 Jul 2024 12:33:04 -0600 Subject: [PATCH 41/64] Include old_cset regions in old_available when computing reserves --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 6cd4c5ed8c7..a08b822eadf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1591,7 +1591,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions shenandoah_assert_generational(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); ShenandoahOldGeneration* const old_generation = _heap->old_generation(); - size_t old_available = old_generation->available(); + size_t old_available = old_generation->available() + old_cset_regions * region_size_bytes; size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions(); ShenandoahYoungGeneration* const young_generation = _heap->young_generation(); size_t young_capacity = young_generation->max_capacity(); From 04d9c08cdb1bdc49cb04f47f090389c79157da3f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 4 Jul 2024 11:12:25 -0600 Subject: [PATCH 42/64] Reset live_bytes_in_unprocessed_candidates when abandon_collection_candidates() --- .../share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 1f161ed84b9..ea92335e7a7 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -653,6 +653,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa void ShenandoahOldHeuristics::abandon_collection_candidates() { _last_old_collection_candidate = 0; _next_old_collection_candidate = 0; + _live_bytes_in_unprocessed_candidates = 0; _last_old_region = 0; } From 7ab343b9eb8f254e83eadc97124a00535c407fa6 Mon Sep 17 00:00:00 2001 
From: Kelvin Nilsen Date: Fri, 5 Jul 2024 10:17:56 -0600 Subject: [PATCH 43/64] Rebuild free set consistently following abbreviated and old mark cycles --- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 38 +++++++++++++++++-- .../share/gc/shenandoah/shenandoahOldGC.cpp | 22 ++++++++--- .../gc/shenandoah/shenandoahOldGeneration.cpp | 4 ++ 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index d3a2a60e1a6..887b7e4b2b2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -336,11 +336,43 @@ void ShenandoahConcurrentGC::entry_final_roots() { static const char* msg = "Pause Final Roots"; ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); EventMark em("%s", msg); + ShenandoahHeap* const heap = ShenandoahHeap::heap(); op_final_roots(); - if (_abbreviated) { - ShenandoahHeap::heap()->rebuild_free_set(true /*concurrent*/); + + if (heap->mode()->is_generational()) { + assert (_abbreviated || _generation->is_old(), "Only rebuild free set for abbreviated and old-marking cycles"); + // After concurrent old marking finishes and after an abbreviated cycle, we reclaim immediate garbage. + // Further, we may also want to expand OLD in order to make room for anticipated promotions and/or for mixed + // evacuations. Mixed evacuations are especially likely to follow the end of OLD marking. + ShenandoahGenerationalHeap::TransferResult result; + { + ShenandoahHeapLocker locker(heap->lock()); + ShenandoahGenerationalHeap* const gen_heap = ShenandoahGenerationalHeap::heap(); + size_t young_cset_regions, old_cset_regions; + size_t first_old, last_old, num_old; + size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); + heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + assert((young_cset_regions == 0) && (old_cset_regions == 0), + "No ongoing evacuation after abbreviated or concurrent OLD marking cycle"); + gen_heap->compute_old_generation_balance(allocation_runway, 0, 0); + result = gen_heap->balance_generations(); + heap->free_set()->finish_rebuild(0, 0, num_old); + } + LogTarget(Info, gc, ergo) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + result.print_on(_generation->is_old()? 
"Old Mark": "Abbreviated", &ls); + } + } else { + assert (_abbreviated, "Only rebuild free set for abbreviated"); + // Rebuild free set after reclaiming immediate garbage + ShenandoahHeapLocker locker(heap->lock()); + size_t young_cset_regions, old_cset_regions; + size_t first_old, last_old, num_old; + heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation after abbreviated cycle"); + heap->free_set()->finish_rebuild(0, 0, num_old); } - // else, this is the end of old marking } void ShenandoahConcurrentGC::entry_reset() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 02af01c158b..60845c682da 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -147,18 +147,27 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc."); - // We must execute this vm operation if we completed final mark. We cannot - // return from here with weak roots in progress. This is not a valid gc state - // for any young collections (or allocation failures) that interrupt the old - // collection. This will reclaim immediate garbage, adjust generation sizes, - // and rebuild free set. + // We must execute this vm operation if we completed final mark. We cannot return from here with weak roots in progress. + // This is not a valid gc state for any young collections (or allocation failures) that interrupt the old collection. + // This will reclaim immediate garbage. vmop_entry_final_roots() will also rebuild the free set. vmop_entry_final_roots(); +#ifdef KELVIN_DEPRECATE + // Deprecating because vmop_entry_final_roots() does the free-set rebuild. + // After concurrent old marking finishes, we may be able to reclaim immediate garbage from regions that are fully garbage. // Furthermore, we may want to expand OLD in order to make room for the first mixed evacuation that immediately follows // completion of OLD marking. This is why we rebuild free set here. ShenandoahGenerationalHeap::TransferResult result; { + // Though we did not choose a collection set above, we still may have freed up immediate garbage regions so + // proceed with rebuilding the free set. A second reason to rebuild free set now is to prepare for mixed evacuations + // which are likely to follow completion of old-gen marking. Preparation for mixed evacuations likely involves + // expansion of the old generation. + + // Old marking does not degenerate. It is always concurrent. In case of out-of-cycle memory allocation failures + // while old marking is ongoing, we will degenerate to a young GC, which may, if necessary upgrade to Full GC. + // If the young degenerated GC upgrades to full GC, concurrent old marking will be cancelled. 
ShenandoahHeapLocker locker(heap->lock()); size_t young_cset_regions, old_cset_regions; size_t first_old, last_old, num_old; @@ -169,11 +178,12 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { result = heap->balance_generations(); heap->free_set()->finish_rebuild(0, 0, num_old); } - + LogTarget(Info, gc, ergo) lt; if (lt.is_enabled()) { LogStream ls(lt); result.print_on("Old Mark", &ls); } +#endif return true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 014d55fb63e..09726c84cab 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -474,6 +474,9 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent _old_heuristics->prepare_for_old_collections(); } +#ifdef KELVIN_DEPRECATE + // Kelvin is removing this code because vmop_entry_final_roots() does the rebuild. + { // Though we did not choose a collection set above, we still may have // freed up immediate garbage regions so proceed with rebuilding the free set. @@ -488,6 +491,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent // is in case there was any immediate old garbage identified. heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old); } +#endif } const char* ShenandoahOldGeneration::state_name(State state) { From b61679a0740de52fce5d2bdd26314f0fe72acd4b Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 5 Jul 2024 11:28:29 -0600 Subject: [PATCH 44/64] Fix up some assertions Assertions should not have side effects. Original assertion code was calling a function that would cause adjustments to the partition bounds for empty regions. --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index a08b822eadf..4fcbc7d49cd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -589,12 +589,12 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; - assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)); - assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator)); + assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]); + assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]); // Performance invariants. Failing these would not break the free partition, but performance would suffer. 
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, @@ -623,11 +623,11 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector)); + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector)); + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); // Performance invariants. Failing these would not break the free partition, but performance would suffer. assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, @@ -658,11 +658,11 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]); } #endif From 6cef4b407e71589d509c9380dfceecf6c8fe3f16 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 5 Jul 2024 11:30:15 -0600 Subject: [PATCH 45/64] Do not top-off beyond available unaffiliated young regions --- .../share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index ea92335e7a7..7ce54838032 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -344,6 +344,9 @@ bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* co return false; } else { size_t regions_for_old_expansion = (available_to_loan_from_young_reserve / region_size_bytes); + if (regions_for_old_expansion > young_unaffiliated_regions) { + regions_for_old_expansion = young_unaffiliated_regions; + } log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by " SIZE_FORMAT " regions", 
regions_for_old_expansion); _heap->generation_sizer()->force_transfer_to_old(regions_for_old_expansion); From 884c48ebdaf1b23e0a5481e74de5a004a03ffce9 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 8 Jul 2024 09:38:49 -0600 Subject: [PATCH 46/64] Fix multiple bugs detected after merge from master 1. Some debug instrumentation which will eventually be removed 2. Disable assert_bounds for trashed OLD regions (until after they are recycled) 3. When a trashed old region is recycled, adjust the old empty interval immediately if assertions are enabled and the region is still in the OldCollector partition 4. When rebuild freeset transfers regions between OLD and YOUNG, log the transfer and clear the region balance so we do not redundantly repeat the transfer 5. In compute_young_and_old_reserves(), adjust young_available by xfer_bytes if regions are being transferred between young and old generations --- .../heuristics/shenandoahOldHeuristics.cpp | 39 ++- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 7 + .../gc/shenandoah/shenandoahDegeneratedGC.cpp | 7 + .../share/gc/shenandoah/shenandoahFreeSet.cpp | 227 ++++++++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 25 +- .../shenandoah/shenandoahGenerationSizer.cpp | 1 - .../shenandoah/shenandoahGenerationalHeap.cpp | 2 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 4 + .../share/gc/shenandoah/shenandoahOldGC.cpp | 2 +- .../gc/shenandoah/shenandoahVerifier.cpp | 30 ++- 10 files changed, 318 insertions(+), 26 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 7ce54838032..f413e1ef6d0 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -27,6 +27,7 @@ #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahOldGeneration.hpp" @@ -433,7 +434,9 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { size_t immediate_garbage = 0; size_t immediate_regions = 0; size_t live_data = 0; - +#ifdef ASSERT + bool reclaimed_immediate = false; +#endif RegionData* candidates = _region_data; for (size_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* region = heap->get_region(i); @@ -446,12 +449,24 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { live_data += live_bytes; if (region->is_regular() || region->is_regular_pinned()) { - // Only place regular or pinned regions with live data into the candidate set. - // Pinned regions cannot be evacuated, but we are not actually choosing candidates - // for the collection set here. That happens later during the next young GC cycle, - // by which time, the pinned region may no longer be pinned. 
if (!region->has_live()) { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("prepare_for_old_collections() found immediate trash in region " SIZE_FORMAT, i); +#endif assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); +#ifdef ASSERT + if (!reclaimed_immediate) { + reclaimed_immediate = true; + // Inform the free-set that old trash regions may temporarily violate OldCollector bounds + shenandoah_assert_heaplocked(); + heap->free_set()->advise_of_old_trash(); + } +#endif region->make_trash_immediate(); immediate_regions++; immediate_garbage += garbage; @@ -466,11 +481,22 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { // If they are pinned, we expect them to hold live data, so they will not be // turned into immediate garbage. if (!region->has_live()) { +#ifdef KELVIN_DEBUG + log_info(gc)("prepare_for_old_collections() found immediate humongous start trash in region " SIZE_FORMAT, i); +#endif assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); // The humongous object is dead, we can just return this region and the continuations // immediately to the freeset - no evacuations are necessary here. The continuations // will be made into trash by this method, so they'll be skipped by the 'is_regular' // check above, but we still need to count the start region. +#ifdef ASSERT + if (!reclaimed_immediate) { + reclaimed_immediate = true; + // Inform the free-set that old trash regions may temporarily violate OldCollector bounds + shenandoah_assert_heaplocked(); + heap->free_set()->advise_of_old_trash(); + } +#endif immediate_regions++; immediate_garbage += garbage; size_t region_count = heap->trash_humongous_region_at(region); @@ -478,6 +504,9 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { } } else if (region->is_trash()) { // Count humongous objects made into trash here. 
+#ifdef KELVIN_DEBUG + log_info(gc)("prepare_for_old_collections() found immediate humongous continuation trash in region " SIZE_FORMAT, i); +#endif immediate_regions++; immediate_garbage += garbage; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 887b7e4b2b2..361a8978175 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1126,6 +1126,10 @@ void ShenandoahConcurrentGC::op_strong_roots() { } void ShenandoahConcurrentGC::op_cleanup_early() { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("ShenConcGC::op_cleanup_early() is recycling trash"); +#endif ShenandoahHeap::heap()->free_set()->recycle_trash(); } @@ -1254,6 +1258,9 @@ void ShenandoahConcurrentGC::op_final_roots() { } void ShenandoahConcurrentGC::op_cleanup_complete() { +#ifdef KELVIN_DEBUG + log_info(gc)("ShenConcGC::op_cleanup_complete() is invoking recycle_trash()"); +#endif ShenandoahHeap::heap()->free_set()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index 230bd9381fd..c0cb2210364 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -382,6 +382,10 @@ bool ShenandoahDegenGC::has_in_place_promotions(const ShenandoahHeap* heap) cons } void ShenandoahDegenGC::op_cleanup_early() { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("ShenDegenGC::op_cleanup_early()"); +#endif ShenandoahHeap::heap()->recycle_trash(); } @@ -427,6 +431,9 @@ void ShenandoahDegenGC::op_update_roots() { void ShenandoahDegenGC::op_cleanup_complete() { ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete); +#ifdef KELVIN_DEBUG + log_info(gc)("ShenDegenGC::op_cleanup_complete()"); +#endif ShenandoahHeap::heap()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 4fcbc7d49cd..c43844b4092 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -273,6 +273,36 @@ inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(She } } +inline void ShenandoahRegionPartitions::adjust_interval_for_recycled_old_region(ShenandoahHeapRegion* r) { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("adjusting interval for recycled region " SIZE_FORMAT ", free: " SIZE_FORMAT ", is_trash: %s, is_old: %s", + r->index(), r->free(), r->is_trash()? "true": "false", r->is_old()? "true": "false"); +#endif + assert(!r->is_trash() && (r->free() == _region_size_bytes), "Bad argument"); + idx_t idx = (idx_t) r->index(); + // Note that a recycled old trashed region may be in any one of the free set partitions according to the following scenarios: + // 1. The old region had already been retired, so it was NotFree, and we have not rebuilt free set, so region is still NotFree + // 2. We recycled the region but we have not yet rebuilt the free set, so it is still in the OldCollector region. + // 3. We have found regions with alloc capacity but have not yet reserved_regions, so this is in Mutator set, and + // the act of placing the region into the Mutator set properly adjusts interval for Mutator set. + // 4. 
During reserve_regions(), we moved this region into the Collector set, and the act of placing this region into + // Collector set properly adjusts the interval for the Collector set. + // 5. During reserve_regions, we moved this region into the OldCollector set, and the act of placing this region into + // OldCollector set properly adjusts the interval for the OldCollector set. + // Only case 2 needs to be fixed up here. + ShenandoahFreeSetPartitionId old_partition = ShenandoahFreeSetPartitionId::OldCollector; + if (_membership[int(old_partition)].is_set(idx)) { + assert(_leftmosts[int(old_partition)] <= idx && _rightmosts[int(old_partition)] >= idx, "sanity"); + if (_leftmosts_empty[int(old_partition)] > idx) { + _leftmosts_empty[int(old_partition)] = idx; + } + if (_rightmosts_empty[int(old_partition)] < idx) { + _rightmosts_empty[int(old_partition)] = idx; + } + } +} + void ShenandoahRegionPartitions::retire_range_from_partition( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { @@ -516,7 +546,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w #ifdef ASSERT -void ShenandoahRegionPartitions::assert_bounds() { +void ShenandoahRegionPartitions::assert_bounds(bool old_trash_not_in_bounds) { idx_t leftmosts[UIntNumPartitions]; idx_t rightmosts[UIntNumPartitions]; @@ -542,6 +572,34 @@ { size_t capacity = _free_set->alloc_capacity(i); bool is_empty = (capacity == _region_size_bytes); + ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i); + + // When old_trash_not_in_bounds, an old trashed region might reside in: + // 1. NotFree if the region had already been retired + // 2. OldCollector because the region was originally in OldCollector when it was identified as immediate garbage, or + // 3. Mutator because we have run find_regions_with_alloc_capacity(), or + // 4. Collector because reserve_regions moved from Mutator to Collector but we have not yet recycled the trash + // 5. OldCollector because reserve_regions moved from Mutator to OldCollector but we have not yet recycled the trash + + // In case 1, there is no issue with empty-free intervals. + // In cases 3 - 5, there is no issue with empty-free intervals because the act of moving the region into the partition + // causes the empty-free interval to be updated. + // Only in case 2 do we need to disable the assert checking, but it is difficult to distinguish case 2 from case 5, + // so we do not assert bounds for case 2 or case 5. + +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + if (old_trash_not_in_bounds && r->is_old() && r->is_trash()) { + log_info(gc)("assert_bounds() sees old trashed region " SIZE_FORMAT " residing in partition %d", i, int(partition)); + } +#endif + + if (old_trash_not_in_bounds && (partition == ShenandoahFreeSetPartitionId::OldCollector) && r->is_old() && r->is_trash()) { + // If Old trash has been identified but we have not yet rebuilt the freeset to account for the trashed regions, + // or if old trash has not yet been recycled, do not expect these trash regions to be within the OldCollector + // partition's bounds. 
+ continue; + } assert(capacity > 0, "free regions must have allocation capacity"); if (i < leftmosts[int(partition)]) { leftmosts[int(partition)] = i; @@ -669,6 +727,9 @@ void ShenandoahRegionPartitions::assert_bounds() { ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), _partitions(max_regions, this), +#ifdef ASSERT + _old_trash_not_in_bounds(false), +#endif _alloc_bias_weight(0) { clear_internal(); @@ -1112,7 +1173,9 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah } } _partitions.retire_from_partition(orig_partition, idx, r->used()); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif } return result; } @@ -1222,7 +1285,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif req.set_actual_size(words_size); if (remainder != 0) { req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder); @@ -1239,14 +1304,34 @@ void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) { void ShenandoahFreeSet::recycle_trash() { // lock is not reentrable, check we don't have it shenandoah_assert_not_heaplocked(); +#ifdef KELVIN_DEBUG + int recycled_regions = 0; +#endif for (size_t i = 0; i < _heap->num_regions(); i++) { ShenandoahHeapRegion* r = _heap->get_region(i); if (r->is_trash()) { +#ifdef KELVIN_DEBUG + recycled_regions++; +#endif ShenandoahHeapLocker locker(_heap->lock()); try_recycle_trashed(r); +#ifdef ASSERT + // Note: if assertions are not enforced, there's no rush to adjust this interval. We'll adjust the + // interval when we eventually rebuild the free set. + if (_old_trash_not_in_bounds) { + _partitions.adjust_interval_for_recycled_old_region(r); + } +#endif } SpinPause(); // allow allocators to take the lock } +#ifdef ASSERT +#ifdef KELVIN_DEBUG + log_info(gc)("WILL ROBINSON, WARNING IS NO LONGER REQUIRED AFTER TRYING TO RECYCLE %d!", recycled_regions); +#endif + ShenandoahHeapLocker locker(_heap->lock()); + _old_trash_not_in_bounds = false; +#endif } void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { @@ -1259,7 +1344,9 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { size_t region_capacity = alloc_capacity(r); _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif _heap->old_generation()->augment_evacuation_reserve(region_capacity); bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { @@ -1279,8 +1366,9 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { size_t ac = alloc_capacity(r); _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); - _partitions.assert_bounds(); - +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, // to recycle trash before attempting to allocate anything in the region. 
} @@ -1330,9 +1418,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi for (size_t idx = 0; idx < _heap->num_regions(); idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); if (region->is_trash()) { - // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up". - // The cset regions are not "trashed" until we have finished update refs. + // Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage) + // but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs. if (region->is_old()) { + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + // We're going to place this region into the Mutator set. We increment old_cset_regions because this count represents + // regions that the old generation is entitled to without any transfer from young. We do not place this region into + // the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region + // into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the + // OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this + // time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions(). +#ifdef KELVIN_DEBUG + log_info(gc)("find_regions_with_alloc_capacity() is looking at old trash region " SIZE_FORMAT, idx); +#endif old_cset_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); @@ -1353,7 +1451,12 @@ size_t ac = alloc_capacity(region); if (ac > PLAB::min_size() * HeapWordSize) { if (region->is_trash() || !region->is_old()) { - // Both young and old collected regions (trashed) are placed into the Mutator set + // Young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set +#ifdef KELVIN_DEBUG + if (region->is_old()) { + log_info(gc)("find_regions_with_alloc_capacity() moves old trash region " SIZE_FORMAT " to mutator partition", idx); + } +#endif _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator); if (idx < mutator_leftmost) { mutator_leftmost = idx; @@ -1532,8 +1635,20 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si ShenandoahYoungGeneration* young_gen = heap->young_generation(); size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t original_old_regions = old_gen->max_capacity() / region_size_bytes; old_gen->set_capacity(old_region_count * region_size_bytes); young_gen->set_capacity(young_region_count * region_size_bytes); + + if (original_old_regions > old_region_count) { + log_info(gc)("Transfer " SIZE_FORMAT " regions from OLD to YOUNG during rebuild of freeset", + original_old_regions - old_region_count); + } else if (original_old_regions < old_region_count) { + log_info(gc)("Transfer " SIZE_FORMAT " regions from YOUNG to OLD during rebuild of freeset", + old_region_count - original_old_regions); + } + + // Having transferred regions based on results of rebuild(), reset the rebalance request. 
+ old_gen->set_region_balance(0); } } @@ -1548,14 +1663,19 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cse old_reserve = 0; } - // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy + // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy // young_reserve and old_reserve. reserve_regions(young_reserve, old_reserve, old_region_count); size_t young_region_count = _heap->num_regions() - old_region_count; establish_generation_sizes(young_region_count, old_region_count); establish_old_collector_alloc_bias(); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif log_status(); + // Even though we have finished rebuild, old trashed regions may not yet have been recycled, so leave + // _old_trash_not_in_bounds as is. Following rebuild, old trashed regions may reside in Mutator, Collector, + // or OldCollector partitions. } // Reduce old reserve (when there are insufficient resources to satisfy the original request). @@ -1601,6 +1721,16 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions old_unaffiliated_regions += old_cset_regions; young_unaffiliated_regions += young_cset_regions; + assert(young_capacity >= (young_generation->used() + young_generation->get_humongous_waste()), + "Young capacity (" SIZE_FORMAT ") must exceed used (" SIZE_FORMAT ") plus humongous waste (" SIZE_FORMAT ")", + young_capacity, young_generation->used(), young_generation->get_humongous_waste()); + + size_t young_available = young_capacity - (young_generation->used() + young_generation->get_humongous_waste()); + young_available += young_cset_regions * region_size_bytes; + + assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity"); + assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity"); + // Consult old-region balance to make adjustments to current generation capacities and availability. // The generation region transfers take place after we rebuild. const ssize_t old_region_balance = old_generation->get_region_balance(); @@ -1616,13 +1746,11 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions ssize_t xfer_bytes = old_region_balance * checked_cast(region_size_bytes); old_available -= xfer_bytes; old_unaffiliated_regions -= old_region_balance; + young_available += xfer_bytes; young_capacity += xfer_bytes; young_unaffiliated_regions += old_region_balance; } - size_t young_available = young_capacity - (young_generation->used() + young_generation->get_humongous_waste()); - young_available += young_cset_regions * region_size_bytes; - // All allocations taken from the old collector set are performed by GC, generally using PLABs for both // promotions and evacuations. The partition between which old memory is reserved for evacuation and // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for @@ -1659,6 +1787,10 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // the collector set is at least to_reserve and the memory available for allocations within the old collector set // is at least to_reserve_old. 
void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + size_t initial_old_region_count = old_region_count; +#endif for (size_t i = _heap->num_regions(); i > 0; i--) { size_t idx = i - 1; ShenandoahHeapRegion* r = _heap->get_region(idx); @@ -1684,8 +1816,43 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old // be collected in the near future. if (r->is_trash() || !r->is_affiliated()) { // OLD regions that have available memory are already in the old_collector free set. +#ifdef KELVIN_DEBUG + if (_old_trash_not_in_bounds) { + log_info(gc)("reserve_regions() moving %s %sregion " SIZE_FORMAT " with alloc capacity " SIZE_FORMAT + " from Mutator to OldCollector", + r->is_affiliated()? r->affiliation_name(): "unaffiliated", r->is_trash()? "trash ": "", idx, ac); + log_info(gc)("Before move: Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," + " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," + " Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + } +#endif _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, ac); +#ifdef KELVIN_DEBUG + if (_old_trash_not_in_bounds) { + log_info(gc)("After move: Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," + " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," + " Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + } +#endif log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx); log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", @@ -1734,6 +1901,10 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); } +#ifdef KELVIN_DEBUG + log_info(gc)("At end of reserve_regions(), old_region_count: " SIZE_FORMAT ", at entry, was: " SIZE_FORMAT, + old_region_count, initial_old_region_count); +#endif } void ShenandoahFreeSet::establish_old_collector_alloc_bias() { @@ -1775,8 +1946,13 @@ void ShenandoahFreeSet::log_status() { 
shenandoah_assert_heaplocked(); #ifdef ASSERT +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + if (true) { +#else // Dump of the FreeSet details is only enabled if assertions are enabled if (LogTarget(Debug, gc, free)::is_enabled()) { +#endif #define BUFFER_SIZE 80 size_t retired_old = 0; size_t retired_old_humongous = 0; @@ -1798,7 +1974,22 @@ void ShenandoahFreeSet::log_status() { for (uint i = 0; i < BUFFER_SIZE; i++) { buffer[i] = '\0'; } - +#ifdef KELVIN_DEBUG + log_info(gc)("FreeSet map legend:" + " M:mutator_free C:collector_free O:old_collector_free" + " H:humongous ~:retired old _:retired young"); + log_info(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, " + " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], " + "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left"); +#endif log_debug(gc)("FreeSet map legend:" " M:mutator_free C:collector_free O:old_collector_free" " H:humongous ~:retired old _:retired young"); @@ -1818,6 +2009,9 @@ void ShenandoahFreeSet::log_status() { ShenandoahHeapRegion *r = _heap->get_region(i); uint idx = i % 64; if ((i != 0) && (idx == 0)) { +#ifdef KELVIN_DEBUG + log_info(gc)(" %6u: %s", i-64, buffer); +#endif log_debug(gc)(" %6u: %s", i-64, buffer); } if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) { @@ -1863,6 +2057,9 @@ void ShenandoahFreeSet::log_status() { } else { remnant = 64; } +#ifdef KELVIN_DEBUG + log_info(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer); +#endif log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer); } #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 88261f75701..25d8615575c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -149,6 +149,10 @@ class ShenandoahRegionPartitions { void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available); + // In case recycled region r is in the OldCollector partition but not within the interval for empty OldCollector regions, expand + // the empty interval to include this region. If recycled region r is not in the OldCollector partition, do nothing. + inline void adjust_interval_for_recycled_old_region(ShenandoahHeapRegion* r); + const char* partition_membership_name(ssize_t idx) const; // Return the index of the next available region >= start_index, or maximum_regions if not found. @@ -236,6 +240,7 @@ class ShenandoahRegionPartitions { inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; } +#ifdef ASSERT // Assure leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets. 
// Valid bounds honor all of the following (where max is the number of heap regions): // if the set is empty, leftmost equals max and rightmost equals 0 @@ -256,7 +261,10 @@ class ShenandoahRegionPartitions { // idx >= leftmost && // idx <= rightmost // } - void assert_bounds() NOT_DEBUG_RETURN; + // + // If old_trash_not_in_bounds, do not require old-generation trashed regions to be within the OldCollector bounds. + void assert_bounds(bool old_trash_not_in_bounds) NOT_DEBUG_RETURN; +#endif }; // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(), @@ -296,7 +304,9 @@ class ShenandoahFreeSet : public CHeapObj { inline HeapWord* allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region); - +#ifdef ASSERT + bool _old_trash_not_in_bounds; +#endif // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero. Each time // we allocate an object, we decrement the count of this value. Each time we re-evaluate whether to allocate // from right-to-left or left-to-right, we reset the value of this counter to _InitialAllocBiasWeight. @@ -397,6 +407,17 @@ class ShenandoahFreeSet : public CHeapObj { // for evacuation, invoke this to make regions available for mutator allocations. void move_regions_from_collector_to_mutator(size_t cset_regions); +#ifdef ASSERT + // Advise FreeSet that old trash regions have not yet been accounted for in OldCollector partition bounds + void advise_of_old_trash() { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("DANGER WILL ROBINSON! old_trash is not in bounds!"); +#endif + shenandoah_assert_heaplocked(); + _old_trash_not_in_bounds = true; + } +#endif void recycle_trash(); void log_status(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index ce502070558..db15b05c468 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -194,7 +194,6 @@ void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); } - bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); return transfer_regions(heap->old_generation(), heap->young_generation(), regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 02d05d6cf48..9273daff943 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -631,7 +631,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#define KELVIN_RESERVES +#undef KELVIN_RESERVES #ifdef KELVIN_RESERVES log_info(gc)("proposed_max_old: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index e6d35a6c1bb..03255e78c9e 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1927,6 +1927,10 @@ void ShenandoahHeap::rendezvous_threads() { } void ShenandoahHeap::recycle_trash() { +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("ShenHeap::recycle_trash() calls free_set()->recycle_trash()"); +#endif free_set()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 60845c682da..f095774af3a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -178,7 +178,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { result = heap->balance_generations(); heap->free_set()->finish_rebuild(0, 0, num_old); } - + LogTarget(Info, gc, ergo) lt; if (lt.is_enabled()) { LogStream ls(lt); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 3d6d0049268..566590780dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -46,6 +46,10 @@ #include "runtime/orderAccess.hpp" #include "runtime/threads.hpp" #include "utilities/align.hpp" +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG +#include "gc/shenandoah/shenandoahFreeSet.hpp" +#endif // Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp) #ifdef verify_oop @@ -404,6 +408,23 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { } } +#ifdef KELVIN_DEBUG + static bool dump_details(ShenandoahGeneration* generation) { + log_info(gc)("Safepoint verification is unhappy with Generation %s", generation->name()); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + for (unsigned int i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + log_info(gc)("%s region %u BTE (" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ") is %s", + r->is_affiliated()? (r->is_young()? (r->is_humongous()? "Young humongous": "Young regular"): + (r->is_humongous()? "Old humongous": "Old regular")): "Free", + i, p2i(r->bottom()), p2i(r->top()), p2i(r->end()), + r->is_trash()? "trash": "not trash"); + } + heap->free_set()->log_status(); + return true; + } +#endif + static void log_usage(ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { log_debug(gc)("Safepoint verification: %s verified usage: " SIZE_FORMAT "%s, recorded usage: " SIZE_FORMAT "%s", generation->name(), @@ -430,12 +451,19 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { label, generation->name(), generation->used_regions(), stats.regions()); size_t generation_capacity = generation->max_capacity(); +#ifdef KELVIN_DEBUG + guarantee(stats.non_trashed_span() <= generation_capacity, + "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT + ") must not exceed current capacity (" PROPERFMT ")", + dump_details(generation)? 
label: label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()), + PROPERFMTARGS(generation_capacity)); +#else guarantee(stats.non_trashed_span() <= generation_capacity, "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT ") must not exceed current capacity (" PROPERFMT ")", label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(generation_capacity)); - +#endif size_t humongous_waste = generation->get_humongous_waste(); guarantee(stats.waste() == humongous_waste, "%s: generation (%s) humongous waste must be consistent: generation: " PROPERFMT ", regions: " PROPERFMT, From e69f9ac0b89a257810408a4752d794dcdca7f734 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 8 Jul 2024 20:55:05 +0000 Subject: [PATCH 47/64] Fix whitespace --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index c43844b4092..c0a7d5fac05 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1424,7 +1424,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); // We're going to place this region into the Mutator set. We increment old_cset_regions because this count represents // regions that the old generation is entitled to without any transfer from young. We do not place this region into - // the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region + // the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region // into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the // OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this // time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions(). @@ -1648,7 +1648,7 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si } // Having transferred regions based on results of rebuild(), reset the rebalance request. - old_gen->set_region_balance(0); + old_gen->set_region_balance(0); } } From c2dda1bbf3565ce4fb41c913560f93fb442754f3 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 8 Jul 2024 23:13:41 +0000 Subject: [PATCH 48/64] Fix budgeting error during freeset rebuild Limit the size of old-gen by memory available in the OldCollector set following find_regions_with_alloc_capacity() (rather than limiting the size of old-gen by the total capacity of the OldCollector set, which includes used memory). 
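The corrected bound can be expressed as a small standalone clamp. This is a sketch only (the parameter names mirror the patch, but the function itself is illustrative, not HotSpot code): the old reserve may not exceed what is already free inside the OldCollector partition plus whatever empty, unaffiliated regions could still be moved into it.

#include <algorithm>
#include <cstddef>

// Illustrative clamp: the old-gen reserve is bounded by free memory in the
// OldCollector partition plus transferable empty regions, not by the
// partition's total capacity (which also counts used memory).
size_t clamp_old_reserve(size_t requested_old_reserve,
                         size_t old_collector_available,  // free bytes in OldCollector
                         size_t old_unaffiliated_regions,
                         size_t region_size_bytes) {
  const size_t limit = old_collector_available + old_unaffiliated_regions * region_size_bytes;
  return std::min(requested_old_reserve, limit);
}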
---
 src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index c0a7d5fac05..3c2595c3890 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -1756,7 +1756,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions
   // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
   // each PLAB's available memory.
 
-  // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
   const size_t promoted_reserve = old_generation->get_promoted_reserve();
   const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
   young_reserve_result = young_generation->get_evacuation_reserve();
@@ -1771,9 +1770,9 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions
   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
   if (old_reserve_result >
-      _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
+      _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
     old_reserve_result =
-      _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
+      _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
   }
 
   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {

From 9ea1056a8124d9712076ccf429c9bc9e8b1bd22c Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Thu, 11 Jul 2024 15:39:23 -0600
Subject: [PATCH 49/64] Ignore generation soft capacities when adjusting generation sizes

Soft capacities are established, for example, by setting NewRatio or NewSize on the
JVM command line. GenShen, for now at least, does not honor these settings. Better
performance is obtained by allowing GenShen to expand and shrink generation sizes
according to application behavior.

This commit also tidies up various aspects of the implementation to make adjustments
to generation sizing more consistent:

1. ShenandoahGlobalHeuristics::choose_global_collection_set(): share the reserves
   between young and old collection to maximize evacuation of garbage-first regions,
   regardless of whether most garbage is found in old or young
2. ShenandoahConcurrentGC::entry_final_roots(): do not balance generations before
   invoking finish_rebuild() because finish_rebuild() will balance generations.
3. ShenandoahFreeSet::move_regions_from_collector_to_mutator(): invoke
   force_transfer_to_young() instead of transfer_to_young() so we can override
   soft-capacity limits
4. ShenandoahFullGC::phase5_epilog(): Do not invoke compute_balances() or
   balance_generations_after_rebuilding_free_set(). Allow the free-set rebuild()
   implementation to do this work in a more consistent fashion.
5. ShenandoahGeneration::adjust_evacuation_budgets(): replace transfer_to_young()
   with force_transfer_to_young() to avoid enforcement of soft capacity limits.
6. ShenandoahGenerationSizer::force_transfer_to_young(): new method
7.
ShenandoahGenerationalFullGC::balance_generations_after_gc(): establish the
   reserves so that the free-set rebuild() can adjust the balance. Do not
   redundantly force transfer of regions here.
8. ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set():
   deprecate this method.
9. ShenandoahGenerationalFullGC::compute_balances(): deprecate this method.
10. ShenandoahGenerationStatsClosure::validate_usage() (part of Shenandoah
    Verification): add consistency check for generation capacities
---
 .../heuristics/shenandoahGlobalHeuristics.cpp | 144 ++++++++++++------
 .../gc/shenandoah/shenandoahConcurrentGC.cpp  |   6 -
 .../share/gc/shenandoah/shenandoahFreeSet.cpp |   8 +-
 .../share/gc/shenandoah/shenandoahFullGC.cpp  |  10 +-
 .../gc/shenandoah/shenandoahGeneration.cpp    |   6 +-
 .../shenandoah/shenandoahGenerationSizer.cpp  |  14 ++
 .../shenandoah/shenandoahGenerationSizer.hpp  |   7 +-
 .../shenandoahGenerationalEvacuationTask.cpp  |   8 +
 .../shenandoahGenerationalFullGC.cpp          |  53 ++++++-
 .../shenandoahGenerationalFullGC.hpp          |   3 +-
 .../shenandoah/shenandoahGenerationalHeap.cpp |   8 +-
 .../shenandoah/shenandoahGenerationalHeap.hpp |   3 +-
 .../share/gc/shenandoah/shenandoahOldGC.cpp   |   2 +
 .../gc/shenandoah/shenandoahVerifier.cpp      |   5 +
 14 files changed, 200 insertions(+), 77 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
index 4782b41b10c..9c9053dec24 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
@@ -81,49 +81,71 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
                                                               size_t cur_young_garbage) const {
   auto heap = ShenandoahGenerationalHeap::heap();
   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-  size_t capacity = heap->young_generation()->max_capacity();
+  size_t young_capacity = heap->young_generation()->max_capacity();
+  size_t old_capacity = heap->old_generation()->max_capacity();
   size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
   size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
   const uint tenuring_threshold = heap->age_census()->tenuring_threshold();
 
   size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
   size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
-  size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
-  size_t young_cur_cset = 0;
-  size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
-  size_t old_cur_cset = 0;
-
-  // Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
-  // collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
-  // than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
-  // to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
-  // to the mutator as it needs to be able to consume this memory during concurrent GC.
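// Editorial worked example of the shared-reserve pooling below (assumed values,
// not part of this patch): with 2 MiB regions, suppose young_evac_reserve is
// 10 MiB backed by 8 MiB (4 regions) of unaffiliated young memory, and
// old_evac_reserve is 3 MiB backed by 6 MiB of unaffiliated old memory. Young
// contributes all 4 unaffiliated regions (young_evac_reserve drops to 2 MiB);
// old contributes floor(3 MiB / 2 MiB) = 1 region (old_evac_reserve drops to
// 1 MiB). shared_reserve_regions is then 5, spendable on whichever garbage-first
// regions are encountered, young or old.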
   size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
   size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
-
-  if (unaffiliated_young_memory > max_young_cset) {
-    size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
-    unaffiliated_young_memory -= unaffiliated_mutator_memory;
-    unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
-    unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
+  size_t unaffiliated_old_regions = heap->old_generation()->free_unaffiliated_regions();
+  size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
+
+  // Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
+  // be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
+  // most garbage, regardless of whether these garbage-first regions reside in young or old generation.
+  // Excess reserves will be transferred back to the mutator after collection set has been chosen. At the end
+  // of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
+  size_t shared_reserve_regions = 0;
+  if (young_evac_reserve > unaffiliated_young_memory) {
+    young_evac_reserve -= unaffiliated_young_memory;
+    shared_reserve_regions += unaffiliated_young_memory / region_size_bytes;
+  } else {
+    size_t delta_regions = young_evac_reserve / region_size_bytes;
+    shared_reserve_regions += delta_regions;
+    young_evac_reserve -= delta_regions * region_size_bytes;
+  }
+  if (old_evac_reserve > unaffiliated_old_memory) {
+    old_evac_reserve -= unaffiliated_old_memory;
+    shared_reserve_regions += unaffiliated_old_memory / region_size_bytes;
+  } else {
+    size_t delta_regions = old_evac_reserve / region_size_bytes;
+    shared_reserve_regions += delta_regions;
+    old_evac_reserve -= delta_regions * region_size_bytes;
   }
 
-  // We'll affiliate these unaffiliated regions with either old or young, depending on need.
-  max_young_cset -= unaffiliated_young_memory;
+  size_t shared_reserves = shared_reserve_regions * region_size_bytes;
+  size_t committed_from_shared_reserves = 0;
+  size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
+  size_t young_cur_cset = 0;
+  size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
+  size_t old_cur_cset = 0;
+
+  size_t promo_bytes = 0;
+  size_t old_evac_bytes = 0;
+  size_t young_evac_bytes = 0;
 
-  // Keep track of how many regions we plan to transfer from young to old.
-  size_t regions_transferred_to_old = 0;
+  // Keep track of how many shared reserve regions have been committed to old and to young evacuations.
+  size_t shared_regions_consumed_by_old = 0, shared_regions_consumed_by_young = 0;
 
-  size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
+  size_t max_total_cset = (max_young_cset + max_old_cset +
+                           (size_t) (shared_reserve_regions * region_size_bytes) / ShenandoahOldEvacWaste);
+  size_t free_target = ((young_capacity + old_capacity) * ShenandoahMinFreeThreshold) / 100 + max_total_cset;
   size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
 
   log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL.
Max Young Evacuation: " SIZE_FORMAT - "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Discretionary additional evacuation: " SIZE_FORMAT + "%s, Actual Free: " SIZE_FORMAT "%s.", byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset), byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset), + byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + size_t cur_garbage = cur_young_garbage; for (size_t idx = 0; idx < size; idx++) { ShenandoahHeapRegion* r = data[idx]._region; if (cset->is_preselected(r->index())) { @@ -131,36 +153,62 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti continue; } bool add_region = false; + size_t region_garbage = r->garbage(); + size_t new_garbage = cur_garbage + region_garbage; + bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); if (r->is_old() || (r->age() >= tenuring_threshold)) { - size_t new_cset = old_cur_cset + r->get_live_data_bytes(); - if ((r->garbage() > garbage_threshold)) { - while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) { - unaffiliated_young_regions--; - regions_transferred_to_old++; + if (add_regardless || (region_garbage > garbage_threshold)) { + size_t live_bytes = r->get_live_data_bytes(); + size_t new_cset = old_cur_cset + r->get_live_data_bytes(); + // May need multiple reserve regions to evacuate a single region, depending on live data bytes and ShenandoahOldEvacWaste + size_t orig_max_old_cset = max_old_cset; + size_t proposed_old_region_consumption = 0; + while ((new_cset > max_old_cset) && (committed_from_shared_reserves < shared_reserves)) { + committed_from_shared_reserves += region_size_bytes; + proposed_old_region_consumption++; max_old_cset += region_size_bytes / ShenandoahOldEvacWaste; } - } - if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) { - add_region = true; - old_cur_cset = new_cset; + // We already know: add_regardless || region_garbage > garbage_threshold + if (new_cset <= max_old_cset) { + shared_regions_consumed_by_old += proposed_old_region_consumption; + add_region = true; + old_cur_cset = new_cset; + cur_garbage = new_garbage; + if (r->is_old()) { + old_evac_bytes += live_bytes; + } else { + promo_bytes += live_bytes; + } + } else { + // We failed to sufficiently expand old, so unwind proposed expansion + max_old_cset = orig_max_old_cset; + committed_from_shared_reserves -= proposed_old_region_consumption * region_size_bytes; + } } } else { assert(r->is_young() && (r->age() < tenuring_threshold), "DeMorgan's law (assuming r->is_affiliated)"); - size_t new_cset = young_cur_cset + r->get_live_data_bytes(); - size_t region_garbage = r->garbage(); - size_t new_garbage = cur_young_garbage + region_garbage; - bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); - - if (add_regardless || (r->garbage() > garbage_threshold)) { - while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) { - unaffiliated_young_regions--; + if (add_regardless || (region_garbage > garbage_threshold)) { + size_t live_bytes = r->get_live_data_bytes(); + size_t new_cset = young_cur_cset + live_bytes; + // May need multiple reserve regions to evacuate a single region, depending on live data bytes and ShenandoahEvacWaste + size_t orig_max_young_cset 
= max_young_cset; + size_t proposed_young_region_consumption = 0; + while ((new_cset > max_young_cset) && (committed_from_shared_reserves < shared_reserves)) { + committed_from_shared_reserves += region_size_bytes; + proposed_young_region_consumption++; max_young_cset += region_size_bytes / ShenandoahEvacWaste; } - } - if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) { - add_region = true; - young_cur_cset = new_cset; - cur_young_garbage = new_garbage; + // We already know: add_regardless || region_garbage > garbage_threshold + if (new_cset <= max_young_cset) { + add_region = true; + young_cur_cset = new_cset; + cur_garbage = new_garbage; + young_evac_bytes += live_bytes; + } else { + // We failed to sufficiently expand young, so unwind proposed expansion + max_young_cset = orig_max_young_cset; + committed_from_shared_reserves -= proposed_young_region_consumption * region_size_bytes; + } } } if (add_region) { @@ -168,9 +216,7 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti } } - if (regions_transferred_to_old > 0) { - heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); - heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); - heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); - } + heap->young_generation()->set_evacuation_reserve((size_t) (young_evac_bytes * ShenandoahEvacWaste)); + heap->old_generation()->set_evacuation_reserve((size_t) (old_evac_bytes * ShenandoahOldEvacWaste)); + heap->old_generation()->set_promoted_reserve((size_t) (promo_bytes * ShenandoahPromoEvacWaste)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index f58aad66538..e38d9d4c02c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -355,14 +355,8 @@ void ShenandoahConcurrentGC::entry_final_roots() { assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation after abbreviated or concurrent OLD marking cycle"); gen_heap->compute_old_generation_balance(allocation_runway, 0, 0); - result = gen_heap->balance_generations(); heap->free_set()->finish_rebuild(0, 0, num_old); } - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on(_generation->is_old()? 
"Old Mark": "Abbreviated", &ls); - } } else { assert (_abbreviated, "Only rebuild free set for abbreviated"); // Rebuild free set after reclaiming immediate garbage diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 3c2595c3890..c2e6332d738 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1351,6 +1351,10 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("flip_to_old_gc() region " SIZE_FORMAT, r->index()); +#endif gen_heap->generation_sizer()->force_transfer_to_old(1); } // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, @@ -1592,7 +1596,7 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r old_collector_xfer); max_xfer_regions -= old_collector_regions; if (old_collector_regions > 0) { - ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions); + ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->force_transfer_to_young(old_collector_regions); } } @@ -1643,7 +1647,7 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si log_info(gc)("Transfer " SIZE_FORMAT " regions from OLD to YOUNG during rebuild of freeset", original_old_regions - old_region_count); } else if (original_old_regions < old_region_count) { - log_info(gc)("Transfer " SIZE_FORMAT " regions from YOUGN to OLD during rebuild of freeset", + log_info(gc)("Transfer " SIZE_FORMAT " regions from YOUNG to OLD during rebuild of freeset", old_region_count - original_old_regions); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 63cd1cc7873..f88570ce6a6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -1181,19 +1181,15 @@ void ShenandoahFullGC::phase5_epilog() { size_t first_old, last_old, num_old; heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - // We also do not expand old generation size following Full GC because we have scrambled age populations and - // no longer have objects separated by age into distinct regions. - // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions? // A partial solution would be to remember how many objects are of tenure age following Full GC, but // this is probably suboptimal, because most of these objects will not reside in a region that will be // selected for the next evacuation phase. - - +#ifdef KELVIN_DEPRECATE if (heap->mode()->is_generational()) { ShenandoahGenerationalFullGC::compute_balances(); } - +#endif heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old); heap->clear_cancelled_gc(true /* clear oom handler */); @@ -1205,7 +1201,9 @@ void ShenandoahFullGC::phase5_epilog() { // We defer generation resizing actions until after cset regions have been recycled. We do this even following an // abbreviated cycle. 
if (heap->mode()->is_generational()) { +#ifdef KELVIN_DEPRECATE ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set(); +#endif ShenandoahGenerationalFullGC::rebuild_remembered_set(heap); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 41076fe464d..f535ab17969 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -471,11 +471,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, } if (regions_to_xfer > 0) { - bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); - assert(excess_old > regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old"); + assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old"); excess_old -= regions_to_xfer * region_size_bytes; - log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation", - result? "Successfully": "Unsuccessfully", regions_to_xfer); + ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->force_transfer_to_young(regions_to_xfer); } // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index db15b05c468..377a70a2646 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -194,6 +194,20 @@ void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); } +// This is used to transfer excess old-gen regions to young at the start of evacuation after collection set is determined. +void ShenandoahGenerationSizer::force_transfer_to_young(size_t regions) const { + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); + ShenandoahGeneration* old_gen = heap->old_generation(); + ShenandoahGeneration* young_gen = heap->young_generation(); + const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes(); + + young_gen->increase_capacity(bytes_to_transfer); + old_gen->decrease_capacity(bytes_to_transfer); + const size_t new_size = young_gen->max_capacity(); + log_info(gc)("Forcing transfer of " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + regions, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_size)); +} + bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); return transfer_regions(heap->old_generation(), heap->young_generation(), regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp index 5752422bb77..e19c7c3dfbb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp @@ -86,8 +86,13 @@ class ShenandoahGenerationSizer { bool transfer_to_young(size_t regions) const; bool transfer_to_old(size_t regions) const; - // force transfer is used when we promote humongous objects. 
May violate min/max limits on generation sizes + // Force transfer is used when we promote humongous objects or promote regular regions in place. + // May violate min/max limits on generation sizes. void force_transfer_to_old(size_t regions) const; + + // Force transfer is used when we have excess old and we have confirmed that old unaffiliated >= regions. + // May violate min/max limits on generation sizes. + void force_transfer_to_young(size_t regions) const; }; #endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 3f446ad891f..8a933f199f5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -202,6 +202,10 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion young_gen->decrement_affiliated_region_count(); // transfer_to_old() increases capacity of old and decreases capacity of young +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("promote_in_place region " SIZE_FORMAT " shifting " SIZE_FORMAT " of used bytes", region->index(), region_used); +#endif _heap->generation_sizer()->force_transfer_to_old(1); region->set_affiliation(OLD_GENERATION); @@ -249,6 +253,10 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("promote humongous transfers to old " SIZE_FORMAT " regions", spanned_regions); +#endif // transfer_to_old() increases capacity of old and decreases capacity of young _heap->generation_sizer()->force_transfer_to_old(spanned_regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index abd3f3de01f..da11b83ac46 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -97,6 +97,9 @@ void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) heap->old_generation()->set_parseable(true); } +// Full GC has scattered aged objects throughout the heap. There are no more aged regions, so there is no anticipated +// promotion. Furthermore, Full GC has cancelled any ongoing mixed evacuation efforts so there are no anticipated old-gen +// evacuations. Size old-gen to represent its current usage by setting the balance. This feeds into rebuild of freeset. 
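// Editorial worked example of the sizing rule above (assumed values, not part of
// this patch): with 2 MiB regions, old_capacity = 200 MiB and old_usage = 192 MiB
// give excess_old_regions = 4 and region_balance = +4, so the freeset rebuild may
// move four regions to young; old_usage = 208 MiB instead gives
// old_regions_deficit = 4 and region_balance = -4, pulling four regions from young.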
void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
   ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();
@@ -107,19 +110,56 @@ void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap*
   assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
   assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");
 
+  ssize_t region_balance;
   if (old_capacity > old_usage) {
     size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
-    gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
+    // Since the act of FullGC does not honor old and young budgets, excess_old_regions are conceptually unaffiliated.
+    region_balance = checked_cast<ssize_t>(excess_old_regions);
   } else if (old_capacity < old_usage) {
+    // Since old_usage already consumes more regions than old_capacity provides, we know these regions are not
+    // affiliated young, so arrange to transfer them.
     size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
-    gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
+    region_balance = 0 - checked_cast<ssize_t>(old_regions_deficit);
+  } else {
+    region_balance = 0;
+  }
+  old_gen->set_region_balance(region_balance);
+  // Rebuild free set will log adjustments to generation sizes.
+
+  ShenandoahYoungGeneration* const young_gen = gen_heap->young_generation();
+  size_t anticipated_young_capacity = young_gen->max_capacity() + region_balance * ShenandoahHeapRegion::region_size_bytes();
+  size_t young_usage = young_gen->used_regions_size();
+  assert(anticipated_young_capacity >= young_usage, "sanity");
+
+  size_t anticipated_max_collector_reserve = anticipated_young_capacity - young_usage;
+  size_t desired_collector_reserve = (anticipated_young_capacity * ShenandoahEvacReserve) / 100;
+  size_t young_reserve;
+  if (desired_collector_reserve > anticipated_max_collector_reserve) {
+    // Trigger next concurrent GC immediately
+    young_reserve = anticipated_max_collector_reserve;
+  } else {
+    young_reserve = desired_collector_reserve;
   }
 
-  log_info(gc)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
-               PROPERFMTARGS(gen_heap->young_generation()->used()),
-               PROPERFMTARGS(old_gen->used()));
+  size_t reserve_for_promo = 0;
+  size_t reserve_for_mixed = 0;
+
+#ifdef KELVIN_RESERVES
+  size_t old_reserve = reserve_for_promo + reserve_for_mixed;
+  log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT,
+               old_reserve, reserve_for_promo, reserve_for_mixed);
+  log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve);
+  log_info(gc)(" balance: " SSIZE_FORMAT, region_balance);
+#endif
+
+  // Reserves feed into rebuild calculations
+  young_gen->set_evacuation_reserve(young_reserve);
+  old_gen->set_evacuation_reserve(reserve_for_mixed);
+  old_gen->set_promoted_reserve(reserve_for_promo);
 }
 
+#ifdef KELVIN_DEPRECATE
+// deprecate because rebuild does balance
 void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
   auto result = ShenandoahGenerationalHeap::heap()->balance_generations();
   LogTarget(Info, gc, ergo) lt;
@@ -128,6 +168,7 @@ void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set
     LogStream ls(lt);
     result.print_on("Full GC", &ls);
   }
 }
+#endif
 
 void
ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
   LogTarget(Info, gc) lt;
@@ -176,6 +217,7 @@ void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeap
   }
 }
 
+#ifdef KELVIN_DEPRECATE
 void ShenandoahGenerationalFullGC::compute_balances() {
   auto heap = ShenandoahGenerationalHeap::heap();
 
@@ -184,6 +226,7 @@ void ShenandoahGenerationalFullGC::compute_balances() {
   // Invoke this in case we are able to transfer memory from OLD to YOUNG.
   heap->compute_old_generation_balance(0, 0, 0);
 }
+#endif
 
 ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                                                                                  GrowableArray<ShenandoahHeapRegion*>& empty_regions,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp
index d74bcefaaf2..2ed5d8fb304 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp
@@ -55,6 +55,7 @@ class ShenandoahGenerationalFullGC {
   // free set.
   static void compute_balances();
 
+#ifdef KELVIN_DEPRECATE
   // Rebuilding the free set may have resulted in regions being pulled in to the old generation
   // evacuation reserve. For this reason, we must update the usage and capacity of the generations
   // again. In the distant past, the free set did not know anything about generations, so we had
   // redundant and adds complexity. We would like to one day remove it. Until then, we must keep it
   // synchronized with the free set's view of things.
   static void balance_generations_after_rebuilding_free_set();
-
+#endif
   // Logs the number of live bytes marked in the old generation. This is _not_ the same
   // value used as the baseline for the old generation _after_ the full gc is complete.
   // The value reported in the logs does not include objects and regions that may be
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index b45211d2b12..c7b0cd97ac2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -566,6 +566,7 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
   retire_plab(plab, thread);
 }
 
+#ifdef KELVIN_DEPRECATE
 ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
   shenandoah_assert_heaplocked_or_safepoint();
 
@@ -594,6 +595,7 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_g
 
   return TransferResult {true, 0, "none"};
 }
+#endif
 
 // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
 // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
@@ -1187,7 +1189,7 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
     // a more detailed explanation.
     old_generation()->transfer_pointers_from_satb();
   }
-
+#ifdef KELVIN_DEPRECATE
   // We defer generation resizing actions until after cset regions have been recycled.
TransferResult result = balance_generations(); LogTarget(Info, gc, ergo) lt; @@ -1195,7 +1197,7 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { LogStream ls(lt); result.print_on("Degenerated GC", &ls); } - +#endif if (!old_generation()->is_parseable()) { ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill); coalesce_and_fill_old_regions(false); @@ -1214,6 +1216,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { entry_global_coalesce_and_fill(); } +#ifdef KELVIN_DEPRECATE TransferResult result; { ShenandoahHeapLocker locker(lock()); @@ -1226,6 +1229,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { LogStream ls(lt); result.print_on("Concurrent GC", &ls); } +#endif } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index b5aefaf1081..42b2b83ba45 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -121,9 +121,10 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions, size_t young_cset_regions); +#ifdef KELVIN_DEPRECATE // Transfers surplus old regions to young, or takes regions from young to satisfy old region deficit TransferResult balance_generations(); - +#endif // Balances generations, coalesces and fills old regions if necessary void complete_degenerated_cycle(); void complete_concurrent_cycle(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index f095774af3a..0d69ea95101 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -175,7 +175,9 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation when concurrent mark ends"); heap->compute_old_generation_balance(allocation_runway, 0, 0); +#ifdef KELVIN_DEPRECATE result = heap->balance_generations(); +#endif heap->free_set()->finish_rebuild(0, 0, num_old); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index e1f1a1a3d17..c8ef1430b7f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -439,11 +439,16 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { ShenandoahHeap* heap = ShenandoahHeap::heap(); size_t generation_used = generation->used(); size_t generation_used_regions = generation->used_regions(); + size_t generation_max_capacity = generation->max_capacity(); if (adjust_for_padding && (generation->is_young() || generation->is_global())) { size_t pad = heap->old_generation()->get_pad_for_promote_in_place(); generation_used += pad; } + guarantee(stats.committed() <= generation_max_capacity, + "%s: generation (%s) committed: " PROPERFMT " must not exceed generation capacity: " PROPERFMT, + label, generation->name(), PROPERFMTARGS(stats.committed()), PROPERFMTARGS(generation_max_capacity)); + 
guarantee(stats.used() == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used())); From c73e723c6faf042ba503beb880f20dbc4865e1f4 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sat, 13 Jul 2024 20:52:59 +0000 Subject: [PATCH 50/64] Verifier should only count non-trashed committed regions --- .../share/gc/shenandoah/shenandoahVerifier.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index c8ef1430b7f..2e0d088adfd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -384,7 +384,14 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure // span is the total memory affiliated with these stats (some of which is in use and other is available) size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); } - size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); } + size_t non_trashed_span() const { + assert(_regions >= _trashed_regions, "sanity"); + return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); + } + size_t non_trashed_committed() const { + assert(_committed >= _trashed_regions * ShenandoahHeapRegion::region_size_bytes(), "sanity"); + return _committed - (_trashed_regions * ShenandoahHeapRegion::region_size_bytes()); + } }; class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { @@ -445,9 +452,9 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { generation_used += pad; } - guarantee(stats.committed() <= generation_max_capacity, - "%s: generation (%s) committed: " PROPERFMT " must not exceed generation capacity: " PROPERFMT, - label, generation->name(), PROPERFMTARGS(stats.committed()), PROPERFMTARGS(generation_max_capacity)); + guarantee(stats.non_trashed_committed() <= generation_max_capacity, + "%s: generation (%s) non_trashed_committed: " PROPERFMT " must not exceed generation capacity: " PROPERFMT, + label, generation->name(), PROPERFMTARGS(stats.non_trashed_committed()), PROPERFMTARGS(generation_max_capacity)); guarantee(stats.used() == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, From e5c1b69d53585d6a84f091101b6be20830442a19 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sat, 13 Jul 2024 21:04:16 +0000 Subject: [PATCH 51/64] Turn off instrumentation --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 2 +- .../gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index c2e6332d738..39118989116 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1351,7 +1351,7 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx); -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG 
log_info(gc)("flip_to_old_gc() region " SIZE_FORMAT, r->index()); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 8a933f199f5..b942bc2ea01 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -202,7 +202,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion young_gen->decrement_affiliated_region_count(); // transfer_to_old() increases capacity of old and decreases capacity of young -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("promote_in_place region " SIZE_FORMAT " shifting " SIZE_FORMAT " of used bytes", region->index(), region_used); #endif @@ -253,7 +253,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("promote humongous transfers to old " SIZE_FORMAT " regions", spanned_regions); #endif From 34704e4b8e6ab37744523d78f71e3e1c2e29fc5b Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 15 Jul 2024 14:01:42 +0000 Subject: [PATCH 52/64] Remove debug instrumentation and deprecated code --- .../heuristics/shenandoahOldHeuristics.cpp | 50 --------- .../heuristics/shenandoahYoungHeuristics.cpp | 11 -- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 7 -- .../gc/shenandoah/shenandoahDegeneratedGC.cpp | 7 -- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 103 ------------------ .../share/gc/shenandoah/shenandoahFreeSet.hpp | 4 - .../share/gc/shenandoah/shenandoahFullGC.cpp | 8 -- .../gc/shenandoah/shenandoahGeneration.cpp | 28 ----- .../shenandoahGenerationalEvacuationTask.cpp | 8 -- .../shenandoahGenerationalFullGC.cpp | 31 ------ .../shenandoahGenerationalFullGC.hpp | 9 -- .../shenandoah/shenandoahGenerationalHeap.cpp | 99 ----------------- .../shenandoah/shenandoahGenerationalHeap.hpp | 4 - .../share/gc/shenandoah/shenandoahHeap.cpp | 4 - .../share/gc/shenandoah/shenandoahOldGC.cpp | 37 ------- .../gc/shenandoah/shenandoahOldGeneration.cpp | 29 ----- .../gc/shenandoah/shenandoahVerifier.cpp | 29 ----- 17 files changed, 468 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 652a13e512f..e15eb63b54e 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -122,25 +122,13 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c size_t live_data_for_evacuation = r->get_live_data_bytes(); size_t lost_available = r->free(); -#undef KELVIN_SHARE_RESERVES -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)("Trying to add_old_region_to_cset(" SIZE_FORMAT ") with live_data: " SIZE_FORMAT ", lost_available: " SIZE_FORMAT, - r->index(), live_data_for_evacuation, lost_available); -#endif if ((lost_available > 0) && (excess_fragmented_available > 0)) { if (lost_available < excess_fragmented_available) { excess_fragmented_available -= lost_available; lost_available = 0; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Taking lost_available from excess_fragmented_available: " SIZE_FORMAT, excess_fragmented_available); -#endif } else { 
lost_available -= excess_fragmented_available; excess_fragmented_available = 0; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Reducing lost_available to " SIZE_FORMAT ", setting excess_fragmented_available to zero", - lost_available); -#endif } } @@ -153,34 +141,18 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c fragmented_available -= scaled_loss; fragmented_delta = -scaled_loss; scaled_loss = 0; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Reducing fragmented_available to " SIZE_FORMAT ", scaled_loss to zero", - fragmented_available); -#endif } else { scaled_loss -= fragmented_available; fragmented_delta = -fragmented_available; fragmented_available = 0; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Reducing fragmented_available to 0, scaled_loss to " SIZE_FORMAT, - scaled_loss); -#endif } } // Allocate replica from unfragmented memory if that exists size_t evacuation_need = live_data_for_evacuation; if (evacuation_need < unfragmented_available) { unfragmented_available -= evacuation_need;; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Satisfy allocation from unfragmented available: " SIZE_FORMAT, - unfragmented_available); -#endif } else { if (unfragmented_available > 0) { -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Partially satisfy from unfragmented_available: " SIZE_FORMAT ", which becomes zero", - unfragmented_available); -#endif evacuation_need -= unfragmented_available; unfragmented_delta = -unfragmented_available; unfragmented_available = 0; @@ -188,18 +160,10 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c // Take the remaining allocation out of fragmented available if (fragmented_available > evacuation_need) { fragmented_available -= evacuation_need; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Satisfied remnant " SIZE_FORMAT " from fragmented_available: " SIZE_FORMAT, - evacuation_need, fragmented_available); -#endif } else { // We cannot add this region into the collection set. We're done. Undo the adjustments to available. fragmented_available -= fragmented_delta; unfragmented_available -= unfragmented_delta; -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)(" Cannot collect, restored fragmented_available: " SIZE_FORMAT ", unfragmented_available: " SIZE_FORMAT, - fragmented_available, unfragmented_available); -#endif break; } } @@ -359,10 +323,6 @@ bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* co _old_generation->augment_evacuation_reserve(budget_supplement); young_generation->set_evacuation_reserve(max_young_cset - budget_supplement); -#ifdef KELVIN_SHARE_RESERVES - log_info(gc)("top_off_collection_set() transfers " SIZE_FORMAT " bytes from young_reserve to old_reserve", - budget_supplement); -#endif return add_old_regions_to_cset(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, excess_fragmented_available); @@ -454,10 +414,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { // for the collection set here. That happens later during the next young GC cycle, // by which time, the pinned region may no longer be pinned. 
if (!region->has_live()) { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("prepare_for_old_collections() found immediate trash in region " SIZE_FORMAT, i); -#endif assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); #ifdef ASSERT if (!reclaimed_immediate) { @@ -481,9 +437,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { // If they are pinned, we expect them to hold live data, so they will not be // turned into immediate garbage. if (!region->has_live()) { -#ifdef KELVIN_DEBUG - log_info(gc)("prepare_for_old_collections() found immediate humongous start trash in region " SIZE_FORMAT, i); -#endif assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); // The humongous object is dead, we can just return this region and the continuations // immediately to the freeset - no evacuations are necessary here. The continuations @@ -504,9 +457,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { } } else if (region->is_trash()) { // Count humongous objects made into trash here. -#ifdef KELVIN_DEBUG - log_info(gc)("prepare_for_old_collections() found immediate humongous continuation trash in region " SIZE_FORMAT, i); -#endif immediate_regions++; immediate_garbage += garbage; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp index a199fa8ea18..0db04d7ae03 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -67,15 +67,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah // Better select garbage-first regions QuickSort::sort(data, (int) size, compare_by_garbage, false); -#ifdef KELVIN_RESERVES - log_info(gc)("YoungHeuristics::add_preselected() with size: " SIZE_FORMAT, size); -#endif size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size); -#ifdef KELVIN_RESERVES - log_info(gc)("YoungHeuristics::choose_young_cset() with size: " SIZE_FORMAT ", actual_free: " SIZE_FORMAT - ", cur_young_garbage: " SIZE_FORMAT, size, actual_free, cur_young_garbage); -#endif choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage); log_cset_composition(cset); @@ -99,10 +92,6 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection size_t cur_cset = 0; size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; -#ifdef KELVIN_RESERVES - log_info(gc)("YoungHeuristics::choose_young_collection_set with max_cset: " SIZE_FORMAT, max_cset); -#endif - log_info(gc, ergo)( "Adaptive CSet Selection for YOUNG. 
Max Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index e38d9d4c02c..0072e9658f0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1124,10 +1124,6 @@ void ShenandoahConcurrentGC::op_strong_roots() { } void ShenandoahConcurrentGC::op_cleanup_early() { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("ShenConcGC::op_cleanup_early() is recycling trash"); -#endif ShenandoahHeap::heap()->free_set()->recycle_trash(); } @@ -1256,9 +1252,6 @@ void ShenandoahConcurrentGC::op_final_roots() { } void ShenandoahConcurrentGC::op_cleanup_complete() { -#ifdef KELVIN_DEBUG - log_info(gc)("ShenConcGC::op_cleanup_complete() is invoking recycle_trash()"); -#endif ShenandoahHeap::heap()->free_set()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp index c0cb2210364..230bd9381fd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp @@ -382,10 +382,6 @@ bool ShenandoahDegenGC::has_in_place_promotions(const ShenandoahHeap* heap) cons } void ShenandoahDegenGC::op_cleanup_early() { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("ShenDegenGC::op_cleanup_early()"); -#endif ShenandoahHeap::heap()->recycle_trash(); } @@ -431,9 +427,6 @@ void ShenandoahDegenGC::op_update_roots() { void ShenandoahDegenGC::op_cleanup_complete() { ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete); -#ifdef KELVIN_DEBUG - log_info(gc)("ShenDegenGC::op_cleanup_complete()"); -#endif ShenandoahHeap::heap()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 39118989116..e6f8908a34f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -274,11 +274,6 @@ inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(She } inline void ShenandoahRegionPartitions::adjust_interval_for_recycled_old_region(ShenandoahHeapRegion* r) { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("adjusting interval for recycled region " SIZE_FORMAT ", free: " SIZE_FORMAT ", is_trash: %s, is_old: %s", - r->index(), r->free(), r->is_trash()? "true": "false", r->is_old()? "true": "false"); -#endif assert(!r->is_trash() && (r->free() == _region_size_bytes), "Bad argument"); idx_t idx = (idx_t) r->index(); // Note that a recycled old trashed region may be in any one of the free set partitions according to the following scenarios: @@ -587,13 +582,6 @@ void ShenandoahRegionPartitions::assert_bounds(bool old_trash_not_in_bounds) { // Only in case 2 do we need to disable the assert checking, but it is difficult to distinguish case 2 from case 5, // so we do not assert bounds for case 2 or case 5. 
-#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - if (old_trash_not_in_bounds && r->is_old() && r->is_trash()) { - log_info(gc)("assert_bounds() sees old trashed region " SIZE_FORMAT " residing in partition %d", i, int(partition)); - } -#endif - if (old_trash_not_in_bounds && (partition == ShenandoahFreeSetPartitionId::OldCollector) && r->is_old() && r->is_trash()) { // If Old trash has been identified but we have not yet rebuilt the freeset to acount for the trashed regions, // or if old trash has not yet been recycled, do not expect these trash regions to be within the OldCollector @@ -1304,15 +1292,9 @@ void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) { void ShenandoahFreeSet::recycle_trash() { // lock is not reentrable, check we don't have it shenandoah_assert_not_heaplocked(); -#ifdef KELVIN_DEBUG - int recycled_regions = 0; -#endif for (size_t i = 0; i < _heap->num_regions(); i++) { ShenandoahHeapRegion* r = _heap->get_region(i); if (r->is_trash()) { -#ifdef KELVIN_DEBUG - recycled_regions++; -#endif ShenandoahHeapLocker locker(_heap->lock()); try_recycle_trashed(r); #ifdef ASSERT @@ -1326,9 +1308,6 @@ void ShenandoahFreeSet::recycle_trash() { SpinPause(); // allow allocators to take the lock } #ifdef ASSERT -#ifdef KELVIN_DEBUG - log_info(gc)("WILL ROBINSON, WARNING IS NO LONGER REQUIRED AFTER TRYING TO RECYCLE %d!", recycled_regions); -#endif ShenandoahHeapLocker locker(_heap->lock()); _old_trash_not_in_bounds = false; #endif @@ -1351,10 +1330,6 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx); -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("flip_to_old_gc() region " SIZE_FORMAT, r->index()); -#endif gen_heap->generation_sizer()->force_transfer_to_old(1); } // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, @@ -1432,9 +1407,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi // into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the // OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this // time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions(). -#ifdef KELVIN_DEBUG - log_info(gc)("find_regions_with_alloc_capacity() is looking at old trash region " SIZE_FORMAT, idx); -#endif old_cset_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); @@ -1456,11 +1428,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi if (ac > PLAB::min_size() * HeapWordSize) { if (region->is_trash() || !region->is_old()) { // Young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set -#ifdef KELVIN_DEBUG - if (region->is_old()) { - log_info(gc)("find_regions_with_alloc_capacity() moves old trash region " SIZE_FORMAT " to mutator partition", idx); - } -#endif _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator); if (idx < mutator_leftmost) { mutator_leftmost = idx; @@ -1790,10 +1757,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // the collector set is at least to_reserve and the memory available for allocations within the old collector set // is at least to_reserve_old. 
void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - size_t initial_old_region_count = old_region_count; -#endif for (size_t i = _heap->num_regions(); i > 0; i--) { size_t idx = i - 1; ShenandoahHeapRegion* r = _heap->get_region(idx); @@ -1819,43 +1782,8 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old // be collected in the near future. if (r->is_trash() || !r->is_affiliated()) { // OLD regions that have available memory are already in the old_collector free set. -#ifdef KELVIN_DEBUG - if (_old_trash_not_in_bounds) { - log_info(gc)("reserve_regions() moving %s %sregion " SIZE_FORMAT " with alloc capacity " SIZE_FORMAT - " from Mutator to OldCollector", - r->is_affiliated()? r->affiliation_name(): "unaffiliated", r->is_trash()? "trash ": "", idx, ac); - log_info(gc)("Before move: Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," - " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); - log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," - " Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); - } -#endif _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, ac); -#ifdef KELVIN_DEBUG - if (_old_trash_not_in_bounds) { - log_info(gc)("After move: Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," - " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); - log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," - " Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); - } -#endif log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx); log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]," " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", @@ -1904,10 +1832,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); } -#ifdef KELVIN_DEBUG - log_info(gc)("At end of reserve_regions(), old_region_count: " SIZE_FORMAT ", at entry, was: " SIZE_FORMAT, - old_region_count, initial_old_region_count); -#endif } void ShenandoahFreeSet::establish_old_collector_alloc_bias() { @@ -1949,13 +1873,8 @@ void ShenandoahFreeSet::log_status() { 
shenandoah_assert_heaplocked(); #ifdef ASSERT -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - if (true) { -#else // Dump of the FreeSet details is only enabled if assertions are enabled if (LogTarget(Debug, gc, free)::is_enabled()) { -#endif #define BUFFER_SIZE 80 size_t retired_old = 0; size_t retired_old_humongous = 0; @@ -1977,22 +1896,6 @@ void ShenandoahFreeSet::log_status() { for (uint i = 0; i < BUFFER_SIZE; i++) { buffer[i] = '\0'; } -#ifdef KELVIN_DEBUG - log_info(gc)("FreeSet map legend:" - " M:mutator_free C:collector_free O:old_collector_free" - " H:humongous ~:retired old _:retired young"); - log_info(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, " - " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], " - "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left"); -#endif log_debug(gc)("FreeSet map legend:" " M:mutator_free C:collector_free O:old_collector_free" " H:humongous ~:retired old _:retired young"); @@ -2012,9 +1915,6 @@ void ShenandoahFreeSet::log_status() { ShenandoahHeapRegion *r = _heap->get_region(i); uint idx = i % 64; if ((i != 0) && (idx == 0)) { -#ifdef KELVIN_DEBUG - log_info(gc)(" %6u: %s", i-64, buffer); -#endif log_debug(gc)(" %6u: %s", i-64, buffer); } if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) { @@ -2060,9 +1960,6 @@ void ShenandoahFreeSet::log_status() { } else { remnant = 64; } -#ifdef KELVIN_DEBUG - log_info(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer); -#endif log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer); } #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 25d8615575c..1477c7a6a07 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -410,10 +410,6 @@ class ShenandoahFreeSet : public CHeapObj { #ifdef ASSERT // Advise FreeSet that old trash regions have not yet been accounted for in OldCollector partition bounds void advise_of_old_trash() { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("DANGER WILL ROBINSON! old_trash is not in bounds!"); -#endif shenandoah_assert_heaplocked(); _old_trash_not_in_bounds = true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index f88570ce6a6..1518c3c76fe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -1185,11 +1185,6 @@ void ShenandoahFullGC::phase5_epilog() { // A partial solution would be to remember how many objects are of tenure age following Full GC, but // this is probably suboptimal, because most of these objects will not reside in a region that will be // selected for the next evacuation phase. 
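The debug-level free-set map emitted by log_status() above renders one character per region, sixty-four regions per row, using the legend M:mutator_free C:collector_free O:old_collector_free H:humongous ~:retired old _:retired young. The following standalone sketch illustrates the rendering scheme; the region-state enum is a hypothetical stand-in for the real partition queries:

```cpp
#include <cstdio>

// Hypothetical stand-in for the per-region partition queries in log_status().
enum RegionState { MutatorFree, CollectorFree, OldCollectorFree, Humongous, RetiredOld, RetiredYoung };

static char map_char(RegionState s) {
  switch (s) {
    case MutatorFree:      return 'M';
    case CollectorFree:    return 'C';
    case OldCollectorFree: return 'O';
    case Humongous:        return 'H';
    case RetiredOld:       return '~';
    default:               return '_';   // RetiredYoung
  }
}

int main() {
  RegionState regions[130];                      // pretend heap with 130 regions
  for (int i = 0; i < 130; i++) {
    regions[i] = (i < 64) ? MutatorFree : (i < 100) ? RetiredYoung : OldCollectorFree;
  }
  char buffer[65];
  for (int i = 0; i < 130; i++) {
    int idx = i % 64;
    if (i != 0 && idx == 0) {                    // flush a completed row of 64 regions
      buffer[64] = '\0';
      printf(" %6d: %s\n", i - 64, buffer);
    }
    buffer[idx] = map_char(regions[i]);
  }
  int remnant = (130 % 64) ? (130 % 64) : 64;    // flush the partial final row
  buffer[remnant] = '\0';
  printf(" %6d: %s\n", 130 - remnant, buffer);
  return 0;
}
```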
-#ifdef KELVIN_DEPRECATE - if (heap->mode()->is_generational()) { - ShenandoahGenerationalFullGC::compute_balances(); - } -#endif heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old); heap->clear_cancelled_gc(true /* clear oom handler */); @@ -1201,9 +1196,6 @@ void ShenandoahFullGC::phase5_epilog() { // We defer generation resizing actions until after cset regions have been recycled. We do this even following an // abbreviated cycle. if (heap->mode()->is_generational()) { -#ifdef KELVIN_DEPRECATE - ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set(); -#endif ShenandoahGenerationalFullGC::rebuild_remembered_set(heap); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index f535ab17969..6c471403eb7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -150,31 +150,15 @@ void ShenandoahGeneration::increase_allocated(size_t bytes) { } void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { -#undef KELVIN_RESERVES -#ifdef KELVIN_RESERVES - if (is_old()) { - log_info(gc)("set_evacuation_reserve(" SIZE_FORMAT ")", new_val); - } -#endif _evacuation_reserve = new_val; } size_t ShenandoahGeneration::get_evacuation_reserve() const { -#ifdef KELVIN_RESERVES - if (is_old()) { - log_info(gc)("get_evacuation_reserve() yields: " SIZE_FORMAT, _evacuation_reserve); - } -#endif return _evacuation_reserve; } void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) { _evacuation_reserve += increment; -#ifdef KELVIN_RESERVES - if (is_old()) { - log_info(gc)("augment_evacuation_reserve(" SIZE_FORMAT ") yields: " SIZE_FORMAT, increment, _evacuation_reserve); - } -#endif } void ShenandoahGeneration::log_status(const char *msg) const { @@ -621,11 +605,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { if (heap->is_aging_cycle() && (r->age() + 1 == tenuring_threshold)) { if (r->garbage() >= old_garbage_threshold) { promo_potential += r->get_live_data_bytes(); -#undef KELVIN_RESERVES -#ifdef KELVIN_RESERVES - log_info(gc)("Adding " SIZE_FORMAT " to promo potential for region " SIZE_FORMAT " of age %u vs threshold %u", - r->get_live_data_bytes(), r->index(), r->age(), tenuring_threshold); -#endif } } } @@ -651,10 +630,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // We rejected this promotable region from the collection set because we had no room to hold its copy. // Add this region to promo potential for next GC. 
promo_potential += region_live_data; -#ifdef KELVIN_RESERVES - log_info(gc)("Adding " SIZE_FORMAT " to promo potential for rejected region " SIZE_FORMAT " of age %u vs threshold %u", - region_live_data, region->index(), region->age(), tenuring_threshold); -#endif assert(!candidate_regions_for_promotion_by_copy[region->index()], "Shouldn't be selected"); } // We keep going even if one region is excluded from selection because we need to accumulate all eligible @@ -667,9 +642,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad); heap->old_generation()->set_promotion_potential(promo_potential); -#ifdef KELVIN_RESERVES - log_info(gc)("Establishing promo_potential as " SIZE_FORMAT, promo_potential); -#endif return old_consumed; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index b942bc2ea01..3f446ad891f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -202,10 +202,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion young_gen->decrement_affiliated_region_count(); // transfer_to_old() increases capacity of old and decreases capacity of young -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("promote_in_place region " SIZE_FORMAT " shifting " SIZE_FORMAT " of used bytes", region->index(), region_used); -#endif _heap->generation_sizer()->force_transfer_to_old(1); region->set_affiliation(OLD_GENERATION); @@ -253,10 +249,6 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("promote humongous transfers to old " SIZE_FORMAT " regions", spanned_regions); -#endif // transfer_to_old() increases capacity of old and decreases capacity of young _heap->generation_sizer()->force_transfer_to_old(spanned_regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index da11b83ac46..185da373762 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -144,32 +144,12 @@ void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* size_t reserve_for_promo = 0; size_t reserve_for_mixed = 0; -#ifdef KELVIN_RESERVES - size_t old_reserve = reserve_for_promo + reserve_for_mixed; - log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, - old_reserve, reserve_for_promo, reserve_for_mixed); - log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); - log_info(gc)(" balance: " SSIZE_FORMAT, region_balance); -#endif - // Reserves feed into rebuild calculations young_gen->set_evacuation_reserve(young_reserve); old_gen->set_evacuation_reserve(reserve_for_mixed); old_gen->set_promoted_reserve(reserve_for_promo); } -#ifdef KELVIN_DEPRECATE -// deprecate because rebuild does balance -void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() { - auto result = ShenandoahGenerationalHeap::heap()->balance_generations(); - LogTarget(Info, gc, ergo) lt; - if 
(lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Full GC", &ls); - } -} -#endif - void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) { LogTarget(Info, gc) lt; if (lt.is_enabled()) { @@ -217,17 +197,6 @@ void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeap } } -#ifdef KELVIN_DEPRECATE -void ShenandoahGenerationalFullGC::compute_balances() { - auto heap = ShenandoahGenerationalHeap::heap(); - - // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion. - heap->old_generation()->set_promotion_potential(0); - // Invoke this in case we are able to transfer memory from OLD to YOUNG. - heap->compute_old_generation_balance(0, 0, 0); -} -#endif - ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks, GrowableArray& empty_regions, ShenandoahHeapRegion* from_region, uint worker_id) : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp index 2ed5d8fb304..4235b4b1612 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp @@ -55,15 +55,6 @@ class ShenandoahGenerationalFullGC { // free set. static void compute_balances(); -#ifdef KELVIN_DEPRECATE - // Rebuilding the free set may have resulted in regions being pulled in to the old generation - // evacuation reserve. For this reason, we must update the usage and capacity of the generations - // again. In the distant past, the free set did not know anything about generations, so we had - // a layer built above it to represent how much young/old memory was available. This layer is - // redundant and adds complexity. We would like to one day remove it. Until then, we must keep it - // synchronized with the free set's view of things. - static void balance_generations_after_rebuilding_free_set(); -#endif // Logs the number of live bytes marked in the old generation. This is _not_ the same // value used as the baseline for the old generation _after_ the full gc is complete. 
// The value reported in the logs does not include objects and regions that may be diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index c7b0cd97ac2..68b5017bebe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -566,37 +566,6 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) { retire_plab(plab, thread); } -#ifdef KELVIN_DEPRECATE -ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() { - shenandoah_assert_heaplocked_or_safepoint(); - - ShenandoahOldGeneration* old_gen = old_generation(); - const ssize_t old_region_balance = old_gen->get_region_balance(); - old_gen->set_region_balance(0); - - if (old_region_balance > 0) { - const auto old_region_surplus = checked_cast(old_region_balance); - const bool success = generation_sizer()->transfer_to_young(old_region_surplus); - return TransferResult { - success, old_region_surplus, "young" - }; - } - - if (old_region_balance < 0) { - const auto old_region_deficit = checked_cast(-old_region_balance); - const bool success = generation_sizer()->transfer_to_old(old_region_deficit); - if (!success) { - old_gen->handle_failed_transfer(); - } - return TransferResult { - success, old_region_deficit, "old" - }; - } - - return TransferResult {true, 0, "none"}; -} -#endif - // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to // mutator_xfer_limit, and any surplus is transferred to the young generation. @@ -635,12 +604,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#undef KELVIN_RESERVES -#ifdef KELVIN_RESERVES - log_info(gc)("proposed_max_old: " SIZE_FORMAT ", bound_on_old_reserve: " SIZE_FORMAT - ", old_available: " SIZE_FORMAT ", young_reserve: " SIZE_FORMAT ", mutator_xfer_limit: " SIZE_FORMAT, - proposed_max_old, bound_on_old_reserve, old_available, young_reserve, mutator_xfer_limit); -#endif if (young_reserve > young_available) { young_reserve = young_available; @@ -662,10 +625,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t max_old_reserve = proposed_max_old; const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); const bool doing_mixed = (mixed_candidate_live_memory > 0); -#ifdef KELVIN_RESERVES - log_info(gc)("adjusted_max_old: " SIZE_FORMAT ", mixed_candidate_live_memory: " SIZE_FORMAT, - max_old_reserve, mixed_candidate_live_memory); -#endif if (doing_mixed) { // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation. @@ -676,10 +635,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless // we already have too much fragmented available memory in old. 
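The MIN2 bound applied above treats ShenandoahOldEvacRatioPercent as old's share of the combined evacuation reserve: if old may take at most P percent of the total and young's reserve is Y, old is capped at Y * P / (100 - P). A worked sketch with made-up numbers:

```cpp
#include <cassert>
#include <cstdio>

// Upper bound on the old evacuation reserve, given young's reserve and the
// percentage of the combined reserve that old evacuation may consume.
// Mirrors the MIN2(...) expression above; p == 100 means old is limited only
// by what it has available.
size_t bound_old_reserve(size_t young_reserve, size_t old_available, unsigned p) {
  assert(p <= 100);
  if (p == 100) return old_available;
  size_t ratio_bound = (young_reserve * p) / (100 - p);
  return ratio_bound < old_available ? ratio_bound : old_available;
}

int main() {
  // Hypothetical numbers: 256 MB young reserve, 512 MB available in old.
  const size_t M = 1024 * 1024;
  printf("%zu\n", bound_old_reserve(256 * M, 512 * M, 12) / M);  // 256*12/88 = 34 MB
  printf("%zu\n", bound_old_reserve(256 * M, 512 * M, 50) / M);  // 256 MB: old may match young
}
```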
reserve_for_mixed = max_evac_need; -#ifdef KELVIN_RESERVES - log_info(gc)("max_evac_need: " SIZE_FORMAT ", old_available: " SIZE_FORMAT ", old_fragmented_available: " SIZE_FORMAT, - max_evac_need, old_available, old_fragmented_available); -#endif if (reserve_for_mixed + reserve_for_promo > max_old_reserve) { // In this case, we'll allow old-evac to target some of the fragmented old memory. size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve; @@ -696,11 +651,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x // Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacations // over promotions. const size_t promo_load = old_generation()->get_promotion_potential(); -#ifdef KELVIN_RESERVES - log_info(gc)("promo_load fetched from old-gen is: " SIZE_FORMAT ", times PromoEvacWaste: " SIZE_FORMAT - ", available_for_promotions: " SIZE_FORMAT, - promo_load, (size_t) (promo_load * ShenandoahPromoEvacWaste), max_old_reserve - reserve_for_mixed); -#endif const bool doing_promotions = promo_load > 0; if (doing_promotions) { // We've already set aside all of the fragmented available memory within old-gen to represent old objects @@ -730,13 +680,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes; // align the mutator_xfer_limit on region size mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes; -#ifdef KELVIN_RESERVES - log_info(gc)("compute_old_generation_balance(), old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, - old_reserve, reserve_for_promo, reserve_for_mixed); - log_info(gc)(" old_available: " SIZE_FORMAT, old_available); - log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); - log_info(gc)("mutator_xfer_limit: " SIZE_FORMAT, mutator_xfer_limit); -#endif if (old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young const size_t old_surplus = old_available - old_reserve; @@ -744,18 +687,11 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); old_generation()->set_region_balance(checked_cast(old_region_surplus)); -#ifdef KELVIN_RESERVES - log_info(gc)("old_avail > old_reserve, relinquishing " SIZE_FORMAT " regions from old to young", old_region_surplus); -#endif } else if (old_available + mutator_xfer_limit >= old_reserve) { // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there size_t old_deficit = old_reserve - old_available; old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); -#ifdef KELVIN_RESERVES - log_info(gc)("old_avail + mutator_xfer_limit > old_reserve, consuming " SIZE_FORMAT " regions from mutator_xfer_limit", - old_region_deficit); -#endif } else { // We'll try to xfer from both mutator excess and from young collector reserve size_t available_reserves = old_available + young_reserve + mutator_xfer_limit; @@ -770,10 +706,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x entitled_xfer = unaffiliated_young_memory; } old_entitlement = old_available + entitled_xfer; -#ifdef KELVIN_RESERVES - 
log_info(gc)("working with old_entitlement: " SIZE_FORMAT " based on unaffiliated_young_memory: " SIZE_FORMAT, - old_entitlement, unaffiliated_young_memory); -#endif if (old_entitlement < old_reserve) { // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. size_t budget_overrun = old_reserve - old_entitlement;; @@ -801,13 +733,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); -#ifdef KELVIN_RESERVES - log_info(gc)("after adjustments, old_reserve: " SIZE_FORMAT " from promo: " SIZE_FORMAT " and evac: " SIZE_FORMAT, - old_reserve, reserve_for_promo, reserve_for_mixed); - log_info(gc)(" old_available: " SIZE_FORMAT, old_available); - log_info(gc)(" young_reserve: " SIZE_FORMAT, young_reserve); - log_info(gc)(" balance: " SSIZE_FORMAT, 0 - checked_cast(old_region_deficit)); -#endif } assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both"); @@ -1189,15 +1114,6 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { // a more detailed explanation. old_generation()->transfer_pointers_from_satb(); } -#ifdef KELVIN_DEPRECATE - // We defer generation resizing actions until after cset regions have been recycled. - TransferResult result = balance_generations(); - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Degenerated GC", &ls); - } -#endif if (!old_generation()->is_parseable()) { ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill); coalesce_and_fill_old_regions(false); @@ -1215,21 +1131,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { // throw off the heuristics. entry_global_coalesce_and_fill(); } - -#ifdef KELVIN_DEPRECATE - TransferResult result; - { - ShenandoahHeapLocker locker(lock()); - - result = balance_generations(); - } - - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Concurrent GC", &ls); - } -#endif } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index 42b2b83ba45..232dea6617a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -121,10 +121,6 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions, size_t young_cset_regions); -#ifdef KELVIN_DEPRECATE - // Transfers surplus old regions to young, or takes regions from young to satisfy old region deficit - TransferResult balance_generations(); -#endif // Balances generations, coalesces and fills old regions if necessary void complete_degenerated_cycle(); void complete_concurrent_cycle(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 3611abe10c0..84ba1faae4c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1949,10 +1949,6 @@ void ShenandoahHeap::rendezvous_threads() { } void ShenandoahHeap::recycle_trash() { -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("ShenHeap::recycle_trash() calls 
free_set()->recycle_trash()"); -#endif free_set()->recycle_trash(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 0d69ea95101..5a48b665da4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -144,48 +144,11 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { // entry_class_unloading(); // } - assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc."); // We must execute this vm operation if we completed final mark. We cannot return from here with weak roots in progress. // This is not a valid gc state for any young collections (or allocation failures) that interrupt the old collection. // This will reclaim immediate garbage. vmop_entry_final_roots() will also rebuild the free set. vmop_entry_final_roots(); - -#ifdef KELVIN_DEPRECATE - // Deprecating because vmop_entry_final_roots() does the free-set rebuild. - - // After concurrent old marking finishes, we may be able to reclaim immediate garbage from regions that are fully garbage. - // Furthermore, we may want to expand OLD in order to make room for the first mixed evacuation that immediately follows - // completion of OLD marking. This is why we rebuild free set here. - ShenandoahGenerationalHeap::TransferResult result; - { - // Though we did not choose a collection set above, we still may have freed up immediate garbage regions so - // proceed with rebuilding the free set. A second reason to rebuild free set now is to prepare for mixed evacuations - // which are likely to follow completion of old-gen marking. Preparation for mixed evacuations likely involves - // expansion of the old generation. - - // Old marking does not degenerate. It is always concurrent. In case of out-of-cycle memory allocation failures - // while old marking is ongoing, we will degenerate to a young GC, which may, if necessary upgrade to Full GC. - // If the young degenerated GC upgrades to full GC, concurrent old marking will be cancelled. 
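For orientation, compute_old_generation_balance() (earlier in this patch) resolves to one of three cases: old runs a surplus that is returned to young, the mutator transfer limit covers old's deficit, or both the mutator budget and the young collector reserve must be tapped and old's intentions scaled back. A condensed sketch of just the first two cases, with illustrative helper names rather than the real API:

```cpp
#include <cstdio>

// Illustrative sketch: a positive result means regions old can give back to
// young; a negative result means regions old still needs. The real code also
// taps the young collector reserve when the mutator budget falls short.
long old_region_balance(size_t old_available, size_t old_reserve,
                        size_t mutator_xfer_limit, size_t region_size) {
  if (old_available >= old_reserve) {
    return (long) ((old_available - old_reserve) / region_size);       // surplus, round down
  }
  size_t deficit = old_reserve - old_available;
  size_t deficit_regions = (deficit + region_size - 1) / region_size;  // deficit, round up
  size_t xfer_regions = mutator_xfer_limit / region_size;
  if (xfer_regions < deficit_regions) deficit_regions = xfer_regions;  // curtailed transfer
  return -(long) deficit_regions;
}

int main() {
  const size_t R = 2u << 20;                                      // 2 MB regions (hypothetical)
  printf("%ld\n", old_region_balance(40 * R, 32 * R, 0, R));      // +8: surplus to young
  printf("%ld\n", old_region_balance(10 * R, 14 * R, 6 * R, R));  // -4: deficit from mutator
}
```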
- ShenandoahHeapLocker locker(heap->lock()); - size_t young_cset_regions, old_cset_regions; - size_t first_old, last_old, num_old; - size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); - heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation when concurrent mark ends"); - heap->compute_old_generation_balance(allocation_runway, 0, 0); -#ifdef KELVIN_DEPRECATE - result = heap->balance_generations(); -#endif - heap->free_set()->finish_rebuild(0, 0, num_old); - } - - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Old Mark", &ls); - } -#endif return true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index bf5915c5182..41149ab8631 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -200,26 +200,16 @@ ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_cap void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) { shenandoah_assert_heaplocked_or_safepoint(); -#undef KELVIN_RESERVES -#ifdef KELVIN_RESERVES - log_info(gc)("set_promoted_reserve(" SIZE_FORMAT ")", new_val); -#endif _promoted_reserve = new_val; } size_t ShenandoahOldGeneration::get_promoted_reserve() const { -#ifdef KELVIN_RESERVES - log_info(gc)("get_promoted_reserve() yields: " SIZE_FORMAT, _promoted_reserve); -#endif return _promoted_reserve; } void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) { shenandoah_assert_heaplocked_or_safepoint(); _promoted_reserve += increment; -#ifdef KELVIN_RESERVES - log_info(gc)("augment_promoted_reserve(" SIZE_FORMAT ") yields " SIZE_FORMAT, increment, _promoted_reserve); -#endif } void ShenandoahOldGeneration::reset_promoted_expended() { @@ -473,25 +463,6 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent ShenandoahHeapLocker locker(heap->lock()); _old_heuristics->prepare_for_old_collections(); } - -#ifdef KELVIN_DEPRECATE - // Kelvin is removing this code because vmop_entry_final_roots() does the rebuild. - - { - // Though we did not choose a collection set above, we still may have - // freed up immediate garbage regions so proceed with rebuilding the free set. - ShenandoahGCPhase phase(concurrent ? - ShenandoahPhaseTimings::final_rebuild_freeset : - ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); - ShenandoahHeapLocker locker(heap->lock()); - size_t cset_young_regions, cset_old_regions; - size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old); - // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here - // is in case there was any immediate old garbage identified. 
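The promoted-reserve accessors above follow a set/augment/query discipline guarded by the heap lock. A minimal sketch of the same accounting pattern follows; this is not the real class, and try_expend() in particular is an invented stand-in for the promotion sites that charge against the budget:

```cpp
#include <cassert>
#include <cstddef>

// Minimal sketch of the reserve-accounting pattern used for promotions;
// illustrative only, not ShenandoahOldGeneration.
class PromotedReserve {
  size_t _reserve = 0;   // budgeted bytes for promotions this cycle
  size_t _expended = 0;  // bytes already promoted against the budget
public:
  void set(size_t v)       { _reserve = v; _expended = 0; }  // per-cycle reset (our assumption)
  void augment(size_t inc) { _reserve += inc; }              // e.g. when topping off mixed evacs
  bool try_expend(size_t n) {                                // hypothetical promotion-site check
    if (_expended + n > _reserve) return false;
    _expended += n;
    return true;
  }
  size_t remaining() const { return _reserve - _expended; }
};

int main() {
  PromotedReserve pr;
  pr.set(8 * 1024 * 1024);
  assert(pr.try_expend(6 * 1024 * 1024));
  assert(!pr.try_expend(4 * 1024 * 1024));  // would overshoot the budget
  assert(pr.remaining() == 2 * 1024 * 1024);
}
```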
- heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old); - } -#endif } const char* ShenandoahOldGeneration::state_name(State state) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 2e0d088adfd..dff9f311eed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -46,10 +46,6 @@ #include "runtime/orderAccess.hpp" #include "runtime/threads.hpp" #include "utilities/align.hpp" -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG -#include "gc/shenandoah/shenandoahFreeSet.hpp" -#endif // Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp) #ifdef verify_oop @@ -417,23 +413,6 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { } } -#ifdef KELVIN_DEBUG - static bool dump_details(ShenandoahGeneration* generation) { - log_info(gc)("Safepoint verification is unhappy with Generation %s", generation->name()); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - for (unsigned int i = 0; i < heap->num_regions(); i++) { - ShenandoahHeapRegion* r = heap->get_region(i); - log_info(gc)("%s region %u BTE (" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ") is %s", - r->is_affiliated()? (r->is_young()? (r->is_humongous()? "Young humongous": "Young regular"): - (r->is_humongous()? "Old humongous": "Old regular")): "Free", - i, p2i(r->bottom()), p2i(r->top()), p2i(r->end()), - r->is_trash()? "trash": "not trash"); - } - heap->free_set()->log_status(); - return true; - } -#endif - static void log_usage(ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { log_debug(gc)("Safepoint verification: %s verified usage: " SIZE_FORMAT "%s, recorded usage: " SIZE_FORMAT "%s", generation->name(), @@ -465,19 +444,11 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { label, generation->name(), generation->used_regions(), stats.regions()); size_t generation_capacity = generation->max_capacity(); -#ifdef KELVIN_DEBUG - guarantee(stats.non_trashed_span() <= generation_capacity, - "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT - ") must not exceed current capacity (" PROPERFMT ")", - dump_details(generation)? 
label: label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()), - PROPERFMTARGS(generation_capacity)); -#else guarantee(stats.non_trashed_span() <= generation_capacity, "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT ") must not exceed current capacity (" PROPERFMT ")", label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(generation_capacity)); -#endif size_t humongous_waste = generation->get_humongous_waste(); guarantee(stats.waste() == humongous_waste, "%s: generation (%s) humongous waste must be consistent: generation: " PROPERFMT ", regions: " PROPERFMT, From 9e148263d367a4ef31bd180f01ffdf1423c2db56 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 15 Jul 2024 14:22:51 +0000 Subject: [PATCH 53/64] Fix whitespace --- src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index dff9f311eed..35d844a6b04 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -380,7 +380,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure // span is the total memory affiliated with these stats (some of which is in use and other is available) size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); } - size_t non_trashed_span() const { + size_t non_trashed_span() const { assert(_regions >= _trashed_regions, "sanity"); return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); } From 33eacea7a90523f55a8b25a533101fb06b33d323 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 15 Jul 2024 14:25:16 +0000 Subject: [PATCH 54/64] Remove unreferenced local variable --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 0072e9658f0..7abb6acc1d1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -344,7 +344,6 @@ void ShenandoahConcurrentGC::entry_final_roots() { // After concurrent old marking finishes and after an abbreviated cycle, we reclaim immediate garbage. // Further, we may also want to expand OLD in order to make room for anticipated promotions and/or for mixed // evacuations. Mixed evacuations are especially likely to following the end of OLD marking. 
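Patch 53's whitespace fix above lands in non_trashed_span(), which the verifier compares against generation capacity: immediately after a cycle, trash regions awaiting recycling may legitimately push the raw span over capacity, but the non-trashed span must still fit. A worked sketch of that invariant with a made-up region size:

```cpp
#include <cassert>
#include <cstddef>

// Sketch of the verifier's span bookkeeping (simplified; the region size is a
// made-up constant, not ShenandoahHeapRegion::region_size_bytes()).
struct RegionStats {
  size_t regions = 0;
  size_t trashed_regions = 0;
  static constexpr size_t region_size_bytes = 2u << 20;  // 2 MB

  size_t span() const { return regions * region_size_bytes; }
  size_t non_trashed_span() const {
    assert(regions >= trashed_regions);                  // sanity
    return (regions - trashed_regions) * region_size_bytes;
  }
};

int main() {
  RegionStats stats{100, 12};                  // 12 regions are trash awaiting recycle
  size_t capacity = 90 * RegionStats::region_size_bytes;
  // span() may exceed capacity right after a cycle because trash regions have
  // not been recycled yet; non_trashed_span() must not.
  assert(stats.span() > capacity);
  assert(stats.non_trashed_span() <= capacity);
}
```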
-  ShenandoahGenerationalHeap::TransferResult result;
   {
     ShenandoahHeapLocker locker(heap->lock());
     ShenandoahGenerationalHeap* const gen_heap = ShenandoahGenerationalHeap::heap();

From 3e38c8a33c76ad9d8cc4be74ac35e8c4df9f2e97 Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Tue, 16 Jul 2024 03:31:25 +0000
Subject: [PATCH 55/64] Remove declaration of unused variables

---
 .../gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
index 9c9053dec24..8800f8a99fb 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp
@@ -129,9 +129,6 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
   size_t old_evac_bytes = 0;
   size_t young_evac_bytes = 0;
-  // Keep track of how many regions we plan to transfer from young to old.
-  size_t shared_regions_consumed_by_old, shared_regions_consumed_by_young;
   size_t max_total_cset = (max_young_cset + max_old_cset +
                            (size_t) (shared_reserve_regions * region_size_bytes) / ShenandoahOldEvacWaste);
   size_t free_target = ((young_capacity + old_capacity) * ShenandoahMinFreeThreshold) / 100 + max_total_cset;
@@ -170,7 +167,6 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
   }
   // We already know: add_regardless || region_garbage > garbage_threshold
   if (new_cset <= max_old_cset) {
-    shared_regions_consumed_by_old += proposed_old_region_consumption;
     add_region = true;
     old_cur_cset = new_cset;
     cur_garbage = new_garbage;

From d05885ae8abf2a9c8ab9d3a8a02c102071b131dc Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Tue, 16 Jul 2024 20:18:00 +0000
Subject: [PATCH 56/64] Use mixed evac rather than piggyback to describe old-gen evacuations

---
 .../shenandoahGenerationalHeuristics.cpp   | 42 +++++++++----------
 .../heuristics/shenandoahOldHeuristics.cpp | 26 ++++++------
 .../heuristics/shenandoahOldHeuristics.hpp | 30 ++++++-------
 3 files changed, 49 insertions(+), 49 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index 11024bf09c8..e997a19da95 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -171,26 +171,26 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
   if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
     // Only young collections need to prime the collection set.
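Patch 55's deletion above is more than tidying: shared_regions_consumed_by_old was declared without an initializer and then incremented with +=, which reads an indeterminate value. A minimal illustration of the hazard the removal eliminates:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t consumed;          // no initializer, like the deleted declaration
  (void) consumed;
  // consumed += 16;        // would read an indeterminate value: undefined behavior
  size_t fixed = 0;         // the safe spelling, had the variable actually been needed
  fixed += 16;
  printf("%zu\n", fixed);   // 16
}
```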
- bool need_to_finalize_piggyback = false; + bool need_to_finalize_mixed = false; size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available,excess_fragmented_available; uint included_old_regions; if (_generation->is_young()) { - heap->old_generation()->heuristics()->initialize_piggyback_evacs(collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - need_to_finalize_piggyback =heap->old_generation()->heuristics()->prime_collection_set(collection_set, - evacuated_old_bytes, - collected_old_bytes, - included_old_regions, - old_evacuation_reserve, - old_evacuation_budget, - unfragmented_available, - fragmented_available, - excess_fragmented_available); + heap->old_generation()->heuristics()->initialize_mixed_evacs(collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available, + fragmented_available, excess_fragmented_available); + need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(collection_set, + evacuated_old_bytes, + collected_old_bytes, + included_old_regions, + old_evacuation_reserve, + old_evacuation_budget, + unfragmented_available, + fragmented_available, + excess_fragmented_available); } // Call the subclasses to add young-gen regions into the collection set. @@ -201,17 +201,17 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio // enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still // young-gen reserve available following selection of the young-gen collection set, see if we can use // this memory to expand the old-gen evacuation collection set. 
- need_to_finalize_piggyback |= + need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, excess_fragmented_available); - if (need_to_finalize_piggyback) { - heap->old_generation()->heuristics()->finalize_piggyback_evacs(collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (need_to_finalize_mixed) { + heap->old_generation()->heuristics()->finalize_mixed_evacs(collection_set, + evacuated_old_bytes, collected_old_bytes, + included_old_regions, old_evacuation_reserve, + old_evacuation_budget, unfragmented_available); } } } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index e15eb63b54e..327b1eea42b 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -176,13 +176,13 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c return true; } -void ShenandoahOldHeuristics::initialize_piggyback_evacs(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available) { +void ShenandoahOldHeuristics::initialize_mixed_evacs(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available) { included_old_regions = 0; evacuated_old_bytes = 0; collected_old_bytes = 0; @@ -231,18 +231,18 @@ void ShenandoahOldHeuristics::initialize_piggyback_evacs(ShenandoahCollectionSet } -bool ShenandoahOldHeuristics::finalize_piggyback_evacs(ShenandoahCollectionSet* collection_set, - const size_t evacuated_old_bytes, size_t collected_old_bytes, - const uint included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - const size_t unfragmented_available) { +bool ShenandoahOldHeuristics::finalize_mixed_evacs(ShenandoahCollectionSet* collection_set, + const size_t evacuated_old_bytes, size_t collected_old_bytes, + const uint included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + const size_t unfragmented_available) { if (_first_pinned_candidate != NOT_FOUND) { // Need to deal with pinned regions slide_pinned_regions_to_front(); } decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes); if (included_old_regions > 0) { - log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", + log_info(gc)("Old-gen mixed evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", included_old_regions, byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes), byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes)); diff --git 
a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index a0dcbd007de..ed0b320194b 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -127,7 +127,7 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { protected: void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override; -// Return true iff we need to finalize piggyback evacs +// Return true iff we need to finalize mixed evacs bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, size_t &evacuated_old_bytes, size_t &collected_old_bytes, uint &included_old_regions, const size_t old_evacuation_reserve, @@ -142,15 +142,15 @@ bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass. void prepare_for_old_collections(); - void initialize_piggyback_evacs(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available); + void initialize_mixed_evacs(ShenandoahCollectionSet* collection_set, + size_t &evacuated_old_bytes, size_t &collected_old_bytes, + uint &included_old_regions, size_t &old_evacuation_reserve, + size_t &old_evacuation_budget, + size_t &unfragmented_available, + size_t &fragmented_available, + size_t &excess_fragmented_available); - // Return true iff we need to finalize piggyback evacs + // Return true iff we need to finalize mixed evacs bool prime_collection_set(ShenandoahCollectionSet* set, size_t &evacuated_old_bytes, size_t &collected_old_bytes, uint &included_old_regions, size_t &old_evacuation_reserve, @@ -159,7 +159,7 @@ bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, size_t &fragmented_available, size_t &excess_fragmented_available); - // Return true iff we need to finalize piggyback evacs + // Return true iff we need to finalize mixed evacs bool top_off_collection_set(ShenandoahCollectionSet* collection_set, size_t &evacuated_old_bytes, size_t &collected_old_bytes, uint &included_old_regions, size_t &old_evacuation_reserve, @@ -170,11 +170,11 @@ bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, // Return true iff the collection set holds at least one unpinned mixed evacuation candidate - bool finalize_piggyback_evacs(ShenandoahCollectionSet* collection_set, - const size_t evacuated_old_bytes, size_t collected_old_bytes, - const uint included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - const size_t unfragmented_available); + bool finalize_mixed_evacs(ShenandoahCollectionSet* collection_set, + const size_t evacuated_old_bytes, size_t collected_old_bytes, + const uint included_old_regions, const size_t old_evacuation_reserve, + const size_t old_evacuation_budget, + const size_t unfragmented_available); // How many old-collection candidates have not yet been processed? 
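With patch 56's renaming done, the mixed-evacuation protocol driven from choose_collection_set() has three steps: prime the collection set with old candidates, let the young heuristic add young regions, then optionally top off with further old regions and finalize. A condensed control-flow sketch with stub types (not the real Shenandoah classes); the boolean tracks whether finalization is still required:

```cpp
// Condensed control-flow sketch of the mixed-evac protocol; stubs only.
struct CollectionSet {};

struct OldHeuristics {
  bool prime_collection_set(CollectionSet*) { return true; }   // old candidates added?
  bool top_off_collection_set()             { return false; }  // spare young reserve spent?
  bool finalize_mixed_evacs()               { return true; }   // any unpinned region included?
};

void choose_collection_set(OldHeuristics& old_h, CollectionSet& cs, bool is_young) {
  bool need_to_finalize_mixed = false;
  if (is_young) {
    need_to_finalize_mixed = old_h.prime_collection_set(&cs);  // step 1: old candidates
  }
  // step 2: the subclass adds young-gen regions to cs here (elided)
  need_to_finalize_mixed |= old_h.top_off_collection_set();    // step 3: spend leftover reserve
  if (need_to_finalize_mixed) {
    old_h.finalize_mixed_evacs();                              // step 4: logging, pinned regions
  }
}

int main() {
  OldHeuristics h;
  CollectionSet cs;
  choose_collection_set(h, cs, true);
}
```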
From 406d347bde8ef481440e5a8436eed750132b036a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 16 Jul 2024 23:54:20 +0000 Subject: [PATCH 57/64] Simplify arguments by using instance variables in ShenandoahOldHeuristics --- .../shenandoahGenerationalHeuristics.cpp | 26 +- .../heuristics/shenandoahOldHeuristics.cpp | 216 +++++++---------- .../heuristics/shenandoahOldHeuristics.hpp | 53 ++-- .../test_shenandoahOldHeuristic.cpp | 228 ++---------------- 4 files changed, 132 insertions(+), 391 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index e997a19da95..099cc6e9064 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -177,20 +177,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio uint included_old_regions; if (_generation->is_young()) { - heap->old_generation()->heuristics()->initialize_mixed_evacs(collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(collection_set, - evacuated_old_bytes, - collected_old_bytes, - included_old_regions, - old_evacuation_reserve, - old_evacuation_budget, - unfragmented_available, - fragmented_available, - excess_fragmented_available); + need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(collection_set); } // Call the subclasses to add young-gen regions into the collection set. @@ -202,16 +189,9 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio // young-gen reserve available following selection of the young-gen collection set, see if we can use // this memory to expand the old-gen evacuation collection set. 
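Patch 57's mechanical change, shown below, is worth spelling out: roughly nine by-reference parameters threaded through prime, top-off, and finalize become member state on ShenandoahOldHeuristics, since they describe a single in-flight collection-set construction anyway. Schematically, with illustrative names:

```cpp
#include <cstddef>

// Before (patch 56): every step threads the running totals by reference, e.g.
//   bool prime_collection_set(ShenandoahCollectionSet* set,
//                             size_t &evacuated_old_bytes, size_t &collected_old_bytes,
//                             uint &included_old_regions, ...);
// After (patch 57): the totals are members, reset at the start of each build.
class OldHeuristicsSketch {          // illustrative, not the real class
  size_t _evacuated_old_bytes = 0;
  size_t _collected_old_bytes = 0;
  size_t _included_old_regions = 0;
public:
  bool prime_collection_set() {
    _evacuated_old_bytes = _collected_old_bytes = _included_old_regions = 0;
    // ... add candidate regions here, updating the members as each is taken ...
    return _included_old_regions > 0;
  }
};

int main() {
  OldHeuristicsSketch h;
  return h.prime_collection_set() ? 0 : 1;
}
```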
need_to_finalize_mixed |= - heap->old_generation()->heuristics()->top_off_collection_set(collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); + heap->old_generation()->heuristics()->top_off_collection_set(); if (need_to_finalize_mixed) { - heap->old_generation()->heuristics()->finalize_mixed_evacs(collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + heap->old_generation()->heuristics()->finalize_mixed_evacs(); } } } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 327b1eea42b..e1417996d06 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -93,13 +93,7 @@ bool ShenandoahOldHeuristics::all_candidates_are_pinned() { return true; } -bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available) { +bool ShenandoahOldHeuristics::add_old_regions_to_cset() { if (unprocessed_old_collection_candidates() == 0) { return false; } @@ -122,130 +116,70 @@ bool ShenandoahOldHeuristics::add_old_regions_to_cset(ShenandoahCollectionSet* c size_t live_data_for_evacuation = r->get_live_data_bytes(); size_t lost_available = r->free(); - if ((lost_available > 0) && (excess_fragmented_available > 0)) { - if (lost_available < excess_fragmented_available) { - excess_fragmented_available -= lost_available; + if ((lost_available > 0) && (_excess_fragmented_available > 0)) { + if (lost_available < _excess_fragmented_available) { + _excess_fragmented_available -= lost_available; lost_available = 0; } else { - lost_available -= excess_fragmented_available; - excess_fragmented_available = 0; + lost_available -= _excess_fragmented_available; + _excess_fragmented_available = 0; } } ssize_t fragmented_delta = 0; ssize_t unfragmented_delta = 0; - size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste); - if ((lost_available > 0) && (fragmented_available > 0)) { - if (scaled_loss < fragmented_available) { - fragmented_available -= scaled_loss; + if ((lost_available > 0) && (_fragmented_available > 0)) { + if (scaled_loss < _fragmented_available) { + _fragmented_available -= scaled_loss; fragmented_delta = -scaled_loss; scaled_loss = 0; } else { - scaled_loss -= fragmented_available; - fragmented_delta = -fragmented_available; - fragmented_available = 0; + scaled_loss -= _fragmented_available; + fragmented_delta = -_fragmented_available; + _fragmented_available = 0; } } // Allocate replica from unfragmented memory if that exists size_t evacuation_need = live_data_for_evacuation; - if (evacuation_need < unfragmented_available) { - unfragmented_available -= evacuation_need;; + if (evacuation_need < _unfragmented_available) { + _unfragmented_available -= evacuation_need;; } else { - if (unfragmented_available > 0) { - evacuation_need -= unfragmented_available; - unfragmented_delta = -unfragmented_available; - 
unfragmented_available = 0; + if (_unfragmented_available > 0) { + evacuation_need -= _unfragmented_available; + unfragmented_delta = -_unfragmented_available; + _unfragmented_available = 0; } // Take the remaining allocation out of fragmented available - if (fragmented_available > evacuation_need) { - fragmented_available -= evacuation_need; + if (_fragmented_available > evacuation_need) { + _fragmented_available -= evacuation_need; } else { // We cannot add this region into the collection set. We're done. Undo the adjustments to available. - fragmented_available -= fragmented_delta; - unfragmented_available -= unfragmented_delta; + _fragmented_available -= fragmented_delta; + _unfragmented_available -= unfragmented_delta; break; } } - collection_set->add_region(r); - included_old_regions++; - evacuated_old_bytes += live_data_for_evacuation; - collected_old_bytes += r->garbage(); + _mixed_evac_cset->add_region(r); + _included_old_regions++; + _evacuated_old_bytes += live_data_for_evacuation; + _collected_old_bytes += r->garbage(); consume_old_collection_candidate(); } return true; } -void ShenandoahOldHeuristics::initialize_mixed_evacs(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available) { - included_old_regions = 0; - evacuated_old_bytes = 0; - collected_old_bytes = 0; - - // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer - // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount - // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount - // of live memory in that region and by the amount of unallocated memory in that region if the evacuation - // budget is constrained by availability of free memory. - old_evacuation_reserve = _old_generation->get_evacuation_reserve(); - old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste); - - // fragmented_available is the amount of memory within partially consumed old regions that may be required to - // hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available - // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need - // to evacuate into the existing partially consumed old regions. - - // if fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory - // that is available within old, but is not required to hold the resuilts of old evacuation. As old-gen regions - // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the - // excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is - // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this - // fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions - // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted - // to unfragmented regions. 
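The budget comment above is dense, so a worked example may help. Assume (hypothetically) an old evacuation reserve of 64 MB, ShenandoahOldEvacWaste of 1.25, 40 MB free in completely empty (unaffiliated) old regions, and 48 MB free inside partially used ones:

```cpp
#include <cstdio>

int main() {
  const double waste = 1.25;                  // hypothetical ShenandoahOldEvacWaste
  const double MB = 1024.0 * 1024.0;
  double reserve = 64 * MB;                   // old evacuation reserve
  double unaffiliated = 40 * MB;              // free memory in completely empty old regions
  double affiliated = 48 * MB;                // free memory inside partially used old regions

  // Not enough unaffiliated memory to hold the whole reserve, so some
  // evacuation must target fragmented (partially used) regions.
  double excess_fragmented = (affiliated + unaffiliated) - reserve;  // 24 MB: spare, not needed
  affiliated -= excess_fragmented;                                   // 24 MB actually needed
  double fragmented_budget = affiliated / waste;                     // 19.2 MB of live data
  double unfragmented_budget = unaffiliated / waste;                 // 32.0 MB of live data
  printf("excess %.1f MB, fragmented %.1f MB, unfragmented %.1f MB\n",
         excess_fragmented / MB, fragmented_budget / MB, unfragmented_budget / MB);
}
```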
- - size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); - if (unaffiliated_available > old_evacuation_reserve) { - unfragmented_available = old_evacuation_budget; - fragmented_available = 0; - excess_fragmented_available = 0; - } else { - assert(_old_generation->available() >= old_evacuation_reserve, "Cannot reserve more than is available"); - size_t affiliated_available = _old_generation->available() - unaffiliated_available; - assert(affiliated_available + unaffiliated_available >= old_evacuation_reserve, "Budgets do not add up"); - if (affiliated_available + unaffiliated_available > old_evacuation_reserve) { - excess_fragmented_available = (affiliated_available + unaffiliated_available) - old_evacuation_reserve; - affiliated_available -= excess_fragmented_available; - } - fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste); - unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste); - } - log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", - byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), - unprocessed_old_collection_candidates()); -} - - -bool ShenandoahOldHeuristics::finalize_mixed_evacs(ShenandoahCollectionSet* collection_set, - const size_t evacuated_old_bytes, size_t collected_old_bytes, - const uint included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - const size_t unfragmented_available) { +bool ShenandoahOldHeuristics::finalize_mixed_evacs() { if (_first_pinned_candidate != NOT_FOUND) { // Need to deal with pinned regions slide_pinned_regions_to_front(); } - decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes); - if (included_old_regions > 0) { - log_info(gc)("Old-gen mixed evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", - included_old_regions, - byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes), - byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes)); + decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes); + if (_included_old_regions > 0) { + log_info(gc)("Old-gen mixed evac (" SIZE_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", + _included_old_regions, + byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes), + byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes)); } if (unprocessed_old_collection_candidates() == 0) { @@ -254,7 +188,7 @@ bool ShenandoahOldHeuristics::finalize_mixed_evacs(ShenandoahCollectionSet* coll clear_triggers(); _old_generation->complete_mixed_evacuations(); - } else if (included_old_regions == 0) { + } else if (_included_old_regions == 0) { // We have candidates, but none were included for evacuation - are they all pinned? // or did we just not have enough room for any of them in this collection set? // We don't want a region with a stuck pin to prevent subsequent old collections, so @@ -266,41 +200,71 @@ bool ShenandoahOldHeuristics::finalize_mixed_evacs(ShenandoahCollectionSet* coll } else { log_info(gc)("No regions selected for mixed collection. 
" "Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, - PROPERFMTARGS(old_evacuation_reserve), + PROPERFMTARGS(_old_evacuation_reserve), _next_old_collection_candidate, _last_old_collection_candidate); } } - - return (included_old_regions > 0); + return (_included_old_regions > 0); } -bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available) { - return add_old_regions_to_cset(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, - old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, - excess_fragmented_available); +bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) { + _mixed_evac_cset = collection_set; + _included_old_regions = 0; + _evacuated_old_bytes = 0; + _collected_old_bytes = 0; + + // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer + // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount + // of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount + // of live memory in that region and by the amount of unallocated memory in that region if the evacuation + // budget is constrained by availability of free memory. + _old_evacuation_reserve = _old_generation->get_evacuation_reserve(); + _old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste); + + // fragmented_available is the amount of memory within partially consumed old regions that may be required to + // hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available + // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need + // to evacuate into the existing partially consumed old regions. + + // if fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory + // that is available within old, but is not required to hold the resuilts of old evacuation. As old-gen regions + // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the + // excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is + // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this + // fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions + // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted + // to unfragmented regions. 
+
+  size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
+  if (unaffiliated_available > _old_evacuation_reserve) {
+    _unfragmented_available = _old_evacuation_budget;
+    _fragmented_available = 0;
+    _excess_fragmented_available = 0;
+  } else {
+    assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
+    size_t affiliated_available = _old_generation->available() - unaffiliated_available;
+    assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
+    if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
+      _excess_fragmented_available = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
+      affiliated_available -= _excess_fragmented_available;
+    }
+    _fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
+    _unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
+  }
+  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
+               byte_size_in_proper_unit(_old_evacuation_budget), proper_unit_for_byte_size(_old_evacuation_budget),
+               unprocessed_old_collection_candidates());
+  return add_old_regions_to_cset();
 }
 
-bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* collection_set,
-                                                     size_t &evacuated_old_bytes, size_t &collected_old_bytes,
-                                                     uint &included_old_regions, size_t &old_evacuation_reserve,
-                                                     size_t &old_evacuation_budget,
-                                                     size_t &unfragmented_available,
-                                                     size_t &fragmented_available,
-                                                     size_t &excess_fragmented_available) {
+bool ShenandoahOldHeuristics::top_off_collection_set() {
   if (unprocessed_old_collection_candidates() == 0) {
     return false;
   } else {
     ShenandoahYoungGeneration* young_generation = _heap->young_generation();
     size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
     size_t max_young_cset = young_generation->get_evacuation_reserve();
-    size_t planned_young_evac = collection_set->get_young_bytes_reserved_for_evacuation();
+    size_t planned_young_evac = _mixed_evac_cset->get_young_bytes_reserved_for_evacuation();
     size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
     size_t available_to_loan_from_young_reserve = ((consumed_from_young_cset >= max_young_cset)?
0: max_young_cset - consumed_from_young_cset); @@ -317,15 +281,13 @@ bool ShenandoahOldHeuristics::top_off_collection_set(ShenandoahCollectionSet* co _heap->generation_sizer()->force_transfer_to_old(regions_for_old_expansion); size_t budget_supplement = region_size_bytes * regions_for_old_expansion; size_t supplement_after_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste); - old_evacuation_budget += supplement_after_waste; - unfragmented_available += supplement_after_waste; + _old_evacuation_budget += supplement_after_waste; + _unfragmented_available += supplement_after_waste; _old_generation->augment_evacuation_reserve(budget_supplement); young_generation->set_evacuation_reserve(max_young_cset - budget_supplement); - return add_old_regions_to_cset(collection_set, evacuated_old_bytes, collected_old_bytes, included_old_regions, - old_evacuation_reserve, old_evacuation_budget, unfragmented_available, fragmented_available, - excess_fragmented_available); + return add_old_regions_to_cset(); } } } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index ed0b320194b..0e3e8e3b3d0 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -103,6 +103,17 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { size_t _fragmentation_first_old_region; size_t _fragmentation_last_old_region; + // State variables involved in construction of a mixed-evacuation collection set + ShenandoahCollectionSet* _mixed_evac_cset; + size_t _evacuated_old_bytes; + size_t _collected_old_bytes; + size_t _included_old_regions; + size_t _old_evacuation_reserve; + size_t _old_evacuation_budget; + size_t _unfragmented_available; + size_t _fragmented_available; + size_t _excess_fragmented_available; + // Compare by live is used to prioritize compaction of old-gen regions. With old-gen compaction, the goal is // to tightly pack long-lived objects into available regions. In most cases, there has not been an accumulation // of garbage within old-gen regions. The more likely opportunity will be to combine multiple sparsely populated @@ -127,14 +138,8 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { protected: void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override; -// Return true iff we need to finalize mixed evacs -bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available); + // Return true iff we need to finalize mixed evacs + bool add_old_regions_to_cset(); public: explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap); @@ -142,40 +147,14 @@ bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set, // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass. 
void prepare_for_old_collections(); - void initialize_mixed_evacs(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available); - // Return true iff we need to finalize mixed evacs - bool prime_collection_set(ShenandoahCollectionSet* set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available); + bool prime_collection_set(ShenandoahCollectionSet* collection_set); // Return true iff we need to finalize mixed evacs - bool top_off_collection_set(ShenandoahCollectionSet* collection_set, - size_t &evacuated_old_bytes, size_t &collected_old_bytes, - uint &included_old_regions, size_t &old_evacuation_reserve, - size_t &old_evacuation_budget, - size_t &unfragmented_available, - size_t &fragmented_available, - size_t &excess_fragmented_available); - + bool top_off_collection_set(); // Return true iff the collection set holds at least one unpinned mixed evacuation candidate - bool finalize_mixed_evacs(ShenandoahCollectionSet* collection_set, - const size_t evacuated_old_bytes, size_t collected_old_bytes, - const uint included_old_regions, const size_t old_evacuation_reserve, - const size_t old_evacuation_budget, - const size_t unfragmented_available); - + bool finalize_mixed_evacs(); // How many old-collection candidates have not yet been processed? uint unprocessed_old_collection_candidates() const; diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp index b58a3768aad..9f3fccea25e 100644 --- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp +++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp @@ -198,25 +198,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) { size_t garbage = make_garbage_above_collection_threshold(10); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(10UL)); EXPECT_EQ(garbage, _collection_set->get_old_garbage()); @@ -229,24 +212,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) { size_t g1 = make_garbage_above_collection_threshold(100); size_t g2 = 
make_garbage_above_collection_threshold(101); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(100UL, 101UL)); EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage()); @@ -258,24 +225,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) { size_t garbage = create_too_much_garbage_for_one_mixed_evacuation(); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_LT(_collection_set->get_old_garbage(), garbage); EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL); @@ -297,23 +248,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) { ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates()); // Here the region is still pinned, so it cannot be added to the collection set. 
- size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } // The two unpinned regions should be added to the collection set and the pinned @@ -328,20 +264,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) { make_unpinned(1); _collection_set->clear(); - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_EQ(_collection_set->get_old_garbage(), g2); EXPECT_TRUE(collection_set_is(1UL)); @@ -358,24 +282,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) { make_pinned(0); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(1UL, 2UL)); @@ -383,21 +291,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) { make_unpinned(0); _collection_set->clear(); - - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - 
included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(0UL)); @@ -414,24 +309,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) { make_pinned(2); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(0UL, 1UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2); @@ -439,21 +318,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) { make_unpinned(2); _collection_set->clear(); - - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(2UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g3); @@ -471,24 +337,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) { make_pinned(0); make_pinned(2); _heuristics->prepare_for_old_collections(); - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - 
old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(1UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g2); @@ -497,21 +347,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) { make_unpinned(0); make_unpinned(2); _collection_set->clear(); - - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } EXPECT_TRUE(collection_set_is(0UL, 2UL)); EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3); @@ -529,25 +366,8 @@ TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) { make_pinned(1); make_pinned(2); _heuristics->prepare_for_old_collections(); - - - size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available, - fragmented_available,excess_fragmented_available; - uint included_old_regions; - _heuristics->initialize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available); - if (_heuristics->prime_collection_set(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available, - fragmented_available, excess_fragmented_available)) { - _heuristics->finalize_piggyback_evacs(_collection_set, - evacuated_old_bytes, collected_old_bytes, - included_old_regions, old_evacuation_reserve, - old_evacuation_budget, unfragmented_available); + if (_heuristics->prime_collection_set(_collection_set)) { + _heuristics->finalize_mixed_evacs(); } // In the case when all candidates are pinned, we want to abandon // this set of mixed collection candidates so that another old collection From ee2ab01dcc37db71c959d30c836324b3f29fd24a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 17 Jul 2024 00:32:04 +0000 Subject: [PATCH 58/64] Remove unreferenced variables --- .../heuristics/shenandoahGenerationalHeuristics.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp 
b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index 099cc6e9064..6ba9c67cead 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -172,10 +172,6 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
 
   // Only young collections need to prime the collection set.
   bool need_to_finalize_mixed = false;
-  size_t evacuated_old_bytes, collected_old_bytes, old_evacuation_reserve, old_evacuation_budget, unfragmented_available,
-    fragmented_available,excess_fragmented_available;
-  uint included_old_regions;
-
   if (_generation->is_young()) {
     need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(collection_set);
   }

From 21a5d328e35991bd8193900fb5e87bb344946fd8 Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Thu, 18 Jul 2024 17:55:15 +0000
Subject: [PATCH 59/64] Improve comment

---
 .../gc/shenandoah/shenandoah_globals.hpp      | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index 60c20ba97d1..420cb8954d8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -380,16 +380,15 @@
           "runs out of memory too early.")                                 \
                                                                            \
   product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL,          \
-          "The maximum percentage by which the young evacuation reserve "  \
-          "can be adjusted in order to make room for old-generation "      \
-          "evacuations. With the default setting, given a total "          \
-          "evacuation budget of X, the amount of memory initially "        \
-          "dedicated to holding objects evacuated to old generation is "   \
-          "75%. This limits both the promotion of aged young regions and " \
+          "The maximum percent of memory that can be reserved for "        \
+          "evacuation into old generation.  With the default setting, "    \
+          "given a total evacuation budget of X, the amount of memory "    \
+          "reserved to hold objects evacuated to old generation is 0.75X. "\
+          "This limits both the promotion of aged young regions and "      \
          "the compaction of existing old regions. It does not restrict "   \
          "the collector from copying more objects into old-generation "    \
          "memory if the young-generation collection set does not consume " \
-         "all of the memory originally set aside for young-generation "    \
+         "all of the memory originally reserved for young-generation "     \
          "evacuation. It also does not restrict the amount of memory "     \
          "that can be promoted in place, by simply changing the "          \
          "affiliation of the region from young to old. If there is an "    \
@@ -397,11 +396,12 @@
          "evacuation effort, roughly quadrupling the amount of memory "    \
          "normally evacuated during young evacuations (so that old "       \
          "evacuates three times as much as young, and young evacuates its "\
-         "normal amount. If free memory is in short supply, this may "     \
+         "normal amount). If free memory is in short supply, this may "    \
          "result in paring back both young-gen and old-gen evacuations, "  \
          "such that the fraction of old is 75% (in the default "           \
-         "configuration) of the total available evacuation reserve and "   \
-         "young evacuates one fourth of its normal amount. "               \
+         "configuration) of the total available evacuation reserve, "      \
+         "with young evacuating one fourth of its normal amount, "         \
+         "and old evacuating three times as much as young evacuates. "     \
          "Setting a larger value allows for quicker promotion and a "      \
          "smaller number of mixed evacuations to process the entire list " \
          "of old-gen collection candidates at the cost of increased "      \

From 02ea5660e7faa8471de646985b464fc94918ccde Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Fri, 26 Jul 2024 19:20:18 +0000
Subject: [PATCH 60/64] Better comments as requested by code review

---
 .../heuristics/shenandoahOldHeuristics.cpp    |  1 -
 .../heuristics/shenandoahOldHeuristics.hpp    | 24 +++++++++++++++----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index e1417996d06..e97b710d43c 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -186,7 +186,6 @@ bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
     // We have added the last of our collection candidates to a mixed collection.
     // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
     clear_triggers();
-
     _old_generation->complete_mixed_evacuations();
   } else if (_included_old_regions == 0) {
     // We have candidates, but none were included for evacuation - are they all pinned?
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
index 0e3e8e3b3d0..489a391ffbd 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
@@ -103,7 +103,10 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics {
   size_t _fragmentation_first_old_region;
   size_t _fragmentation_last_old_region;
 
-  // State variables involved in construction of a mixed-evacuation collection set
+  // State variables involved in construction of a mixed-evacuation collection set.  These variables are initialized
+  // when client code invokes prime_collection_set().  They are consulted, and sometimes modified, when client code
+  // calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
+  // finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
   ShenandoahCollectionSet* _mixed_evac_cset;
   size_t _evacuated_old_bytes;
   size_t _collected_old_bytes;
@@ -138,7 +141,11 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics {
 protected:
   void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
 
-  // Return true iff we need to finalize mixed evacs
+  // This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
+  // to the collection set.  This may be called twice to prepare for any given mixed evacuation cycle, the first time with
+  // a conservative old evacuation budget, and the second time with a larger, more aggressive old evacuation budget.  Returns
+  // true iff we need to finalize mixed evacs.  (If no regions are added to the collection set, there is no need to finalize
+  // mixed evacuations.)
   bool add_old_regions_to_cset();
 
 public:
   explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
 
@@ -142,40 +147,14 @@ bool add_old_regions_to_cset(ShenandoahCollectionSet* collection_set,
   // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
   void prepare_for_old_collections();
 
-  // Return true iff we need to finalize mixed evacs
+  // Initialize instance variables to support the preparation of a mixed-evacuation collection set.  Adds as many
+  // old candidate regions into the collection set as can fit within the initial conservative old evacuation budget.
+  // Returns true iff we need to finalize mixed evacs.
   bool prime_collection_set(ShenandoahCollectionSet* collection_set);
 
-  // Return true iff we need to finalize mixed evacs
+  // If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
+  // evacuation candidate regions into the collection set as will fit within this excess repurposed reserve.
+  // Returns true iff we need to finalize mixed evacs.
   bool top_off_collection_set();
 
-  // Return true iff the collection set holds at least one unpinned mixed evacuation candidate
+  // Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
+  // of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
+  // be evacuated, giving special attention to regions that are currently pinned.  It outputs relevant log messages and
+  // returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
   bool finalize_mixed_evacs();
 
   // How many old-collection candidates have not yet been processed?
   uint unprocessed_old_collection_candidates() const;

From 105883798abe7153cd84ec86cf09b9aa7cfa67c5 Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Fri, 26 Jul 2024 22:59:42 +0000
Subject: [PATCH 61/64] Simplify invocations of freeset rebuild when possible

Most invocations do not need to resize generations between
prepare_to_rebuild() and finish_rebuild(), so there is no need to make
these two independent invocations.
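
For illustration (a sketch of the call-site simplification only, not
additional patch content), a typical caller changes roughly as follows;
the "before" shape is taken from the call sites removed below:

    // Before: rebuild state was threaded through two calls, with no
    // generation resizing between them.
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions,
                                         first_old, last_old, num_old);
    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    // After: the new helper performs both steps internally.
    heap->free_set()->rebuild();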
---
 .../share/gc/shenandoah/shenandoahConcurrentGC.cpp       | 6 +-----
 src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp    | 6 ++++++
 src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp    | 8 +++++++-
 src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp     | 7 +------
 src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp | 8 +-------
 src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp       | 7 +------
 6 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 7abb6acc1d1..e5a02c20d70 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -360,11 +360,7 @@ void ShenandoahConcurrentGC::entry_final_roots() {
     assert (_abbreviated, "Only rebuild free set for abbreviated");
     // Rebuild free set after reclaiming immediate garbage
     ShenandoahHeapLocker locker(heap->lock());
-    size_t young_cset_regions, old_cset_regions;
-    size_t first_old, last_old, num_old;
-    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-    assert((young_cset_regions == 0) && (old_cset_regions == 0), "No ongoing evacuation after abbreviated cycle");
-    heap->free_set()->finish_rebuild(0, 0, num_old);
+    heap->free_set()->rebuild();
   }
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index e6f8908a34f..4b86cc1f953 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -1585,6 +1585,12 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r
 }
 
+void ShenandoahFreeSet::rebuild() {
+  size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
+  prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
+  finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
+}
+
 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index 1477c7a6a07..99ebd2c55be 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -376,6 +376,12 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
 
   void clear();
 
+  // Rebuild the free set.  This combines the effects of prepare_to_rebuild() and finish_rebuild() with no intervening
+  // efforts to rebalance generation sizes.  When the free set is rebuilt, we reserve sufficient memory within the
+  // collector partition (and, for generational mode, the old collector partition) based on the amount reserved
+  // by heuristics to support the next planned evacuation effort.
+  void rebuild();
+
   // Examine the existing free set representation, capturing the current state into var arguments:
   //
   // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
@@ -387,7 +393,7 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                           size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
 
   // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
-  // hold the results of evacuating to young-gen and to old-gen.  These quantities, stored in reserves for their,
+  // hold the results of evacuating to young-gen and to old-gen.  These quantities, stored in reserves for their
   // respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
   // evacuation.  When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
   // old_collector sets to hold evacuations.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 1518c3c76fe..6eba849fdda 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -1177,16 +1177,11 @@ void ShenandoahFullGC::phase5_epilog() {
   }
 
   heap->collection_set()->clear();
-  size_t young_cset_regions, old_cset_regions;
-  size_t first_old, last_old, num_old;
-  heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-
   // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
   // A partial solution would be to remember how many objects are of tenure age following Full GC, but
   // this is probably suboptimal, because most of these objects will not reside in a region that will be
   // selected for the next evacuation phase.
-  heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
-
+  heap->free_set()->rebuild();
   heap->clear_cancelled_gc(true /* clear oom handler */);
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 6c471403eb7..6871d293e52 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -743,13 +743,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
     ShenandoahHeapLocker locker(heap->lock());
-    size_t young_cset_regions, old_cset_regions;
-
-    // We are preparing for evacuation.  At this time, we ignore cset region tallies.
- size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - // Free set construction uses reserve quantities, because they are known to be valid here - heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old); + heap->free_set()->rebuild(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 84ba1faae4c..a376503a6b7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -410,12 +410,7 @@ jint ShenandoahHeap::initialize() { // Initialize to complete _marking_context->mark_complete(); - size_t young_cset_regions, old_cset_regions; - - // We are initializing free set. We ignore cset region tallies. - size_t first_old, last_old, num_old; - _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); + _free_set->rebuild(); if (mode()->is_generational()) { size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; From 7f01a7f32cd92628f1b281a5a14f3e8cdc83de04 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 26 Jul 2024 23:15:05 +0000 Subject: [PATCH 62/64] Remove incorrect and unnecessary comments --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index e5a02c20d70..d9f7ff64fa1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -211,8 +211,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { return false; } - - // vmop_entry_final_updaterefs rebuilds free set in preparation for next GC. vmop_entry_final_updaterefs(); // Update references freed up collection set, kick the cleanup to reclaim the space. @@ -222,8 +220,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // do not check for cancellation here because, at this point, the cycle is effectively // complete. If the cycle has been cancelled here, the control thread will detect it // on its next iteration and run a degenerated young cycle. - - // vmop_entry_final_updaterefs rebuilds free set in preparation for next GC. 
  _abbreviated = true;
   vmop_entry_final_roots();
 }

From e009c35aaf94888e51e17760b2841fcc1e509060 Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Fri, 26 Jul 2024 23:32:15 +0000
Subject: [PATCH 63/64] Simplify code to rebuild free set after abbreviated and old GC

---
 .../gc/shenandoah/shenandoahConcurrentGC.cpp  | 30 ++++---------------
 1 file changed, 6 insertions(+), 24 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index d9f7ff64fa1..18da4e50078 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -334,30 +334,12 @@ void ShenandoahConcurrentGC::entry_final_roots() {
   EventMark em("%s", msg);
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   op_final_roots();
-
-  if (heap->mode()->is_generational()) {
-    assert (_abbreviated || _generation->is_old(), "Only rebuild free set for abbreviated and old-marking cycles");
-    // After concurrent old marking finishes and after an abbreviated cycle, we reclaim immediate garbage.
-    // Further, we may also want to expand OLD in order to make room for anticipated promotions and/or for mixed
-    // evacuations.  Mixed evacuations are especially likely to following the end of OLD marking.
-    {
-      ShenandoahHeapLocker locker(heap->lock());
-      ShenandoahGenerationalHeap* const gen_heap = ShenandoahGenerationalHeap::heap();
-      size_t young_cset_regions, old_cset_regions;
-      size_t first_old, last_old, num_old;
-      size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
-      heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-      assert((young_cset_regions == 0) && (old_cset_regions == 0),
-             "No ongoing evacuation after abbreviated or concurrent OLD marking cycle");
-      gen_heap->compute_old_generation_balance(allocation_runway, 0, 0);
-      heap->free_set()->finish_rebuild(0, 0, num_old);
-    }
-  } else {
-    assert (_abbreviated, "Only rebuild free set for abbreviated");
-    // Rebuild free set after reclaiming immediate garbage
-    ShenandoahHeapLocker locker(heap->lock());
-    heap->free_set()->rebuild();
-  }
+  // After concurrent old marking finishes and after an abbreviated cycle, we reclaim immediate garbage.
+  // Further, we may also want to expand OLD in order to make room for anticipated promotions and/or for mixed
+  // evacuations.  Mixed evacuations are especially likely to follow the end of OLD marking.
+  assert(_abbreviated || (heap->mode()->is_generational() && _generation->is_old()),
+         "Only rebuild free set for abbreviated and old-marking cycles");
+  heap->rebuild_free_set(true /*concurrent*/);
 }
 
 void ShenandoahConcurrentGC::entry_reset() {

From 699f40986119bb0ff217ad8d597b9e9f219551e7 Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Sat, 27 Jul 2024 14:21:48 +0000
Subject: [PATCH 64/64] Cleanups requested by code review

---
 .../heuristics/shenandoahOldHeuristics.cpp    | 212 +++++++++---------
 .../gc/shenandoah/shenandoahConcurrentGC.cpp  |  24 --
 .../gc/shenandoah/shenandoahGeneration.cpp    |   7 +
 .../gc/shenandoah/shenandoahGeneration.hpp    |  16 ++
 4 files changed, 129 insertions(+), 130 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index e97b710d43c..fd1f0a135b2 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -77,6 +77,56 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
 {
 }
 
+bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
+  _mixed_evac_cset = collection_set;
+  _included_old_regions = 0;
+  _evacuated_old_bytes = 0;
+  _collected_old_bytes = 0;
+
+  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
+  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
+  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
+  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
+  // budget is constrained by availability of free memory.
+  _old_evacuation_reserve = _old_generation->get_evacuation_reserve();
+  _old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
+
+  // fragmented_available is the amount of memory within partially consumed old regions that may be required to
+  // hold the results of old evacuations.  If all of the memory required by the old evacuation reserve is available
+  // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
+  // to evacuate into the existing partially consumed old regions.
+
+  // If fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory
+  // that is available within old, but is not required to hold the results of old evacuation.  As old-gen regions
+  // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the
+  // excess is exhausted.  For old-gen regions subsequently added to the collection set, their free memory is
+  // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
+  // fragmented_available memory decreases).  After fragmented_available has been exhausted, any further old regions
+  // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
+  // to unfragmented regions.
+
+  size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
+  if (unaffiliated_available > _old_evacuation_reserve) {
+    _unfragmented_available = _old_evacuation_budget;
+    _fragmented_available = 0;
+    _excess_fragmented_available = 0;
+  } else {
+    assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
+    size_t affiliated_available = _old_generation->available() - unaffiliated_available;
+    assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
+    if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
+      _excess_fragmented_available = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
+      affiliated_available -= _excess_fragmented_available;
+    }
+    _fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
+    _unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
+  }
+  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
+               byte_size_in_proper_unit(_old_evacuation_budget), proper_unit_for_byte_size(_old_evacuation_budget),
+               unprocessed_old_collection_candidates());
+  return add_old_regions_to_cset();
+}
+
 bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
 #ifdef ASSERT
   if (uint(os::random()) % 100 < ShenandoahCoalesceChance) {
@@ -93,6 +143,62 @@ bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
   return true;
 }
 
+void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
+  // Find the first unpinned region to the left of the next region that
+  // will be added to the collection set.  These regions will have been
+  // added to the cset, so we can use them to hold pointers to regions
+  // that were pinned when the cset was chosen.
+  // [ r p r p p p r r ]
+  //     ^         ^ ^
+  //     |         | | pointer to next region to add to a mixed collection is here.
+  //     |         | first r to the left should be in the collection set now.
+  //     | first pinned region, we don't need to look past this
+  uint write_index = NOT_FOUND;
+  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
+    ShenandoahHeapRegion* region = _region_data[search]._region;
+    if (!region->is_pinned()) {
+      write_index = search;
+      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
+      break;
+    }
+  }
+
+  // If we could not find an unpinned region, it means there are no slots available
+  // to move up the pinned regions.  In this case, we just reset our next index in the
+  // hopes that some of these regions will become unpinned before the next mixed
+  // collection.  We may want to bail out of here instead, as it should be quite
+  // rare to have so many pinned regions and may indicate something is wrong.
+  if (write_index == NOT_FOUND) {
+    assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions.");
+    _next_old_collection_candidate = _first_pinned_candidate;
+    return;
+  }
+
+  // Find pinned regions to the left and move their pointer into a slot
+  // that was pointing at a region that has been added to the cset (or was pointing
+  // to a pinned region that we've already moved up).  We are done when the leftmost
+  // pinned region has been slid up.
+  // [ r p r x p p p r ]
+  //         ^       ^
+  //         |       | next region for mixed collections
+  //         | Write pointer is here. 
We know this region is already in the cset
+  //         | so we can clobber it with the next pinned region we find.
+  for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) {
+    RegionData& skipped = _region_data[search];
+    if (skipped._region->is_pinned()) {
+      RegionData& available_slot = _region_data[write_index];
+      available_slot._region = skipped._region;
+      available_slot._u._live_data = skipped._u._live_data;
+      --write_index;
+    }
+  }
+
+  // Update to read from the leftmost pinned region.  Plus one here because we decremented
+  // the write index to hold the next found pinned region.  We are just moving it back now
+  // to point to the first pinned region.
+  _next_old_collection_candidate = write_index + 1;
+}
+
 bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
   if (unprocessed_old_collection_candidates() == 0) {
     return false;
@@ -206,56 +312,6 @@ bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
   return (_included_old_regions > 0);
 }
 
-bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
-  _mixed_evac_cset = collection_set;
-  _included_old_regions = 0;
-  _evacuated_old_bytes = 0;
-  _collected_old_bytes = 0;
-
-  // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer
-  // "available" to hold the results of other evacuations.  This may cause a decrease in the remaining amount
-  // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
-  // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
-  // budget is constrained by availability of free memory.
-  _old_evacuation_reserve = _old_generation->get_evacuation_reserve();
-  _old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
-
-  // fragmented_available is the amount of memory within partially consumed old regions that may be required to
-  // hold the results of old evacuations.  If all of the memory required by the old evacuation reserve is available
-  // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
-  // to evacuate into the existing partially consumed old regions.
-
-  // If fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory
-  // that is available within old, but is not required to hold the results of old evacuation.  As old-gen regions
-  // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the
-  // excess is exhausted.  For old-gen regions subsequently added to the collection set, their free memory is
-  // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
-  // fragmented_available memory decreases).  After fragmented_available has been exhausted, any further old regions
-  // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
-  // to unfragmented regions.
- - size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); - if (unaffiliated_available > _old_evacuation_reserve) { - _unfragmented_available = _old_evacuation_budget; - _fragmented_available = 0; - _excess_fragmented_available = 0; - } else { - assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available"); - size_t affiliated_available = _old_generation->available() - unaffiliated_available; - assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up"); - if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) { - _excess_fragmented_available = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve; - affiliated_available -= _excess_fragmented_available; - } - _fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste); - _unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste); - } - log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", - byte_size_in_proper_unit(_old_evacuation_budget), proper_unit_for_byte_size(_old_evacuation_budget), - unprocessed_old_collection_candidates()); - return add_old_regions_to_cset(); -} - bool ShenandoahOldHeuristics::top_off_collection_set() { if (unprocessed_old_collection_candidates() == 0) { return false; @@ -291,62 +347,6 @@ bool ShenandoahOldHeuristics::top_off_collection_set() { } } -void ShenandoahOldHeuristics::slide_pinned_regions_to_front() { - // Find the first unpinned region to the left of the next region that - // will be added to the collection set. These regions will have been - // added to the cset, so we can use them to hold pointers to regions - // that were pinned when the cset was chosen. - // [ r p r p p p r r ] - // ^ ^ ^ - // | | | pointer to next region to add to a mixed collection is here. - // | | first r to the left should be in the collection set now. - // | first pinned region, we don't need to look past this - uint write_index = NOT_FOUND; - for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) { - ShenandoahHeapRegion* region = _region_data[search]._region; - if (!region->is_pinned()) { - write_index = search; - assert(region->is_cset(), "Expected unpinned region to be added to the collection set."); - break; - } - } - - // If we could not find an unpinned region, it means there are no slots available - // to move up the pinned regions. In this case, we just reset our next index in the - // hopes that some of these regions will become unpinned before the next mixed - // collection. We may want to bailout of here instead, as it should be quite - // rare to have so many pinned regions and may indicate something is wrong. - if (write_index == NOT_FOUND) { - assert(_first_pinned_candidate != NOT_FOUND, "Should only be here if there are pinned regions."); - _next_old_collection_candidate = _first_pinned_candidate; - return; - } - - // Find pinned regions to the left and move their pointer into a slot - // that was pointing at a region that has been added to the cset (or was pointing - // to a pinned region that we've already moved up). We are done when the leftmost - // pinned region has been slid up. - // [ r p r x p p p r ] - // ^ ^ - // | | next region for mixed collections - // | Write pointer is here. 
We know this region is already in the cset - // | so we can clobber it with the next pinned region we find. - for (int32_t search = (int32_t)write_index - 1; search >= (int32_t)_first_pinned_candidate; --search) { - RegionData& skipped = _region_data[search]; - if (skipped._region->is_pinned()) { - RegionData& available_slot = _region_data[write_index]; - available_slot._region = skipped._region; - available_slot._u._live_data = skipped._u._live_data; - --write_index; - } - } - - // Update to read from the leftmost pinned region. Plus one here because we decremented - // the write index to hold the next found pinned region. We are just moving it back now - // to point to the first pinned region. - _next_old_collection_candidate = write_index + 1; -} - void ShenandoahOldHeuristics::prepare_for_old_collections() { ShenandoahHeap* heap = ShenandoahHeap::heap(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 18da4e50078..50d40909fb7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -699,32 +699,8 @@ void ShenandoahConcurrentGC::op_final_mark() { JvmtiTagMap::set_needs_cleaning(); // The collection set is chosen by prepare_regions_and_collection_set(). - // - // TODO: Under severe memory overload conditions that can be checked here, we may want to limit - // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on - // evacuating young-gen, This remediation is most appropriate when old-gen availability is very high (so there - // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections - // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that/ young-gen - // collections are not triggering frequently enough). _generation->prepare_regions_and_collection_set(true /*concurrent*/); - // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the - // evacuation efforts that are about to begin. In particular: - // - // old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has - // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage - // of the live young-gen memory within the collection set. If there is more data ready to be promoted than - // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation - // pass. - // - // old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been - // set aside to hold objects evacuated from the old-gen collection set. - // - // young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has - // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value - // equals the entire amount of live young-gen memory within the collection set, even though some of this memory - // will likely be promoted. 
-
     // Has to be done after cset selection
     heap->prepare_concurrent_roots();
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 6871d293e52..d94c0ffcd9e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -650,6 +650,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
   ShenandoahCollectionSet* collection_set = heap->collection_set();
   bool is_generational = heap->mode()->is_generational();
 
+  // TODO: Under severe memory overload conditions that can be checked here, we may want to limit
+  // the inclusion of old-gen candidates within the collection set.  This would allow us to prioritize efforts on
+  // evacuating young-gen.  This remediation is most appropriate when old-gen availability is very high (so there
+  // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections
+  // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen
+  // collections are not triggering frequently enough).
+
   assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
   assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
   {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index 081fdad5e3b..0008371e4ca 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -170,6 +170,22 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
   virtual void prepare_gc();
 
   // Called during final mark, chooses collection set, rebuilds free set.
+  // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
+  // evacuation efforts that are about to begin.  In particular:
+  //
+  // old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
+  //   been set aside to hold objects promoted from young-gen memory.  This represents an estimated percentage
+  //   of the live young-gen memory within the collection set.  If there is more data ready to be promoted than
+  //   can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
+  //   pass.
+  //
+  // old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been
+  //   set aside to hold objects evacuated from the old-gen collection set.
+  //
+  // young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has
+  //   been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
+  //   equals the entire amount of live young-gen memory within the collection set, even though some of this memory
+  //   will likely be promoted.
   virtual void prepare_regions_and_collection_set(bool concurrent);
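
To make the reserve arithmetic described above concrete, here is a small
standalone sketch (illustrative only; the struct and function names are
hypothetical and are not Shenandoah APIs).  It models how a total evacuation
reserve is split between young and old when ShenandoahOldEvacRatioPercent is
at its default of 75, so that old may receive up to three times as much
evacuated memory as young:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical model of the three reserves documented in
    // prepare_regions_and_collection_set() above.
    struct EvacReserves {
      size_t young_evacuation_reserve;  // cf. young_generation->get_evacuation_reserve()
      size_t old_evacuation_reserve;    // cf. old_generation->get_evacuation_reserve()
      size_t promoted_reserve;          // cf. old_generation->get_promoted_reserve()
    };

    EvacReserves split_evacuation_budget(size_t total_reserve, unsigned ratio_percent,
                                         size_t anticipated_promotion) {
      EvacReserves r;
      // Old-gen's share of the total reserve, e.g. 75% by default.
      r.old_evacuation_reserve = (total_reserve * ratio_percent) / 100;
      r.young_evacuation_reserve = total_reserve - r.old_evacuation_reserve;
      // Promotions are held within old-gen memory, so the promoted reserve
      // can never exceed what old-gen has set aside.
      r.promoted_reserve = (anticipated_promotion < r.old_evacuation_reserve)
                         ? anticipated_promotion : r.old_evacuation_reserve;
      return r;
    }

    int main() {
      EvacReserves r = split_evacuation_budget(1024 /* MB */, 75, 128 /* MB */);
      printf("young=%zu MB, old=%zu MB, promoted=%zu MB\n",
             r.young_evacuation_reserve, r.old_evacuation_reserve, r.promoted_reserve);
      // Prints: young=256 MB, old=768 MB, promoted=128 MB
      return 0;
    }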