diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index f5b0fdc0e26..6ba9c67cead 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -170,12 +170,26 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0); if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) { // Only young collections need to prime the collection set. + + bool need_to_finalize_mixed = false; if (_generation->is_young()) { - heap->old_generation()->heuristics()->prime_collection_set(collection_set); + need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(collection_set); } // Call the subclasses to add young-gen regions into the collection set. choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); + + if (_generation->is_young()) { + // Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be + // enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still + // young-gen reserve available following selection of the young-gen collection set, see if we can use + // this memory to expand the old-gen evacuation collection set. + need_to_finalize_mixed |= + heap->old_generation()->heuristics()->top_off_collection_set(); + if (need_to_finalize_mixed) { + heap->old_generation()->heuristics()->finalize_mixed_evacs(); + } + } } if (collection_set->has_old_regions()) { diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index 6d1b648deb1..4912773d5b4 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -81,49 +81,68 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti size_t cur_young_garbage) const { auto heap = ShenandoahGenerationalHeap::heap(); size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - size_t capacity = heap->young_generation()->max_capacity(); + size_t young_capacity = heap->young_generation()->max_capacity(); + size_t old_capacity = heap->old_generation()->max_capacity(); size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100; size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100; const uint tenuring_threshold = heap->age_census()->tenuring_threshold(); size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve(); size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve(); - size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste); - size_t young_cur_cset = 0; - size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste); - size_t old_cur_cset = 0; - - // Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young - // collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage - // than young-gen. 
At the end of this cycle, any excess regions remaining in old-gen will be transferred back - // to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available - // to the mutator as it needs to be able to consume this memory during concurrent GC. size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions(); size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; - - if (unaffiliated_young_memory > max_young_cset) { - size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset; - unaffiliated_young_memory -= unaffiliated_mutator_memory; - unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down - unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; + size_t unaffiliated_old_regions = heap->old_generation()->free_unaffiliated_regions(); + size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes; + + // Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these + // be shuffled between young and old generations in order to expedite evacuation of whichever regions have the + // most garbage, regardless of whether these garbage-first regions reside in young or old generation. + // Excess reserves will be transferred back to the mutator after collection set has been chosen. At the end + // of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set. + size_t shared_reserve_regions = 0; + if (young_evac_reserve > unaffiliated_young_memory) { + young_evac_reserve -= unaffiliated_young_memory; + shared_reserve_regions += unaffiliated_young_memory / region_size_bytes; + } else { + size_t delta_regions = young_evac_reserve / region_size_bytes; + shared_reserve_regions += delta_regions; + young_evac_reserve -= delta_regions * region_size_bytes; + } + if (old_evac_reserve > unaffiliated_old_memory) { + old_evac_reserve -= unaffiliated_old_memory; + shared_reserve_regions += unaffiliated_old_memory / region_size_bytes; + } else { + size_t delta_regions = old_evac_reserve / region_size_bytes; + shared_reserve_regions += delta_regions; + old_evac_reserve -= delta_regions * region_size_bytes; } - // We'll affiliate these unaffiliated regions with either old or young, depending on need. - max_young_cset -= unaffiliated_young_memory; + size_t shared_reserves = shared_reserve_regions * region_size_bytes; + size_t committed_from_shared_reserves = 0; + size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste); + size_t young_cur_cset = 0; + size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste); + size_t old_cur_cset = 0; - // Keep track of how many regions we plan to transfer from young to old. - size_t regions_transferred_to_old = 0; + size_t promo_bytes = 0; + size_t old_evac_bytes = 0; + size_t young_evac_bytes = 0; - size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset; + size_t max_total_cset = (max_young_cset + max_old_cset + + (size_t) (shared_reserve_regions * region_size_bytes) / ShenandoahOldEvacWaste); + size_t free_target = ((young_capacity + old_capacity) * ShenandoahMinFreeThreshold) / 100 + max_total_cset; size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. 
Max Young Evacuation: " SIZE_FORMAT - "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Discretionary additional evacuation: " SIZE_FORMAT + "%s, Actual Free: " SIZE_FORMAT "%s.", byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset), byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset), + byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); + size_t cur_garbage = cur_young_garbage; for (size_t idx = 0; idx < size; idx++) { ShenandoahHeapRegion* r = data[idx]._region; if (cset->is_preselected(r->index())) { @@ -131,36 +150,61 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti continue; } bool add_region = false; + size_t region_garbage = r->garbage(); + size_t new_garbage = cur_garbage + region_garbage; + bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); if (r->is_old() || (r->age() >= tenuring_threshold)) { - size_t new_cset = old_cur_cset + r->get_live_data_bytes(); - if ((r->garbage() > garbage_threshold)) { - while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) { - unaffiliated_young_regions--; - regions_transferred_to_old++; + if (add_regardless || (region_garbage > garbage_threshold)) { + size_t live_bytes = r->get_live_data_bytes(); + size_t new_cset = old_cur_cset + r->get_live_data_bytes(); + // May need multiple reserve regions to evacuate a single region, depending on live data bytes and ShenandoahOldEvacWaste + size_t orig_max_old_cset = max_old_cset; + size_t proposed_old_region_consumption = 0; + while ((new_cset > max_old_cset) && (committed_from_shared_reserves < shared_reserves)) { + committed_from_shared_reserves += region_size_bytes; + proposed_old_region_consumption++; max_old_cset += region_size_bytes / ShenandoahOldEvacWaste; } - } - if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) { - add_region = true; - old_cur_cset = new_cset; + // We already know: add_regardless || region_garbage > garbage_threshold + if (new_cset <= max_old_cset) { + add_region = true; + old_cur_cset = new_cset; + cur_garbage = new_garbage; + if (r->is_old()) { + old_evac_bytes += live_bytes; + } else { + promo_bytes += live_bytes; + } + } else { + // We failed to sufficiently expand old, so unwind proposed expansion + max_old_cset = orig_max_old_cset; + committed_from_shared_reserves -= proposed_old_region_consumption * region_size_bytes; + } } } else { assert(r->is_young() && (r->age() < tenuring_threshold), "DeMorgan's law (assuming r->is_affiliated)"); - size_t new_cset = young_cur_cset + r->get_live_data_bytes(); - size_t region_garbage = r->garbage(); - size_t new_garbage = cur_young_garbage + region_garbage; - bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage); - - if (add_regardless || (r->garbage() > garbage_threshold)) { - while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) { - unaffiliated_young_regions--; + if (add_regardless || (region_garbage > garbage_threshold)) { + size_t live_bytes = r->get_live_data_bytes(); + size_t new_cset = young_cur_cset + live_bytes; + // May need multiple reserve regions to evacuate a single region, depending on live data bytes and ShenandoahEvacWaste + size_t orig_max_young_cset = max_young_cset; + size_t proposed_young_region_consumption = 0; + 
while ((new_cset > max_young_cset) && (committed_from_shared_reserves < shared_reserves)) { + committed_from_shared_reserves += region_size_bytes; + proposed_young_region_consumption++; max_young_cset += region_size_bytes / ShenandoahEvacWaste; } - } - if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) { - add_region = true; - young_cur_cset = new_cset; - cur_young_garbage = new_garbage; + // We already know: add_regardless || region_garbage > garbage_threshold + if (new_cset <= max_young_cset) { + add_region = true; + young_cur_cset = new_cset; + cur_garbage = new_garbage; + young_evac_bytes += live_bytes; + } else { + // We failed to sufficiently expand young, so unwind proposed expansion + max_young_cset = orig_max_young_cset; + committed_from_shared_reserves -= proposed_young_region_consumption * region_size_bytes; + } } } if (add_region) { @@ -168,9 +212,7 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti } } - if (regions_transferred_to_old > 0) { - heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); - heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); - heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); - } + heap->young_generation()->set_evacuation_reserve((size_t) (young_evac_bytes * ShenandoahEvacWaste)); + heap->old_generation()->set_evacuation_reserve((size_t) (old_evac_bytes * ShenandoahOldEvacWaste)); + heap->old_generation()->set_promoted_reserve((size_t) (promo_bytes * ShenandoahPromoEvacWaste)); } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index b3f076c8af7..2670b4f29ce 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -27,9 +27,11 @@ #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahOldGeneration.hpp" +#include "gc/shenandoah/shenandoahYoungGeneration.hpp" #include "logging/log.hpp" #include "utilities/quickSort.hpp" @@ -76,163 +78,53 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera } bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) { - if (unprocessed_old_collection_candidates() == 0) { - return false; - } - - _first_pinned_candidate = NOT_FOUND; - - uint included_old_regions = 0; - size_t evacuated_old_bytes = 0; - size_t collected_old_bytes = 0; + _mixed_evac_cset = collection_set; + _included_old_regions = 0; + _evacuated_old_bytes = 0; + _collected_old_bytes = 0; // If a region is put into the collection set, then this region's free (not yet used) bytes are no longer // "available" to hold the results of other evacuations. This may cause a decrease in the remaining amount // of memory that can still be evacuated. 
We address this by reducing the evacuation budget by the amount // of live memory in that region and by the amount of unallocated memory in that region if the evacuation // budget is constrained by availability of free memory. - const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve(); - const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste); - size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); - size_t fragmented_available; - size_t excess_fragmented_available; - - if (unfragmented_available > old_evacuation_budget) { - unfragmented_available = old_evacuation_budget; - fragmented_available = 0; - excess_fragmented_available = 0; + _old_evacuation_reserve = _old_generation->get_evacuation_reserve(); + _old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste); + + // fragmented_available is the amount of memory within partially consumed old regions that may be required to + // hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available + // in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need + // to evacuate into the existing partially consumed old regions. + + // If fragmented_available is non-zero, excess_fragmented_available represents the amount of fragmented memory + // that is available within old, but is not required to hold the results of old evacuation. As old-gen regions + // are added into the collection set, their free memory is subtracted from excess_fragmented_available until the + // excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is + // subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this + // fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions + // selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted + // to unfragmented regions. 
+ + size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes(); + if (unaffiliated_available > _old_evacuation_reserve) { + _unfragmented_available = _old_evacuation_budget; + _fragmented_available = 0; + _excess_fragmented_available = 0; } else { - assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available"); - fragmented_available = _old_generation->available() - unfragmented_available; - assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up"); - if (fragmented_available + unfragmented_available > old_evacuation_budget) { - excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget; - fragmented_available -= excess_fragmented_available; + assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available"); + size_t affiliated_available = _old_generation->available() - unaffiliated_available; + assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up"); + if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) { + _excess_fragmented_available = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve; + affiliated_available -= _excess_fragmented_available; } + _fragmented_available = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste); + _unfragmented_available = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste); } - - size_t remaining_old_evacuation_budget = old_evacuation_budget; log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", - byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), + byte_size_in_proper_unit(_old_evacuation_budget), proper_unit_for_byte_size(_old_evacuation_budget), unprocessed_old_collection_candidates()); - - size_t lost_evacuation_capacity = 0; - - // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen - // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates(). - // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to - // evacuate region N, then there is no need to even consider evacuating region N+1. - while (unprocessed_old_collection_candidates() > 0) { - // Old collection candidates are sorted in order of decreasing garbage contained therein. - ShenandoahHeapRegion* r = next_old_collection_candidate(); - if (r == nullptr) { - break; - } - assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates"); - - // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need - // to decrease the capacity of the fragmented memory by the scaled loss. 
- - size_t live_data_for_evacuation = r->get_live_data_bytes(); - size_t lost_available = r->free(); - - if ((lost_available > 0) && (excess_fragmented_available > 0)) { - if (lost_available < excess_fragmented_available) { - excess_fragmented_available -= lost_available; - lost_evacuation_capacity -= lost_available; - lost_available = 0; - } else { - lost_available -= excess_fragmented_available; - lost_evacuation_capacity -= excess_fragmented_available; - excess_fragmented_available = 0; - } - } - size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste); - if ((lost_available > 0) && (fragmented_available > 0)) { - if (scaled_loss + live_data_for_evacuation < fragmented_available) { - fragmented_available -= scaled_loss; - scaled_loss = 0; - } else { - // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother - // to decrement scaled_loss - } - } - if (scaled_loss > 0) { - // We were not able to account for the lost free memory within fragmented memory, so we need to take this - // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free. - if (live_data_for_evacuation > unfragmented_available) { - // There is not room to evacuate this region or any that come after it in within the candidates array. - break; - } else { - unfragmented_available -= live_data_for_evacuation; - } - } else { - // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either - // fragmented or unfragmented available memory. Use up the fragmented memory budget first. - size_t evacuation_need = live_data_for_evacuation; - - if (evacuation_need > fragmented_available) { - evacuation_need -= fragmented_available; - fragmented_available = 0; - } else { - fragmented_available -= evacuation_need; - evacuation_need = 0; - } - if (evacuation_need > unfragmented_available) { - // There is not room to evacuate this region or any that come after it in within the candidates array. - break; - } else { - unfragmented_available -= evacuation_need; - // dead code: evacuation_need == 0; - } - } - collection_set->add_region(r); - included_old_regions++; - evacuated_old_bytes += live_data_for_evacuation; - collected_old_bytes += r->garbage(); - consume_old_collection_candidate(); - } - - if (_first_pinned_candidate != NOT_FOUND) { - // Need to deal with pinned regions - slide_pinned_regions_to_front(); - } - decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes); - if (included_old_regions > 0) { - log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", - included_old_regions, - byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes), - byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes)); - } - - if (unprocessed_old_collection_candidates() == 0) { - // We have added the last of our collection candidates to a mixed collection. - // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate. - clear_triggers(); - - _old_generation->complete_mixed_evacuations(); - } else if (included_old_regions == 0) { - // We have candidates, but none were included for evacuation - are they all pinned? - // or did we just not have enough room for any of them in this collection set? 
- // We don't want a region with a stuck pin to prevent subsequent old collections, so - // if they are all pinned we transition to a state that will allow us to make these uncollected - // (pinned) regions parsable. - if (all_candidates_are_pinned()) { - log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates()); - _old_generation->abandon_mixed_evacuations(); - } else { - log_info(gc)("No regions selected for mixed collection. " - "Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT - ", Lost capacity: " PROPERFMT - ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, - PROPERFMTARGS(old_evacuation_reserve), - PROPERFMTARGS(remaining_old_evacuation_budget), - PROPERFMTARGS(lost_evacuation_capacity), - _next_old_collection_candidate, _last_old_collection_candidate); - } - } - - return (included_old_regions > 0); + return add_old_regions_to_cset(); } bool ShenandoahOldHeuristics::all_candidates_are_pinned() { @@ -307,6 +199,154 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() { _next_old_collection_candidate = write_index + 1; } +bool ShenandoahOldHeuristics::add_old_regions_to_cset() { + if (unprocessed_old_collection_candidates() == 0) { + return false; + } + _first_pinned_candidate = NOT_FOUND; + + // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen + // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates(). + // Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to + // evacuate region N, then there is no need to even consider evacuating region N+1. + while (unprocessed_old_collection_candidates() > 0) { + // Old collection candidates are sorted in order of decreasing garbage contained therein. + ShenandoahHeapRegion* r = next_old_collection_candidate(); + if (r == nullptr) { + break; + } + assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates"); + + // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need + // to decrease the capacity of the fragmented memory by the scaled loss. 
+ + size_t live_data_for_evacuation = r->get_live_data_bytes(); + size_t lost_available = r->free(); + if ((lost_available > 0) && (_excess_fragmented_available > 0)) { + if (lost_available < _excess_fragmented_available) { + _excess_fragmented_available -= lost_available; + lost_available = 0; + } else { + lost_available -= _excess_fragmented_available; + _excess_fragmented_available = 0; + } + } + + ssize_t fragmented_delta = 0; + ssize_t unfragmented_delta = 0; + size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste); + if ((lost_available > 0) && (_fragmented_available > 0)) { + if (scaled_loss < _fragmented_available) { + _fragmented_available -= scaled_loss; + fragmented_delta = -scaled_loss; + scaled_loss = 0; + } else { + scaled_loss -= _fragmented_available; + fragmented_delta = -_fragmented_available; + _fragmented_available = 0; + } + } + // Allocate replica from unfragmented memory if that exists + size_t evacuation_need = live_data_for_evacuation; + if (evacuation_need < _unfragmented_available) { + _unfragmented_available -= evacuation_need;; + } else { + if (_unfragmented_available > 0) { + evacuation_need -= _unfragmented_available; + unfragmented_delta = -_unfragmented_available; + _unfragmented_available = 0; + } + // Take the remaining allocation out of fragmented available + if (_fragmented_available > evacuation_need) { + _fragmented_available -= evacuation_need; + } else { + // We cannot add this region into the collection set. We're done. Undo the adjustments to available. + _fragmented_available -= fragmented_delta; + _unfragmented_available -= unfragmented_delta; + break; + } + } + _mixed_evac_cset->add_region(r); + _included_old_regions++; + _evacuated_old_bytes += live_data_for_evacuation; + _collected_old_bytes += r->garbage(); + consume_old_collection_candidate(); + } + return true; +} + +bool ShenandoahOldHeuristics::finalize_mixed_evacs() { + if (_first_pinned_candidate != NOT_FOUND) { + // Need to deal with pinned regions + slide_pinned_regions_to_front(); + } + decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes); + if (_included_old_regions > 0) { + log_info(gc)("Old-gen mixed evac (" SIZE_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)", + _included_old_regions, + byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes), + byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes)); + } + + if (unprocessed_old_collection_candidates() == 0) { + // We have added the last of our collection candidates to a mixed collection. + // Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate. + clear_triggers(); + _old_generation->complete_mixed_evacuations(); + } else if (_included_old_regions == 0) { + // We have candidates, but none were included for evacuation - are they all pinned? + // or did we just not have enough room for any of them in this collection set? + // We don't want a region with a stuck pin to prevent subsequent old collections, so + // if they are all pinned we transition to a state that will allow us to make these uncollected + // (pinned) regions parsable. + if (all_candidates_are_pinned()) { + log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates()); + _old_generation->abandon_mixed_evacuations(); + } else { + log_info(gc)("No regions selected for mixed collection. 
" + "Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT, + PROPERFMTARGS(_old_evacuation_reserve), + _next_old_collection_candidate, _last_old_collection_candidate); + } + } + return (_included_old_regions > 0); +} + +bool ShenandoahOldHeuristics::top_off_collection_set() { + if (unprocessed_old_collection_candidates() == 0) { + return false; + } else { + ShenandoahYoungGeneration* young_generation = _heap->young_generation(); + size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions(); + size_t max_young_cset = young_generation->get_evacuation_reserve(); + size_t planned_young_evac = _mixed_evac_cset->get_young_bytes_reserved_for_evacuation(); + size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste); + size_t available_to_loan_from_young_reserve = ((consumed_from_young_cset >= max_young_cset)? + 0: max_young_cset - consumed_from_young_cset); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + if ((young_unaffiliated_regions == 0) || (available_to_loan_from_young_reserve < region_size_bytes)) { + return false; + } else { + size_t regions_for_old_expansion = (available_to_loan_from_young_reserve / region_size_bytes); + if (regions_for_old_expansion > young_unaffiliated_regions) { + regions_for_old_expansion = young_unaffiliated_regions; + } + log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by " SIZE_FORMAT " regions", + regions_for_old_expansion); + _heap->generation_sizer()->force_transfer_to_old(regions_for_old_expansion); + size_t budget_supplement = region_size_bytes * regions_for_old_expansion; + size_t supplement_after_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste); + _old_evacuation_budget += supplement_after_waste; + _unfragmented_available += supplement_after_waste; + + _old_generation->augment_evacuation_reserve(budget_supplement); + young_generation->set_evacuation_reserve(max_young_cset - budget_supplement); + + return add_old_regions_to_cset(); + } + } +} + void ShenandoahOldHeuristics::prepare_for_old_collections() { ShenandoahHeap* heap = ShenandoahHeap::heap(); @@ -315,7 +355,9 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { size_t immediate_garbage = 0; size_t immediate_regions = 0; size_t live_data = 0; - +#ifdef ASSERT + bool reclaimed_immediate = false; +#endif RegionData* candidates = _region_data; for (size_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* region = heap->get_region(i); @@ -328,12 +370,20 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { live_data += live_bytes; if (region->is_regular() || region->is_regular_pinned()) { - // Only place regular or pinned regions with live data into the candidate set. - // Pinned regions cannot be evacuated, but we are not actually choosing candidates - // for the collection set here. That happens later during the next young GC cycle, - // by which time, the pinned region may no longer be pinned. + // Only place regular or pinned regions with live data into the candidate set. + // Pinned regions cannot be evacuated, but we are not actually choosing candidates + // for the collection set here. That happens later during the next young GC cycle, + // by which time, the pinned region may no longer be pinned. 
if (!region->has_live()) { assert(!region->is_pinned(), "Pinned region should have live (pinned) objects."); +#ifdef ASSERT + if (!reclaimed_immediate) { + reclaimed_immediate = true; + // Inform the free-set that old trash regions may temporarily violate OldCollector bounds + shenandoah_assert_heaplocked(); + heap->free_set()->advise_of_old_trash(); + } +#endif region->make_trash_immediate(); immediate_regions++; immediate_garbage += garbage; @@ -353,6 +403,14 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { // immediately to the freeset - no evacuations are necessary here. The continuations // will be made into trash by this method, so they'll be skipped by the 'is_regular' // check above, but we still need to count the start region. +#ifdef ASSERT + if (!reclaimed_immediate) { + reclaimed_immediate = true; + // Inform the free-set that old trash regions may temporarily violate OldCollector bounds + shenandoah_assert_heaplocked(); + heap->free_set()->advise_of_old_trash(); + } +#endif immediate_regions++; immediate_garbage += garbage; size_t region_count = heap->trash_humongous_region_at(region); @@ -538,6 +596,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa void ShenandoahOldHeuristics::abandon_collection_candidates() { _last_old_collection_candidate = 0; _next_old_collection_candidate = 0; + _live_bytes_in_unprocessed_candidates = 0; _last_old_region = 0; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp index 74d01381bf8..489a391ffbd 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp @@ -103,6 +103,20 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { size_t _fragmentation_first_old_region; size_t _fragmentation_last_old_region; + // State variables involved in construction of a mixed-evacuation collection set. These variables are initialized + // when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code + // calls top_off_collection_set() to possibly expand the number old-gen regions in a mixed evacuation cset, and by + // finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin. + ShenandoahCollectionSet* _mixed_evac_cset; + size_t _evacuated_old_bytes; + size_t _collected_old_bytes; + size_t _included_old_regions; + size_t _old_evacuation_reserve; + size_t _old_evacuation_budget; + size_t _unfragmented_available; + size_t _fragmented_available; + size_t _excess_fragmented_available; + // Compare by live is used to prioritize compaction of old-gen regions. With old-gen compaction, the goal is // to tightly pack long-lived objects into available regions. In most cases, there has not been an accumulation // of garbage within old-gen regions. The more likely opportunity will be to combine multiple sparsely populated @@ -127,14 +141,34 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics { protected: void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override; + // This internal helper route adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget + // to the collection set. 
This may be called twice to prepare for any given mixed evacuation cycle, the first time with + // a conservative old evacuation budget, and the second time with a larger, more aggressive old evacuation budget. Returns + // true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize + // mixed evacuations.) + bool add_old_regions_to_cset(); + public: explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap); // Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass. void prepare_for_old_collections(); - // Return true iff the collection set is primed with at least one old-gen region. - bool prime_collection_set(ShenandoahCollectionSet* set); + // Initialize instance variables to support the preparation of a mixed-evacuation collection set. Adds as many + // old candidate regions into the collection set as can fit within the initial conservative old evacuation budget. + // Returns true iff we need to finalize mixed evacs. + bool prime_collection_set(ShenandoahCollectionSet* collection_set); + + // If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed- + // evacuation candidate regions into the collection set as will fit within this excess repurposed reserve. + // Returns true iff we need to finalize mixed evacs. + bool top_off_collection_set(); + + // Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count + // of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to + // be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and + // returns true iff the collection set holds at least one unpinned mixed evacuation candidate. + bool finalize_mixed_evacs(); // How many old-collection candidates have not yet been processed? uint unprocessed_old_collection_candidates() const; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp index ac96e40b54c..9cb060a11a7 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -93,7 +93,6 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset; size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; - log_info(gc, ergo)( "Adaptive CSet Selection for YOUNG. Max Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 9949a8dd3ad..1ba20ae0baa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -207,7 +206,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) { return false; } - vmop_entry_final_updaterefs(); // Update references freed up collection set, kick the cleanup to reclaim the space. 
@@ -217,8 +216,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) { // do not check for cancellation here because, at this point, the cycle is effectively // complete. If the cycle has been cancelled here, the control thread will detect it // on its next iteration and run a degenerated young cycle. - vmop_entry_final_roots(); _abbreviated = true; + vmop_entry_final_roots(); } // We defer generation resizing actions until after cset regions have been recycled. We do this even following an @@ -329,8 +328,14 @@ void ShenandoahConcurrentGC::entry_final_roots() { static const char* msg = "Pause Final Roots"; ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots); EventMark em("%s", msg); - + ShenandoahHeap* const heap = ShenandoahHeap::heap(); op_final_roots(); + // After concurrent old marking finishes and after an abbreviated cycle, we reclaim immediate garbage. + // Further, we may also want to expand OLD in order to make room for anticipated promotions and/or for mixed + // evacuations. Mixed evacuations are especially likely following the end of OLD marking. + assert(_abbreviated || (heap->mode()->is_generational() && _generation->is_old()), + "Only rebuild free set for abbreviated and old-marking cycles"); + heap->rebuild_free_set(true /*concurrent*/); } void ShenandoahConcurrentGC::entry_reset() { @@ -690,32 +695,8 @@ void ShenandoahConcurrentGC::op_final_mark() { JvmtiTagMap::set_needs_cleaning(); // The collection set is chosen by prepare_regions_and_collection_set(). - // - // TODO: Under severe memory overload conditions that can be checked here, we may want to limit - // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on - // evacuating young-gen, This remediation is most appropriate when old-gen availability is very high (so there - // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections - // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that/ young-gen - // collections are not triggering frequently enough). _generation->prepare_regions_and_collection_set(true /*concurrent*/); - // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the - // evacuation efforts that are about to begin. In particular: - // - // heap->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has - // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage - // of the live young-gen memory within the collection set. If there is more data ready to be promoted than - // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation - // pass. - // - // heap->get_old_evac_reserve() represents the amount of memory within old-gen's available memory that has been - // set aside to hold objects evacuated from the old-gen collection set. - // - // heap->get_young_evac_reserve() represents the amount of memory within young-gen's available memory that has - // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value - // equals the entire amount of live young-gen memory within the collection set, even though some of this memory - // will likely be promoted. 
- // Has to be done after cset selection heap->prepare_concurrent_roots(); @@ -1171,7 +1152,6 @@ void ShenandoahConcurrentGC::op_final_updaterefs() { if (VerifyAfterGC) { Universe::verify(); } - heap->rebuild_free_set(true /*concurrent*/); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index c3f3a0dfe8c..8bc441c0dfc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -273,6 +273,31 @@ inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(She } } +inline void ShenandoahRegionPartitions::adjust_interval_for_recycled_old_region(ShenandoahHeapRegion* r) { + assert(!r->is_trash() && (r->free() == _region_size_bytes), "Bad argument"); + idx_t idx = (idx_t) r->index(); + // Note that a recycled old trashed region may be in any one of the free set partitions according to the following scenarios: + // 1. The old region had already been retired, so it was NotFree, and we have not rebuilt free set, so region is still NotFree + // 2. We recycled the region but we have not yet rebuilt the free set, so it is still in the OldCollector region. + // 3. We have found regions with alloc capacity but have not yet reserved_regions, so this is in Mutator set, and + // the act of placing the region into the Mutator set properly adjusts interval for Mutator set. + // 4. During reserve_regions(), we moved this region into the Collector set, and the act of placing this region into + // Collector set properly adjusts the interval for the Collector set. + // 5. During reserve_regions, we moved this region into the OldCollector set, and the act of placing this region into + // OldCollector set properly adjusts the interval for the OldCollector set. + // Only case 2 needs to be fixed up here. + ShenandoahFreeSetPartitionId old_partition = ShenandoahFreeSetPartitionId::OldCollector; + if (_membership[int(old_partition)].is_set(idx)) { + assert(_leftmosts[int(old_partition)] <= idx && _rightmosts[int(old_partition)] >= idx, "sanity"); + if (_leftmosts_empty[int(old_partition)] > idx) { + _leftmosts_empty[int(old_partition)] = idx; + } + if (_rightmosts_empty[int(old_partition)] < idx) { + _rightmosts_empty[int(old_partition)] = idx; + } + } +} + void ShenandoahRegionPartitions::retire_range_from_partition( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { @@ -516,7 +541,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w #ifdef ASSERT -void ShenandoahRegionPartitions::assert_bounds() { +void ShenandoahRegionPartitions::assert_bounds(bool old_trash_not_in_bounds) { idx_t leftmosts[UIntNumPartitions]; idx_t rightmosts[UIntNumPartitions]; @@ -542,6 +567,27 @@ void ShenandoahRegionPartitions::assert_bounds() { { size_t capacity = _free_set->alloc_capacity(i); bool is_empty = (capacity == _region_size_bytes); + ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i); + + // When old_trash_not_in_bounds, an old trashed region might reside in: + // 1. NotFree if the region had already been retired + // 2. OldCollector because the region was originally in OldCollector when it was identified as immediate garbage, or + // 3. Mutator because we have run find_regions_with_alloc_capacity(), or + // 4. Collector because reserve_regions moved from Mutator to Collector but we have not yet recycled the trash + // 5. 
OldCollector because reserve_regions moved from Mutator to OldCollector but we have not yet recycled the trash + + // In case 1, there is no issue with empty-free intervals. + // In cases 3 - 5, there is no issue with empty-free intervals because the act of moving the region into the partition + // causes the empty-free interval to be updated. + // Only in case 2 do we need to disable the assert checking, but it is difficult to distinguish case 2 from case 5, + // so we do not assert bounds for case 2 or case 5. + + if (old_trash_not_in_bounds && (partition == ShenandoahFreeSetPartitionId::OldCollector) && r->is_old() && r->is_trash()) { + // If Old trash has been identified but we have not yet rebuilt the freeset to acount for the trashed regions, + // or if old trash has not yet been recycled, do not expect these trash regions to be within the OldCollector + // partition's bounds. + continue; + } assert(capacity > 0, "free regions must have allocation capacity"); if (i < leftmosts[int(partition)]) { leftmosts[int(partition)] = i; @@ -589,12 +635,12 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; - assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)); - assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator)); + assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]); + assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]); // Performance invariants. Failing these would not break the free partition, but performance would suffer. assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, @@ -623,11 +669,11 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector)); + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector)); + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); // Performance invariants. Failing these would not break the free partition, but performance would suffer. 
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, @@ -658,11 +704,11 @@ void ShenandoahRegionPartitions::assert_bounds() { beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); + "free empty region (" SSIZE_FORMAT ") before the leftmost bound " SSIZE_FORMAT, + beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT, - end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); + "free empty region (" SSIZE_FORMAT ") past the rightmost bound " SSIZE_FORMAT, + end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]); } #endif @@ -670,6 +716,9 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), _partitions(max_regions, this), _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)), +#ifdef ASSERT + _old_trash_not_in_bounds(false), +#endif _alloc_bias_weight(0) { clear_internal(); @@ -1113,7 +1162,9 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah } } _partitions.retire_from_partition(orig_partition, idx, r->used()); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif } return result; } @@ -1223,7 +1274,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif req.set_actual_size(words_size); if (remainder != 0) { req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder); @@ -1257,9 +1310,21 @@ void ShenandoahFreeSet::recycle_trash() { ShenandoahHeapLocker locker(_heap->lock()); const jlong deadline = os::javaTimeNanos() + deadline_ns; while (idx < count && os::javaTimeNanos() < deadline) { - try_recycle_trashed(_trash_regions[idx++]); + ShenandoahHeapRegion* r = _trash_regions[idx++]; + try_recycle_trashed(r); +#ifdef ASSERT + // Note: if assertions are not enforced, there's no rush to adjust this interval. We'll adjust the + // interval when we eventually rebuild the free set. 
+ if (_old_trash_not_in_bounds) { + _partitions.adjust_interval_for_recycled_old_region(r); + } +#endif } } +#ifdef ASSERT + ShenandoahHeapLocker locker(_heap->lock()); + _old_trash_not_in_bounds = false; +#endif } void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { @@ -1272,7 +1337,9 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { size_t region_capacity = alloc_capacity(r); _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif _heap->old_generation()->augment_evacuation_reserve(region_capacity); bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { @@ -1292,8 +1359,9 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { size_t ac = alloc_capacity(r); _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); - _partitions.assert_bounds(); - +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, // to recycle trash before attempting to allocate anything in the region. } @@ -1343,9 +1411,16 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi for (size_t idx = 0; idx < _heap->num_regions(); idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); if (region->is_trash()) { - // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up". - // The cset regions are not "trashed" until we have finished update refs. + // Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage) + // but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs. if (region->is_old()) { + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + // We're going to place this region into the Mutator set. We increment old_cset_regions because this count represents + // regions that the old generation is entitled to without any transfer from young. We do not place this region into + // the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region + // into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the + // OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this + // time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions(). 
old_cset_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); @@ -1366,7 +1441,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t ac = alloc_capacity(region); if (ac > PLAB::min_size() * HeapWordSize) { if (region->is_trash() || !region->is_old()) { - // Both young and old collected regions (trashed) are placed into the Mutator set + // Young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator); if (idx < mutator_leftmost) { mutator_leftmost = idx; @@ -1502,7 +1577,7 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r old_collector_xfer); max_xfer_regions -= old_collector_regions; if (old_collector_regions > 0) { - ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions); + ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->force_transfer_to_young(old_collector_regions); } } @@ -1524,6 +1599,12 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r } +void ShenandoahFreeSet::rebuild() { + size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count; + prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); + finish_rebuild(young_cset_regions, old_cset_regions, old_region_count); +} + // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions, size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) { @@ -1545,42 +1626,83 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si ShenandoahYoungGeneration* young_gen = heap->young_generation(); size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t original_old_regions = old_gen->max_capacity() / region_size_bytes; old_gen->set_capacity(old_region_count * region_size_bytes); young_gen->set_capacity(young_region_count * region_size_bytes); + + if (original_old_regions > old_region_count) { + log_info(gc)("Transfer " SIZE_FORMAT " regions from OLD to YOUNG during rebuild of freeset", + original_old_regions - old_region_count); + } else if (original_old_regions < old_region_count) { + log_info(gc)("Transfer " SIZE_FORMAT " regions from YOUNG to OLD during rebuild of freeset", + old_region_count - original_old_regions); + } + + // Having transferred regions based on results of rebuild(), reset the rebalance request. 
+ old_gen->set_region_balance(0); } } -void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count, - bool have_evacuation_reserves) { +void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) { shenandoah_assert_heaplocked(); size_t young_reserve(0), old_reserve(0); if (_heap->mode()->is_generational()) { - compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves, - young_reserve, old_reserve); + compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve); } else { young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve; old_reserve = 0; } - // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy + // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy // young_reserve and old_reserve. reserve_regions(young_reserve, old_reserve, old_region_count); size_t young_region_count = _heap->num_regions() - old_region_count; establish_generation_sizes(young_region_count, old_region_count); establish_old_collector_alloc_bias(); - _partitions.assert_bounds(); +#ifdef ASSERT + _partitions.assert_bounds(_old_trash_not_in_bounds); +#endif log_status(); + // Even though we have finished rebuild, old trashed regions may not yet have been recycled, so leave + // _old_trash_not_in_bounds as is. Following rebuild, old trashed regions may reside in Mutator, Collector, + // or OldCollector partitions. +} + +// Reduce old reserve (when there are insufficient resources to satisfy the original request). +void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) { + ShenandoahOldGeneration* const old_generation = _heap->old_generation(); + size_t requested_promoted_reserve = old_generation->get_promoted_reserve(); + size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve(); + assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction"); + assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity"); + size_t delta = requested_old_reserve - adjusted_old_reserve; + + if (requested_promoted_reserve >= delta) { + requested_promoted_reserve -= delta; + old_generation->set_promoted_reserve(requested_promoted_reserve); + } else { + delta -= requested_promoted_reserve; + requested_promoted_reserve = 0; + requested_old_evac_reserve -= delta; + old_generation->set_promoted_reserve(requested_promoted_reserve); + old_generation->set_evacuation_reserve(requested_old_evac_reserve); + } +} + +// Reduce young reserve (when there are insufficient resources to satisfy the original request). 
+void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) { + ShenandoahYoungGeneration* const young_generation = _heap->young_generation(); + assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction"); + young_generation->set_evacuation_reserve(adjusted_young_reserve); } void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, - bool have_evacuation_reserves, size_t& young_reserve_result, size_t& old_reserve_result) const { shenandoah_assert_generational(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - ShenandoahOldGeneration* const old_generation = _heap->old_generation(); - size_t old_available = old_generation->available(); + size_t old_available = old_generation->available() + old_cset_regions * region_size_bytes; size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions(); ShenandoahYoungGeneration* const young_generation = _heap->young_generation(); size_t young_capacity = young_generation->max_capacity(); @@ -1590,6 +1712,16 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions old_unaffiliated_regions += old_cset_regions; young_unaffiliated_regions += young_cset_regions; + assert(young_capacity >= (young_generation->used() + young_generation->get_humongous_waste()), + "Young capacity (" SIZE_FORMAT ") must exceed used (" SIZE_FORMAT ") plus humongous waste (" SIZE_FORMAT ")", + young_capacity, young_generation->used(), young_generation->get_humongous_waste()); + + size_t young_available = young_capacity - (young_generation->used() + young_generation->get_humongous_waste()); + young_available += young_cset_regions * region_size_bytes; + + assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity"); + assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity"); + // Consult old-region balance to make adjustments to current generation capacities and availability. // The generation region transfers take place after we rebuild. const ssize_t old_region_balance = old_generation->get_region_balance(); @@ -1605,6 +1737,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions ssize_t xfer_bytes = old_region_balance * checked_cast(region_size_bytes); old_available -= xfer_bytes; old_unaffiliated_regions -= old_region_balance; + young_available += xfer_bytes; young_capacity += xfer_bytes; young_unaffiliated_regions += old_region_balance; } @@ -1613,32 +1746,24 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // promotions and evacuations. The partition between which old memory is reserved for evacuation and // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for // each PLAB's available memory. - if (have_evacuation_reserves) { - // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass. 
- const size_t promoted_reserve = old_generation->get_promoted_reserve(); - const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); - young_reserve_result = young_generation->get_evacuation_reserve(); - old_reserve_result = promoted_reserve + old_evac_reserve; - assert(old_reserve_result <= old_available, - "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, - promoted_reserve, old_evac_reserve, old_available); - } else { - // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) - young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100; - // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions. - // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of - // unaffiliated regions. - old_reserve_result = old_available; - } + + const size_t promoted_reserve = old_generation->get_promoted_reserve(); + const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); + young_reserve_result = young_generation->get_evacuation_reserve(); + old_reserve_result = promoted_reserve + old_evac_reserve; + assert(old_reserve_result + young_reserve_result <= old_available + young_available, + "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT + ") more than is available: " SIZE_FORMAT " + " SIZE_FORMAT, + promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available); // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust // the reserve downward to account for this possibility. This loss is part of the reason why the original budget // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers. 
if (old_reserve_result > - _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) { + _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) { old_reserve_result = - _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes; + _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes; } if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) { @@ -1712,17 +1837,20 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old } } - if (LogTarget(Info, gc, free)::is_enabled()) { - size_t old_reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector); - if (old_reserve < to_reserve_old) { - log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT, - PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve)); - } - size_t reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector); - if (reserve < to_reserve) { - log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, - PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve)); + size_t old_reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector); + if (old_reserve < to_reserve_old) { + assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode"); + reduce_old_reserve(old_reserve, to_reserve_old); + log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT, + PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve)); + } + size_t young_reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector); + if (young_reserve < to_reserve) { + if (_heap->mode()->is_generational()) { + reduce_young_reserve(young_reserve, to_reserve); } + log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, + PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve)); } } @@ -1797,7 +1925,6 @@ void ShenandoahFreeSet::log_status() { for (uint i = 0; i < BUFFER_SIZE; i++) { buffer[i] = '\0'; } - log_debug(gc)("FreeSet map legend:" " M:mutator_free C:collector_free O:old_collector_free" " H:humongous ~:retired old _:retired young"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 5f69ec47cfd..5c99bb875f3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -149,6 +149,10 @@ class ShenandoahRegionPartitions { void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available); + // In case recycled region r is in the OldCollector partition but not within the interval for empty OldCollector regions, expand + // the empty interval to include this region. If recycled region r is not in the OldCollector partition, do nothing. + inline void adjust_interval_for_recycled_old_region(ShenandoahHeapRegion* r); + const char* partition_membership_name(ssize_t idx) const; // Return the index of the next available region >= start_index, or maximum_regions if not found. 
@@ -236,6 +240,7 @@ class ShenandoahRegionPartitions { inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; } +#ifdef ASSERT // Assure leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets. // Valid bounds honor all of the following (where max is the number of heap regions): // if the set is empty, leftmost equals max and rightmost equals 0 @@ -256,7 +261,10 @@ class ShenandoahRegionPartitions { // idx >= leftmost && // idx <= rightmost // } - void assert_bounds() NOT_DEBUG_RETURN; + // + // If old_trash_not_in_bounds, do not require old-generation trashed regions to be within the OldCollector bounds. + void assert_bounds(bool old_trash_not_in_bounds) NOT_DEBUG_RETURN; +#endif }; // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(), @@ -297,7 +305,9 @@ class ShenandoahFreeSet : public CHeapObj { inline HeapWord* allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region); - +#ifdef ASSERT + bool _old_trash_not_in_bounds; +#endif // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero. Each time // we allocate an object, we decrement the count of this value. Each time we re-evaluate whether to allocate // from right-to-left or left-to-right, we reset the value of this counter to _InitialAllocBiasWeight. @@ -355,6 +365,9 @@ class ShenandoahFreeSet : public CHeapObj { void establish_generation_sizes(size_t young_region_count, size_t old_region_count); size_t get_usable_free_words(size_t free_bytes) const; + void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve); + void reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve); + // log status, assuming lock has already been acquired by the caller. void log_status(); @@ -367,6 +380,12 @@ class ShenandoahFreeSet : public CHeapObj { void clear(); + // Rebuild the free set. This combines the effects of prepare_to_rebuild() and finish_rebuild() with no intervening + // efforts to rebalance generation sizes. When the free set is rebuild, we reserve sufficient memory within the + // collector partition (and, for generational mode, the old collector partition) based on the amount reserved + // by heuristics to support the next planned evacuation effort. + void rebuild(); + // Examine the existing free set representation, capturing the current state into var arguments: // // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero @@ -378,27 +397,15 @@ class ShenandoahFreeSet : public CHeapObj { size_t &first_old_region, size_t &last_old_region, size_t &old_region_count); // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to - // hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true. - // These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding - // the free set (ShenandoahFreeSet) in preparation for evacuation. When the free set is rebuilt, we make sure - // to reserve sufficient memory in the collector and old_collector sets to hold evacuations. - // - // We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger. 
In this case, - // have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the - // next GC cycle. When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector - // and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and - // ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the - // evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data - // found during the previous GC pass which is one less than the current tenure age. + // hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their, + // respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for + // evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and + // old_collector sets to hold evacuations. // // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero // old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero // num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset) - // have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen - // promotion reserve have been precomputed (and can be obtained by invoking - // ->get_evacuation_reserve() or old_gen->get_promoted_reserve() - void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions, - bool have_evacuation_reserves = false); + void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions); // When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size() // into the old collector partition by invoking this method. @@ -410,6 +417,13 @@ class ShenandoahFreeSet : public CHeapObj { // for evacuation, invoke this to make regions available for mutator allocations. void move_regions_from_collector_to_mutator(size_t cset_regions); +#ifdef ASSERT + // Advise FreeSet that old trash regions have not yet been accounted for in OldCollector partition bounds + void advise_of_old_trash() { + shenandoah_assert_heaplocked(); + _old_trash_not_in_bounds = true; + } +#endif void recycle_trash(); // Acquire heap lock and log status, assuming heap lock is not acquired by the caller. @@ -484,7 +498,7 @@ class ShenandoahFreeSet : public CHeapObj { // Reserve space for evacuations, with regions reserved for old evacuations placed to the right // of regions reserved of young evacuations. 
- void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves, + void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, size_t &young_reserve_result, size_t &old_reserve_result) const; }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 63cd1cc7873..6eba849fdda 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -1177,25 +1177,11 @@ void ShenandoahFullGC::phase5_epilog() { } heap->collection_set()->clear(); - size_t young_cset_regions, old_cset_regions; - size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - - // We also do not expand old generation size following Full GC because we have scrambled age populations and - // no longer have objects separated by age into distinct regions. - // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions? // A partial solution would be to remember how many objects are of tenure age following Full GC, but // this is probably suboptimal, because most of these objects will not reside in a region that will be // selected for the next evacuation phase. - - - if (heap->mode()->is_generational()) { - ShenandoahGenerationalFullGC::compute_balances(); - } - - heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old); - + heap->free_set()->rebuild(); heap->clear_cancelled_gc(true /* clear oom handler */); } @@ -1205,7 +1191,6 @@ void ShenandoahFullGC::phase5_epilog() { // We defer generation resizing actions until after cset regions have been recycled. We do this even following an // abbreviated cycle. if (heap->mode()->is_generational()) { - ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set(); ShenandoahGenerationalFullGC::rebuild_remembered_set(heap); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 3956b20412a..c49d7405b85 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -258,8 +258,9 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // First priority is to reclaim the easy garbage out of young-gen. - // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young - const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100; + // maximum_young_evacuation_reserve is upper bound on memory to be evacuated into young Collector Reserve. This is + // bounded at the end of previous GC cycle, based on available memory and balancing of evacuation to old and young. + const size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve(); const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), @@ -353,7 +354,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // Having chosen the collection set, adjust the budgets for generational mode based on its composition. 
Note // that young_generation->available() now knows about recently discovered immediate garbage. -// void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) { shenandoah_assert_generational(); // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may @@ -455,11 +455,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, } if (regions_to_xfer > 0) { - bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); - assert(excess_old > regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old"); + assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old"); excess_old -= regions_to_xfer * region_size_bytes; - log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation", - result? "Successfully": "Unsuccessfully", regions_to_xfer); + ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->force_transfer_to_young(regions_to_xfer); } // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated @@ -652,6 +650,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { ShenandoahCollectionSet* collection_set = heap->collection_set(); bool is_generational = heap->mode()->is_generational(); + // TODO: Under severe memory overload conditions that can be checked here, we may want to limit + // the inclusion of old-gen candidates within the collection set. This would allow us to prioritize efforts on + // evacuating young-gen, This remediation is most appropriate when old-gen availability is very high (so there + // are negligible negative impacts from delaying completion of old-gen evacuation) and when young-gen collections + // are "under duress" (as signalled by very low availability of memory within young-gen, indicating that young-gen + // collections are not triggering frequently enough). + assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC"); assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations"); { @@ -715,16 +720,14 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // GC is evacuating and updating references. // Find the amount that will be promoted, regions that will be promoted in - // place, and preselect older regions that will be promoted by evacuation. + // place, and preselected older regions that will be promoted by evacuation. compute_evacuation_budgets(heap); - // Choose the collection set, including the regions preselected above for - // promotion into the old generation. + // Choose the collection set, including the regions preselected above for promotion into the old generation. _heuristics->choose_collection_set(collection_set); - if (!collection_set->is_empty()) { - // only make use of evacuation budgets when we are evacuating - adjust_evacuation_budgets(heap, collection_set); - } + + // Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator. + adjust_evacuation_budgets(heap, collection_set); if (is_global()) { // We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so @@ -743,18 +746,11 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { } } - { ShenandoahGCPhase phase(concurrent ? 
ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); ShenandoahHeapLocker locker(heap->lock()); - size_t young_cset_regions, old_cset_regions; - - // We are preparing for evacuation. At this time, we ignore cset region tallies. - size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - // Free set construction uses reserve quantities, because they are known to be valid here - heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true); + heap->free_set()->rebuild(); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 081fdad5e3b..0008371e4ca 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -170,6 +170,22 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual void prepare_gc(); // Called during final mark, chooses collection set, rebuilds free set. + // Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the + // evacuation efforts that are about to begin. In particular: + // + // old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has + // been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage + // of the live young-gen memory within the collection set. If there is more data ready to be promoted than + // can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation + // pass. + // + // old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been + // set aside to hold objects evacuated from the old-gen collection set. + // + // young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has + // been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value + // equals the entire amount of live young-gen memory within the collection set, even though some of this memory + // will likely be promoted. virtual void prepare_regions_and_collection_set(bool concurrent); // Cancel marking (used by Full collect and when cancelling cycle). diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index ce502070558..377a70a2646 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -194,6 +194,19 @@ void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); } +// This is used to transfer excess old-gen regions to young at the start of evacuation after collection set is determined. 
+void ShenandoahGenerationSizer::force_transfer_to_young(size_t regions) const { + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); + ShenandoahGeneration* old_gen = heap->old_generation(); + ShenandoahGeneration* young_gen = heap->young_generation(); + const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes(); + + young_gen->increase_capacity(bytes_to_transfer); + old_gen->decrease_capacity(bytes_to_transfer); + const size_t new_size = young_gen->max_capacity(); + log_info(gc)("Forcing transfer of " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + regions, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_size)); +} bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp index 5752422bb77..e19c7c3dfbb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp @@ -86,8 +86,13 @@ class ShenandoahGenerationSizer { bool transfer_to_young(size_t regions) const; bool transfer_to_old(size_t regions) const; - // force transfer is used when we promote humongous objects. May violate min/max limits on generation sizes + // Force transfer is used when we promote humongous objects or promote regular regions in place. + // May violate min/max limits on generation sizes. void force_transfer_to_old(size_t regions) const; + + // Force transfer is used when we have excess old and we have confirmed that old unaffiliated >= regions. + // May violate min/max limits on generation sizes. + void force_transfer_to_young(size_t regions) const; }; #endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index 43e3a7910ab..185da373762 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -56,9 +56,6 @@ void ShenandoahGenerationalFullGC::prepare() { heap->set_gc_generation(heap->global_generation()); heap->set_active_generation(); - // No need for old_gen->increase_used() as this was done when plabs were allocated. - heap->reset_generation_reserves(); - // Full GC supersedes any marking or coalescing in old generation. heap->old_generation()->cancel_gc(); } @@ -100,6 +97,9 @@ void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) heap->old_generation()->set_parseable(true); } +// Full GC has scattered aged objects throughout the heap. There are no more aged regions, so there is no anticipated +// promotion. Furthermore, Full GC has cancelled any ongoing mixed evacuation efforts so there are no anticipated old-gen +// evacuations. Size old-gen to represent its current usage by setting the balance. This feeds into rebuild of freeset. 
void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) { ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap); ShenandoahOldGeneration* const old_gen = gen_heap->old_generation(); @@ -110,26 +110,44 @@ void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size"); assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size"); + ssize_t region_balance; if (old_capacity > old_usage) { size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes(); - gen_heap->generation_sizer()->transfer_to_young(excess_old_regions); + // Since the act of FullGC does not honor old and young budgets, excess_old_regions are conceptually unaffiliated. + region_balance = checked_cast(excess_old_regions); } else if (old_capacity < old_usage) { + // Since the old_usage already consumes more regions than in old_capacity, we know these regions are not affiliated young, + // so arrange to transfer them. size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes(); - gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit); + region_balance = 0 - checked_cast(old_regions_deficit); + } else { + region_balance = 0; + } + old_gen->set_region_balance(region_balance); + // Rebuild free set will log adjustments to generation sizes. + + ShenandoahYoungGeneration* const young_gen = gen_heap->young_generation(); + size_t anticipated_young_capacity = young_gen->max_capacity() + region_balance * ShenandoahHeapRegion::region_size_bytes(); + size_t young_usage = young_gen->used_regions_size(); + assert(anticipated_young_capacity >= young_usage, "sanity"); + + size_t anticipated_max_collector_reserve = anticipated_young_capacity - young_usage; + size_t desired_collector_reserve = (anticipated_young_capacity * ShenandoahEvacReserve) / 100; + size_t young_reserve; + if (desired_collector_reserve > anticipated_max_collector_reserve) { + // Trigger next concurrent GC immediately + young_reserve = anticipated_max_collector_reserve; + } else { + young_reserve = desired_collector_reserve; } - log_info(gc)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT, - PROPERFMTARGS(gen_heap->young_generation()->used()), - PROPERFMTARGS(old_gen->used())); -} + size_t reserve_for_promo = 0; + size_t reserve_for_mixed = 0; -void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() { - auto result = ShenandoahGenerationalHeap::heap()->balance_generations(); - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Full GC", &ls); - } + // Reserves feed into rebuild calculations + young_gen->set_evacuation_reserve(young_reserve); + old_gen->set_evacuation_reserve(reserve_for_mixed); + old_gen->set_promoted_reserve(reserve_for_promo); } void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) { @@ -179,15 +197,6 @@ void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeap } } -void ShenandoahGenerationalFullGC::compute_balances() { - auto heap = ShenandoahGenerationalHeap::heap(); - - // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion. - heap->old_generation()->set_promotion_potential(0); - // Invoke this in case we are able to transfer memory from OLD to YOUNG. 
- heap->compute_old_generation_balance(0, 0); -} - ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks, GrowableArray& empty_regions, ShenandoahHeapRegion* from_region, uint worker_id) : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp index d74bcefaaf2..4235b4b1612 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp @@ -55,14 +55,6 @@ class ShenandoahGenerationalFullGC { // free set. static void compute_balances(); - // Rebuilding the free set may have resulted in regions being pulled in to the old generation - // evacuation reserve. For this reason, we must update the usage and capacity of the generations - // again. In the distant past, the free set did not know anything about generations, so we had - // a layer built above it to represent how much young/old memory was available. This layer is - // redundant and adds complexity. We would like to one day remove it. Until then, we must keep it - // synchronized with the free set's view of things. - static void balance_generations_after_rebuilding_free_set(); - // Logs the number of live bytes marked in the old generation. This is _not_ the same // value used as the baseline for the old generation _after_ the full gc is complete. // The value reported in the logs does not include objects and regions that may be diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 3d46c2ea4b5..68b5017bebe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -566,40 +566,12 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) { retire_plab(plab, thread); } -ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() { - shenandoah_assert_heaplocked_or_safepoint(); - - ShenandoahOldGeneration* old_gen = old_generation(); - const ssize_t old_region_balance = old_gen->get_region_balance(); - old_gen->set_region_balance(0); - - if (old_region_balance > 0) { - const auto old_region_surplus = checked_cast(old_region_balance); - const bool success = generation_sizer()->transfer_to_young(old_region_surplus); - return TransferResult { - success, old_region_surplus, "young" - }; - } - - if (old_region_balance < 0) { - const auto old_region_deficit = checked_cast(-old_region_balance); - const bool success = generation_sizer()->transfer_to_old(old_region_deficit); - if (!success) { - old_gen->handle_failed_transfer(); - } - return TransferResult { - success, old_region_deficit, "old" - }; - } - - return TransferResult {true, 0, "none"}; -} - // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to -// xfer_limit, and any surplus is transferred to the young generation. -// xfer_limit is the maximum we're able to transfer from young to old. -void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) { +// mutator_xfer_limit, and any surplus is transferred to the young generation. +// mutator_xfer_limit is the maximum we're able to transfer from young to old. 
+void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit, + size_t old_cset_regions, size_t young_cset_regions) { // We can limit the old reserve to the size of anticipated promotions: // max_old_reserve is an upper bound on memory evacuated from old and promoted to old, @@ -619,79 +591,159 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // We have to be careful in the event that SOEP is set to 100 by the user. assert(ShenandoahOldEvacRatioPercent <= 100, "Error"); - const size_t old_available = old_generation()->available(); - // The free set will reserve this amount of memory to hold young evacuations - const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + const size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes; + const size_t young_available = young_generation()->available() + young_cset_regions * region_size_bytes; - // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. + // The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve) + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; - const size_t bound_on_old_reserve = old_available + old_xfer_limit + young_reserve; - const size_t max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? - bound_on_old_reserve: MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), - bound_on_old_reserve); + // If ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve + const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacRatioPercent) / 100; + size_t proposed_max_old = ((ShenandoahOldEvacRatioPercent == 100)? + bound_on_old_reserve: + MIN2((young_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent), + bound_on_old_reserve)); - const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + if (young_reserve > young_available) { + young_reserve = young_available; + } // Decide how much old space we should reserve for a mixed collection size_t reserve_for_mixed = 0; - if (old_generation()->has_unprocessed_collection_candidates()) { + const size_t old_fragmented_available = + old_available - (old_generation()->free_unaffiliated_regions() + old_cset_regions) * region_size_bytes; + + if (old_fragmented_available > proposed_max_old) { + // After we've promoted regions in place, there may be an abundance of old-fragmented available memory, + // even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back + // to young. Instead we make the best of the situation by using this fragmented memory for both promotions + // and evacuations. + proposed_max_old = old_fragmented_available; + } + size_t reserve_for_promo = old_fragmented_available; + const size_t max_old_reserve = proposed_max_old; + const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory(); + const bool doing_mixed = (mixed_candidate_live_memory > 0); + if (doing_mixed) { // We want this much memory to be unfragmented in order to reliably evacuate old. 
    // This is conservative because we
    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
-    const size_t max_evac_need = (size_t)
-      (old_generation()->unprocessed_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
+    const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
     assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
            "Unaffiliated available must be less than total available");
-    const size_t old_fragmented_available =
-      old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes;
-    reserve_for_mixed = max_evac_need + old_fragmented_available;
-    if (reserve_for_mixed > max_old_reserve) {
-      reserve_for_mixed = max_old_reserve;
+
+    // We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
+    // we already have too much fragmented available memory in old.
+    reserve_for_mixed = max_evac_need;
+    if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
+      // In this case, we'll allow old-evac to target some of the fragmented old memory.
+      size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
+      if (reserve_for_promo > excess_reserves) {
+        reserve_for_promo -= excess_reserves;
+      } else {
+        excess_reserves -= reserve_for_promo;
+        reserve_for_promo = 0;
+        reserve_for_mixed -= excess_reserves;
+      }
     }
   }

-  // Decide how much space we should reserve for promotions from young
-  size_t reserve_for_promo = 0;
+  // Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
+  // over promotions.
   const size_t promo_load = old_generation()->get_promotion_potential();
   const bool doing_promotions = promo_load > 0;
   if (doing_promotions) {
-    // We're promoting and have a bound on the maximum amount that can be promoted
-    assert(max_old_reserve >= reserve_for_mixed, "Sanity");
-    const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
-    reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
+    // We've already set aside all of the fragmented available memory within old-gen to represent old objects
+    // to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted
+    // from regions that have reached tenure age. In the ideal, we will always use fragmented old-gen memory
+    // to hold individually promoted objects and will use unfragmented old-gen memory to represent the old-gen
+    // evacuation workload.
+
+    // We're promoting and have an estimate of memory to be promoted from aged regions
+    assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
+    const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
+    size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
+    if (promo_need > reserve_for_promo) {
+      reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
+    }
+    // We've already reserved all the memory required for the promo_load, and possibly more. The excess
+    // can be consumed by objects promoted from regions that have not yet reached tenure age.
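(Editorial illustration, not part of this patch.) A minimal sketch of the top-up rule above, with a hypothetical helper and figures: if 10 MiB of fragmented old memory is already earmarked for promotion and the scaled promotion load needs 18 MiB, the reserve grows by the 8 MiB shortfall, bounded by whatever headroom remains under max_old_reserve.

// Hypothetical stand-in for the promotion-reserve top-up.
#include <algorithm>
#include <cstddef>

size_t topped_up_promo_reserve(size_t reserve_for_promo,                     // e.g. 10 MiB of fragmented old memory
                               size_t promo_need,                            // promo_load scaled by the waste factor
                               size_t available_for_additional_promotions) { // headroom left under max_old_reserve
  if (promo_need > reserve_for_promo) {
    reserve_for_promo += std::min(promo_need - reserve_for_promo, available_for_additional_promotions);
  }
  return reserve_for_promo;
}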
} - // This is the total old we want to ideally reserve - const size_t old_reserve = reserve_for_mixed + reserve_for_promo; - assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); + // This is the total old we want to reserve (initialized to the ideal reserve) + size_t old_reserve = reserve_for_mixed + reserve_for_promo; // We now check if the old generation is running a surplus or a deficit. - const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes; - if (max_old_available >= old_reserve) { + size_t old_region_deficit = 0; + size_t old_region_surplus = 0; + + size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes; + // align the mutator_xfer_limit on region size + mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes; + if (old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young - const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes; + const size_t old_surplus = old_available - old_reserve; + old_region_surplus = old_surplus / region_size_bytes; const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; - const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions); + old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions); old_generation()->set_region_balance(checked_cast(old_region_surplus)); + } else if (old_available + mutator_xfer_limit >= old_reserve) { + // Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there + size_t old_deficit = old_reserve - old_available; + old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; + old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); } else { - // We are running a deficit which we'd like to fill from young. - // Ignore that this will directly impact young_generation()->max_capacity(), - // indirectly impacting young_reserve and old_reserve. These computations are conservative. - // Note that deficit is rounded up by one region. - const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes; - const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes; - - // Round down the regions we can transfer from young to old. If we're running short - // on young-gen memory, we restrict the xfer. Old-gen collection activities will be - // curtailed if the budget is restricted. - const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer); + // We'll try to xfer from both mutator excess and from young collector reserve + size_t available_reserves = old_available + young_reserve + mutator_xfer_limit; + size_t old_entitlement = (available_reserves * ShenandoahOldEvacRatioPercent) / 100; + + // Round old_entitlement down to nearest multiple of regions to be transferred to old + size_t entitled_xfer = old_entitlement - old_available; + entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes); + size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions(); + size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes; + if (entitled_xfer > unaffiliated_young_memory) { + entitled_xfer = unaffiliated_young_memory; + } + old_entitlement = old_available + entitled_xfer; + if (old_entitlement < old_reserve) { + // There's not enough memory to satisfy our desire. Scale back our old-gen intentions. 
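(Editorial illustration, not part of this patch.) The scale-back that follows gives way in a fixed order: the promotion reserve absorbs the overrun first, then the mixed-evacuation reserve. A hypothetical sketch with made-up numbers: an 80 MiB request (30 MiB promo + 50 MiB mixed) against a 60 MiB entitlement loses its 20 MiB overrun entirely from the promotion reserve, leaving 10 MiB promo + 50 MiB mixed.

// Hypothetical stand-in for the scale-back below; OldBudget is not a JDK type.
#include <cstddef>

struct OldBudget { size_t promo; size_t mixed; };

OldBudget scale_back_old_budget(OldBudget budget, size_t old_entitlement) {
  size_t old_reserve = budget.promo + budget.mixed;
  if (old_entitlement >= old_reserve) {
    return budget;                         // nothing to scale back
  }
  size_t overrun = old_reserve - old_entitlement;
  if (budget.promo > overrun) {
    budget.promo -= overrun;               // promotion reserve absorbs the whole overrun
  } else {
    overrun -= budget.promo;
    budget.promo = 0;
    budget.mixed = (budget.mixed > overrun) ? budget.mixed - overrun : 0;
  }
  return budget;
}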
+      size_t budget_overrun = old_reserve - old_entitlement;
+      if (reserve_for_promo > budget_overrun) {
+        reserve_for_promo -= budget_overrun;
+        old_reserve -= budget_overrun;
+      } else {
+        budget_overrun -= reserve_for_promo;
+        reserve_for_promo = 0;
+        reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
+        old_reserve = reserve_for_promo + reserve_for_mixed;
+      }
+    }
+
+    // Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch
+    // condition above: "(old_available + mutator_xfer_limit >= old_reserve)".
+    // Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available
+
+    size_t old_deficit = old_reserve - old_available;
+    old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
+
+    // Shrink young_reserve to account for loan to old reserve
+    const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
+    young_reserve -= reserve_xfer_regions * region_size_bytes;
+    old_generation()->set_region_balance(0 - checked_cast(old_region_deficit));
+  }
-}
-void ShenandoahGenerationalHeap::reset_generation_reserves() {
-  young_generation()->set_evacuation_reserve(0);
-  old_generation()->set_evacuation_reserve(0);
-  old_generation()->set_promoted_reserve(0);
+  assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
+  assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
+         "Cannot reserve more memory than is available: " SIZE_FORMAT " + " SIZE_FORMAT " + " SIZE_FORMAT " <= "
+         SIZE_FORMAT " + " SIZE_FORMAT, young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);
+
+  // deficit/surplus adjustments to generation sizes will precede rebuild
+  young_generation()->set_evacuation_reserve(young_reserve);
+  old_generation()->set_evacuation_reserve(reserve_for_mixed);
+  old_generation()->set_promoted_reserve(reserve_for_promo);
 }

 void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outputStream* ss) const {
@@ -1062,19 +1114,6 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
     // a more detailed explanation.
     old_generation()->transfer_pointers_from_satb();
   }
-
-  // We defer generation resizing actions until after cset regions have been recycled.
-  TransferResult result = balance_generations();
-  LogTarget(Info, gc, ergo) lt;
-  if (lt.is_enabled()) {
-    LogStream ls(lt);
-    result.print_on("Degenerated GC", &ls);
-  }
-
-  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
-  // transient state. Otherwise, these actions have no effect.
-  reset_generation_reserves();
-
   if (!old_generation()->is_parseable()) {
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
     coalesce_and_fill_old_regions(false);
@@ -1092,20 +1131,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
   // throw off the heuristics.
entry_global_coalesce_and_fill(); } - - TransferResult result; - { - ShenandoahHeapLocker locker(lock()); - - result = balance_generations(); - reset_generation_reserves(); - } - - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Concurrent GC", &ls); - } } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index 8318aebf286..232dea6617a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -118,14 +118,8 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; } - // Zeros out the evacuation and promotion reserves - void reset_generation_reserves(); - // Computes the optimal size for the old generation, represented as a surplus or deficit of old regions - void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions); - - // Transfers surplus old regions to young, or takes regions from young to satisfy old region deficit - TransferResult balance_generations(); + void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions, size_t young_cset_regions); // Balances generations, coalesces and fills old regions if necessary void complete_degenerated_cycle(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 688627f98a1..b7868aa2cae 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -409,12 +409,14 @@ jint ShenandoahHeap::initialize() { // Initialize to complete _marking_context->mark_complete(); - size_t young_cset_regions, old_cset_regions; + _free_set->rebuild(); - // We are initializing free set. We ignore cset region tallies. - size_t first_old, last_old, num_old; - _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); - _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); + if (mode()->is_generational()) { + size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100; + young_generation()->set_evacuation_reserve(young_reserve); + old_generation()->set_evacuation_reserve((size_t) 0); + old_generation()->set_promoted_reserve((size_t) 0); + } } if (AlwaysPreTouch) { @@ -2463,6 +2465,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { ShenandoahHeapLocker locker(lock()); size_t young_cset_regions, old_cset_regions; size_t first_old_region, last_old_region, old_region_count; + _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); // If there are no old regions, first_old_region will be greater than last_old_region assert((first_old_region > last_old_region) || @@ -2482,15 +2485,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { // available for transfer to old. Note that transfer of humongous regions does not impact available. 
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); - gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions); - - // Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available - // memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular - // regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation - // will decrease as promote-by-copy consumes the available memory within these partially consumed regions. - // - // We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides - // within partially consumed regions of memory. + gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions, young_cset_regions); } // Rebuild free set based on adjusted generation sizes. _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 727128d4382..86f3c3aa207 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -141,30 +141,11 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { // entry_class_unloading(); // } - assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc."); - // We must execute this vm operation if we completed final mark. We cannot - // return from here with weak roots in progress. This is not a valid gc state - // for any young collections (or allocation failures) that interrupt the old - // collection. + // We must execute this vm operation if we completed final mark. We cannot return from here with weak roots in progress. + // This is not a valid gc state for any young collections (or allocation failures) that interrupt the old collection. + // This will reclaim immediate garbage. vmop_entry_final_roots() will also rebuild the free set. vmop_entry_final_roots(); - - // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may - // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index 727128d4382..86f3c3aa207 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -141,30 +141,11 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
   //      entry_class_unloading();
   //    }
-  assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc.");
-  // We must execute this vm operation if we completed final mark. We cannot
-  // return from here with weak roots in progress. This is not a valid gc state
-  // for any young collections (or allocation failures) that interrupt the old
-  // collection.
+  // We must execute this vm operation if we completed final mark. We cannot return from here with weak roots in progress.
+  // This is not a valid gc state for any young collections (or allocation failures) that interrupt the old collection.
+  // This will reclaim immediate garbage. vmop_entry_final_roots() will also rebuild the free set.
   vmop_entry_final_roots();
-
-  // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may
-  // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow.
-  size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
-  heap->compute_old_generation_balance(allocation_runway, 0);
-
-  ShenandoahGenerationalHeap::TransferResult result;
-  {
-    ShenandoahHeapLocker locker(heap->lock());
-    result = heap->balance_generations();
-  }
-
-  LogTarget(Info, gc, ergo) lt;
-  if (lt.is_enabled()) {
-    LogStream ls(lt);
-    result.print_on("Old Mark", &ls);
-  }
   return true;
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index fe2954cdb8e..41149ab8631 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -463,21 +463,6 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
     ShenandoahHeapLocker locker(heap->lock());
     _old_heuristics->prepare_for_old_collections();
   }
-
-  {
-    // Though we did not choose a collection set above, we still may have
-    // freed up immediate garbage regions so proceed with rebuilding the free set.
-    ShenandoahGCPhase phase(concurrent ?
-                            ShenandoahPhaseTimings::final_rebuild_freeset :
-                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
-    ShenandoahHeapLocker locker(heap->lock());
-    size_t cset_young_regions, cset_old_regions;
-    size_t first_old, last_old, num_old;
-    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
-    // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here
-    // is in case there was any immediate old garbage identified.
-    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
-  }
 }
 
 const char* ShenandoahOldGeneration::state_name(State state) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index f20e470d9d3..cbd24062b4d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -63,8 +63,8 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
   // remaining in a PLAB when it is retired.
   size_t _promoted_expended;
-  // Represents the quantity of live bytes we expect to promote in place during the next
-  // evacuation cycle. This value is used by the young heuristic to trigger mixed collections.
+  // Represents the quantity of live bytes we expect to promote during the next GC cycle, either by
+  // evacuation or by promote-in-place. This value is used by the young heuristic to trigger mixed collections.
   // It is also used when computing the optimum size for the old generation.
   size_t _promotion_potential;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index e9a16a45ac6..35d844a6b04 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -380,7 +380,14 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure
   // span is the total memory affiliated with these stats (some of which is in use and other is available)
   size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); }
-  size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); }
+  size_t non_trashed_span() const {
+    assert(_regions >= _trashed_regions, "sanity");
+    return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes();
+  }
+  size_t non_trashed_committed() const {
+    assert(_committed >= _trashed_regions * ShenandoahHeapRegion::region_size_bytes(), "sanity");
+    return _committed - (_trashed_regions * ShenandoahHeapRegion::region_size_bytes());
+  }
 };
 
 class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
@@ -418,11 +425,16 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
     ShenandoahHeap* heap = ShenandoahHeap::heap();
     size_t generation_used = generation->used();
     size_t generation_used_regions = generation->used_regions();
+    size_t generation_max_capacity = generation->max_capacity();
     if (adjust_for_padding && (generation->is_young() || generation->is_global())) {
      size_t pad = heap->old_generation()->get_pad_for_promote_in_place();
      generation_used += pad;
     }
+    guarantee(stats.non_trashed_committed() <= generation_max_capacity,
+              "%s: generation (%s) non_trashed_committed: " PROPERFMT " must not exceed generation capacity: " PROPERFMT,
+              label, generation->name(), PROPERFMTARGS(stats.non_trashed_committed()), PROPERFMTARGS(generation_max_capacity));
+
     guarantee(stats.used() == generation_used,
               "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT,
               label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used()));
@@ -437,7 +449,6 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
               ") must not exceed current capacity (" PROPERFMT ")",
               label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()),
               PROPERFMTARGS(generation_capacity));
-
     size_t humongous_waste = generation->get_humongous_waste();
     guarantee(stats.waste() == humongous_waste,
               "%s: generation (%s) humongous waste must be consistent: generation: " PROPERFMT ", regions: " PROPERFMT,
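The arithmetic behind non_trashed_committed() and the new guarantee is easier to see with concrete numbers; the figures below are hypothetical and serve only as an illustration:

    // Hypothetical figures, only to illustrate non_trashed_committed() and the new verifier check.
    size_t region_size_bytes     = 4 * 1024 * 1024;                                  // assumed region size
    size_t committed             = 40 * region_size_bytes;                           // committed bytes attributed to this generation
    size_t trashed_regions       = 3;                                                // immediate-garbage regions not yet recycled
    size_t non_trashed_committed = committed - trashed_regions * region_size_bytes;  // 37 regions' worth
    // The generation closure now guarantees: non_trashed_committed <= generation->max_capacity().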
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index dc7fbe284b8..8b1453d6210 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -380,26 +380,42 @@
           "runs out of memory too early.") \
                                            \
   product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \
-          "The maximum proportion of evacuation from old-gen memory, " \
-          "expressed as a percentage. The default value 75 denotes that no" \
-          "more than 75% of the collection set evacuation workload may be " \
-          "towards evacuation of old-gen heap regions. This limits both the"\
-          "promotion of aged regions and the compaction of existing old " \
-          "regions. A value of 75 denotes that the total evacuation work" \
-          "may increase to up to four times the young gen evacuation work." \
-          "A larger value allows quicker promotion and allows" \
-          "a smaller number of mixed evacuations to process " \
-          "the entire list of old-gen collection candidates at the cost " \
-          "of an increased disruption of the normal cadence of young-gen " \
-          "collections. A value of 100 allows a mixed evacuation to " \
-          "focus entirely on old-gen memory, allowing no young-gen " \
-          "regions to be collected, likely resulting in subsequent " \
-          "allocation failures because the allocation pool is not " \
-          "replenished. A value of 0 allows a mixed evacuation to" \
-          "focus entirely on young-gen memory, allowing no old-gen " \
-          "regions to be collected, likely resulting in subsequent " \
-          "promotion failures and triggering of stop-the-world full GC " \
-          "events.") \
+          "The maximum percent of memory that can be reserved for " \
+          "evacuation into old generation. With the default setting, " \
+          "given a total evacuation budget of X, the amount of memory " \
+          "reserved to hold objects evacuated to old generation is 0.75 * X. " \
+          "This limits both the promotion of aged young regions and " \
+          "the compaction of existing old regions. It does not restrict " \
+          "the collector from copying more objects into old-generation " \
+          "memory if the young-generation collection set does not consume " \
+          "all of the memory originally reserved for young-generation " \
+          "evacuation. It also does not restrict the amount of memory " \
+          "that can be promoted in place, by simply changing the " \
+          "affiliation of the region from young to old. If there is an " \
+          "abundance of free memory, this will result in a larger total " \
+          "evacuation effort, roughly quadrupling the amount of memory " \
+          "normally evacuated during young evacuations (so that old " \
+          "evacuates three times as much as young, and young evacuates its " \
+          "normal amount). If free memory is in short supply, this may " \
+          "result in paring back both young-gen and old-gen evacuations, " \
+          "such that the fraction of old is 75% (in the default " \
+          "configuration) of the total available evacuation reserve, " \
+          "with young evacuating one fourth of its normal amount, " \
+          "and old evacuating three times as much as young evacuates. " \
+          "Setting a larger value allows for quicker promotion and a " \
+          "smaller number of mixed evacuations to process the entire list " \
+          "of old-gen collection candidates at the cost of increased " \
+          "disruption of the normal young-gen collection cadence. A " \
+          "value of 100 allows a mixed evacuation to focus entirely " \
+          "on old-gen memory, allowing no young-gen regions to be " \
+          "collected. This would likely result in subsequent allocation " \
+          "failures because the young-gen allocation pool would not be " \
+          "replenished. A value of 0 prevents mixed evacuations " \
+          "from defragmenting old-gen memory, likely resulting in " \
+          "subsequent promotion failures and triggering of stop-the-world " \
+          "full GC events. Failure to defragment old-gen memory can also " \
+          "result in unconstrained expansion of old-gen, and shrinkage of " \
+          "young-gen, causing an inefficiently high frequency of young-gen GCs.") \
           range(0,100) \
                        \
   product(uintx, ShenandoahMinYoungPercentage, 20, EXPERIMENTAL, \
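To make the ratio arithmetic in the description above concrete: with the default ShenandoahOldEvacRatioPercent of 75 and a hypothetical total evacuation reserve, the split works out as sketched below (an illustration of the documented semantics, not the collector's actual reservation code):

    size_t total_evac_reserve = 400 * M;                                                    // hypothetical total reserve
    size_t old_reserve        = (total_evac_reserve * ShenandoahOldEvacRatioPercent) / 100; // 300 * M with the default 75
    size_t young_reserve      = total_evac_reserve - old_reserve;                           // 100 * M
    // At the default setting, old-gen may receive up to three times as much evacuation as young-gen;
    // at 100 the entire reserve could go to old-gen (leaving young unreplenished), at 0 none of it may.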
diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
index d2bb9108fa9..e826511d42c 100644
--- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
+++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
@@ -201,8 +201,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) {
   size_t garbage = make_garbage_above_collection_threshold(10);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(10UL));
   EXPECT_EQ(garbage, _collection_set->get_old_garbage());
   EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates());
@@ -214,8 +215,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) {
   size_t g1 = make_garbage_above_collection_threshold(100);
   size_t g2 = make_garbage_above_collection_threshold(101);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(100UL, 101UL));
   EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage());
   EXPECT_EQ(0U, _heuristics->unprocessed_old_collection_candidates());
@@ -226,8 +228,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) {
   size_t garbage = create_too_much_garbage_for_one_mixed_evacuation();
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_LT(_collection_set->get_old_garbage(), garbage);
   EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL);
 }
@@ -248,7 +251,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
   ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates());
 
   // Here the region is still pinned, so it cannot be added to the collection set.
-  _heuristics->prime_collection_set(_collection_set);
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
 
   // The two unpinned regions should be added to the collection set and the pinned
   // region should be retained at the front of the list of candidates as it would be
@@ -261,8 +266,10 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
   // the now unpinned region should be added to the collection set.
   make_unpinned(1);
   _collection_set->clear();
-  _heuristics->prime_collection_set(_collection_set);
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_EQ(_collection_set->get_old_garbage(), g2);
   EXPECT_TRUE(collection_set_is(1UL));
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@@ -278,14 +285,18 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) {
   make_pinned(0);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(1UL, 2UL));
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL);
 
   make_unpinned(0);
   _collection_set->clear();
-  _heuristics->prime_collection_set(_collection_set);
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(0UL));
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@@ -301,16 +312,18 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) {
   make_pinned(2);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(0UL, 1UL));
   EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2);
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL);
 
   make_unpinned(2);
   _collection_set->clear();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(2UL));
   EXPECT_EQ(_collection_set->get_old_garbage(), g3);
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@@ -327,8 +340,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
   make_pinned(0);
   make_pinned(2);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(1UL));
   EXPECT_EQ(_collection_set->get_old_garbage(), g2);
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 2UL);
@@ -336,8 +350,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
   make_unpinned(0);
   make_unpinned(2);
   _collection_set->clear();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   EXPECT_TRUE(collection_set_is(0UL, 2UL));
   EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3);
   EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@@ -354,8 +369,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) {
   make_pinned(1);
   make_pinned(2);
   _heuristics->prepare_for_old_collections();
-  _heuristics->prime_collection_set(_collection_set);
-
+  if (_heuristics->prime_collection_set(_collection_set)) {
+    _heuristics->finalize_mixed_evacs();
+  }
   // In the case when all candidates are pinned, we want to abandon
   // this set of mixed collection candidates so that another old collection
   // can run. This is meant to defend against "bad" JNI code that permanently