8342444: Shenandoah: Uncommit regions from a separate, STS aware thread
Reviewed-by: shade, kdnilsen, ysr
William Kemper committed Dec 5, 2024
1 parent dbf48a5 commit bedb68a
Showing 9 changed files with 420 additions and 127 deletions.
40 changes: 12 additions & 28 deletions src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -56,15 +56,8 @@ void ShenandoahControlThread::run_service() {
const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
int sleep = ShenandoahControlIntervalMin;

double last_shrink_time = os::elapsedTime();
double last_sleep_adjust_time = os::elapsedTime();

// Shrink period avoids constantly polling regions for shrinking.
// Having a period 10x lower than the delay would mean we hit the
// shrinking with lag of less than 1/10-th of true delay.
// ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();
ShenandoahHeuristics* const heuristics = heap->heuristics();
while (!in_graceful_shutdown() && !should_terminate()) {
@@ -76,9 +69,6 @@ void ShenandoahControlThread::run_service() {
// This control loop iteration has seen this much allocation.
const size_t allocs_seen = reset_allocs_seen();

// Check if we have seen a new target for soft max heap size.
const bool soft_max_changed = heap->check_soft_max_changed();

// Choose which GC mode to run in. The block below should select a single mode.
GCMode mode = none;
GCCause::Cause cause = GCCause::_last_gc_cause;
@@ -136,6 +126,9 @@ void ShenandoahControlThread::run_service() {
assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

if (gc_requested) {
// Cannot uncommit bitmap slices during concurrent reset
ShenandoahNoUncommitMark forbid_region_uncommit(heap);

// GC is starting, bump the internal ID
update_gc_id();

@@ -238,29 +231,20 @@
}
}

const double current = os::elapsedTime();

if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
// Explicit GC tries to uncommit everything down to min capacity.
// Soft max change tries to uncommit everything down to target capacity.
// Periodic uncommit tries to uncommit suitable regions down to min capacity.

double shrink_before = (is_gc_requested || soft_max_changed) ?
current :
current - (ShenandoahUncommitDelay / 1000.0);

size_t shrink_until = soft_max_changed ?
heap->soft_max_capacity() :
heap->min_capacity();

heap->maybe_uncommit(shrink_before, shrink_until);
heap->phase_timings()->flush_cycle_to_global();
last_shrink_time = current;
// Check if we have seen a new target for soft max heap size or if a gc was requested.
// Either of these conditions will attempt to uncommit regions.
if (ShenandoahUncommit) {
if (heap->check_soft_max_changed()) {
heap->notify_soft_max_changed();
} else if (is_gc_requested) {
heap->notify_explicit_gc_requested();
}
}

// Wait before performing the next action. If allocation happened during this wait,
// we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
// back off exponentially.
const double current = os::elapsedTime();
if (heap->has_changed()) {
sleep = ShenandoahControlIntervalMin;
} else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){
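The periodic shrink polling removed above (last_shrink_time, shrink_period) moves into the new ShenandoahUncommitThread, whose source is part of this commit but not shown in this excerpt. The following is only a sketch of how such a service loop could react to the notifications the control thread now sends; the monitor, flag members, and helper names (_uncommit_lock, _soft_max_changed, _explicit_gc_requested, should_uncommit, uncommit) are assumptions for illustration, not the committed implementation.

// Sketch only: illustrates a notification-driven uncommit loop; member and
// helper names are assumed, not taken from the real shenandoahUncommitThread.cpp.
void ShenandoahUncommitThread::run_service() {
  while (!should_terminate()) {
    // Explicit GC uncommits down to min capacity; a soft max change uncommits
    // down to the new soft max target (same policy as the removed control-thread code).
    const size_t shrink_until = _soft_max_changed
        ? _heap->soft_max_capacity()
        : _heap->min_capacity();

    // Periodic uncommit only touches regions that have been empty for longer
    // than ShenandoahUncommitDelay; explicit requests uncommit immediately.
    const double shrink_before = (_explicit_gc_requested || _soft_max_changed)
        ? os::elapsedTime()
        : os::elapsedTime() - (ShenandoahUncommitDelay / 1000.0);

    if (should_uncommit(shrink_before, shrink_until)) {
      uncommit(shrink_before, shrink_until);
    }

    // Sleep until notified via notify_soft_max_changed() or
    // notify_explicit_gc_requested(), or until the next periodic check is due.
    MonitorLocker ml(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
    ml.wait((int64_t)ShenandoahUncommitDelay);
  }
}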
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -73,8 +73,9 @@ class ShenandoahResetBitmapTask : public WorkerTask {
WorkerTask("Shenandoah Reset Bitmap"), _generation(generation) {}

void work(uint worker_id) {
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(!heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them.");
ShenandoahHeapRegion* region = _regions.next();
ShenandoahMarkingContext* const ctx = heap->marking_context();
while (region != nullptr) {
auto const affiliation = region->affiliation();
src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
@@ -67,15 +67,8 @@ void ShenandoahGenerationalControlThread::run_service() {
const GCMode default_mode = concurrent_normal;
ShenandoahGenerationType generation = GLOBAL;

double last_shrink_time = os::elapsedTime();
uint age_period = 0;

// Shrink period avoids constantly polling regions for shrinking.
// Having a period 10x lower than the delay would mean we hit the
// shrinking with lag of less than 1/10-th of true delay.
// ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy();

// Heuristics are notified of allocation failures here and other outcomes
@@ -191,6 +184,9 @@ void ShenandoahGenerationalControlThread::run_service() {
assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set");

if (gc_requested) {
// Cannot uncommit bitmap slices during concurrent reset
ShenandoahNoUncommitMark forbid_region_uncommit(heap);

// Blow away all soft references on this cycle, if handling allocation failure,
// either implicit or explicit GC request, or we are requested to do so unconditionally.
if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) {
@@ -303,24 +299,14 @@ void ShenandoahGenerationalControlThread::run_service() {
}
}

const double current = os::elapsedTime();

if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
// Explicit GC tries to uncommit everything down to min capacity.
// Soft max change tries to uncommit everything down to target capacity.
// Periodic uncommit tries to uncommit suitable regions down to min capacity.

double shrink_before = (is_gc_requested || soft_max_changed) ?
current :
current - (ShenandoahUncommitDelay / 1000.0);

size_t shrink_until = soft_max_changed ?
heap->soft_max_capacity() :
heap->min_capacity();

heap->maybe_uncommit(shrink_before, shrink_until);
heap->phase_timings()->flush_cycle_to_global();
last_shrink_time = current;
// Check if we have seen a new target for soft max heap size or if a gc was requested.
// Either of these conditions will attempt to uncommit regions.
if (ShenandoahUncommit) {
if (heap->check_soft_max_changed()) {
heap->notify_soft_max_changed();
} else if (is_gc_requested) {
heap->notify_explicit_gc_requested();
}
}

// Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle.
116 changes: 54 additions & 62 deletions src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -25,8 +25,10 @@
*/

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "cds/archiveHeapWriter.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.hpp"
@@ -42,17 +44,16 @@
#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
@@ -65,31 +66,31 @@
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUncommitThread.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#include "utilities/globalDefinitions.hpp"

#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "cds/archiveHeapWriter.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "memory/allocation.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -102,6 +103,7 @@
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

@@ -459,6 +461,10 @@ jint ShenandoahHeap::initialize() {

initialize_controller();

if (ShenandoahUncommit) {
_uncommit_thread = new ShenandoahUncommitThread(this);
}

print_init_logger();

FullGCForwarding::initialize(_heap_region);
@@ -530,6 +536,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_update_refs_iterator(this),
_global_generation(nullptr),
_control_thread(nullptr),
_uncommit_thread(nullptr),
_young_generation(nullptr),
_old_generation(nullptr),
_shenandoah_policy(policy),
@@ -800,60 +807,15 @@ bool ShenandoahHeap::is_in(const void* p) const {
}
}

void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) {
assert (ShenandoahUncommit, "should be enabled");

// Determine if there is work to do. This avoids taking heap lock if there is
// no work available, avoids spamming logs with superfluous logging messages,
// and minimises the amount of work while locks are taken.

if (committed() <= shrink_until) return;

bool has_work = false;
for (size_t i = 0; i < num_regions(); i++) {
ShenandoahHeapRegion* r = get_region(i);
if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
has_work = true;
break;
}
}

if (has_work) {
static const char* msg = "Concurrent uncommit";
ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
EventMark em("%s", msg);

op_uncommit(shrink_before, shrink_until);
}
}

void ShenandoahHeap::notify_soft_max_changed() {
if (_uncommit_thread != nullptr) {
_uncommit_thread->notify_soft_max_changed();
}
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
assert (ShenandoahUncommit, "should be enabled");

// Application allocates from the beginning of the heap, and GC allocates at
// the end of it. It is more efficient to uncommit from the end, so that applications
// could enjoy the near committed regions. GC allocations are much less frequent,
// and therefore can accept the committing costs.

size_t count = 0;
for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
ShenandoahHeapRegion* r = get_region(i - 1);
if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
ShenandoahHeapLocker locker(lock());
if (r->is_empty_committed()) {
if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
break;
}

r->make_uncommitted();
count++;
}
}
SpinPause(); // allow allocators to take the lock
}

if (count > 0) {
notify_heap_changed();
}
}

void ShenandoahHeap::notify_explicit_gc_requested() {
if (_uncommit_thread != nullptr) {
_uncommit_thread->notify_explicit_gc_requested();
}
}
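The region walk that op_uncommit performed above is not deleted outright; it is relocated into the new uncommit thread. Below is a sketch of what that loop could look like under the same policy, with the SuspendibleThreadSet participation that the commit title ("STS aware thread") refers to. The method signature on the thread class and the exact yield placement are assumptions; the per-region logic mirrors the removed code.

// Sketch: per-region uncommit loop, based on the removed ShenandoahHeap::op_uncommit.
// The SuspendibleThreadSetJoiner lets this thread yield to safepoints.
bool ShenandoahUncommitThread::uncommit(double shrink_before, size_t shrink_until) {
  SuspendibleThreadSetJoiner sts_joiner;   // participate in the safepoint protocol
  size_t count = 0;
  // Uncommit from the end of the heap so the application keeps allocating in the
  // near, already-committed regions (same rationale as the removed code).
  for (size_t i = _heap->num_regions(); i > 0; i--) {  // care about size_t underflow
    ShenandoahHeapRegion* r = _heap->get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(_heap->lock());
      if (r->is_empty_committed()) {
        if (_heap->committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }
        r->make_uncommitted();
        count++;
      }
    }
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();       // let a pending safepoint proceed
    }
  }
  if (count > 0) {
    _heap->notify_heap_changed();          // as in the removed op_uncommit
  }
  return count > 0;
}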

@@ -1507,6 +1469,10 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
tcl->do_thread(_control_thread);
}

if (_uncommit_thread != nullptr) {
tcl->do_thread(_uncommit_thread);
}

workers()->threads_do(tcl);
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(tcl);
@@ -2094,6 +2060,11 @@ void ShenandoahHeap::stop() {

// Step 3. Wait until GC worker exits normally.
control_thread()->stop();

// Step 4. Shut down the uncommit thread.
if (_uncommit_thread != nullptr) {
_uncommit_thread->stop();
}
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
Expand Down Expand Up @@ -2521,7 +2492,7 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {

if (is_bitmap_slice_committed(r, true)) {
// Some other region from the group is still committed, meaning the bitmap
// slice is should stay committed, exit right away.
// slice should stay committed, exit right away.
return true;
}

@@ -2535,6 +2506,27 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
return true;
}

void ShenandoahHeap::forbid_uncommit() {
if (_uncommit_thread != nullptr) {
_uncommit_thread->forbid_uncommit();
}
}

void ShenandoahHeap::allow_uncommit() {
if (_uncommit_thread != nullptr) {
_uncommit_thread->allow_uncommit();
}
}

#ifdef ASSERT
bool ShenandoahHeap::is_uncommit_in_progress() {
if (_uncommit_thread != nullptr) {
return _uncommit_thread->is_uncommit_in_progress();
}
return false;
}
#endif
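forbid_uncommit() and allow_uncommit() pair with the ShenandoahNoUncommitMark that both control threads place before starting a GC cycle (so bitmap slices are not uncommitted during concurrent reset). The mark's definition is not shown in this excerpt; a plausible RAII shape, assuming it lives alongside the other Shenandoah scope marks, could be:

// Sketch of the scope mark used above as
// "ShenandoahNoUncommitMark forbid_region_uncommit(heap);".
// The real class ships with this commit but is not part of this excerpt.
class ShenandoahNoUncommitMark : public StackObj {
  ShenandoahHeap* const _heap;
public:
  ShenandoahNoUncommitMark(ShenandoahHeap* heap) : _heap(heap) {
    _heap->forbid_uncommit();   // block the uncommit thread for this scope
  }
  ~ShenandoahNoUncommitMark() {
    _heap->allow_uncommit();    // re-enable uncommitting when the scope exits
  }
};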

void ShenandoahHeap::safepoint_synchronize_begin() {
StackWatermarkSet::safepoint_synchronize_begin();
SuspendibleThreadSet::synchronize();