diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 8a82498225a95..136ac22d840ff 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -56,15 +56,8 @@ void ShenandoahControlThread::run_service() { const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; int sleep = ShenandoahControlIntervalMin; - double last_shrink_time = os::elapsedTime(); double last_sleep_adjust_time = os::elapsedTime(); - // Shrink period avoids constantly polling regions for shrinking. - // Having a period 10x lower than the delay would mean we hit the - // shrinking with lag of less than 1/10-th of true delay. - // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. - const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; - ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); ShenandoahHeuristics* const heuristics = heap->heuristics(); while (!in_graceful_shutdown() && !should_terminate()) { @@ -76,9 +69,6 @@ void ShenandoahControlThread::run_service() { // This control loop iteration has seen this much allocation. const size_t allocs_seen = reset_allocs_seen(); - // Check if we have seen a new target for soft max heap size. - const bool soft_max_changed = heap->check_soft_max_changed(); - // Choose which GC mode to run in. The block below should select a single mode. 
GCMode mode = none; GCCause::Cause cause = GCCause::_last_gc_cause; @@ -136,6 +126,9 @@ void ShenandoahControlThread::run_service() { assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set"); if (gc_requested) { + // Cannot uncommit bitmap slices during concurrent reset + ShenandoahNoUncommitMark forbid_region_uncommit(heap); + // GC is starting, bump the internal ID update_gc_id(); @@ -238,29 +231,20 @@ void ShenandoahControlThread::run_service() { } } - const double current = os::elapsedTime(); - - if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { - // Explicit GC tries to uncommit everything down to min capacity. - // Soft max change tries to uncommit everything down to target capacity. - // Periodic uncommit tries to uncommit suitable regions down to min capacity. - - double shrink_before = (is_gc_requested || soft_max_changed) ? - current : - current - (ShenandoahUncommitDelay / 1000.0); - - size_t shrink_until = soft_max_changed ? - heap->soft_max_capacity() : - heap->min_capacity(); - - heap->maybe_uncommit(shrink_before, shrink_until); - heap->phase_timings()->flush_cycle_to_global(); - last_shrink_time = current; + // Check if we have seen a new target for soft max heap size or if a gc was requested. + // Either of these conditions will attempt to uncommit regions. + if (ShenandoahUncommit) { + if (heap->check_soft_max_changed()) { + heap->notify_soft_max_changed(); + } else if (is_gc_requested) { + heap->notify_explicit_gc_requested(); + } } // Wait before performing the next action. If allocation happened during this wait, // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, // back off exponentially. 
+ const double current = os::elapsedTime(); if (heap->has_changed()) { sleep = ShenandoahControlIntervalMin; } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 1c644a9acccd7..a48b2baa18f38 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -73,8 +73,9 @@ class ShenandoahResetBitmapTask : public WorkerTask { WorkerTask("Shenandoah Reset Bitmap"), _generation(generation) {} void work(uint worker_id) { - ShenandoahHeapRegion* region = _regions.next(); ShenandoahHeap* heap = ShenandoahHeap::heap(); + assert(!heap->is_uncommit_in_progress(), "Cannot uncommit bitmaps while resetting them."); + ShenandoahHeapRegion* region = _regions.next(); ShenandoahMarkingContext* const ctx = heap->marking_context(); while (region != nullptr) { auto const affiliation = region->affiliation(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp index ef0fbf671a0dd..33af35c6b9555 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp @@ -67,15 +67,8 @@ void ShenandoahGenerationalControlThread::run_service() { const GCMode default_mode = concurrent_normal; ShenandoahGenerationType generation = GLOBAL; - double last_shrink_time = os::elapsedTime(); uint age_period = 0; - // Shrink period avoids constantly polling regions for shrinking. - // Having a period 10x lower than the delay would mean we hit the - // shrinking with lag of less than 1/10-th of true delay. - // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. 
- const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; - ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); // Heuristics are notified of allocation failures here and other outcomes @@ -191,6 +184,9 @@ void ShenandoahGenerationalControlThread::run_service() { assert (!gc_requested || cause != GCCause::_no_gc, "GC cause should be set"); if (gc_requested) { + // Cannot uncommit bitmap slices during concurrent reset + ShenandoahNoUncommitMark forbid_region_uncommit(heap); + // Blow away all soft references on this cycle, if handling allocation failure, // either implicit or explicit GC request, or we are requested to do so unconditionally. if (generation == GLOBAL && (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs)) { @@ -303,24 +299,14 @@ void ShenandoahGenerationalControlThread::run_service() { } } - const double current = os::elapsedTime(); - - if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { - // Explicit GC tries to uncommit everything down to min capacity. - // Soft max change tries to uncommit everything down to target capacity. - // Periodic uncommit tries to uncommit suitable regions down to min capacity. - - double shrink_before = (is_gc_requested || soft_max_changed) ? - current : - current - (ShenandoahUncommitDelay / 1000.0); - - size_t shrink_until = soft_max_changed ? - heap->soft_max_capacity() : - heap->min_capacity(); - - heap->maybe_uncommit(shrink_before, shrink_until); - heap->phase_timings()->flush_cycle_to_global(); - last_shrink_time = current; + // Check if we have seen a new target for soft max heap size or if a gc was requested. + // Either of these conditions will attempt to uncommit regions. 
+ if (ShenandoahUncommit) { + if (heap->check_soft_max_changed()) { + heap->notify_soft_max_changed(); + } else if (is_gc_requested) { + heap->notify_explicit_gc_requested(); + } } // Wait for ShenandoahControlIntervalMax unless there was an allocation failure or another request was made mid-cycle. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 6ef66926b72fa..c1bc9dc661643 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -25,8 +25,10 @@ */ #include "precompiled.hpp" -#include "memory/allocation.hpp" -#include "memory/universe.hpp" + +#include "cds/archiveHeapWriter.hpp" +#include "classfile/systemDictionary.hpp" +#include "code/codeCache.hpp" #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/fullGCForwarding.hpp" @@ -42,17 +44,16 @@ #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp" #include "gc/shenandoah/shenandoahAllocRequest.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" -#include "gc/shenandoah/shenandoahClosures.inline.hpp" +#include "gc/shenandoah/shenandoahCodeRoots.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConcurrentMark.hpp" -#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/shenandoahControlThread.hpp" +#include "gc/shenandoah/shenandoahClosures.inline.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahGlobalGeneration.hpp" -#include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" @@ -65,13 +66,14 @@ #include 
"gc/shenandoah/shenandoahPacer.inline.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" #include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp" +#include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahReferenceProcessor.hpp" #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp" #include "gc/shenandoah/shenandoahSTWMark.hpp" +#include "gc/shenandoah/shenandoahUncommitThread.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahVerifier.hpp" -#include "gc/shenandoah/shenandoahCodeRoots.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" #include "gc/shenandoah/shenandoahWorkGroup.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" @@ -79,17 +81,16 @@ #include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp" #include "gc/shenandoah/mode/shenandoahPassiveMode.hpp" #include "gc/shenandoah/mode/shenandoahSATBMode.hpp" -#include "utilities/globalDefinitions.hpp" #if INCLUDE_JFR #include "gc/shenandoah/shenandoahJfrSupport.hpp" #endif -#include "cds/archiveHeapWriter.hpp" -#include "classfile/systemDictionary.hpp" -#include "code/codeCache.hpp" + +#include "memory/allocation.hpp" #include "memory/classLoaderMetaspace.hpp" #include "memory/metaspaceUtils.hpp" +#include "memory/universe.hpp" #include "nmt/mallocTracker.hpp" #include "nmt/memTracker.hpp" #include "oops/compressedOops.inline.hpp" @@ -102,6 +103,7 @@ #include "runtime/safepointMechanism.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/vmThread.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/events.hpp" #include "utilities/powerOfTwo.hpp" @@ -459,6 +461,10 @@ jint ShenandoahHeap::initialize() { initialize_controller(); + if (ShenandoahUncommit) { + _uncommit_thread = new ShenandoahUncommitThread(this); + } + print_init_logger(); FullGCForwarding::initialize(_heap_region); @@ -530,6 +536,7 @@ 
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : _update_refs_iterator(this), _global_generation(nullptr), _control_thread(nullptr), + _uncommit_thread(nullptr), _young_generation(nullptr), _old_generation(nullptr), _shenandoah_policy(policy), @@ -800,60 +807,15 @@ bool ShenandoahHeap::is_in(const void* p) const { } } -void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) { - assert (ShenandoahUncommit, "should be enabled"); - - // Determine if there is work to do. This avoids taking heap lock if there is - // no work available, avoids spamming logs with superfluous logging messages, - // and minimises the amount of work while locks are taken. - - if (committed() <= shrink_until) return; - - bool has_work = false; - for (size_t i = 0; i < num_regions(); i++) { - ShenandoahHeapRegion* r = get_region(i); - if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { - has_work = true; - break; - } - } - - if (has_work) { - static const char* msg = "Concurrent uncommit"; - ShenandoahConcurrentPhase gcPhase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */); - EventMark em("%s", msg); - - op_uncommit(shrink_before, shrink_until); +void ShenandoahHeap::notify_soft_max_changed() { + if (_uncommit_thread != nullptr) { + _uncommit_thread->notify_soft_max_changed(); } } -void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) { - assert (ShenandoahUncommit, "should be enabled"); - - // Application allocates from the beginning of the heap, and GC allocates at - // the end of it. It is more efficient to uncommit from the end, so that applications - // could enjoy the near committed regions. GC allocations are much less frequent, - // and therefore can accept the committing costs. 
- - size_t count = 0; - for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow - ShenandoahHeapRegion* r = get_region(i - 1); - if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { - ShenandoahHeapLocker locker(lock()); - if (r->is_empty_committed()) { - if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) { - break; - } - - r->make_uncommitted(); - count++; - } - } - SpinPause(); // allow allocators to take the lock - } - - if (count > 0) { - notify_heap_changed(); +void ShenandoahHeap::notify_explicit_gc_requested() { + if (_uncommit_thread != nullptr) { + _uncommit_thread->notify_explicit_gc_requested(); } } @@ -1507,6 +1469,10 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { tcl->do_thread(_control_thread); } + if (_uncommit_thread != nullptr) { + tcl->do_thread(_uncommit_thread); + } + workers()->threads_do(tcl); if (_safepoint_workers != nullptr) { _safepoint_workers->threads_do(tcl); } @@ -2094,6 +2060,11 @@ void ShenandoahHeap::stop() { // Step 3. Wait until GC worker exits normally. control_thread()->stop(); + + // Step 4. Shutdown uncommit thread. + if (_uncommit_thread != nullptr) { + _uncommit_thread->stop(); + } } void ShenandoahHeap::stw_unload_classes(bool full_gc) { @@ -2521,7 +2492,7 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { if (is_bitmap_slice_committed(r, true)) { // Some other region from the group is still committed, meaning the bitmap - // slice is should stay committed, exit right away. 
return true; } @@ -2535,6 +2506,27 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { return true; } +void ShenandoahHeap::forbid_uncommit() { + if (_uncommit_thread != nullptr) { + _uncommit_thread->forbid_uncommit(); + } +} + +void ShenandoahHeap::allow_uncommit() { + if (_uncommit_thread != nullptr) { + _uncommit_thread->allow_uncommit(); + } +} + +#ifdef ASSERT +bool ShenandoahHeap::is_uncommit_in_progress() { + if (_uncommit_thread != nullptr) { + return _uncommit_thread->is_uncommit_in_progress(); + } + return false; +} +#endif + void ShenandoahHeap::safepoint_synchronize_begin() { StackWatermarkSet::safepoint_synchronize_begin(); SuspendibleThreadSet::synchronize(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index a9a793f9e605d..5f957b734104d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -70,6 +70,7 @@ class ShenandoahFullGC; class ShenandoahMonitoringSupport; class ShenandoahPacer; class ShenandoahReferenceProcessor; +class ShenandoahUncommitThread; class ShenandoahVerifier; class ShenandoahWorkerThreads; class VMStructs; @@ -252,11 +253,14 @@ class ShenandoahHeap : public CollectedHeap { // ---------- Periodic Tasks // -private: +public: + // Notify heuristics and region state change logger that the state of the heap has changed void notify_heap_changed(); -public: + // Force counters to update void set_forced_counters_update(bool value); + + // Update counters if forced flag is set void handle_force_counters_update(); // ---------- Workers handling @@ -440,11 +444,6 @@ class ShenandoahHeap : public CollectedHeap { void cancel_gc(GCCause::Cause cause); public: - // These will uncommit empty regions if heap::committed > shrink_until - // and there exists at least one region which was made empty before shrink_before. 
- void maybe_uncommit(double shrink_before, size_t shrink_until); - void op_uncommit(double shrink_before, size_t shrink_until); - // Returns true if the soft maximum heap has been changed using management APIs. bool check_soft_max_changed(); @@ -478,14 +477,22 @@ class ShenandoahHeap : public CollectedHeap { void notify_gc_no_progress(); size_t get_gc_no_progress_count() const; -// -// Mark support + // The uncommit thread targets soft max heap, notify this thread when that value has changed. + void notify_soft_max_changed(); + + // An explicit GC request may have freed regions, notify the uncommit thread. + void notify_explicit_gc_requested(); + private: ShenandoahGeneration* _global_generation; protected: + // The control thread presides over concurrent collection cycles ShenandoahController* _control_thread; + // The uncommit thread periodically attempts to uncommit regions that have been empty for longer than ShenandoahUncommitDelay + ShenandoahUncommitThread* _uncommit_thread; + ShenandoahYoungGeneration* _young_generation; ShenandoahOldGeneration* _old_generation; @@ -500,7 +507,7 @@ class ShenandoahHeap : public CollectedHeap { ShenandoahMmuTracker _mmu_tracker; public: - ShenandoahController* control_thread() { return _control_thread; } + ShenandoahController* control_thread() const { return _control_thread; } ShenandoahGeneration* global_generation() const { return _global_generation; } ShenandoahYoungGeneration* young_generation() const { @@ -726,6 +733,20 @@ class ShenandoahHeap : public CollectedHeap { bool uncommit_bitmap_slice(ShenandoahHeapRegion *r); bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false); + // During concurrent reset, the control thread will zero out the mark bitmaps for committed regions. + // This cannot happen when the uncommit thread is simultaneously trying to uncommit regions and their bitmaps. 
+ // To prevent these threads from working at the same time, we provide these methods for the control thread to + // prevent the uncommit thread from working while a collection cycle is in progress. + + // Forbid uncommits (will stop and wait if regions are being uncommitted) + void forbid_uncommit(); + + // Allow the uncommit thread to process regions + void allow_uncommit(); +#ifdef ASSERT + bool is_uncommit_in_progress(); +#endif + // Liveness caching support ShenandoahLiveData* get_liveness_cache(uint worker_id); void flush_liveness_cache(uint worker_id); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp index 05ab60c0bb66b..4c8cb8c20570d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp @@ -189,7 +189,6 @@ class outputStream; f(full_gc_reconstruct_remembered_set, " Reconstruct Remembered Set") \ f(full_gc_heapdump_post, " Post Heap Dump") \ \ - f(conc_uncommit, "Concurrent Uncommit") \ f(pacing, "Pacing") \ \ f(heap_iteration_roots, "Heap Iteration") \ diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp new file mode 100644 index 0000000000000..85bb3349d5c97 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.cpp @@ -0,0 +1,198 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahUncommitThread.hpp" +#include "logging/log.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/events.hpp" + +ShenandoahUncommitThread::ShenandoahUncommitThread(ShenandoahHeap* heap) + : _heap(heap), + _stop_lock(Mutex::safepoint - 2, "ShenandoahUncommitStop_lock", true), + _uncommit_lock(Mutex::safepoint - 2, "ShenandoahUncommitCancel_lock", true) { + set_name("Shenandoah Uncommit Thread"); + create_and_start(); + + // Allow uncommits. This is managed by the control thread during a GC. + _uncommit_allowed.set(); +} + +void ShenandoahUncommitThread::run_service() { + assert(ShenandoahUncommit, "Thread should only run when uncommit is enabled"); + + // poll_interval avoids constantly polling regions for shrinking. + // Having an interval 10x lower than the delay would mean we hit the + // shrinking with lag of less than 1/10-th of true delay. + // ShenandoahUncommitDelay is in millis, but shrink_period is in seconds. 
+ const int64_t poll_interval = int64_t(ShenandoahUncommitDelay) / 10; + const double shrink_period = double(ShenandoahUncommitDelay) / 1000; + bool timed_out = false; + while (!should_terminate()) { + bool soft_max_changed = _soft_max_changed.try_unset(); + bool explicit_gc_requested = _explicit_gc_requested.try_unset(); + + if (soft_max_changed || explicit_gc_requested || timed_out) { + double current = os::elapsedTime(); + size_t shrink_until = soft_max_changed ? _heap->soft_max_capacity() : _heap->min_capacity(); + double shrink_before = (soft_max_changed || explicit_gc_requested) ? + current : + current - shrink_period; + + // Explicit GC tries to uncommit everything down to min capacity. + // Soft max change tries to uncommit everything down to target capacity. + // Periodic uncommit tries to uncommit suitable regions down to min capacity. + if (should_uncommit(shrink_before, shrink_until)) { + uncommit(shrink_before, shrink_until); + } + } + { + MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag); + if (!_stop_requested.is_set()) { + timed_out = locker.wait(poll_interval); + } + } + } +} + +bool ShenandoahUncommitThread::should_uncommit(double shrink_before, size_t shrink_until) const { + // Only start uncommit if the GC is idle, is not trying to run and there is work to do. + return _heap->is_idle() && is_uncommit_allowed() && has_work(shrink_before, shrink_until); +} + +bool ShenandoahUncommitThread::has_work(double shrink_before, size_t shrink_until) const { + // Determine if there is work to do. This avoids locking the heap if there is + // no work available, avoids spamming logs with superfluous logging messages, + // and minimises the amount of work while locks are held. 
+ + if (_heap->committed() <= shrink_until) { + return false; + } + + for (size_t i = 0; i < _heap->num_regions(); i++) { + ShenandoahHeapRegion *r = _heap->get_region(i); + if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { + return true; + } + } + + return false; +} + +void ShenandoahUncommitThread::notify_soft_max_changed() { + assert(is_uncommit_allowed(), "Only notify if uncommit is allowed"); + if (_soft_max_changed.try_set()) { + MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag); + locker.notify_all(); + } +} + +void ShenandoahUncommitThread::notify_explicit_gc_requested() { + assert(is_uncommit_allowed(), "Only notify if uncommit is allowed"); + if (_explicit_gc_requested.try_set()) { + MonitorLocker locker(&_stop_lock, Mutex::_no_safepoint_check_flag); + locker.notify_all(); + } +} + +bool ShenandoahUncommitThread::is_uncommit_allowed() const { + return _uncommit_allowed.is_set(); +} + +void ShenandoahUncommitThread::uncommit(double shrink_before, size_t shrink_until) { + assert(ShenandoahUncommit, "should be enabled"); + assert(_uncommit_in_progress.is_unset(), "Uncommit should not be in progress"); + + if (!is_uncommit_allowed()) { + return; + } + + const char* msg = "Concurrent uncommit"; + EventMark em("%s", msg); + double start = os::elapsedTime(); + log_info(gc, start)("%s", msg); + + _uncommit_in_progress.set(); + + // Application allocates from the beginning of the heap, and GC allocates at + // the end of it. It is more efficient to uncommit from the end, so that applications + // could enjoy the near committed regions. GC allocations are much less frequent, + // and therefore can accept the committing costs. 
+ size_t count = 0; + for (size_t i = _heap->num_regions(); i > 0; i--) { + if (!is_uncommit_allowed()) { + break; + } + + ShenandoahHeapRegion* r = _heap->get_region(i - 1); + if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { + SuspendibleThreadSetJoiner sts_joiner; + ShenandoahHeapLocker locker(_heap->lock()); + if (r->is_empty_committed()) { + if (_heap->committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) { + break; + } + + r->make_uncommitted(); + count++; + } + } + SpinPause(); // allow allocators to take the lock + } + + { + MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag); + _uncommit_in_progress.unset(); + locker.notify_all(); + } + + if (count > 0) { + _heap->notify_heap_changed(); + } + + double elapsed = os::elapsedTime() - start; + log_info(gc)("%s " PROPERFMT " (" PROPERFMT ") %.3fms", + msg, PROPERFMTARGS(count * ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(_heap->capacity()), + elapsed * MILLIUNITS); +} + +void ShenandoahUncommitThread::stop_service() { + MonitorLocker locker(&_stop_lock, Mutex::_safepoint_check_flag); + _stop_requested.set(); + locker.notify_all(); +} + +void ShenandoahUncommitThread::forbid_uncommit() { + MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag); + _uncommit_allowed.unset(); + while (_uncommit_in_progress.is_set()) { + locker.wait(); + } +} + +void ShenandoahUncommitThread::allow_uncommit() { + _uncommit_allowed.set(); +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp new file mode 100644 index 0000000000000..6c4e26e4e0fd8 --- /dev/null +++ b/src/hotspot/share/gc/shenandoah/shenandoahUncommitThread.hpp @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD +#define SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD + +#include "gc/shared/concurrentGCThread.hpp" + +class ShenandoahHeap; + +class ShenandoahUncommitThread : public ConcurrentGCThread { + ShenandoahHeap* const _heap; + + // Indicates that `SoftMaxHeapSize` has changed + ShenandoahSharedFlag _soft_max_changed; + + // Indicates that an explicit gc has been requested + ShenandoahSharedFlag _explicit_gc_requested; + + // Indicates that the thread should stop and terminate + ShenandoahSharedFlag _stop_requested; + + // Indicates whether it is safe to uncommit regions + ShenandoahSharedFlag _uncommit_allowed; + + // Indicates that regions are being actively uncommitted + ShenandoahSharedFlag _uncommit_in_progress; + + // This lock is used to coordinate stopping and terminating this thread + Monitor _stop_lock; + + // This lock is used to coordinate allowing or forbidding regions to be uncommitted + Monitor _uncommit_lock; + + // True if there are regions to uncommit and uncommits are allowed + bool 
should_uncommit(double shrink_before, size_t shrink_until) const; + + // True if there are regions that have been empty for longer than ShenandoahUncommitDelay and the committed + // memory is higher than soft max capacity or minimum capacity + bool has_work(double shrink_before, size_t shrink_until) const; + + // Perform the work of uncommitting empty regions + void uncommit(double shrink_before, size_t shrink_until); + + // True if the control thread has allowed this thread to uncommit regions + bool is_uncommit_allowed() const; + +public: + explicit ShenandoahUncommitThread(ShenandoahHeap* heap); + + // Periodically check for regions to uncommit + void run_service() override; + + // Wake up this thread and try to uncommit for changed soft max size + void notify_soft_max_changed(); + + // Wake up this thread and try to uncommit for min heap size + void notify_explicit_gc_requested(); + + // Wait for uncommit operations to stop, returns immediately if uncommit thread is idle + void forbid_uncommit(); + + // Allows uncommit operations to happen, does not block + void allow_uncommit(); + + // True if uncommit is in progress + bool is_uncommit_in_progress() { + return _uncommit_in_progress.is_set(); + } +protected: + // Interrupt and stop this thread + void stop_service() override; +}; + + +#endif //SHARE_GC_SHENANDOAH_SHENANDOAHUNCOMMITTHREAD diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp index 190822af9d6bb..fd30279d318a2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp @@ -242,4 +242,19 @@ class ShenandoahSuspendibleThreadSetLeaver { } }; +// Regions cannot be uncommitted when concurrent reset is zeroing out the bitmaps. +// This CADR class enforces this by forbidding region uncommits while it is in scope. 
+class ShenandoahNoUncommitMark : public StackObj { + ShenandoahHeap* const _heap; +public: + explicit ShenandoahNoUncommitMark(ShenandoahHeap* heap) : _heap(heap) { + _heap->forbid_uncommit(); + } + + ~ShenandoahNoUncommitMark() { + _heap->allow_uncommit(); + } +}; + + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHUTILS_HPP