diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
index fa7887145df..0c223ee3128 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp
@@ -36,6 +36,7 @@ class ShenandoahBarrierSet;
 class ShenandoahHeap;
 class ShenandoahMarkingContext;
 class ShenandoahReferenceProcessor;
+class SATBMarkQueueSet;
 
 //
 // ========= Super
@@ -55,6 +56,14 @@ public:
 // ========= Marking
 //
 
+class ShenandoahFlushSATBHandshakeClosure : public HandshakeClosure {
+private:
+  SATBMarkQueueSet& _qset;
+public:
+  inline explicit ShenandoahFlushSATBHandshakeClosure(SATBMarkQueueSet& qset);
+  inline void do_thread(Thread* thread) override;
+};
+
 class ShenandoahMarkRefsSuperClosure : public ShenandoahSuperClosure {
 private:
   ShenandoahObjToScanQueue* _queue;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
index a9c6a3395eb..c08c9501201 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp
@@ -59,6 +59,13 @@ void ShenandoahSuperClosure::do_nmethod(nmethod* nm) {
 //
 // ========= Marking
 //
+ShenandoahFlushSATBHandshakeClosure::ShenandoahFlushSATBHandshakeClosure(SATBMarkQueueSet& qset) :
+  HandshakeClosure("Shenandoah Flush SATB"),
+  _qset(qset) {}
+
+void ShenandoahFlushSATBHandshakeClosure::do_thread(Thread* thread) {
+  _qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
+}
 
 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q,
                                                                ShenandoahReferenceProcessor* rp,
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 29fd4002578..c2d33c396da 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -102,6 +102,16 @@ ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
   return _degen_point;
 }
 
+void ShenandoahConcurrentGC::entry_concurrent_update_refs_prepare(ShenandoahHeap* const heap) {
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  const char* msg = conc_init_update_refs_event_message();
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs_prepare);
+  EventMark em("%s", msg);
+
+  // Evacuation is complete, retire gc labs and change gc state
+  heap->concurrent_prepare_for_update_refs();
+}
+
 bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 
@@ -192,8 +202,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
       return false;
     }
 
-    // Evacuation is complete, retire gc labs
-    heap->concurrent_prepare_for_update_refs();
+    entry_concurrent_update_refs_prepare(heap);
 
     // Perform update-refs phase.
     if (ShenandoahVerify || ShenandoahPacing) {
@@ -216,24 +225,14 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
       // Update references freed up collection set, kick the cleanup to reclaim the space.
      entry_cleanup_complete();
    } else {
-      // We chose not to evacuate because we found sufficient immediate garbage.
-      // However, there may still be regions to promote in place, so do that now.
-      if (has_in_place_promotions(heap)) {
-        entry_promote_in_place();
-
-        // If the promote-in-place operation was cancelled, we can have the degenerated
-        // cycle complete the operation. It will see that no evacuations are in progress,
-        // and that there are regions wanting promotion. The risk with not handling the
-        // cancellation would be failing to restore top for these regions and leaving
-        // them unable to serve allocations for the old generation.
-        if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
-          return false;
-        }
+      if (!entry_final_roots()) {
+        assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
+        return false;
       }
 
-      // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
-      // the control thread will detect it on its next iteration and run a degenerated young cycle.
-      vmop_entry_final_roots();
+      if (VerifyAfterGC) {
+        vmop_entry_verify_final_roots();
+      }
       _abbreviated = true;
     }
 
@@ -251,6 +250,52 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
   return true;
 }
 
+bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
+  shenandoah_assert_generational();
+
+  ShenandoahGenerationalHeap* const heap = ShenandoahGenerationalHeap::heap();
+
+  // We chose not to evacuate because we found sufficient immediate garbage.
+  // However, there may still be regions to promote in place, so do that now.
+  if (heap->old_generation()->has_in_place_promotions()) {
+    entry_promote_in_place();
+
+    // If the promote-in-place operation was cancelled, we can have the degenerated
+    // cycle complete the operation. It will see that no evacuations are in progress,
+    // and that there are regions wanting promotion. The risk with not handling the
+    // cancellation would be failing to restore top for these regions and leaving
+    // them unable to serve allocations for the old generation. This will leave the weak
+    // roots flag set (the degenerated cycle will unset it).
+    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) {
+      return false;
+    }
+  }
+
+  // At this point, the cycle is effectively complete. If the cycle has been cancelled here,
+  // the control thread will detect it on its next iteration and run a degenerated young cycle.
+  if (!_generation->is_old()) {
+    heap->update_region_ages(_generation->complete_marking_context());
+  }
+
+  if (!heap->is_concurrent_old_mark_in_progress()) {
+    heap->concurrent_final_roots();
+  } else {
+    // Since the cycle was shortened for having enough immediate garbage, this will be
+    // the last phase before concurrent marking of old resumes. We must be sure
+    // that old mark threads don't see any pointers to garbage in the SATB queues. Even
+    // though nothing was evacuated, overwriting unreachable weak roots with null may still
+    // put pointers to regions that become trash in the SATB queues. The following will
+    // piggyback flushing the thread local SATB queues on the same handshake that propagates
+    // the gc state change.
+    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
+    ShenandoahFlushSATBHandshakeClosure complete_thread_local_satb_buffers(satb_queues);
+    heap->concurrent_final_roots(&complete_thread_local_satb_buffers);
+    heap->old_generation()->concurrent_transfer_pointers_from_satb();
+  }
+  return true;
+}
+
+
 void ShenandoahConcurrentGC::vmop_entry_init_mark() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
@@ -291,7 +336,7 @@ void ShenandoahConcurrentGC::vmop_entry_final_update_refs() {
   VMThread::execute(&op);
 }
 
-void ShenandoahConcurrentGC::vmop_entry_final_roots() {
+void ShenandoahConcurrentGC::vmop_entry_verify_final_roots() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_gross);
@@ -347,12 +392,12 @@ void ShenandoahConcurrentGC::entry_final_update_refs() {
   op_final_update_refs();
 }
 
-void ShenandoahConcurrentGC::entry_final_roots() {
-  const char* msg = final_roots_event_message();
+void ShenandoahConcurrentGC::entry_verify_final_roots() {
+  const char* msg = verify_final_roots_event_message();
   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_roots);
   EventMark em("%s", msg);
 
-  op_final_roots();
+  op_verify_final_roots();
 }
 
 void ShenandoahConcurrentGC::entry_reset() {
@@ -526,19 +571,12 @@ void ShenandoahConcurrentGC::entry_evacuate() {
   op_evacuate();
 }
 
-void ShenandoahConcurrentGC::entry_promote_in_place() {
+void ShenandoahConcurrentGC::entry_promote_in_place() const {
   shenandoah_assert_generational();
 
-  ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
-
-  static const char* msg = "Promote in place";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::promote_in_place);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(heap->workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
-                              "promote in place");
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::promote_in_place);
+  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
+  EventMark em("%s", "Promote in place");
 
   ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
 }
@@ -663,6 +701,7 @@ void ShenandoahConcurrentGC::op_init_mark() {
   }
 
   if (ShenandoahVerify) {
+    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
     heap->verifier()->verify_before_concmark();
   }
 
@@ -751,6 +790,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
     }
 
     if (ShenandoahVerify) {
+      ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
       heap->verifier()->verify_before_evacuation();
     }
 
@@ -767,6 +807,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
     }
   } else {
     if (ShenandoahVerify) {
+      ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
       if (has_in_place_promotions(heap)) {
         heap->verifier()->verify_after_concmark_with_promotions();
       } else {
@@ -1088,6 +1129,7 @@ void ShenandoahConcurrentGC::op_evacuate() {
 void ShenandoahConcurrentGC::op_init_update_refs() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   if (ShenandoahVerify) {
+    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
     heap->verifier()->verify_before_update_refs();
   }
   if (ShenandoahPacing) {
@@ -1175,6 +1217,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() {
   }
 
   if (ShenandoahVerify) {
+    ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
     heap->verifier()->verify_after_update_refs();
   }
 
@@ -1190,33 +1233,32 @@ void ShenandoahConcurrentGC::op_final_update_refs() {
   }
 }
 
-void ShenandoahConcurrentGC::op_final_roots() {
+bool ShenandoahConcurrentGC::entry_final_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 
-  ShenandoahHeap *heap = ShenandoahHeap::heap();
-  heap->set_concurrent_weak_root_in_progress(false);
-  heap->set_evacuation_in_progress(false);
-  if (heap->mode()->is_generational()) {
-    // If the cycle was shortened for having enough immediate garbage, this could be
-    // the last GC safepoint before concurrent marking of old resumes. We must be sure
-    // that old mark threads don't see any pointers to garbage in the SATB buffers.
-    if (heap->is_concurrent_old_mark_in_progress()) {
-      heap->old_generation()->transfer_pointers_from_satb();
-    }
+  const char* msg = conc_final_roots_event_message();
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_final_roots);
+  EventMark em("%s", msg);
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
+                              msg);
 
-    if (!_generation->is_old()) {
-      ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
+  if (!heap->mode()->is_generational()) {
+    heap->concurrent_final_roots();
+  } else {
+    if (!complete_abbreviated_cycle()) {
+      return false;
     }
   }
+  return true;
+}
 
+void ShenandoahConcurrentGC::op_verify_final_roots() {
   if (VerifyAfterGC) {
     Universe::verify();
   }
-
-  {
-    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_roots_propagate_gc_state);
-    heap->propagate_gc_state_to_all_threads();
-  }
 }
 
 void ShenandoahConcurrentGC::op_cleanup_complete() {
@@ -1301,11 +1343,19 @@
   }
 }
 
-const char* ShenandoahConcurrentGC::final_roots_event_message() const {
+const char* ShenandoahConcurrentGC::verify_final_roots_event_message() const {
   if (ShenandoahHeap::heap()->unload_classes()) {
-    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", " (unload classes)");
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", " (unload classes)");
   } else {
-    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Final Roots", "");
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Pause Verify Final Roots", "");
+  }
+}
+
+const char* ShenandoahConcurrentGC::conc_final_roots_event_message() const {
+  if (ShenandoahHeap::heap()->unload_classes()) {
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", " (unload classes)");
+  } else {
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Final Roots", "");
   }
 }
 
@@ -1332,3 +1382,11 @@ const char* ShenandoahConcurrentGC::conc_cleanup_event_message() const {
     SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent cleanup", "");
   }
 }
+
+const char* ShenandoahConcurrentGC::conc_init_update_refs_event_message() const {
+  if (ShenandoahHeap::heap()->unload_classes()) {
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", " (unload classes)");
+  } else {
+    SHENANDOAH_RETURN_EVENT_MESSAGE(_generation->type(), "Concurrent Init Update Refs", "");
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
index 0b2e42fb6c6..d81c49363a2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
@@ -56,9 +56,12 @@ private:
 public:
   ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap);
+
   bool collect(GCCause::Cause cause) override;
   ShenandoahDegenPoint degen_point() const;
 
+  void entry_concurrent_update_refs_prepare(ShenandoahHeap* heap);
+
   // Return true if this cycle found enough immediate garbage to skip evacuation
   bool abbreviated() const { return _abbreviated; }
 
@@ -69,7 +72,7 @@ protected:
   void vmop_entry_final_mark();
   void vmop_entry_init_update_refs();
   void vmop_entry_final_update_refs();
-  void vmop_entry_final_roots();
+  void vmop_entry_verify_final_roots();
 
   // Entry methods to normally STW GC operations. These set up logging, monitoring
   // and workers for next VM operation
@@ -77,7 +80,7 @@
   void entry_final_mark();
   void entry_init_update_refs();
   void entry_final_update_refs();
-  void entry_final_roots();
+  void entry_verify_final_roots();
 
   // Entry methods to normally concurrent GC operations. These set up logging, monitoring
   // for concurrent operation.
@@ -96,8 +99,11 @@
   void entry_update_refs();
   void entry_cleanup_complete();
 
+  // This is the last phase of a cycle which performs no evacuations
+  bool entry_final_roots();
+
   // Called when the collection set is empty, but the generational mode has regions to promote in place
-  void entry_promote_in_place();
+  void entry_promote_in_place() const;
 
   // Actual work for the phases
   void op_reset();
@@ -116,7 +122,8 @@
   void op_update_refs();
   void op_update_thread_roots();
   void op_final_update_refs();
-  void op_final_roots();
+
+  void op_verify_final_roots();
   void op_cleanup_complete();
   void op_reset_after_collect();
 
@@ -129,19 +136,23 @@
 private:
   void start_mark();
 
-  static bool has_in_place_promotions(ShenandoahHeap* heap) ;
+  bool complete_abbreviated_cycle();
+
+  static bool has_in_place_promotions(ShenandoahHeap* heap);
 
   // Messages for GC trace events, they have to be immortal for
   // passing around the logging/tracing systems
   const char* init_mark_event_message() const;
   const char* final_mark_event_message() const;
-  const char* final_roots_event_message() const;
+  const char* verify_final_roots_event_message() const;
+  const char* conc_final_roots_event_message() const;
   const char* conc_mark_event_message() const;
   const char* conc_reset_event_message() const;
   const char* conc_reset_after_collect_event_message() const;
   const char* conc_weak_refs_event_message() const;
   const char* conc_weak_roots_event_message() const;
   const char* conc_cleanup_event_message() const;
+  const char* conc_init_update_refs_event_message() const;
 };
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTGC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 1feb19f6e4a..4a0f43226d7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -212,19 +212,6 @@ void ShenandoahConcurrentMark::mark_concurrent_roots() {
   }
 }
 
-class ShenandoahFlushSATBHandshakeClosure : public HandshakeClosure {
-private:
-  SATBMarkQueueSet& _qset;
-public:
-  ShenandoahFlushSATBHandshakeClosure(SATBMarkQueueSet& qset) :
-    HandshakeClosure("Shenandoah Flush SATB"),
-    _qset(qset) {}
-
-  void do_thread(Thread* thread) {
-    _qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
-  }
-};
-
 void ShenandoahConcurrentMark::concurrent_mark() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   WorkerThreads* workers = heap->workers();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index 436ba154e5a..8cc4e2de8ea 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -409,7 +409,7 @@ void ShenandoahDegenGC::op_evacuate() {
 void ShenandoahDegenGC::op_init_update_refs() {
   // Evacuation has completed
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  heap->prepare_update_heap_references(false /*concurrent*/);
+  heap->prepare_update_heap_references();
   heap->set_update_refs_in_progress(true);
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index ad09aecbe68..c506c418d6d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1247,9 +1247,11 @@ public:
   }
 };
 
-class ShenandoahGCStatePropagator : public ThreadClosure {
+class ShenandoahGCStatePropagator : public HandshakeClosure {
 public:
-  explicit ShenandoahGCStatePropagator(char gc_state) : _gc_state(gc_state) {}
+  explicit ShenandoahGCStatePropagator(char gc_state) :
+    HandshakeClosure("Shenandoah GC State Change"),
+    _gc_state(gc_state) {}
 
   void do_thread(Thread* thread) override {
     ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
@@ -1306,6 +1308,37 @@ void ShenandoahHeap::concurrent_prepare_for_update_refs() {
   _update_refs_iterator.reset();
 }
 
+class ShenandoahCompositeHandshakeClosure : public HandshakeClosure {
+  HandshakeClosure* _handshake_1;
+  HandshakeClosure* _handshake_2;
+ public:
+  ShenandoahCompositeHandshakeClosure(HandshakeClosure* handshake_1, HandshakeClosure* handshake_2) :
+    HandshakeClosure(handshake_2->name()),
+    _handshake_1(handshake_1), _handshake_2(handshake_2) {}
+
+  void do_thread(Thread* thread) override {
+    _handshake_1->do_thread(thread);
+    _handshake_2->do_thread(thread);
+  }
+};
+
+void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure) {
+  {
+    assert(!is_evacuation_in_progress(), "Should not evacuate for abbreviated or old cycles");
+    MutexLocker lock(Threads_lock);
+    set_gc_state_concurrent(WEAK_ROOTS, false);
+  }
+
+  ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
+  Threads::non_java_threads_do(&propagator);
+  if (handshake_closure == nullptr) {
+    Handshake::execute(&propagator);
+  } else {
+    ShenandoahCompositeHandshakeClosure composite(&propagator, handshake_closure);
+    Handshake::execute(&composite);
+  }
+}
+
 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
@@ -2019,16 +2052,14 @@ void ShenandoahHeap::stw_weak_refs(bool full_gc) {
   gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
 }
 
-void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
+void ShenandoahHeap::prepare_update_heap_references() {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 
   // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
   // make them parsable for update code to work correctly. Plus, we can compute new sizes
   // for future GCLABs here.
   if (UseTLAB) {
-    ShenandoahGCPhase phase(concurrent ?
-                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
-                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
     gclabs_retire(ResizeTLAB);
   }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 33d2db0b2f1..d9508beac20 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -481,11 +481,14 @@ private:
   // Concurrent class unloading support
   void do_class_unloading();
   // Reference updating
-  void prepare_update_heap_references(bool concurrent);
+  void prepare_update_heap_references();
 
   // Retires LABs used for evacuation
   void concurrent_prepare_for_update_refs();
 
+  // Turn off weak roots flag, purge old satb buffers in generational mode
+  void concurrent_final_roots(HandshakeClosure* handshake_closure = nullptr);
+
   virtual void update_heap_references(bool concurrent);
   // Final update region states
   void update_heap_region_states(bool concurrent);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index 2f5abf99200..7c28378bf24 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -142,7 +142,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
   // return from here with weak roots in progress. This is not a valid gc state
   // for any young collections (or allocation failures) that interrupt the old
   // collection.
-  vmop_entry_final_roots();
+  heap->concurrent_final_roots();
 
   // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may
   // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index 9b030905b6d..b0c42c7b40f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -23,7 +23,6 @@
  *
  */
 
-#include "gc/shared/strongRootsScope.hpp"
 #include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
 #include "gc/shenandoah/shenandoahAsserts.hpp"
 #include "gc/shenandoah/shenandoahCardTable.hpp"
@@ -94,6 +93,8 @@ public:
 class ShenandoahPurgeSATBTask : public WorkerTask {
 private:
   ShenandoahObjToScanQueueSet* _mark_queues;
+  // Keep track of the number of oops that are not transferred to mark queues.
+  // This is volatile because workers update it, but the vm thread reads it.
   volatile size_t _trashed_oops;
 
 public:
@@ -124,6 +125,35 @@ public:
   }
 };
 
+class ShenandoahTransferOldSATBTask : public WorkerTask {
+  ShenandoahSATBMarkQueueSet& _satb_queues;
+  ShenandoahObjToScanQueueSet* _mark_queues;
+  // Keep track of the number of oops that are not transferred to mark queues.
+  // This is volatile because workers update it, but the control thread reads it.
+  volatile size_t _trashed_oops;
+
+public:
+  explicit ShenandoahTransferOldSATBTask(ShenandoahSATBMarkQueueSet& satb_queues, ShenandoahObjToScanQueueSet* mark_queues) :
+    WorkerTask("Transfer SATB"),
+    _satb_queues(satb_queues),
+    _mark_queues(mark_queues),
+    _trashed_oops(0) {}
+
+  ~ShenandoahTransferOldSATBTask() {
+    if (_trashed_oops > 0) {
+      log_debug(gc)("Purged %zu oops from old generation SATB buffers", _trashed_oops);
+    }
+  }
+
+  void work(uint worker_id) override {
+    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
+    ShenandoahProcessOldSATB processor(mark_queue);
+    while (_satb_queues.apply_closure_to_completed_buffer(&processor)) {}
+
+    Atomic::add(&_trashed_oops, processor.trashed_oops());
+  }
+};
+
 class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
 private:
   uint _nworkers;
@@ -423,14 +453,25 @@ bool ShenandoahOldGeneration::coalesce_and_fill() {
   }
 }
 
-void ShenandoahOldGeneration::transfer_pointers_from_satb() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  shenandoah_assert_safepoint();
+void ShenandoahOldGeneration::concurrent_transfer_pointers_from_satb() const {
+  const ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
   log_debug(gc)("Transfer SATB buffers");
-  uint nworkers = heap->workers()->active_workers();
-  StrongRootsScope scope(nworkers);
+  // Step 1. All threads need to 'complete' partially filled, thread local SATB buffers. This
+  // is accomplished in ShenandoahConcurrentGC::complete_abbreviated_cycle using a Handshake
+  // operation.
+  // Step 2. Use worker threads to transfer oops from old, active regions in the completed
+  // SATB buffers to old generation mark queues.
+  ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
+  ShenandoahTransferOldSATBTask transfer_task(satb_queues, task_queues());
+  heap->workers()->run_task(&transfer_task);
+}
+
+void ShenandoahOldGeneration::transfer_pointers_from_satb() const {
+  const ShenandoahHeap* heap = ShenandoahHeap::heap();
+  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
+  log_debug(gc)("Transfer SATB buffers");
 
   ShenandoahPurgeSATBTask purge_satb_task(task_queues());
   heap->workers()->run_task(&purge_satb_task);
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index ed12650b8ce..b70a8d33b95 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -232,7 +232,8 @@ public:
   // Alternatively, we could inspect the state of the heap and the age of the
   // object at the barrier, but we reject this approach because it is likely
   // the performance impact would be too severe.
-  void transfer_pointers_from_satb();
+  void transfer_pointers_from_satb() const;
+  void concurrent_transfer_pointers_from_satb() const;
 
   // True if there are old regions waiting to be selected for a mixed collection
   bool has_unprocessed_collection_candidates();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
index b25edfd3cb7..e16275b480a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
@@ -121,6 +121,7 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
     case conc_weak_refs:
     case conc_strong_roots:
     case conc_coalesce_and_fill:
+    case promote_in_place:
       return true;
     default:
       return false;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
index 20c59cce69b..9100ad2b220 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
@@ -54,6 +54,7 @@ class outputStream;
   f(conc_reset_old, "Concurrent Reset (OLD)") \
   f(init_mark_gross, "Pause Init Mark (G)") \
   f(init_mark, "Pause Init Mark (N)") \
+  f(init_mark_verify, "  Verify") \
   f(init_manage_tlabs, "  Manage TLABs") \
   f(init_swap_rset, "  Swap Remembered Set") \
   f(init_transfer_satb, "  Transfer Old From SATB") \
@@ -71,6 +72,7 @@
   \
   f(final_mark_gross, "Pause Final Mark (G)") \
   f(final_mark, "Pause Final Mark (N)") \
+  f(final_mark_verify, "  Verify") \
   f(finish_mark, "  Finish Mark") \
   f(final_mark_propagate_gc_state, "  Propagate GC State") \
   SHENANDOAH_PAR_PHASE_DO(finish_mark_, "  FM: ", f) \
@@ -107,21 +109,22 @@
   f(conc_strong_roots, "Concurrent Strong Roots") \
   SHENANDOAH_PAR_PHASE_DO(conc_strong_roots_, "  CSR: ", f) \
   f(conc_evac, "Concurrent Evacuation") \
-  f(promote_in_place, "Concurrent Promote Regions") \
-  f(final_roots_gross, "Pause Final Roots (G)") \
-  f(final_roots, "Pause Final Roots (N)") \
-  f(final_roots_propagate_gc_state, "  Propagate GC State") \
+  f(conc_final_roots, "Concurrent Final Roots") \
+  f(promote_in_place, "  Promote Regions") \
+  f(final_roots_gross, "Pause Verify Final Roots (G)") \
+  f(final_roots, "Pause Verify Final Roots (N)") \
   \
   f(init_update_refs_gross, "Pause Init Update Refs (G)") \
   f(init_update_refs, "Pause Init Update Refs (N)") \
-  f(init_update_refs_manage_gclabs, "  Manage GCLABs") \
+  f(init_update_refs_verify, "  Verify") \
   \
+  f(conc_update_refs_prepare, "Concurrent Update Refs Prepare") \
   f(conc_update_refs, "Concurrent Update Refs") \
   f(conc_update_thread_roots, "Concurrent Update Thread Roots") \
   \
   f(final_update_refs_gross, "Pause Final Update Refs (G)") \
   f(final_update_refs, "Pause Final Update Refs (N)") \
-  f(final_update_refs_finish_work, "  Finish Work") \
+  f(final_update_refs_verify, "  Verify") \
   f(final_update_refs_update_region_states, "  Update Region States") \
   f(final_update_refs_trash_cset, "  Trash Collection Set") \
   f(final_update_refs_rebuild_freeset, "  Rebuild Free Set") \
@@ -152,7 +155,6 @@
   f(degen_gc_stw_evac, "  Evacuation") \
   f(degen_gc_init_update_refs_manage_gclabs, "  Manage GCLABs") \
   f(degen_gc_update_refs, "  Update References") \
-  f(degen_gc_final_update_refs_finish_work, "  Finish Work") \
   f(degen_gc_final_update_refs_update_region_states,"  Update Region States") \
   f(degen_gc_final_update_refs_trash_cset, "  Trash Collection Set") \
   f(degen_gc_final_update_refs_rebuild_freeset, "  Rebuild Free Set") \
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index bb9c3498a06..0137492f06f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -116,5 +116,5 @@ void VM_ShenandoahFinalUpdateRefs::doit() {
 void VM_ShenandoahFinalRoots::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT);
   set_active_generation();
-  _gc->entry_final_roots();
+  _gc->entry_verify_final_roots();
 }