8248314: Parallel: Parallelize parallel full gc Adjust Roots phase

Reviewed-by: tschatzl, iwalulya
Albert Mingkun Yang 2021-03-03 15:37:36 +00:00 committed by Thomas Schatzl
parent 3d3eb5c8d3
commit 1d2c1e6289
3 changed files with 84 additions and 42 deletions
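For context on the change itself: the previously serial Adjust Roots phase is replaced by a PSAdjustTask gang task in which every worker walks the parallel-friendly root sets (Java threads, OopStorage, class loader data, weak roots), while the remaining root sets are handed out at most once each through SubTasksDone. Below is a minimal standalone sketch (not HotSpot code; all names in it are invented for illustration) of that claim-once splitting pattern, using std::thread and std::atomic in place of the GC worker gang and SubTasksDone:

// Standalone illustration only -- these classes and functions are not HotSpot APIs.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum SubTask { SubTask_code_cache, SubTask_old_refs, SubTask_young_refs, SubTask_count };

class SubTasks {
  std::atomic<unsigned> _claimed{0};   // one bit per sub-task
public:
  // Returns true for exactly one caller per sub-task, analogous to SubTasksDone::try_claim_task().
  bool try_claim(SubTask t) {
    unsigned bit = 1u << t;
    return (_claimed.fetch_or(bit) & bit) == 0;
  }
};

static void adjust_parallel_roots(unsigned worker_id) {
  // Stand-in for the per-worker part: threads, OopStorage, CLDG, weak processor.
  std::printf("worker %u: adjust parallel root sets\n", worker_id);
}

static void adjust_serial_root(unsigned worker_id, const char* name) {
  // Stand-in for a root set that only the claiming worker walks.
  std::printf("worker %u: adjust %s\n", worker_id, name);
}

int main() {
  const unsigned nworkers = 4;
  SubTasks sub_tasks;

  auto work = [&sub_tasks](unsigned worker_id) {
    adjust_parallel_roots(worker_id);                 // done by every worker
    if (sub_tasks.try_claim(SubTask_code_cache)) {    // done by exactly one worker
      adjust_serial_root(worker_id, "code cache");
    }
    if (sub_tasks.try_claim(SubTask_old_refs)) {
      adjust_serial_root(worker_id, "old-gen discovered references");
    }
    if (sub_tasks.try_claim(SubTask_young_refs)) {
      adjust_serial_root(worker_id, "young-gen discovered references");
    }
  };

  std::vector<std::thread> gang;
  for (unsigned i = 0; i < nworkers; i++) {
    gang.emplace_back(work, i);
  }
  for (std::thread& t : gang) {
    t.join();
  }
  return 0;
}

The fetch_or makes try_claim return true for exactly one worker per sub-task, so each serial root set is adjusted once no matter how many workers run; that is the property SubTasksDone provides to PSAdjustTask::work in the patch below.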

src/hotspot/share/gc/parallel/psParallelCompact.cpp

@@ -57,7 +57,7 @@
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
@@ -780,7 +780,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
return true;
}
-HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
+HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const {
assert(addr != NULL, "Should detect NULL oop earlier");
assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
@@ -1788,8 +1788,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParCompactionManager::manager_array(ParallelScavengeHeap::heap()->workers().total_workers());
{
ResourceMark rm;
const uint active_workers =
WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
ParallelScavengeHeap::heap()->workers().active_workers(),
@@ -1834,7 +1832,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// adjust_roots() updates Universe::_intArrayKlassObj which is
// needed by the compaction for filling holes in the dense prefix.
-adjust_roots(vmthread_cm);
+adjust_roots();
compaction_start.update();
compact();
@@ -2209,35 +2207,81 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}
-void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
+class PSAdjustTask final : public AbstractGangTask {
+  SubTasksDone _sub_tasks;
+  WeakProcessor::Task _weak_proc_task;
+  OopStorageSetStrongParState<false, false> _oop_storage_iter;
+  uint _nworkers;
+  enum PSAdjustSubTask {
+    PSAdjustSubTask_code_cache,
+    PSAdjustSubTask_aot,
+    PSAdjustSubTask_old_ref_process,
+    PSAdjustSubTask_young_ref_process,
+    PSAdjustSubTask_num_elements
+  };
+public:
+  PSAdjustTask(uint nworkers) :
+    AbstractGangTask("PSAdjust task"),
+    _sub_tasks(PSAdjustSubTask_num_elements),
+    _weak_proc_task(nworkers),
+    _nworkers(nworkers) {
+    // Need new claim bits when tracing through and adjusting pointers.
+    ClassLoaderDataGraph::clear_claimed_marks();
+    if (nworkers > 1) {
+      Threads::change_thread_claim_token();
+    }
+  }
+  ~PSAdjustTask() {
+    Threads::assert_all_threads_claimed();
+  }
+  void work(uint worker_id) {
+    ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
+    PCAdjustPointerClosure adjust(cm);
+    {
+      ResourceMark rm;
+      Threads::possibly_parallel_oops_do(_nworkers > 1, &adjust, nullptr);
+    }
+    _oop_storage_iter.oops_do(&adjust);
+    {
+      CLDToOopClosure cld_closure(&adjust, ClassLoaderData::_claim_strong);
+      ClassLoaderDataGraph::cld_do(&cld_closure);
+    }
+    {
+      AlwaysTrueClosure always_alive;
+      _weak_proc_task.work(worker_id, &always_alive, &adjust);
+    }
+    if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
+      CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
+      CodeCache::blobs_do(&adjust_code);
+    }
+    if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) {
+      AOT_ONLY(AOTLoader::oops_do(&adjust);)
+    }
+    if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
+      PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
+    }
+    if (_sub_tasks.try_claim_task(PSAdjustSubTask_young_ref_process)) {
+      // Roots were visited so references into the young gen in roots
+      // may have been scanned. Process them also.
+      // Should the reference processor have a span that excludes
+      // young gen objects?
+      PSScavenge::reference_processor()->weak_oops_do(&adjust);
+    }
+    _sub_tasks.all_tasks_claimed();
+  }
+};
+void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
-// Need new claim bits when tracing through and adjusting pointers.
-ClassLoaderDataGraph::clear_claimed_marks();
-PCAdjustPointerClosure oop_closure(cm);
-// General strong roots.
-Threads::oops_do(&oop_closure, NULL);
-OopStorageSet::strong_oops_do(&oop_closure);
-CLDToOopClosure cld_closure(&oop_closure, ClassLoaderData::_claim_strong);
-ClassLoaderDataGraph::cld_do(&cld_closure);
-// Now adjust pointers in remaining weak roots. (All of which should
-// have been cleared if they pointed to non-surviving objects.)
-WeakProcessor::oops_do(&oop_closure);
-CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
-CodeCache::blobs_do(&adjust_from_blobs);
-AOT_ONLY(AOTLoader::oops_do(&oop_closure);)
-ref_processor()->weak_oops_do(&oop_closure);
-// Roots were visited so references into the young gen in roots
-// may have been scanned. Process them also.
-// Should the reference processor have a span that excludes
-// young gen objects?
-PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
+uint nworkers = ParallelScavengeHeap::heap()->workers().active_workers();
+PSAdjustTask task(nworkers);
+ParallelScavengeHeap::heap()->workers().run_task(&task);
}
// Helper class to print 8 region numbers per line and then print the total at the end.
@@ -2306,7 +2350,7 @@ void PSParallelCompact::prepare_region_draining_tasks(uint parallel_gc_threads)
for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
-ParCompactionManager* cm = ParCompactionManager::manager_array(worker_id);
+ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
bool result = sd.region(cur)->mark_normal();
assert(result, "Must succeed at this point.");
cm->region_stack()->push(cur);
@@ -2505,7 +2549,6 @@ static void compaction_with_stealing_work(TaskTerminator* terminator, uint worke
// Go around again.
}
}
-return;
}
class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
@@ -3133,7 +3176,7 @@ void PSParallelCompact::initialize_shadow_regions(uint parallel_gc_threads)
size_t beg_region = sd.addr_to_region_idx(_space_info[old_space_id].dense_prefix());
for (uint i = 0; i < parallel_gc_threads; i++) {
-ParCompactionManager *cm = ParCompactionManager::manager_array(i);
+ParCompactionManager *cm = ParCompactionManager::gc_thread_compaction_manager(i);
cm->set_next_shadow_region(beg_region + i);
}
}

src/hotspot/share/gc/parallel/psParallelCompact.hpp

@@ -480,9 +480,9 @@ public:
HeapWord* partial_obj_end(size_t region_idx) const;
// Return the location of the object after compaction.
-HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);
+HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) const;
-HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
+HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) const {
return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
}
@@ -1107,7 +1107,7 @@ class PSParallelCompact : AllStatic {
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
// Adjust addresses in roots. Does not adjust addresses in heap.
-static void adjust_roots(ParCompactionManager* cm);
+static void adjust_roots();
DEBUG_ONLY(static void write_block_fill_histogram();)

src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp

@@ -113,10 +113,9 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
-assert(new_obj != NULL, // is forwarding ptr?
-       "should be forwarded");
-// Just always do the update unconditionally?
-if (new_obj != NULL) {
+assert(new_obj != NULL, "non-null address for live objects");
+// Is it actually relocated at all?
+if (new_obj != obj) {
assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
"should be in object space");
RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);