8279241: G1 Full GC does not always slide memory to bottom addresses

Reviewed-by: iwalulya, ayang, sjohanss
Thomas Schatzl 2022-01-25 09:13:50 +00:00
parent b32774653f
commit 295b263fa9
9 changed files with 343 additions and 206 deletions
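
At a high level, the change splits the old single prepare pass into three steps: phase2a_determine_worklists() walks all heap regions serially in ascending address order and, via the new G1DetermineCompactionQueueClosure, deals the regions selected for compaction out to the per-worker compaction queues round-robin; phase2b_forward_oops() then forwards objects in parallel within each queue; phase2c_prepare_serial_compaction() re-prepares the tail region of each queue serially when no free compaction targets remain. The following is a minimal, self-contained C++ sketch of just the round-robin distribution idea, assuming worker queues can be modeled as plain vectors; every name in it (CompactionQueue, RoundRobinDistributor, add_to_compaction_queue) is an illustrative stand-in rather than a HotSpot type, and only the modulo step mirrors next_worker() from the patch.

// Illustrative sketch only: hand out region indices to a fixed number of
// worker compaction queues in round-robin order, mimicking the way
// G1DetermineCompactionQueueClosure assigns regions in this change.
#include <cstdio>
#include <vector>

struct CompactionQueue {
  std::vector<unsigned> regions;   // region indices queued for one worker
};

class RoundRobinDistributor {
  std::vector<CompactionQueue>& _queues;
  unsigned _cur_worker = 0;

  // Return the current worker id and advance round-robin (cf. next_worker()).
  unsigned next_worker() {
    unsigned result = _cur_worker;
    _cur_worker = (_cur_worker + 1) % (unsigned)_queues.size();
    return result;
  }

public:
  explicit RoundRobinDistributor(std::vector<CompactionQueue>& queues) : _queues(queues) { }

  void add_to_compaction_queue(unsigned region_index) {
    _queues[next_worker()].regions.push_back(region_index);
  }
};

int main() {
  std::vector<CompactionQueue> queues(3);   // pretend there are 3 GC workers
  RoundRobinDistributor dist(queues);
  // Regions are visited in ascending (bottom-to-top) heap order, so the
  // lowest regions land at the front of every worker's queue.
  for (unsigned region = 0; region < 8; region++) {
    dist.add_to_compaction_queue(region);
  }
  for (size_t w = 0; w < queues.size(); w++) {
    std::printf("worker %zu:", w);
    for (unsigned r : queues[w].regions) {
      std::printf(" %u", r);
    }
    std::printf("\n");
  }
  return 0;
}

Because regions are handed out in ascending address order, every worker's queue is filled bottom-up; when no free compaction targets are left, phase2c can then pull the last region of each queue (remove_last()) and re-prepare it into the serial compaction point, as the g1FullCollector.cpp hunk below does.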

src/hotspot/share/gc/g1/g1FullCollector.cpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -297,14 +297,67 @@ void G1FullCollector::phase1_mark_live_objects() {
}
void G1FullCollector::phase2_prepare_compaction() {
GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
phase2a_determine_worklists();
bool has_free_compaction_targets = phase2b_forward_oops();
// Try to avoid OOM immediately after Full GC in case there are no free regions
// left after determining the result locations (i.e. this phase). Prepare to
// maximally compact the tail regions of the compaction queues serially.
if (!has_free_compaction_targets) {
phase2c_prepare_serial_compaction();
}
}
void G1FullCollector::phase2a_determine_worklists() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
G1DetermineCompactionQueueClosure cl(this);
_heap->heap_region_iterate(&cl);
}
bool G1FullCollector::phase2b_forward_oops() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());
G1FullGCPrepareTask task(this);
run_task(&task);
// To avoid OOM when there is memory left.
if (!task.has_freed_regions()) {
task.prepare_serial_compaction();
return task.has_free_compaction_targets();
}
void G1FullCollector::phase2c_prepare_serial_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
// At this point we know that after parallel compaction there will be no
// completely free regions. That means that the last region of
// all compaction queues still have data in them. We try to compact
// these regions in serial to avoid a premature OOM when the mutator wants
// to allocate the first eden region after gc.
for (uint i = 0; i < workers(); i++) {
G1FullGCCompactionPoint* cp = compaction_point(i);
if (cp->has_regions()) {
serial_compaction_point()->add(cp->remove_last());
}
}
// Update the forwarding information for the regions in the serial
// compaction point.
G1FullGCCompactionPoint* cp = serial_compaction_point();
for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
HeapRegion* current = *it;
if (!cp->is_initialized()) {
// Initialize the compaction point. Nothing more is needed for the first heap region
// since it is already prepared for compaction.
cp->initialize(current, false);
} else {
assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
G1SerialRePrepareClosure re_prepare(cp, current);
current->set_compaction_top(current->bottom());
current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
}
}
cp->update();
}
void G1FullCollector::phase3_adjust_pointers() {

src/hotspot/share/gc/g1/g1FullCollector.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@ public:
G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
G1CMBitMap* mark_bitmap();
ReferenceProcessor* reference_processor();
size_t live_words(uint region_index) {
size_t live_words(uint region_index) const {
assert(region_index < _heap->max_regions(), "sanity");
return _live_stats[region_index]._live_words;
}
@@ -121,6 +121,9 @@ public:
inline bool is_skip_compacting(uint region_index) const;
inline bool is_skip_marking(oop obj) const;
// Are we (potentially) going to compact into this region?
inline bool is_compaction_target(uint region_index) const;
inline void set_free(uint region_idx);
inline bool is_free(uint region_idx) const;
inline void update_from_compacting_to_skip_compacting(uint region_idx);
@@ -128,6 +131,11 @@ public:
private:
void phase1_mark_live_objects();
void phase2_prepare_compaction();
void phase2a_determine_worklists();
bool phase2b_forward_oops();
void phase2c_prepare_serial_compaction();
void phase3_adjust_pointers();
void phase4_do_compaction();

src/hotspot/share/gc/g1/g1FullCollector.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,10 @@ bool G1FullCollector::is_skip_marking(oop obj) const {
return _region_attr_table.is_skip_marking(cast_from_oop<HeapWord*>(obj));
}
bool G1FullCollector::is_compaction_target(uint region_index) const {
return _region_attr_table.is_compacting(region_index) || is_free(region_index);
}
void G1FullCollector::set_free(uint region_idx) {
_region_attr_table.set_free(region_idx);
}

src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,10 @@ public:
return get_by_address(obj) == Compacting;
}
bool is_compacting(uint idx) const {
return get_by_index(idx) == Compacting;
}
bool is_skip_compacting(uint idx) const {
return get_by_index(idx) == SkipCompacting;
}

src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
@@ -39,113 +39,84 @@
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"
template<bool is_humongous>
void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
_regions_freed = true;
if (is_humongous) {
_g1h->free_humongous_region(hr, nullptr);
} else {
_g1h->free_region(hr, nullptr);
}
_collector->set_free(hr->hrm_index());
prepare_for_compaction(hr);
}
G1DetermineCompactionQueueClosure::G1DetermineCompactionQueueClosure(G1FullCollector* collector) :
_g1h(G1CollectedHeap::heap()),
_collector(collector),
_cur_worker(0) { }
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
if (should_compact(hr)) {
assert(!hr->is_humongous(), "moving humongous objects not supported.");
prepare_for_compaction(hr);
} else {
// There is no need to iterate and forward objects in pinned regions ie.
// prepare them for compaction. The adjust pointers phase will skip
// work for them.
assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
if (hr->is_humongous()) {
oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
if (!_bitmap->is_marked(obj)) {
free_pinned_region<true>(hr);
}
} else if (hr->is_open_archive()) {
bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
if (is_empty) {
free_pinned_region<false>(hr);
}
} else if (hr->is_closed_archive()) {
// nothing to do with closed archive region
} else {
assert(MarkSweepDeadRatio > 0,
"only skip compaction for other regions when MarkSweepDeadRatio > 0");
uint region_idx = hr->hrm_index();
assert(_collector->is_compaction_target(region_idx), "must be");
// Too many live objects; skip compacting it.
_collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
if (hr->is_young()) {
// G1 updates the BOT for old region contents incrementally, but young regions
// lack BOT information for performance reasons.
// Recreate BOT information of high live ratio young regions here to keep expected
// performance during scanning their card tables in the collection pauses later.
hr->update_bot();
}
log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
hr->hrm_index(), _collector->live_words(hr->hrm_index()));
}
}
assert(!hr->is_pinned(), "must be");
assert(!hr->is_closed_archive(), "must be");
assert(!hr->is_open_archive(), "must be");
// Reset data structures not valid after Full GC.
reset_region_metadata(hr);
prepare_for_compaction(hr);
return false;
}
G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
G1FullGCTask("G1 Prepare Compact Task", collector),
_freed_regions(false),
_has_free_compaction_targets(false),
_hrclaimer(collector->workers()) {
}
void G1FullGCPrepareTask::set_freed_regions() {
if (!_freed_regions) {
_freed_regions = true;
void G1FullGCPrepareTask::set_has_free_compaction_targets() {
if (!_has_free_compaction_targets) {
_has_free_compaction_targets = true;
}
}
bool G1FullGCPrepareTask::has_freed_regions() {
return _freed_regions;
bool G1FullGCPrepareTask::has_free_compaction_targets() {
return _has_free_compaction_targets;
}
void G1FullGCPrepareTask::work(uint worker_id) {
Ticks start = Ticks::now();
G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
G1CalculatePointersClosure closure(collector(), compaction_point);
G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);
// Calculate the target locations for the objects in the non-free regions of
// the compaction queues provided by the associate compaction point.
{
G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
G1CalculatePointersClosure closure(collector(), compaction_point);
compaction_point->update();
for (GrowableArrayIterator<HeapRegion*> it = compaction_point->regions()->begin();
it != compaction_point->regions()->end();
++it) {
closure.do_heap_region(*it);
}
compaction_point->update();
// Determine if there are any unused compaction targets. This is only the case if
// there are
// - any regions in queue, so no free ones either.
// - and the current region is not the last one in the list.
if (compaction_point->has_regions() &&
compaction_point->current_region() != compaction_point->regions()->last()) {
set_has_free_compaction_targets();
}
}
// Check if any regions was freed by this worker and store in task.
if (closure.freed_regions()) {
set_freed_regions();
// Clear region metadata that is invalid after GC for all regions.
{
G1ResetMetadataClosure closure(collector());
G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);
}
log_task("Prepare compaction task", worker_id, start);
}
G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1FullCollector* collector,
G1FullGCCompactionPoint* cp) :
_g1h(G1CollectedHeap::heap()),
_collector(collector),
_bitmap(collector->mark_bitmap()),
_cp(cp),
_regions_freed(false) { }
_g1h(G1CollectedHeap::heap()),
_collector(collector),
_bitmap(collector->mark_bitmap()),
_cp(cp) { }
bool G1FullGCPrepareTask::G1CalculatePointersClosure::should_compact(HeapRegion* hr) {
if (hr->is_pinned()) {
return false;
}
size_t live_words = _collector->live_words(hr->hrm_index());
size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
// High live ratio region will not be compacted.
return live_words <= live_words_threshold;
}
G1FullGCPrepareTask::G1ResetMetadataClosure::G1ResetMetadataClosure(G1FullCollector* collector) :
_g1h(G1CollectedHeap::heap()),
_collector(collector) { }
void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
void G1FullGCPrepareTask::G1ResetMetadataClosure::reset_region_metadata(HeapRegion* hr) {
hr->rem_set()->clear();
hr->clear_cardtable();
@@ -155,6 +126,26 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(Heap
}
}
bool G1FullGCPrepareTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr) {
uint const region_idx = hr->hrm_index();
if (!_collector->is_compaction_target(region_idx)) {
assert(!hr->is_free(), "all free regions should be compaction targets");
assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be");
if (hr->is_young()) {
// G1 updates the BOT for old region contents incrementally, but young regions
// lack BOT information for performance reasons.
// Recreate BOT information of high live ratio young regions here to keep expected
// performance during scanning their card tables in the collection pauses later.
hr->update_bot();
}
}
// Reset data structures not valid after Full GC.
reset_region_metadata(hr);
return false;
}
G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
_cp(cp) { }
@@ -164,87 +155,9 @@ size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
return size;
}
size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
// We only re-prepare objects forwarded within the current region, so
// skip objects that are already forwarded to another region.
if (obj->is_forwarded() && !_current->is_in(obj->forwardee())) {
return obj->size();
}
// Get size and forward.
size_t size = obj->size();
_cp->forward(obj, size);
return size;
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
HeapRegion* hr) {
hr->set_compaction_top(hr->bottom());
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
if (!_collector->is_free(hr->hrm_index())) {
G1PrepareCompactLiveClosure prepare_compact(cp);
G1PrepareCompactLiveClosure prepare_compact(_cp);
hr->apply_to_marked_objects(_bitmap, &prepare_compact);
}
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
if (!_cp->is_initialized()) {
hr->set_compaction_top(hr->bottom());
_cp->initialize(hr, true);
}
// Add region to the compaction queue and prepare it.
_cp->add(hr);
prepare_for_compaction_work(_cp, hr);
}
void G1FullGCPrepareTask::prepare_serial_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
// At this point we know that no regions were completely freed by
// the parallel compaction. That means that the last region of
// all compaction queues still have data in them. We try to compact
// these regions in serial to avoid a premature OOM.
for (uint i = 0; i < collector()->workers(); i++) {
G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
if (cp->has_regions()) {
collector()->serial_compaction_point()->add(cp->remove_last());
}
}
// Update the forwarding information for the regions in the serial
// compaction point.
G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
HeapRegion* current = *it;
if (!cp->is_initialized()) {
// Initialize the compaction point. Nothing more is needed for the first heap region
// since it is already prepared for compaction.
cp->initialize(current, false);
} else {
assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
G1RePrepareClosure re_prepare(cp, current);
current->set_compaction_top(current->bottom());
current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
}
}
cp->update();
}
bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
if (_regions_freed) {
return true;
}
if (!_cp->has_regions()) {
// No regions in queue, so no free ones either.
return false;
}
if (_cp->current_region() != _cp->regions()->last()) {
// The current region used for compaction is not the last in the
// queue. That means there is at least one free region in the queue.
return true;
}
// No free regions in the queue.
return false;
}

src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,53 +25,81 @@
#ifndef SHARE_GC_G1_G1FULLGCPREPARETASK_HPP
#define SHARE_GC_G1_G1FULLGCPREPARETASK_HPP
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1FullGCTask.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
class G1CollectedHeap;
class G1CMBitMap;
class G1FullCollector;
class G1FullGCCompactionPoint;
class HeapRegion;
// Determines the regions in the heap that should be part of the compaction and
// distributes them among the compaction queues in round-robin fashion.
class G1DetermineCompactionQueueClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
uint _cur_worker;
template<bool is_humongous>
inline void free_pinned_region(HeapRegion* hr);
inline bool should_compact(HeapRegion* hr) const;
// Returns the current worker id to assign a compaction point to, and selects
// the next one round-robin style.
inline uint next_worker();
inline G1FullGCCompactionPoint* next_compaction_point();
inline void add_to_compaction_queue(HeapRegion* hr);
public:
G1DetermineCompactionQueueClosure(G1FullCollector* collector);
inline bool do_heap_region(HeapRegion* hr) override;
};
class G1FullGCPrepareTask : public G1FullGCTask {
protected:
volatile bool _freed_regions;
volatile bool _has_free_compaction_targets;
HeapRegionClaimer _hrclaimer;
void set_freed_regions();
void set_has_free_compaction_targets();
public:
G1FullGCPrepareTask(G1FullCollector* collector);
void work(uint worker_id);
void prepare_serial_compaction();
bool has_freed_regions();
// After the Prepare phase, are there any unused (empty) regions (compaction
// targets) at the end of any compaction queues?
bool has_free_compaction_targets();
protected:
private:
class G1CalculatePointersClosure : public HeapRegionClosure {
private:
template<bool is_humongous>
void free_pinned_region(HeapRegion* hr);
protected:
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
G1CMBitMap* _bitmap;
G1FullGCCompactionPoint* _cp;
bool _regions_freed;
bool should_compact(HeapRegion* hr);
void prepare_for_compaction(HeapRegion* hr);
void prepare_for_compaction_work(G1FullGCCompactionPoint* cp, HeapRegion* hr);
void reset_region_metadata(HeapRegion* hr);
public:
G1CalculatePointersClosure(G1FullCollector* collector,
G1FullGCCompactionPoint* cp);
bool do_heap_region(HeapRegion* hr);
bool freed_regions();
};
class G1ResetMetadataClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
G1FullCollector* _collector;
void reset_region_metadata(HeapRegion* hr);
public:
G1ResetMetadataClosure(G1FullCollector* collector);
bool do_heap_region(HeapRegion* hr);
};
class G1PrepareCompactLiveClosure : public StackObj {
@@ -81,19 +109,20 @@ protected:
G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp);
size_t apply(oop object);
};
};
class G1RePrepareClosure : public StackObj {
G1FullGCCompactionPoint* _cp;
HeapRegion* _current;
// Closure to re-prepare objects in the serial compaction point queue regions for
// serial compaction.
class G1SerialRePrepareClosure : public StackObj {
G1FullGCCompactionPoint* _cp;
HeapRegion* _current;
public:
G1RePrepareClosure(G1FullGCCompactionPoint* hrcp,
HeapRegion* hr) :
_cp(hrcp),
_current(hr) { }
public:
G1SerialRePrepareClosure(G1FullGCCompactionPoint* hrcp, HeapRegion* hr) :
_cp(hrcp),
_current(hr) { }
size_t apply(oop object);
};
inline size_t apply(oop obj);
};
#endif // SHARE_GC_G1_G1FULLGCPREPARETASK_HPP

src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
#define SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/heapRegion.inline.hpp"
template<bool is_humongous>
void G1DetermineCompactionQueueClosure::free_pinned_region(HeapRegion* hr) {
if (is_humongous) {
_g1h->free_humongous_region(hr, nullptr);
} else {
_g1h->free_region(hr, nullptr);
}
_collector->set_free(hr->hrm_index());
add_to_compaction_queue(hr);
}
inline bool G1DetermineCompactionQueueClosure::should_compact(HeapRegion* hr) const {
// There is no need to iterate and forward objects in pinned regions ie.
// prepare them for compaction.
if (hr->is_pinned()) {
return false;
}
size_t live_words = _collector->live_words(hr->hrm_index());
size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
// High live ratio region will not be compacted.
return live_words <= live_words_threshold;
}
inline uint G1DetermineCompactionQueueClosure::next_worker() {
uint result = _cur_worker;
_cur_worker = (_cur_worker + 1) % _collector->workers();
return result;
}
inline G1FullGCCompactionPoint* G1DetermineCompactionQueueClosure::next_compaction_point() {
return _collector->compaction_point(next_worker());
}
inline void G1DetermineCompactionQueueClosure::add_to_compaction_queue(HeapRegion* hr) {
hr->set_compaction_top(hr->bottom());
G1FullGCCompactionPoint* cp = next_compaction_point();
if (!cp->is_initialized()) {
cp->initialize(hr, true);
}
// Add region to the compaction queue.
cp->add(hr);
}
inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
if (should_compact(hr)) {
assert(!hr->is_humongous(), "moving humongous objects not supported.");
add_to_compaction_queue(hr);
} else {
assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
if (hr->is_humongous()) {
oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
bool is_empty = !_collector->mark_bitmap()->is_marked(obj);
if (is_empty) {
free_pinned_region<true>(hr);
}
} else if (hr->is_open_archive()) {
bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
if (is_empty) {
free_pinned_region<false>(hr);
}
} else if (hr->is_closed_archive()) {
// nothing to do with closed archive region
} else {
assert(MarkSweepDeadRatio > 0,
"only skip compaction for other regions when MarkSweepDeadRatio > 0");
// Too many live objects in the region; skip compacting it.
_collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
hr->hrm_index(), _collector->live_words(hr->hrm_index()));
}
}
return false;
}
inline size_t G1SerialRePrepareClosure::apply(oop obj) {
// We only re-prepare objects forwarded within the current region, so
// skip objects that are already forwarded to another region.
if (obj->is_forwarded() && !_current->is_in(obj->forwardee())) {
return obj->size();
}
// Get size and forward.
size_t size = obj->size();
_cp->forward(obj, size);
return size;
}
#endif // SHARE_GC_G1_G1FULLGCPREPARETASK_INLINE_HPP

src/hotspot/share/gc/g1/g1FullGCScope.cpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,6 @@ G1FullGCTracer* G1FullGCScope::tracer() {
return &_tracer;
}
size_t G1FullGCScope::region_compaction_threshold() {
size_t G1FullGCScope::region_compaction_threshold() const {
return _region_compaction_threshold;
}

src/hotspot/share/gc/g1/g1FullGCScope.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@ public:
STWGCTimer* timer();
G1FullGCTracer* tracer();
G1HeapTransition* heap_transition();
size_t region_compaction_threshold();
size_t region_compaction_threshold() const;
};
#endif // SHARE_GC_G1_G1FULLGCSCOPE_HPP