6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
Deprecated HandlePromotionFailure, removing the ability to turn off that feature; did away with the one-epoch look-ahead when deciding whether a scavenge is likely to fail, relying instead on current data.
Reviewed-by: jmasa, johnc, poonam
parent 554e77efb4
commit cbc7f8756a
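In outline, the new ergonomics drop the HandlePromotionFailure switch and the dilatation-factor look-ahead: at decision time the collector simply asks whether the space currently available in the old generation covers either the padded average of recent promotions or the worst case in which everything currently used in the young generation survives. The sketch below only illustrates that heuristic under simplified assumptions (OldGenModel and the free-standing functions are stand-ins, not the actual HotSpot classes in the hunks that follow):

// Illustrative sketch of the new "use current data" heuristic; not HotSpot code.
#include <cstddef>

struct OldGenModel {
  std::size_t max_available;        // free + uncommitted space in the old gen (bytes)
  std::size_t padded_avg_promoted;  // padded average of recently promoted bytes
};

// Promotion is deemed safe if the space available right now covers either the
// recent promotion average or the worst case (all used young-gen bytes survive).
bool promotion_attempt_is_safe(const OldGenModel& old_gen,
                               std::size_t young_used_bytes) {
  const std::size_t available = old_gen.max_available;
  return available >= old_gen.padded_avg_promoted ||
         available >= young_used_bytes;
}

// The young generation now delegates directly to the test above; if it says
// "not safe", the heap records incremental_collection_failed() and the young
// generation passes the collection on to the next generation.
bool collection_attempt_is_safe(const OldGenModel& old_gen,
                                std::size_t young_used_bytes) {
  return promotion_attempt_is_safe(old_gen, young_used_bytes);
}

In the actual hunks, TenuredGeneration measures availability as max_contiguous_available() while ConcurrentMarkSweepGeneration uses max_available(); otherwise the two tests are the same.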
@@ -354,12 +354,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-        expected_promotion);
-  }
+  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
     // for the next minor collection.  Use the padded average as
@@ -865,57 +861,18 @@ size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
 
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  // This is the most conservative test.  Full promotion is
-  // guaranteed if this is used.  The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) {  // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t)
-        gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
 
 // At a promotion failure dump information on block layout in heap
@@ -6091,23 +6048,14 @@ void CMSCollector::sweep(bool asynch) {
   assert(_collectorState == Resizing, "Change of collector state to"
     " Resizing must be done under the freelistLocks (plural)");
 
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
+  // Now that sweeping has been completed, we clear
+  // the incremental_collection_failed flag,
   // thus inviting a younger gen collection to promote into
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
-
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-         "Resetting of incremental_collection_will_fail flag"
-         " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
+  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
 }
 
@@ -1185,8 +1185,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
   // Inform this (non-young) generation that a promotion failure was
   // encountered during a collection of a younger generation that
@@ -846,7 +846,7 @@ void ParNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -935,8 +935,6 @@ void ParNewGeneration::collect(bool full,
 
     assert(to()->is_empty(), "to space should be empty now");
   } else {
-    assert(HandlePromotionFailure,
-      "Should only be here if promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -947,7 +945,7 @@ void ParNewGeneration::collect(bool full,
     // All the spaces are in play for mark-sweep.
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
 
@@ -1092,11 +1090,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
                                         old, m, sz);
 
   if (new_obj == NULL) {
-    if (!HandlePromotionFailure) {
-      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-      // is incorrectly set. In any case, its seriously wrong to be here!
-      vm_exit_out_of_memory(sz*wordSize, "promotion");
-    }
     // promotion failed, forward to self
     _promotion_failed = true;
     new_obj = old;
@@ -1206,12 +1199,6 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
                                         old, m, sz);
 
   if (new_obj == NULL) {
-    if (!HandlePromotionFailure) {
-      // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-      // flag is incorrectly set. In any case, its seriously wrong to be
-      // here!
-      vm_exit_out_of_memory(sz*wordSize, "promotion");
-    }
     // promotion failed, forward to self
     forward_ptr = old->forward_to_atomic(old);
     new_obj = old;
@@ -659,9 +659,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
     }
     return result;   // could be null if we are out of space
   } else if (!gch->incremental_collection_will_fail()) {
-    // The gc_prologues have not executed yet.  The value
-    // for incremental_collection_will_fail() is the remanent
-    // of the last collection.
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@@ -739,9 +736,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   return    (word_size > heap_word_size(gen0_capacity))
-         || (GC_locker::is_active_and_needs_gc())
-         || (   gch->last_incremental_collection_failed()
-             && gch->incremental_collection_will_fail());
+         || GC_locker::is_active_and_needs_gc()
+         || gch->incremental_collection_failed();
 }
 
 
@@ -510,7 +510,7 @@ void DefNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -596,9 +596,8 @@ void DefNewGeneration::collect(bool full,
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-      "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -613,7 +612,7 @@ void DefNewGeneration::collect(bool full,
     // and from-space.
     swap_spaces();   // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
 
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@@ -700,12 +699,6 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
-
       handle_promotion_failure(old);
       return old;
     }
@@ -812,31 +805,7 @@ bool DefNewGeneration::collection_attempt_is_safe() {
     assert(_next_gen != NULL,
            "This must be the youngest gen, and not the only gen");
   }
-
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection.  The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-
-  // worst_case_evacuation is based on "used()".  For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation.  In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
@@ -845,14 +814,17 @@ void DefNewGeneration::gc_epilogue(bool full) {
   // a minimum at the end of a collection.  If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
-  } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
+  if (full) {
+    assert(!GC_locker::is_active(), "We should not be executing here");
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
     }
+  } else {
+    assert(!gch->incremental_collection_failed(), "Error");
   }
 
   if (ZapUnusedHeapArea) {
@@ -82,12 +82,6 @@ protected:
   Stack<oop>     _objs_with_preserved_marks;
   Stack<markOop> _preserved_marks_of_objs;
 
-  // Returns true if the collection can be safely attempted.
-  // If this method returns false, a collection is not
-  // guaranteed to fail but the system may not be able
-  // to recover from the failure.
-  bool collection_attempt_is_safe();
-
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
   void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
@@ -304,6 +298,14 @@ protected:
 
   // GC support
   virtual void compute_new_size();
+
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost. Override superclass's implementation.
+  virtual bool collection_attempt_is_safe();
+
   virtual void collect(bool full,
                        bool clear_all_soft_refs,
                        size_t size,
@@ -142,8 +142,7 @@ jint GenCollectedHeap::initialize() {
   }
   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
 
-  clear_incremental_collection_will_fail();
-  clear_last_incremental_collection_failed();
+  clear_incremental_collection_failed();
 
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
@@ -1347,17 +1346,6 @@ class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 };
 
 void GenCollectedHeap::gc_epilogue(bool full) {
-  // Remember if a partial collection of the heap failed, and
-  // we did a complete collection.
-  if (full && incremental_collection_will_fail()) {
-    set_last_incremental_collection_failed();
-  } else {
-    clear_last_incremental_collection_failed();
-  }
-  // Clear the flag, if set; the generation gc_epilogues will set the
-  // flag again if the condition persists despite the collection.
-  clear_incremental_collection_will_fail();
-
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
@@ -62,11 +62,10 @@ public:
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
 
-  // If a generation would bail out of an incremental collection,
-  // it sets this flag.  If the flag is set, satisfy_failed_allocation
-  // will attempt allocating in all generations before doing a full GC.
-  bool _incremental_collection_will_fail;
-  bool _last_incremental_collection_failed;
+  // Indicates that the most recent previous incremental collection failed.
+  // The flag is cleared when an action is taken that might clear the
+  // condition that caused that incremental collection to fail.
+  bool _incremental_collection_failed;
 
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
@@ -469,26 +468,26 @@ public:
   // call to "save_marks".
   bool no_allocs_since_save_marks(int level);
 
-  // If a generation bails out of an incremental collection,
-  // it sets this flag.
+  // Returns true if an incremental collection is likely to fail.
   bool incremental_collection_will_fail() {
-    return _incremental_collection_will_fail;
-  }
-  void set_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = true;
-  }
-  void clear_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = false;
+    // Assumes a 2-generation system; the first disjunct remembers if an
+    // incremental collection failed, even when we thought (second disjunct)
+    // that it would not.
+    assert(heap()->collector_policy()->is_two_generation_policy(),
+           "the following definition may not be suitable for an n(>2)-generation system");
+    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
   }
 
-  bool last_incremental_collection_failed() const {
-    return _last_incremental_collection_failed;
+  // If a generation bails out of an incremental collection,
+  // it sets this flag.
+  bool incremental_collection_failed() const {
+    return _incremental_collection_failed;
   }
-  void set_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = true;
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
  }
-  void clear_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = false;
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
   }
 
   // Promotion of obj into gen failed. Try to promote obj to higher non-perm
@@ -165,15 +165,16 @@ size_t Generation::max_contiguous_available() const {
   return max;
 }
 
-bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                           bool not_used) const {
+bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  bool   res = (available >= max_promotion_in_bytes);
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
-      " contiguous_available: " SIZE_FORMAT
-      " promotion_in_bytes: " SIZE_FORMAT,
-      max_contiguous_available(), promotion_in_bytes);
+    gclog_or_tty->print_cr(
+      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      max_promotion_in_bytes);
   }
-  return max_contiguous_available() >= promotion_in_bytes;
+  return res;
 }
 
 // Ignores "ref" and calls allocate().
@@ -173,15 +173,11 @@ class Generation: public CHeapObj {
   // The largest number of contiguous free bytes in this or any higher generation.
   virtual size_t max_contiguous_available() const;
 
-  // Returns true if promotions of the specified amount can
-  // be attempted safely (without a vm failure).
+  // Returns true if promotions of the specified amount are
+  // likely to succeed without a promotion failure.
   // Promotion of the full amount is not guaranteed but
-  // can be attempted.
-  // younger_handles_promotion_failure
-  // is true if the younger generation handles a promotion
-  // failure.
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  // might be attempted in the worst case.
+  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
 
   // For a non-young generation, this interface can be used to inform a
   // generation that a promotion attempt into that generation failed.
@@ -358,6 +354,16 @@ class Generation: public CHeapObj {
     return (full || should_allocate(word_size, is_tlab));
   }
 
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost.
+  virtual bool collection_attempt_is_safe() {
+    guarantee(false, "Are you sure you want to call this method?");
+    return true;
+  }
+
   // Perform a garbage collection.
   // If full is true attempt a full garbage collection of this generation.
   // Otherwise, attempting to (at least) free enough space to support an
@@ -419,29 +419,16 @@ void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
 void TenuredGeneration::verify_alloc_buffers_clean() {}
 #endif // SERIALGC
 
-bool TenuredGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-
-  bool result = max_contiguous_available() >= max_promotion_in_bytes;
-
-  if (younger_handles_promotion_failure && !result) {
-    result = max_contiguous_available() >=
-      (size_t) gc_stats()->avg_promoted()->padded_average();
-    if (PrintGC && Verbose && result) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-          " contiguous_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_contiguous_available(),
-          gc_stats()->avg_promoted()->padded_average());
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-          " contiguous_available: " SIZE_FORMAT
-          " promotion_in_bytes: " SIZE_FORMAT,
-          max_contiguous_available(), max_promotion_in_bytes);
-    }
-  }
-  return result;
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }
@@ -101,8 +101,7 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
 
   virtual void update_gc_stats(int level, bool full);
 
-  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
 
   void verify_alloc_buffers_clean();
 };
@@ -185,6 +185,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
                            JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { "UseDepthFirstScavengeOrder",
                            JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
+  { "HandlePromotionFailure",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "MaxLiveObjectEvacuationRatio",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -1722,8 +1726,6 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }
 
-  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
-                              "MaxLiveObjectEvacuationRatio");
   status = status && verify_percentage(AdaptiveSizePolicyWeight,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
@@ -1786,10 +1786,6 @@ class CommandLineFlags {
   notproduct(bool, GCALotAtAllSafepoints, false,                            \
           "Enforce ScavengeALot/GCALot at all potential safepoints")        \
                                                                             \
-  product(bool, HandlePromotionFailure, true,                               \
-          "The youngest generation collection does not require "            \
-          "a guarantee of full promotion of all live objects.")             \
-                                                                            \
   product(bool, PrintPromotionFailure, false,                               \
           "Print additional diagnostic information following "              \
           " promotion failure")                                             \
@@ -3003,9 +2999,6 @@ class CommandLineFlags {
   product(intx, NewRatio, 2,                                                \
           "Ratio of new/old generation sizes")                              \
                                                                             \
-  product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
-          "Max percent of eden objects that will be live at scavenge")      \
-                                                                            \
   product_pd(uintx, NewSizeThreadIncrease,                                  \
           "Additional size added to desired new generation size per "       \
           "non-daemon thread (in bytes)")                                   \