4957990: Perm heap bloat in JVM

Treat ProfileData in MDO's as a source of weak, not strong, roots. Fixes the bug for stop-world collection -- the case of concurrent collection will be fixed separately.
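In outline: receiver-klass oops recorded in a method's ProfileData rows previously kept those klasses alive (strong roots), bloating the perm gen; after this change the MDO is only memoized during strong marking, and a new post-marking pass clears any rows whose klass did not survive. The toy program below is a hypothetical model of that idea (Klass, ProfileRow, and MethodData here are invented stand-ins, not the HotSpot types); it mirrors the ReceiverTypeData::follow_weak_refs()/clear_row() logic added later in this patch.

#include <cstdio>
#include <vector>

// Invented stand-in for a class that the collector marks.
struct Klass { bool marked; };

// One receiver row of a virtual-call type profile.
struct ProfileRow { Klass* receiver; unsigned count; };

struct MethodData {
  std::vector<ProfileRow> rows;

  // Weak-root pass, run after strong marking: a profile row must not
  // keep its receiver klass alive, so clear any row whose klass was
  // not otherwise reached.
  void follow_weak_refs() {
    for (ProfileRow& r : rows) {
      if (r.receiver != nullptr && !r.receiver->marked) {
        r.receiver = nullptr;  // analogous to clear_row(): drop the klass...
        r.count = 0;           // ...and its count, so the dead klass can unload
      }
    }
  }
};

int main() {
  Klass live{true}, dead{false};
  MethodData mdo{{{&live, 10}, {&dead, 3}}};
  mdo.follow_weak_refs();
  std::printf("live row kept: %d, dead row kept: %d\n",
              mdo.rows[0].receiver != nullptr,
              mdo.rows[1].receiver != nullptr);
  // prints: live row kept: 1, dead row kept: 0
  return 0;
}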

Reviewed-by: jcoomes, jmasa, kvn, never
Author: Y. Srinivas Ramakrishna, 2009-09-02 00:04:29 -07:00
parent 2491751525
commit c6763b5bad
27 changed files with 385 additions and 65 deletions

View File

@@ -1079,6 +1079,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
           this, (address)_method, (address)cause);
       cause->klass()->print();
     }
+    // Unlink the osr method, so we do not look this up again
+    if (is_osr_method()) {
+      invalidate_osr_method();
+    }
     // If _method is already NULL the methodOop is about to be unloaded,
     // so we don't have to break the cycle. Note that it is possible to
     // have the methodOop live here, in case we unload the nmethod because
@@ -1148,7 +1152,7 @@ void nmethod::make_not_entrant_or_zombie(int state) {
   // will never be used anymore. That the nmethods only gets removed when class unloading
   // happens, make life much simpler, since the nmethods are not just going to disappear
   // out of the blue.
-  if (is_osr_only_method()) {
+  if (is_osr_method()) {
     if (osr_entry_bci() != InvalidOSREntryBci) {
       // only log this once
       log_state_change(state);
@@ -1520,6 +1524,17 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
 #endif // !PRODUCT
 }
 
+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// do_nmethods() method in the closure "f" below -- which
+// answers "yes" in the first case, and "no" in the second
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
 void nmethod::oops_do(OopClosure* f) {
   // make sure the oops ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
@@ -1538,19 +1553,25 @@ void nmethod::oops_do(OopClosure* f) {
 
   // Compiled code
   f->do_oop((oop*) &_method);
-  ExceptionCache* ec = exception_cache();
-  while(ec != NULL) {
-    f->do_oop((oop*)ec->exception_type_addr());
-    ec = ec->next();
-  }
+  if (!f->do_nmethods()) {
+    // weak roots processing phase -- update ExceptionCache oops
+    ExceptionCache* ec = exception_cache();
+    while(ec != NULL) {
+      f->do_oop((oop*)ec->exception_type_addr());
+      ec = ec->next();
+    }
+  } // Else strong roots phase -- skip oops in ExceptionCache
 
   RelocIterator iter(this, low_boundary);
   while (iter.next()) {
     if (iter.type() == relocInfo::oop_type ) {
       oop_Relocation* r = iter.oop_reloc();
       // In this loop, we must only follow those oops directly embedded in
       // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
+      assert(1 == (r->oop_is_immediate()) +
+             (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+             "oop must be found in exactly one place");
       if (r->oop_is_immediate() && r->oop_value() != NULL) {
         f->do_oop(r->oop_addr());
       }

View File

@@ -314,7 +314,6 @@ class nmethod : public CodeBlob {
   bool is_java_method() const { return !method()->is_native(); }
   bool is_native_method() const { return method()->is_native(); }
   bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
-  bool is_osr_only_method() const { return is_osr_method(); }
   bool is_compiled_by_c1() const;
   bool is_compiled_by_c2() const;

View File

@@ -155,6 +155,12 @@ class PushAndMarkClosure: public KlassRememberingOopClosure {
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
 };
 
 // In the parallel case, the revisit stack, the bit map and the
@@ -185,6 +191,12 @@ class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -303,6 +315,13 @@ class PushOrMarkClosure: public KlassRememberingOopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -340,6 +359,13 @@ class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
+  }
+  virtual void remember_mdo(DataLayout* v);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:

View File

@@ -51,13 +51,22 @@ void KlassRememberingOopClosure::remember_klass(Klass* k) {
   check_remember_klasses();
 }
 
+inline void PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
 void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
   if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revisit stack overflow in PushOrMarkClosure");
+    fatal("Revisit stack overflow in Par_KlassRememberingOopClosure");
   }
   check_remember_klasses();
 }
 
+inline void Par_PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
 inline void PushOrMarkClosure::do_yield_check() {
   _parent->do_yield_check();
 }

View File

@@ -7632,6 +7632,14 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
 
+void PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
+void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
 void CMSPrecleanRefsYieldClosure::do_yield_work() {
   DEBUG_ONLY(RememberKlassesChecker mux(false);)
   Mutex* bml = _collector->bitMapLock();

View File

@@ -5064,7 +5064,7 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
     return hr->is_in(p);
   }
 }
-#endif // PRODUCT
+#endif // !PRODUCT
 
 void G1CollectedHeap::g1_unimplemented() {
   // Unimplemented();

View File

@@ -859,7 +859,7 @@ public:
     return _g1_committed;
   }
 
-  NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; )
+  NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;)
 
   // Dirty card table entries covering a list of young regions.
   void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);

View File

@@ -102,9 +102,14 @@ void G1MarkSweep::allocate_stacks() {
   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
 
-  size_t size = SystemDictionary::number_of_classes() * 2;
+  int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
+    new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
+  // for now until we have a chance to work out a more optimal setting.
+  GenMarkSweep::_revisit_mdo_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }
 
@@ -139,13 +144,18 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   CodeCache::do_unloading(&GenMarkSweep::is_alive,
                           &GenMarkSweep::keep_alive,
                           purged_class);
   GenMarkSweep::follow_stack();
 
   // Update subklass/sibling/implementor links of live klasses
   GenMarkSweep::follow_weak_klass_links();
   assert(GenMarkSweep::_marking_stack->is_empty(),
          "stack should be empty by now");
 
+  // Visit memoized MDO's and clear any unmarked weak refs
+  GenMarkSweep::follow_mdo_weak_refs();
+  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&GenMarkSweep::is_alive);
   StringTable::unlink(&GenMarkSweep::is_alive);
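As an illustrative calculation (not from the patch itself): with size = 2 * number_of_classes(), a run with 5,000 loaded classes preallocates 10,000 Klass* slots and size*2 = 20,000 DataLayout* slots, whereas the (#klass/k)^2 heuristic mentioned in the sizing comment above, with k ~ 10, would suggest (5000/10)^2 = 250,000 entries. Since these GrowableArrays grow on demand, an undersized initial capacity costs only reallocation, not correctness.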

View File

@@ -253,10 +253,11 @@ psParallelCompact.cpp  gcCause.hpp
 psParallelCompact.cpp  gcLocker.inline.hpp
 psParallelCompact.cpp  gcTaskManager.hpp
 psParallelCompact.cpp  isGCActiveMark.hpp
+psParallelCompact.cpp  management.hpp
+psParallelCompact.cpp  memoryService.hpp
+psParallelCompact.cpp  methodDataOop.hpp
 psParallelCompact.cpp  oop.inline.hpp
 psParallelCompact.cpp  oop.pcgc.inline.hpp
-psParallelCompact.cpp  memoryService.hpp
-psParallelCompact.cpp  management.hpp
 psParallelCompact.cpp  parallelScavengeHeap.inline.hpp
 psParallelCompact.cpp  pcTasks.hpp
 psParallelCompact.cpp  psMarkSweep.hpp

View File

@@ -58,9 +58,8 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  // cm->allocate_stacks();
   assert(cm->stacks_have_been_allocated(),
          "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
 
   switch (_root_type) {
@@ -129,9 +128,8 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  // cm->allocate_stacks();
   assert(cm->stacks_have_been_allocated(),
          "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),

View File

@@ -61,12 +61,16 @@ ParCompactionManager::ParCompactionManager() :
   int size =
     (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
+  // have to do for now until we are able to investigate a more optimal setting.
+  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }
 
 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
   // shared with all instances of ParCompactionManager
   // should not be deallocated.
@@ -195,6 +199,7 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
 void ParCompactionManager::reset() {
   for(uint i=0; i<ParallelGCThreads+1; i++) {
     manager_array(i)->revisit_klass_stack()->clear();
+    manager_array(i)->revisit_mdo_stack()->clear();
   }
 }
 
@@ -296,6 +301,7 @@ void ParCompactionManager::drain_region_stacks() {
 #ifdef ASSERT
 bool ParCompactionManager::stacks_have_been_allocated() {
-  return (revisit_klass_stack()->data_addr() != NULL);
+  return (revisit_klass_stack()->data_addr() != NULL &&
+          revisit_mdo_stack()->data_addr() != NULL);
 }
 #endif

View File

@@ -93,6 +93,7 @@ class ParCompactionManager : public CHeapObj {
 #if 1  // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>* _revisit_klass_stack;
+  GrowableArray<DataLayout*>* _revisit_mdo_stack;
 #endif
   static ParMarkBitMap* _mark_bitmap;
 
@@ -154,6 +155,7 @@ class ParCompactionManager : public CHeapObj {
 #if 1
   // Probably stays as a growable array
   GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
+  GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
 #endif
 
   // Save oop for later processing.  Must not fail.

View File

@@ -482,6 +482,9 @@ void PSMarkSweep::allocate_stacks() {
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
+  // now until we investigate a more optimal setting.
+  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }
 
@@ -495,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
   delete _marking_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
 }
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@@ -540,6 +544,10 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   follow_weak_klass_links();
   assert(_marking_stack->is_empty(), "just drained");
 
+  // Visit memoized mdo's and clear unmarked weak refs
+  follow_mdo_weak_refs();
+  assert(_marking_stack->is_empty(), "just drained");
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());

View File

@@ -2378,7 +2378,10 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
 
   // Update subklass/sibling/implementor links of live klasses
   // revisit_klass_stack is used in follow_weak_klass_links().
-  follow_weak_klass_links(cm);
+  follow_weak_klass_links();
+
+  // Revisit memoized MDO's and clear any unmarked weak refs
+  follow_mdo_weak_refs();
 
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
@@ -2721,17 +2724,25 @@ void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
 }
 
 void
-PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
+PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
-  for (uint i = 0; i < ParallelGCThreads+1; i++) {
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+  }
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
     KeepAliveClosure keep_alive_closure(cm);
-    for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
-      cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
+    int length = cm->revisit_klass_stack()->length();
+    if (PrintRevisitStats) {
+      gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
+    }
+    for (int j = 0; j < length; j++) {
+      cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
         is_alive_closure(),
         &keep_alive_closure);
     }
+    // revisit_klass_stack is cleared in reset()
     follow_stack(cm);
   }
 }
@@ -2741,6 +2752,35 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
   cm->revisit_klass_stack()->push(k);
 }
 
+#if ( defined(COMPILER1) || defined(COMPILER2) )
+void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
+  cm->revisit_mdo_stack()->push(p);
+}
+
+void PSParallelCompact::follow_mdo_weak_refs() {
+  // All strongly reachable oops have been marked at this point;
+  // we can visit and clear any weak references from MDO's which
+  // we memoized during the strong marking phase.
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+  }
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
+    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
+    GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
+    int length = rms->length();
+    if (PrintRevisitStats) {
+      gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
+    }
+    for (int j = 0; j < length; j++) {
+      rms->at(j)->follow_weak_refs(is_alive_closure());
+    }
+    // revisit_mdo_stack is cleared in reset()
+    follow_stack(cm);
+  }
+}
+#endif // ( COMPILER1 || COMPILER2 )
+
 #ifdef VALIDATE_MARK_SWEEP
 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {

View File

@@ -901,7 +901,8 @@ class PSParallelCompact : AllStatic {
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
   static void follow_stack(ParCompactionManager* cm);
-  static void follow_weak_klass_links(ParCompactionManager* cm);
+  static void follow_weak_klass_links();
+  static void follow_mdo_weak_refs();
 
   template <class T> static inline void adjust_pointer(T* p, bool is_root);
   static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
@@ -1221,6 +1222,9 @@ class PSParallelCompact : AllStatic {
   // Update subklass/sibling/implementor links at end of marking.
   static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
 
+  // Clear unmarked oops in MDOs at the end of marking.
+  static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
+
 #ifndef PRODUCT
   // Debugging support.
   static const char* space_names[last_space_id];

View File

@@ -27,6 +27,7 @@
 
 GrowableArray<oop>*     MarkSweep::_marking_stack       = NULL;
 GrowableArray<Klass*>*  MarkSweep::_revisit_klass_stack = NULL;
+GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL;
 
 GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;
 GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
@@ -62,12 +63,37 @@ void MarkSweep::revisit_weak_klass_link(Klass* k) {
 void MarkSweep::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
+  }
   for (int i = 0; i < _revisit_klass_stack->length(); i++) {
     _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
   }
   follow_stack();
 }
 
+#if ( defined(COMPILER1) || defined(COMPILER2) )
+void MarkSweep::revisit_mdo(DataLayout* p) {
+  _revisit_mdo_stack->push(p);
+}
+
+void MarkSweep::follow_mdo_weak_refs() {
+  // All strongly reachable oops have been marked at this point;
+  // we can visit and clear any weak references from MDO's which
+  // we memoized during the strong marking phase.
+  assert(_marking_stack->is_empty(), "Marking stack should be empty");
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
+  }
+  for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
+    _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
+  }
+  follow_stack();
+}
+#endif // ( COMPILER1 || COMPILER2 )
+
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }

View File

@@ -23,6 +23,7 @@
  */
 
 class ReferenceProcessor;
+class DataLayout;
 
 // MarkSweep takes care of global mark-compact garbage collection for a
 // GenCollectedHeap using a four-phase pointer forwarding algorithm.  All
@@ -65,6 +66,8 @@ class MarkSweep : AllStatic {
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
+    virtual const bool should_remember_mdo() const { return true; }
+    virtual void remember_mdo(DataLayout* p) { MarkSweep::revisit_mdo(p); }
   };
 
   class FollowStackClosure: public VoidClosure {
@@ -103,6 +106,7 @@ class MarkSweep : AllStatic {
   friend class KeepAliveClosure;
   friend class VM_MarkSweep;
   friend void marksweep_init();
+  friend class DataLayout;
 
   //
   // Vars
@@ -112,6 +116,8 @@ class MarkSweep : AllStatic {
   static GrowableArray<oop>*             _marking_stack;
   // Stack for live klasses to revisit at end of marking phase
   static GrowableArray<Klass*>*          _revisit_klass_stack;
+  // Set (stack) of MDO's to revisit at end of marking phase
+  static GrowableArray<DataLayout*>*     _revisit_mdo_stack;
 
   // Space for storing/restoring mark word
   static GrowableArray<markOop>*         _preserved_mark_stack;
@@ -157,6 +163,10 @@ class MarkSweep : AllStatic {
   // Class unloading. Update subklass/sibling/implementor links at end of marking phase.
   static void follow_weak_klass_links();
 
+  // Class unloading. Clear weak refs in MDO's (ProfileData)
+  // at the end of the marking phase.
+  static void follow_mdo_weak_refs();
+
   // Debugging
   static void trace(const char* msg) PRODUCT_RETURN;
 
@@ -213,7 +223,10 @@ class MarkSweep : AllStatic {
 #endif
 
   // Call backs for class unloading
-  static void revisit_weak_klass_link(Klass* k);  // Update subklass/sibling/implementor links at end of marking.
+  // Update subklass/sibling/implementor links at end of marking.
+  static void revisit_weak_klass_link(Klass* k);
+  // For weak refs clearing in MDO's
+  static void revisit_mdo(DataLayout* p);
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {

View File

@@ -239,6 +239,9 @@ class CollectedHeap : public CHeapObj {
     return p == NULL || is_in_closed_subset(p);
   }
 
+  // XXX is_permanent() and is_in_permanent() should be better named
+  // to distinguish one from the other.
+
   // Returns "TRUE" if "p" is allocated as "permanent" data.
   // If the heap does not use "permanent" data, returns the same
   // value is_in_reserved() would return.
@@ -247,13 +250,17 @@ class CollectedHeap : public CHeapObj {
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+  bool is_in_permanent_or_null(const void *p) const {
+    return p == NULL || is_in_permanent(p);
+  }
+
   // Returns "TRUE" if "p" is in the committed area of "permanent" data.
   // If the heap does not use "permanent" data, returns the same
   // value is_in() would return.
   virtual bool is_permanent(const void *p) const = 0;
 
-  bool is_in_permanent_or_null(const void *p) const {
-    return p == NULL || is_in_permanent(p);
+  bool is_permanent_or_null(const void *p) const {
+    return p == NULL || is_permanent(p);
   }
 
   // Returns "TRUE" if "p" is a method oop in the

View File

@@ -2684,6 +2684,7 @@ markOop.inline.hpp  klassOop.hpp
 markOop.inline.hpp  markOop.hpp
 
 markSweep.cpp  compileBroker.hpp
+markSweep.cpp  methodDataOop.hpp
 
 markSweep.hpp  collectedHeap.hpp

View File

@@ -849,8 +849,25 @@ static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
 }
 #endif // !PRODUCT
 
+nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
+  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
+  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
+  if (branch_bcp != NULL && nm != NULL) {
+    // This was a successful request for an OSR nmethod. Because
+    // frequency_counter_overflow_inner ends with a safepoint check,
+    // nm could have been unloaded so look it up again. It's unsafe
+    // to examine nm directly since it might have been freed and used
+    // for something else.
+    frame fr = thread->last_frame();
+    methodOop method = fr.interpreter_frame_method();
+    int bci = method->bci_from(fr.interpreter_frame_bcp());
+    nm = method->lookup_osr_nmethod_for(bci);
+  }
+  return nm;
+}
+
 IRT_ENTRY(nmethod*,
-          InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp))
+          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
   // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
   // flag, in case this method triggers classloading which will call into Java.
   UnlockFlagSaver fs(thread);
@@ -923,7 +940,6 @@ IRT_ENTRY(nmethod*,
       }
       BiasedLocking::revoke(objects_to_revoke);
     }
-
     return osr_nm;
   }
 }

View File

@@ -49,6 +49,9 @@ class InterpreterRuntime: AllStatic {
   static ConstantPoolCacheEntry* cache_entry(JavaThread *thread)  { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
   static void      note_trap(JavaThread *thread, int reason, TRAPS);
 
+  // Inner work method for Interpreter's frequency counter overflow
+  static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);
+
  public:
   // Constants
   static void    ldc    (JavaThread* thread, bool wide);

View File

@@ -162,6 +162,9 @@ void GenMarkSweep::allocate_stacks() {
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do for
+  // now until we have had a chance to investigate a more optimal setting.
+  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -206,6 +209,7 @@ void GenMarkSweep::deallocate_stacks() {
   delete _marking_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -262,6 +266,10 @@ void GenMarkSweep::mark_sweep_phase1(int level,
   follow_weak_klass_links();
   assert(_marking_stack->is_empty(), "just drained");
 
+  // Visit memoized MDO's and clear any unmarked weak refs
+  follow_mdo_weak_refs();
+  assert(_marking_stack->is_empty(), "just drained");
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&is_alive);
   StringTable::unlink(&is_alive);

View File

@@ -25,6 +25,7 @@
 // The following classes are C++ `closures` for iterating over objects, roots and spaces
 
 class ReferenceProcessor;
+class DataLayout;
 
 // Closure provides abortability.
 
@@ -62,6 +63,12 @@ class OopClosure : public Closure {
   virtual void remember_klass(Klass* k) { /* do nothing */ }
 
+  // In support of post-processing of weak references in
+  // ProfileData (MethodDataOop) objects; see, for example,
+  // VirtualCallData::oop_iterate().
+  virtual const bool should_remember_mdo() const { return false; }
+  virtual void remember_mdo(DataLayout* v) { /* do nothing */ }
+
   // If "true", invoke on nmethods (when scanning compiled frames).
   virtual const bool do_nmethods() const { return false; }

View File

@@ -49,6 +49,12 @@ void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
   }
 }
 
+void DataLayout::follow_weak_refs(BoolObjectClosure* cl) {
+  ResourceMark m;
+  data_in()->follow_weak_refs(cl);
+}
+
 // ==================================================================
 // ProfileData
 //
@@ -145,42 +151,92 @@ void JumpData::print_data_on(outputStream* st) {
 // which are used to store a type profile for the receiver of the check.
 
 void ReceiverTypeData::follow_contents() {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      MarkSweep::mark_and_push(adr_receiver(row));
-    }
-  }
+  // This is a set of weak references that need
+  // to be followed at the end of the strong marking
+  // phase. Memoize this object so it can be visited
+  // in the weak roots processing phase.
+  MarkSweep::revisit_mdo(data());
 }
 
 #ifndef SERIALGC
 void ReceiverTypeData::follow_contents(ParCompactionManager* cm) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      PSParallelCompact::mark_and_push(cm, adr_receiver(row));
-    }
-  }
+  // This is a set of weak references that need
+  // to be followed at the end of the strong marking
+  // phase. Memoize this object so it can be visited
+  // in the weak roots processing phase.
+  PSParallelCompact::revisit_mdo(cm, data());
 }
 #endif // SERIALGC
 
 void ReceiverTypeData::oop_iterate(OopClosure* blk) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      blk->do_oop(adr_receiver(row));
+  if (blk->should_remember_mdo()) {
+    // This is a set of weak references that need
+    // to be followed at the end of the strong marking
+    // phase. Memoize this object so it can be visited
+    // in the weak roots processing phase.
+    blk->remember_mdo(data());
+  } else { // normal scan
+    for (uint row = 0; row < row_limit(); row++) {
+      if (receiver(row) != NULL) {
+        oop* adr = adr_receiver(row);
+        blk->do_oop(adr);
+      }
     }
   }
 }
 
 void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      oop* adr = adr_receiver(row);
-      if (mr.contains(adr)) {
-        blk->do_oop(adr);
-      }
-    }
-  }
+  // Currently, this interface is called only during card-scanning for
+  // a young gen gc, in which case this object cannot contribute anything,
+  // since it does not contain any references that cross out of
+  // the perm gen. However, for future more general use we allow
+  // the possibility of calling for instance from more general
+  // iterators (for example, a future regionalized perm gen for G1,
+  // or the possibility of moving some references out of perm in
+  // the case of other collectors). In that case, you will need
+  // to relax or remove some of the assertions below.
+#ifdef ASSERT
+  // Verify that none of the embedded oop references cross out of
+  // this generation.
+  for (uint row = 0; row < row_limit(); row++) {
+    if (receiver(row) != NULL) {
+      oop* adr = adr_receiver(row);
+      CollectedHeap* h = Universe::heap();
+      assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
+    }
+  }
+#endif // ASSERT
+  assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
+  return;   // Nothing to do, see comment above
+#if 0
+  if (blk->should_remember_mdo()) {
+    // This is a set of weak references that need
+    // to be followed at the end of the strong marking
+    // phase. Memoize this object so it can be visited
+    // in the weak roots processing phase.
+    blk->remember_mdo(data());
+  } else { // normal scan
+    for (uint row = 0; row < row_limit(); row++) {
+      if (receiver(row) != NULL) {
+        oop* adr = adr_receiver(row);
+        if (mr.contains(adr)) {
+          blk->do_oop(adr);
+        } else if ((HeapWord*)adr >= mr.end()) {
+          // Test that the current cursor and the two ends of the range
+          // that we may have skipped iterating over are monotonically ordered;
+          // this is just a paranoid assertion, just in case represetations
+          // should change in the future rendering the short-circuit return
+          // here invalid.
+          assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
+                 (row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
+          break; // remaining should be outside this mr too
+        }
+      }
+    }
+  }
+#endif
+}
 
 void ReceiverTypeData::adjust_pointers() {
   for (uint row = 0; row < row_limit(); row++) {
     if (receiver(row) != NULL) {
@@ -189,6 +245,15 @@ void ReceiverTypeData::adjust_pointers() {
   }
 }
 
+void ReceiverTypeData::follow_weak_refs(BoolObjectClosure* is_alive_cl) {
+  for (uint row = 0; row < row_limit(); row++) {
+    klassOop p = receiver(row);
+    if (p != NULL && !is_alive_cl->do_object_b(p)) {
+      clear_row(row);
+    }
+  }
+}
+
 #ifndef SERIALGC
 void ReceiverTypeData::update_pointers() {
   for (uint row = 0; row < row_limit(); row++) {
@@ -625,30 +690,33 @@ ProfileData* methodDataOopDesc::data_at(int data_index) {
     return NULL;
   }
   DataLayout* data_layout = data_layout_at(data_index);
+  return data_layout->data_in();
+}
 
-  switch (data_layout->tag()) {
+ProfileData* DataLayout::data_in() {
+  switch (tag()) {
   case DataLayout::no_tag:
   default:
     ShouldNotReachHere();
     return NULL;
   case DataLayout::bit_data_tag:
-    return new BitData(data_layout);
+    return new BitData(this);
   case DataLayout::counter_data_tag:
-    return new CounterData(data_layout);
+    return new CounterData(this);
   case DataLayout::jump_data_tag:
-    return new JumpData(data_layout);
+    return new JumpData(this);
   case DataLayout::receiver_type_data_tag:
-    return new ReceiverTypeData(data_layout);
+    return new ReceiverTypeData(this);
   case DataLayout::virtual_call_data_tag:
-    return new VirtualCallData(data_layout);
+    return new VirtualCallData(this);
   case DataLayout::ret_data_tag:
-    return new RetData(data_layout);
+    return new RetData(this);
   case DataLayout::branch_data_tag:
-    return new BranchData(data_layout);
+    return new BranchData(this);
   case DataLayout::multi_branch_data_tag:
-    return new MultiBranchData(data_layout);
+    return new MultiBranchData(this);
   case DataLayout::arg_info_data_tag:
-    return new ArgInfoData(data_layout);
+    return new ArgInfoData(this);
   };
 }

View File

@@ -55,6 +55,9 @@ class BytecodeStream;
 // with invocation counter incrementation. None of these races harm correct
 // execution of the compiled code.
 
+// forward decl
+class ProfileData;
+
 // DataLayout
 //
 // Overlay for generic profiling data.
@@ -231,6 +234,10 @@ public:
     temp._header._struct._flags = byte_constant;
     return temp._header._bits;
   }
+
+  // GC support
+  ProfileData* data_in();
+  void follow_weak_refs(BoolObjectClosure* cl);
 };
 
@@ -430,6 +437,7 @@ public:
   virtual void oop_iterate(OopClosure* blk) {}
   virtual void oop_iterate_m(OopClosure* blk, MemRegion mr) {}
   virtual void adjust_pointers() {}
+  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure) {}
 
 #ifndef SERIALGC
   // Parallel old support
@@ -667,11 +675,27 @@ public:
     return recv;
   }
 
+  void set_receiver(uint row, oop p) {
+    assert((uint)row < row_limit(), "oob");
+    set_oop_at(receiver_cell_index(row), p);
+  }
+
   uint receiver_count(uint row) {
     assert(row < row_limit(), "oob");
     return uint_at(receiver_count_cell_index(row));
   }
 
+  void set_receiver_count(uint row, uint count) {
+    assert(row < row_limit(), "oob");
+    set_uint_at(receiver_count_cell_index(row), count);
+  }
+
+  void clear_row(uint row) {
+    assert(row < row_limit(), "oob");
+    set_receiver(row, NULL);
+    set_receiver_count(row, 0);
+  }
+
   // Code generation support
   static ByteSize receiver_offset(uint row) {
     return cell_offset(receiver_cell_index(row));
@@ -688,6 +712,7 @@ public:
   virtual void oop_iterate(OopClosure* blk);
   virtual void oop_iterate_m(OopClosure* blk, MemRegion mr);
   virtual void adjust_pointers();
+  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure);
 
 #ifndef SERIALGC
   // Parallel old support

View File

@@ -1707,6 +1707,9 @@ class CommandLineFlags {
   product(bool, TLABStats, true,                                            \
           "Print various TLAB related information")                        \
                                                                             \
+  product(bool, PrintRevisitStats, false,                                   \
+          "Print revisit (klass and MDO) stack related information")       \
+                                                                            \
   product_pd(bool, NeverActAsServerClassMachine,                            \
           "Never act like a server-class machine")                         \
                                                                             \
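Since PrintRevisitStats is declared as a product flag, it can be enabled on the command line in the usual HotSpot way, e.g. java -XX:+PrintRevisitStats ..., which turns on the gclog_or_tty->print_cr() reporting added to follow_weak_klass_links() and follow_mdo_weak_refs() above (the rest of the command line here is illustrative).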

View File

@@ -125,8 +125,14 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     // there are no inline caches that referes to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
+      }
       nm->flush();
     } else {
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
+      }
       nm->mark_for_reclamation();
       _rescan = true;
     }
@@ -134,6 +140,9 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     // If there is no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
+      }
       nm->make_zombie();
       _rescan = true;
     } else {
@@ -146,7 +155,9 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
-    if (nm->is_osr_only_method()) {
+    if (PrintMethodFlushing && Verbose)
+      tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm);
+    if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       nm->flush();
     } else {