8203837: Split nmethod unloading from inline cache cleaning
Refactor cleaning of inline caches to happen after the GC do_unloading walk.

Reviewed-by: thartmann, eosterlund
parent f2a30dcb3e
commit 3e3414dbf3
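For orientation, here is a condensed sketch of the serial flow after this change, paraphrased from the CodeCache hunks below (not a verbatim copy of the patched sources): nmethod unloading and nmethod-cache cleaning become two separate passes over the code cache.

    // Sketch only: unloading first, cache cleaning second (serial case;
    // G1 keeps its parallel walk and does not call the second pass here).
    void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
      // Pass 1: unload nmethods whose oops are no longer reachable.
      CompiledMethodIterator iter;
      while (iter.next_alive()) {
        iter.method()->do_unloading(is_alive);
      }
      // Pass 2: with the full set of unloaded nmethods known, clean exception
      // caches, inline caches and IC stubs before the CLDG is purged.
      do_unloading_nmethod_caches(unloading_occurred);
    }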
@@ -75,7 +75,7 @@ address* AOTCompiledMethod::orig_pc_addr(const frame* fr) {
   return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
 }
 
-bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
   return false;
 }
 
@@ -245,7 +245,7 @@ bool AOTCompiledMethod::make_entrant() {
 // more conservative than for nmethods.
 void AOTCompiledMethod::flush_evol_dependents_on(InstanceKlass* dependee) {
   if (is_java_method()) {
-    cleanup_inline_caches();
+    clear_inline_caches();
     mark_for_deoptimization();
     make_not_entrant();
   }
 
@@ -284,8 +284,8 @@ private:
   bool is_aot_runtime_stub() const { return _method == NULL; }
 
 protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
-  virtual bool do_unloading_jvmci(bool unloading_occurred) { return false; }
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
+  virtual bool do_unloading_jvmci() { return false; }
 
 };
 
@@ -685,8 +685,15 @@ void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurre
   assert_locked_or_safepoint(CodeCache_lock);
   CompiledMethodIterator iter;
   while(iter.next_alive()) {
-    iter.method()->do_unloading(is_alive, unloading_occurred);
+    iter.method()->do_unloading(is_alive);
   }
+
+  // Now that all the unloaded nmethods are known, cleanup caches
+  // before CLDG is purged.
+  // This is another code cache walk but it is moved from gc_epilogue.
+  // G1 does a parallel walk of the nmethods so cleans them up
+  // as it goes and doesn't call this.
+  do_unloading_nmethod_caches(unloading_occurred);
 }
 
 void CodeCache::blobs_do(CodeBlobClosure* f) {
@@ -720,8 +727,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
     assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
 
     bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
-    if (TraceScavenge) {
-      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, cur,
+                         is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
     }
     if (is_live) {
       // Perform cur->oops_do(f), maybe just once per nmethod.
@@ -892,18 +902,26 @@ void CodeCache::verify_icholder_relocations() {
 #endif
 }
 
-void CodeCache::gc_prologue() {
-}
+void CodeCache::gc_prologue() { }
 
 void CodeCache::gc_epilogue() {
+  prune_scavenge_root_nmethods();
+}
+
+
+void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
   assert_locked_or_safepoint(CodeCache_lock);
-  NOT_DEBUG(if (needs_cache_clean())) {
+  // Even if classes are not unloaded, there may have been some nmethods that are
+  // unloaded because oops in them are no longer reachable.
+  NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
     CompiledMethodIterator iter;
     while(iter.next_alive()) {
       CompiledMethod* cm = iter.method();
       assert(!cm->is_unloaded(), "Tautology");
-      DEBUG_ONLY(if (needs_cache_clean())) {
-        cm->cleanup_inline_caches();
+      DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {
+        // Clean up both unloaded klasses from nmethods and unloaded nmethods
+        // from inline caches.
+        cm->unload_nmethod_caches(/*parallel*/false, class_unloading_occurred);
       }
       DEBUG_ONLY(cm->verify());
       DEBUG_ONLY(cm->verify_oop_relocations());
@@ -911,8 +929,6 @@ void CodeCache::gc_epilogue() {
   }
 
   set_needs_cache_clean(false);
-  prune_scavenge_root_nmethods();
-
   verify_icholder_relocations();
 }
 
@@ -168,9 +168,10 @@ class CodeCache : AllStatic {
   static void gc_epilogue();
   static void gc_prologue();
   static void verify_oops();
-  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
+  // If any oops are not marked this method unloads (i.e., breaks root links
   // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
   // to "true" iff some code got unloaded.
+  // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
 
@@ -223,8 +224,10 @@ class CodeCache : AllStatic {
 
   static bool needs_cache_clean() { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
 
   static void clear_inline_caches(); // clear all inline caches
-  static void cleanup_inline_caches();
+  static void cleanup_inline_caches(); // clean unloaded/zombie nmethods from inline caches
+  static void do_unloading_nmethod_caches(bool class_unloading_occurred); // clean all nmethod caches for unloading, including inline caches
 
   // Returns true if an own CodeHeap for the given CodeBlobType is available
   static bool heap_available(int code_blob_type);
 
@@ -552,7 +552,8 @@ void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const Com
 
 // ----------------------------------------------------------------------------
 
-void CompiledStaticCall::set_to_clean() {
+void CompiledStaticCall::set_to_clean(bool in_use) {
+  // in_use is unused but needed to match template function in CompiledMethod
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset call site
   MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
@@ -358,7 +358,7 @@ public:
   virtual address destination() const = 0;
 
   // Clean static call (will force resolving on next use)
-  void set_to_clean();
+  void set_to_clean(bool in_use = true);
 
   // Set state. The entry must be the same, as computed by compute_entry.
   // Computation and setting is split up, since the actions are separate during
@@ -28,6 +28,8 @@
 #include "code/scopeDesc.hpp"
 #include "code/codeCache.hpp"
 #include "interpreter/bytecode.inline.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.inline.hpp"
@@ -222,9 +224,7 @@ ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
                   pd->return_oop());
 }
 
-void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
-  assert_locked_or_safepoint(CompiledIC_lock);
-
+address CompiledMethod::oops_reloc_begin() const {
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes. If an oop in the old code was there, that oop
   // should not get GC'd. Skip the first few bytes of oops on
@@ -237,41 +237,7 @@ void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
   // This shouldn't matter, since oops of non-entrant methods are never used.
   // In fact, why are we bothering to look at oops in a non-entrant method??
   }
 
-  // Find all calls in an nmethod and clear the ones that point to non-entrant,
-  // zombie and unloaded nmethods.
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        // Ok, to lookup references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_compiled() ) {
-          CompiledMethod* nm = cb->as_compiled_method();
-          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-        if( cb != NULL && cb->is_compiled() ) {
-          CompiledMethod* cm = cb->as_compiled_method();
-          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
-            csc->set_to_clean();
-          }
-        }
-        break;
-      }
-      default:
-        break;
-    }
-  }
+  return low_boundary;
 }
 
 int CompiledMethod::verify_icholder_relocations() {
@@ -437,17 +403,15 @@ unsigned char CompiledMethod::unloading_clock() {
   return OrderAccess::load_acquire(&_unloading_clock);
 }
 
-// Processing of oop references should have been sufficient to keep
-// all strong references alive. Any weak references should have been
-// cleared as well. Visit all the metadata and ensure that it's
-// really alive.
-void CompiledMethod::verify_metadata_loaders(address low_boundary) {
+
+// static_stub_Relocations may have dangling references to
+// nmethods so trim them out here. Otherwise it looks like
+// compiled code is maintaining a link to dead metadata.
+void CompiledMethod::clean_ic_stubs() {
 #ifdef ASSERT
-  RelocIterator iter(this, low_boundary);
-  while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here. Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
+  address low_boundary = oops_reloc_begin();
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
     address static_call_addr = NULL;
     if (iter.type() == relocInfo::opt_virtual_call_type) {
       CompiledIC* cic = CompiledIC_at(&iter);
@@ -470,8 +434,6 @@ void CompiledMethod::verify_metadata_loaders(address low_boundary) {
       }
     }
   }
-  // Check that the metadata embedded in the nmethod is alive
-  metadata_do(check_class);
 #endif
 }
 
@@ -479,67 +441,43 @@ void CompiledMethod::verify_metadata_loaders(address low_boundary) {
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
 
-void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
   // Make sure the oop's ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  // Exception cache
-  clean_exception_cache();
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        clean_ic_if_metadata_is_dead(ic);
-      }
-    }
-  }
+  address low_boundary = oops_reloc_begin();
 
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
+  if (do_unloading_oops(low_boundary, is_alive)) {
     return;
   }
 
 #if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
+  if (do_unloading_jvmci()) {
     return;
   }
 #endif
 
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
+  // Cleanup exception cache and inline caches happens
+  // after all the unloaded methods are found.
 }
 
 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
 template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+                                         bool parallel, bool clean_all) {
   // Ok, to lookup references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
   if (nm != NULL) {
-    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
+    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
       // The nmethod has not been processed yet.
       return true;
     }
 
     // Clean inline caches pointing to both zombie and not_entrant methods
-    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
-      ic->set_to_clean();
+    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean(from->is_alive());
       assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
     }
   }
@@ -547,12 +485,14 @@ static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address add
   return false;
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
 }
 
 bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
@@ -562,47 +502,79 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
+  address low_boundary = oops_reloc_begin();
+
+  if (do_unloading_oops(low_boundary, is_alive)) {
+    return false;
   }
 
-  // Exception cache
-  clean_exception_cache();
+#if INCLUDE_JVMCI
+  if (do_unloading_jvmci()) {
+    return false;
+  }
+#endif
+
+  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
+}
+
+// Cleans caches in nmethods that point to either classes that are unloaded
+// or nmethods that are unloaded.
+//
+// Can be called either in parallel by G1 currently or after all
+// nmethods are unloaded. Return postponed=true in the parallel case for
+// inline caches found that point to nmethods that are not yet visited during
+// the do_unloading walk.
+bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {
+
+  // Exception cache only needs to be called if unloading occurred
+  if (unloading_occurred) {
+    clean_exception_cache();
+  }
+
+  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);
+
+  // All static stubs need to be cleaned.
+  clean_ic_stubs();
+
+  // Check that the metadata embedded in the nmethod is alive
+  DEBUG_ONLY(metadata_do(check_class));
+
+  return postponed;
+}
 
+// Called to clean up after class unloading for live nmethods and from the sweeper
+// for all methods.
+bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
+  assert_locked_or_safepoint(CompiledIC_lock);
   bool postponed = false;
 
-  RelocIterator iter(this, low_boundary);
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
 
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
       if (unloading_occurred) {
-        // If class unloading occurred we first iterate over all inline caches and
-        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        // If class unloading occurred we first clear ICs where the cached metadata
+        // is referring to an unloaded klass or method.
         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
       }
 
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;
 
     case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
       break;
 
     case relocInfo::oop_type:
-      // handled by do_unloading_oops below
+      // handled by do_unloading_oops already
      break;
 
    case relocInfo::metadata_type:
@@ -613,19 +585,6 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
     }
   }
 
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
-    return postponed;
-  }
-
-#if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
-    return postponed;
-  }
-#endif
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
-
   return postponed;
 }
 
@@ -636,32 +595,21 @@ void CompiledMethod::do_unloading_parallel_postponed() {
   assert(!is_zombie(),
          "should not call follow on zombie nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  RelocIterator iter(this, low_boundary);
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
 
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;
 
     case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;
 
     case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;
 
    default:
@@ -331,8 +331,19 @@ public:
 
   static address get_deopt_original_pc(const frame* fr);
 
-  // Inline cache support
-  void cleanup_inline_caches(bool clean_all = false);
+  // GC unloading support
+  // Cleans unloaded klasses and unloaded nmethods in inline caches
+  bool unload_nmethod_caches(bool parallel, bool class_unloading_occurred);
+
+  // Inline cache support for class unloading and nmethod unloading
+ private:
+  bool cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all);
+ public:
+  bool cleanup_inline_caches(bool clean_all = false) {
+    // Serial version used by sweeper and whitebox test
+    return cleanup_inline_caches_impl(false, false, clean_all);
+  }
+
   virtual void clear_inline_caches();
   void clear_ic_stubs();
 
@@ -364,12 +375,15 @@ public:
   void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
   CompiledMethod* unloading_next() { return _unloading_next; }
 
+ protected:
+  address oops_reloc_begin() const;
  private:
   void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
 
-  // Check that all metadata is still alive
-  void verify_metadata_loaders(address low_boundary);
+  void clean_ic_stubs();
 
-  virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+ public:
+  virtual void do_unloading(BoolObjectClosure* is_alive);
   // The parallel versions are used by G1.
   virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
   virtual void do_unloading_parallel_postponed();
@@ -381,9 +395,9 @@ public:
   unsigned char unloading_clock();
 
 protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) = 0;
 #if INCLUDE_JVMCI
-  virtual bool do_unloading_jvmci(bool unloading_occurred) = 0;
+  virtual bool do_unloading_jvmci() = 0;
 #endif
 
 private:
@@ -946,21 +946,8 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
 void nmethod::verify_clean_inline_caches() {
   assert_locked_or_safepoint(CompiledIC_lock);
 
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
   ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
     switch(iter.type()) {
     case relocInfo::virtual_call_type:
@@ -1041,13 +1028,17 @@ void nmethod::make_unloaded(oop cause) {
     flush_dependencies(/*delete_immediately*/false);
 
     // Break cycle between nmethod & method
-    LogTarget(Trace, class, unload) lt;
+    LogTarget(Trace, class, unload, nmethod) lt;
     if (lt.is_enabled()) {
       LogStream ls(lt);
-      ls.print_cr("making nmethod " INTPTR_FORMAT
-                  " unloadable, Method*(" INTPTR_FORMAT
-                  "), cause(" INTPTR_FORMAT ")",
-                  p2i(this), p2i(_method), p2i(cause));
+      ls.print("making nmethod " INTPTR_FORMAT
+               " unloadable, Method*(" INTPTR_FORMAT
+               "), cause(" INTPTR_FORMAT ") ",
+               p2i(this), p2i(_method), p2i(cause));
+      if (cause != NULL) {
+        cause->print_value_on(&ls);
+      }
+      ls.cr();
     }
     // Unlink the osr method, so we do not look this up again
     if (is_osr_method()) {
@@ -1378,17 +1369,15 @@ void nmethod::flush_dependencies(bool delete_immediately) {
 
 
 // If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root) {
   assert(root != NULL, "just checking");
   oop obj = *root;
   if (obj == NULL || is_alive->do_object_b(obj)) {
     return false;
   }
 
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
+  // An nmethod might be unloaded simply because one of its constant oops has gone dead.
   // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
   make_unloaded(obj);
   return true;
 }
@@ -1466,7 +1455,7 @@ void nmethod::post_compiled_method_unload() {
   set_unload_reported();
 }
 
-bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive) {
   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
 
   oop_Relocation* r = iter_at_oop->oop_reloc();
@@ -1477,7 +1466,7 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
          "oop must be found in exactly one place");
   if (r->oop_is_immediate() && r->oop_value() != NULL) {
     // Unload this nmethod if the oop is dead.
-    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+    if (can_unload(is_alive, r->oop_addr())) {
       return true;;
     }
   }
@@ -1485,18 +1474,18 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
   return false;
 }
 
-bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive) {
   // Scopes
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word()) continue; // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
+    if (can_unload(is_alive, p)) {
       return true;
     }
   }
   return false;
 }
 
-bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
   // Compiled code
 
   // Prevent extra code cache walk for platforms that don't have immediate oops.
@@ -1504,18 +1493,18 @@ bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_aliv
     RelocIterator iter(this, low_boundary);
     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type) {
-        if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+        if (unload_if_dead_at(&iter, is_alive)) {
           return true;
         }
       }
     }
   }
 
-  return do_unloading_scopes(is_alive, unloading_occurred);
+  return do_unloading_scopes(is_alive);
 }
 
 #if INCLUDE_JVMCI
-bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
+bool nmethod::do_unloading_jvmci() {
   if (_jvmci_installed_code != NULL) {
     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
       if (_jvmci_installed_code_triggers_unloading) {
@@ -1533,15 +1522,9 @@ bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
 
 // Iterate over metadata calling this function. Used by RedefineClasses
 void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
   {
     // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
     while (iter.next()) {
       if (iter.type() == relocInfo::metadata_type ) {
         metadata_Relocation* r = iter.metadata_reloc();
@@ -1588,20 +1571,9 @@ void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
 
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes. If an oop in the old code was there, that oop
-  // should not get GC'd. Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
 
     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type ) {
@@ -1650,7 +1622,11 @@ bool nmethod::test_set_oops_do_mark() {
         break;
       }
       // Mark was clear when we first saw this guy.
-      if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
+      LogTarget(Trace, gc, nmethod) lt;
+      if (lt.is_enabled()) {
+        LogStream ls(lt);
+        CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
+      }
       return false;
     }
   }
@@ -1659,7 +1635,7 @@ bool nmethod::test_set_oops_do_mark() {
 }
 
 void nmethod::oops_do_marking_prologue() {
-  if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
+  log_trace(gc, nmethod)("oops_do_marking_prologue");
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
   // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
@@ -1675,20 +1651,26 @@ void nmethod::oops_do_marking_epilogue() {
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
     DEBUG_ONLY(cur->verify_oop_relocations());
-    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
+
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
+    }
     cur = next;
   }
   nmethod* required = _oops_do_mark_nmethods;
   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
-  if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
+  log_trace(gc, nmethod)("oops_do_marking_epilogue");
 }
 
 class DetectScavengeRoot: public OopClosure {
   bool _detected_scavenge_root;
+  nmethod* _print_nm;
 public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
+  DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
 
   bool detected_scavenge_root() { return _detected_scavenge_root; }
   virtual void do_oop(oop* p) {
     if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
@@ -1699,21 +1681,25 @@ public:
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 
 #ifndef PRODUCT
-  nmethod* _print_nm;
   void maybe_print(oop* p) {
     if (_print_nm == NULL) return;
-    if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
-                  p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
-                  p2i(*p), p2i(p));
-    (*p)->print();
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      if (!_detected_scavenge_root) {
+        CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
+      }
+      ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
+               p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
+               p2i(*p), p2i(p));
+      (*p)->print_value_on(&ls);
+      ls.cr();
+    }
   }
 #endif //PRODUCT
 };
 
 bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
+  DetectScavengeRoot detect_scavenge_root(this);
   oops_do(&detect_scavenge_root);
   return detect_scavenge_root.detected_scavenge_root();
 }
@@ -484,18 +484,18 @@ public:
 #endif
 
  protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
 #if INCLUDE_JVMCI
   // See comment for _jvmci_installed_code_triggers_unloading field.
   // Returns whether this nmethod was unloaded.
-  virtual bool do_unloading_jvmci(bool unloading_occurred);
+  virtual bool do_unloading_jvmci();
 #endif
 
  private:
-  bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
+  bool do_unloading_scopes(BoolObjectClosure* is_alive);
   // Unload a nmethod if the *root object is dead.
-  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
-  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+  bool can_unload(BoolObjectClosure* is_alive, oop* root);
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive);
 
  public:
   void oops_do(OopClosure* f) { oops_do(f, false); }
@@ -3355,7 +3355,7 @@ private:
       add_to_postponed_list(nm);
     }
 
-    // Mark that this thread has been cleaned/unloaded.
+    // Mark that this nmethod has been cleaned/unloaded.
     // After this call, it will be safe to ask if this nmethod was unloaded or not.
     nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
   }
@@ -449,12 +449,6 @@ bool Method::init_method_counters(MethodCounters* counters) {
   return Atomic::replace_if_null(counters, &_method_counters);
 }
 
-void Method::cleanup_inline_caches() {
-  // The current system doesn't use inline caches in the interpreter
-  // => nothing to do (keep this method around for future use)
-}
-
-
 int Method::extra_stack_words() {
   // not an inline function, to avoid a header dependency on Interpreter
   return extra_stack_entries() * Interpreter::stackElementSize;
@@ -904,9 +904,6 @@ class Method : public Metadata {
     return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
   }
 
-  // Inline cache support
-  void cleanup_inline_caches();
-
   // Find if klass for method is loaded
   bool is_klass_loaded_by_klass_index(int klass_index) const;
   bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;
@@ -553,6 +553,7 @@ static SpecialFlag const special_jvm_flags[] = {
   { "CheckEndorsedAndExtDirs", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "TraceScavenge", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "PermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
   { "MaxPermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
   { "SharedReadWriteSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
@@ -1055,9 +1055,6 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G);
   develop(bool, TraceFinalizerRegistration, false, \
           "Trace registration of final references") \
                                                      \
-  notproduct(bool, TraceScavenge, false, \
-          "Trace scavenge") \
-                                                     \
   product(bool, IgnoreEmptyClassPaths, false, \
           "Ignore empty path elements in -classpath") \
                                                      \