8151956: Support non-continuous CodeBlobs in HotSpot

Reviewed-by: iveresov, thartmann, simonis
Rickard Bäckman, 2016-04-26 10:28:51 +02:00
parent 67ff4391ec
commit b853eb7f5c
100 changed files with 2486 additions and 1868 deletions
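
The pattern repeated throughout the platform diffs below: a new CompiledMethod layer is inserted between CodeBlob and nmethod, and callers that used to test is_nmethod() and downcast via as_nmethod_or_null() now go through is_compiled() and as_compiled_method_or_null(). A minimal compilable sketch of that caller-side idiom (the helper function and the stub class bodies are hypothetical; only the accessor names come from this commit):

    #include <cstddef>
    typedef unsigned char* address;

    class CompiledMethod;
    class CodeBlob {
     public:
      virtual bool is_compiled() const { return false; }
      // NULL for blobs that are not compiled methods (stubs, adapters, ...)
      virtual CompiledMethod* as_compiled_method_or_null() { return NULL; }
    };
    class CompiledMethod : public CodeBlob {
     public:
      bool is_deopt_entry(address pc);     // declaration only in this sketch
      bool is_deopt_mh_entry(address pc);  // declaration only in this sketch
      virtual bool is_compiled() const { return true; }
      virtual CompiledMethod* as_compiled_method_or_null() { return this; }
    };

    // Hypothetical helper condensing the check that frame::safe_for_sender()
    // and frame::adjust_unextended_sp() perform in the diffs below.
    static bool pc_is_deopt_point(CodeBlob* cb, address pc) {
      if (cb == NULL) return false;
      CompiledMethod* cm = cb->as_compiled_method_or_null();
      return cm != NULL && (cm->is_deopt_entry(pc) || cm->is_deopt_mh_entry(pc));
    }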

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -221,21 +221,19 @@ bool frame::safe_for_sender(JavaThread *thread) {
       return jcw_safe;
     }
 
-    if (sender_blob->is_nmethod()) {
-      nmethod* nm = sender_blob->as_nmethod_or_null();
-      if (nm != NULL) {
-        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
-            nm->method()->is_method_handle_intrinsic()) {
-          return false;
-        }
+    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
+    if (nm != NULL) {
+      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
+          nm->method()->is_method_handle_intrinsic()) {
+        return false;
       }
     }
 
     // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
     // because the return address counts against the callee's frame.
     if (sender_blob->frame_size() <= 0) {
-      assert(!sender_blob->is_nmethod(), "should count return address at least");
+      assert(!sender_blob->is_compiled(), "should count return address at least");
       return false;
     }
@@ -244,7 +242,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // should not be anything but the call stub (already covered), the interpreter (already covered)
     // or an nmethod.
-    if (!sender_blob->is_nmethod()) {
+    if (!sender_blob->is_compiled()) {
       return false;
     }
@@ -286,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
   assert(_pc == *pc_addr || pc == *pc_addr, "must be");
   *pc_addr = pc;
   _cb = CodeCache::find_blob(pc);
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     assert(original_pc == _pc, "expected original PC to be stored before patching");
     _deopt_state = is_deoptimized;
@@ -371,7 +369,7 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
 // Verifies the calculated original PC of a deoptimization PC for the
 // given unextended SP.
 #ifdef ASSERT
-void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
   frame fr;
 
   // This is ugly but it's better than to change {get,set}_original_pc
@@ -391,12 +389,14 @@ void frame::adjust_unextended_sp() {
   // as any other call site. Therefore, no special action is needed when we are
   // returning to any of these call sites.
 
-  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
-  if (sender_nm != NULL) {
-    // If the sender PC is a deoptimization point, get the original PC.
-    if (sender_nm->is_deopt_entry(_pc) ||
-        sender_nm->is_deopt_mh_entry(_pc)) {
-      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
+  if (_cb != NULL) {
+    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
+    if (sender_cm != NULL) {
+      // If the sender PC is a deoptimization point, get the original PC.
+      if (sender_cm->is_deopt_entry(_pc) ||
+          sender_cm->is_deopt_mh_entry(_pc)) {
+        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
+      }
     }
   }
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -124,7 +124,7 @@
 #ifdef ASSERT
   // Used in frame::sender_for_{interpreter,compiled}_frame
-  static void verify_deopt_original_pc(       nmethod* nm, intptr_t* unextended_sp);
+  static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
 #endif
 
  public:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -55,7 +55,7 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
   _cb = CodeCache::find_blob(pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;
@@ -79,10 +79,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _cb = CodeCache::find_blob(pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
-    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
+    assert(((CompiledMethod*)_cb)->insts_contains(_pc), "original PC must be in CompiledMethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -111,7 +111,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
   _cb = CodeCache::find_blob(_pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;

View File

@@ -99,7 +99,7 @@ address NativeCall::get_trampoline() {
   address bl_destination
     = MacroAssembler::pd_call_destination(call_addr);
-  if (code->content_contains(bl_destination) &&
+  if (code->contains(bl_destination) &&
       is_NativeCallTrampolineStub_at(bl_destination))
     return bl_destination;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -40,7 +40,7 @@ inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
   _fp = (intptr_t*)own_abi()->callers_sp;
 
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;

View File

@@ -137,7 +137,7 @@ address NativeCall::get_trampoline() {
     return NULL;
 
   address bl_destination = Assembler::bxx_destination(call_addr);
-  if (code->content_contains(bl_destination) &&
+  if (code->contains(bl_destination) &&
       is_NativeCallTrampolineStub_at(bl_destination))
     return bl_destination;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -212,7 +212,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // ok. adapter blobs never have a frame complete and are never ok.
     if (!_cb->is_frame_complete_at(_pc)) {
-      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
+      if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }
@@ -304,7 +304,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // because you must allocate window space
     if (sender_blob->frame_size() <= 0) {
-      assert(!sender_blob->is_nmethod(), "should count return address at least");
+      assert(!sender_blob->is_compiled(), "should count return address at least");
       return false;
     }
@@ -315,7 +315,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
     // that initial frame and retrying.
-    if (!sender_blob->is_nmethod()) {
+    if (!sender_blob->is_compiled()) {
       return false;
     }
@@ -358,9 +358,9 @@ void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
   }
   _deopt_state = unknown;
 #ifdef ASSERT
-  if ( _cb != NULL && _cb->is_nmethod()) {
+  if ( _cb != NULL && _cb->is_compiled()) {
     // Without a valid unextended_sp() we can't convert the pc to "original"
-    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
+    assert(!((CompiledMethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
   }
 #endif // ASSERT
 }
@@ -393,7 +393,7 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
   // Check for MethodHandle call sites.
   if (_cb != NULL) {
-    nmethod* nm = _cb->as_nmethod_or_null();
+    CompiledMethod* nm = _cb->as_compiled_method_or_null();
     if (nm != NULL) {
       if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
         _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
@@ -413,7 +413,7 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
   // this lookup as get_deopt_original_pc() needs a correct value for
   // unextended_sp() which uses _sp_adjustment_by_callee.
   if (_pc != NULL) {
-    address original_pc = nmethod::get_deopt_original_pc(this);
+    address original_pc = CompiledMethod::get_deopt_original_pc(this);
     if (original_pc != NULL) {
       _pc = original_pc;
       _deopt_state = is_deoptimized;
@@ -547,7 +547,7 @@ void frame::patch_pc(Thread* thread, address pc) {
   _cb = CodeCache::find_blob(pc);
   *O7_addr() = pc - pc_return_offset;
   _cb = CodeCache::find_blob(_pc);
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     assert(original_pc == _pc, "expected original to be stored before patching");
     _deopt_state = is_deoptimized;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // ok. adapter blobs never have a frame complete and are never ok.
     if (!_cb->is_frame_complete_at(_pc)) {
-      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
+      if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
         return false;
       }
     }
@@ -220,13 +220,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
       return jcw_safe;
     }
 
-    if (sender_blob->is_nmethod()) {
-      nmethod* nm = sender_blob->as_nmethod_or_null();
-      if (nm != NULL) {
-        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
-            nm->method()->is_method_handle_intrinsic()) {
-          return false;
-        }
+    CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
+    if (nm != NULL) {
+      if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
+          nm->method()->is_method_handle_intrinsic()) {
+        return false;
       }
     }
@@ -234,7 +232,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // because the return address counts against the callee's frame.
     if (sender_blob->frame_size() <= 0) {
-      assert(!sender_blob->is_nmethod(), "should count return address at least");
+      assert(!sender_blob->is_compiled(), "should count return address at least");
       return false;
     }
@@ -243,7 +241,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
     // should not be anything but the call stub (already covered), the interpreter (already covered)
     // or an nmethod.
-    if (!sender_blob->is_nmethod()) {
+    if (!sender_blob->is_compiled()) {
       return false;
     }
@@ -286,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
   assert(_pc == *pc_addr || pc == *pc_addr, "must be");
   *pc_addr = pc;
   _cb = CodeCache::find_blob(pc);
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     assert(original_pc == _pc, "expected original PC to be stored before patching");
     _deopt_state = is_deoptimized;
@@ -372,7 +370,7 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
 // Verifies the calculated original PC of a deoptimization PC for the
 // given unextended SP.
 #ifdef ASSERT
-void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
   frame fr;
 
   // This is ugly but it's better than to change {get,set}_original_pc
@@ -381,7 +379,7 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
   fr._unextended_sp = unextended_sp;
 
   address original_pc = nm->get_original_pc(&fr);
-  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
+  assert(nm->insts_contains(original_pc), "original PC must be in CompiledMethod");
 }
 #endif
@@ -392,12 +390,14 @@ void frame::adjust_unextended_sp() {
   // as any other call site. Therefore, no special action is needed when we are
   // returning to any of these call sites.
 
-  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
-  if (sender_nm != NULL) {
-    // If the sender PC is a deoptimization point, get the original PC.
-    if (sender_nm->is_deopt_entry(_pc) ||
-        sender_nm->is_deopt_mh_entry(_pc)) {
-      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
+  if (_cb != NULL) {
+    CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
+    if (sender_cm != NULL) {
+      // If the sender PC is a deoptimization point, get the original PC.
+      if (sender_cm->is_deopt_entry(_pc) ||
+          sender_cm->is_deopt_mh_entry(_pc)) {
+        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
+      }
     }
   }
 }

View File

@@ -124,7 +124,7 @@
 #ifdef ASSERT
   // Used in frame::sender_for_{interpreter,compiled}_frame
-  static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
+  static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
 #endif
 
  public:

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
   _cb = CodeCache::find_blob(pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;
@@ -72,10 +72,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _cb = CodeCache::find_blob(pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
-    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
+    assert(((CompiledMethod*)_cb)->insts_contains(_pc), "original PC must be in CompiledMethod");
     _deopt_state = is_deoptimized;
   } else {
     if (_cb->is_deoptimization_stub()) {
@@ -106,7 +106,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
   _cb = CodeCache::find_blob(_pc);
   adjust_unextended_sp();
-  address original_pc = nmethod::get_deopt_original_pc(this);
+  address original_pc = CompiledMethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
     _pc = original_pc;
     _deopt_state = is_deoptimized;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -59,7 +59,7 @@ inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   case ZeroFrame::SHARK_FRAME: {
     _pc = zero_sharkframe()->pc();
     _cb = CodeCache::find_blob_unsafe(pc());
-    address original_pc = nmethod::get_deopt_original_pc(this);
+    address original_pc = CompiledMethod::get_deopt_original_pc(this);
     if (original_pc != NULL) {
       _pc = original_pc;
       _deopt_state = is_deoptimized;

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 
-public class AdapterBlob extends CodeBlob {
+public class AdapterBlob extends RuntimeBlob {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 
-public class BufferBlob extends CodeBlob {
+public class BufferBlob extends RuntimeBlob {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -19,84 +19,142 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
 */
 
 package sun.jvm.hotspot.code;
 
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.compiler.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.compiler.ImmutableOopMap;
+import sun.jvm.hotspot.compiler.ImmutableOopMapSet;
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.Assert;
+import sun.jvm.hotspot.utilities.CStringUtilities;
+
+import java.io.PrintStream;
+import java.util.Observable;
+import java.util.Observer;
 
 public class CodeBlob extends VMObject {
   private static AddressField  nameField;
   private static CIntegerField sizeField;
   private static CIntegerField headerSizeField;
-  private static CIntegerField relocationSizeField;
-  private static CIntegerField contentOffsetField;
-  private static CIntegerField codeOffsetField;
+  private static AddressField  contentBeginField;
+  private static AddressField  codeBeginField;
+  private static AddressField  codeEndField;
+  private static AddressField  dataEndField;
   private static CIntegerField frameCompleteOffsetField;
   private static CIntegerField dataOffsetField;
   private static CIntegerField frameSizeField;
   private static AddressField  oopMapsField;
 
-  // Only used by server compiler on x86; computed over in SA rather
-  // than relying on computation in target VM
-  private static final int NOT_YET_COMPUTED = -2;
-  private static final int UNDEFINED        = -1;
-  private int              linkOffset       = NOT_YET_COMPUTED;
-  private static int       matcherInterpreterFramePointerReg;
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
+  public CodeBlob(Address addr) {
+    super(addr);
   }
 
+  protected static int matcherInterpreterFramePointerReg;
+
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("CodeBlob");
 
     nameField                = type.getAddressField("_name");
     sizeField                = type.getCIntegerField("_size");
     headerSizeField          = type.getCIntegerField("_header_size");
-    relocationSizeField      = type.getCIntegerField("_relocation_size");
     frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
-    contentOffsetField       = type.getCIntegerField("_content_offset");
-    codeOffsetField          = type.getCIntegerField("_code_offset");
+    contentBeginField        = type.getAddressField("_content_begin");
+    codeBeginField           = type.getAddressField("_code_begin");
+    codeEndField             = type.getAddressField("_code_end");
+    dataEndField             = type.getAddressField("_data_end");
     dataOffsetField          = type.getCIntegerField("_data_offset");
     frameSizeField           = type.getCIntegerField("_frame_size");
    oopMapsField             = type.getAddressField("_oop_maps");
 
     if (VM.getVM().isServerCompiler()) {
       matcherInterpreterFramePointerReg =
         db.lookupIntConstant("Matcher::interpreter_frame_pointer_reg").intValue();
     }
   }
 
-  public CodeBlob(Address addr) {
-    super(addr);
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
   }
 
+  public Address headerBegin()  { return getAddress(); }
+  public Address headerEnd()    { return getAddress().addOffsetTo(getHeaderSize()); }
+  public Address contentBegin() { return contentBeginField.getValue(addr); }
+  public Address contentEnd()   { return headerBegin().addOffsetTo(getDataOffset()); }
+  public Address codeBegin()    { return codeBeginField.getValue(addr); }
+  public Address codeEnd()      { return codeEndField.getValue(addr); }
+  public Address dataBegin()    { return headerBegin().addOffsetTo(getDataOffset()); }
+  public Address dataEnd()      { return dataEndField.getValue(addr); }
+
+  public long getFrameCompleteOffset() { return frameCompleteOffsetField.getValue(addr); }
+  public int  getDataOffset()          { return (int) dataOffsetField.getValue(addr); }
+
+  // Sizes
+  public int getSize()       { return (int) sizeField.getValue(addr); }
+  public int getHeaderSize() { return (int) headerSizeField.getValue(addr); }
+
+  public long getFrameSizeWords() {
+    return (int) frameSizeField.getValue(addr);
+  }
+
+  public String getName() {
+    // Read the C string out of the target VM; calling getName() here
+    // would recurse forever.
+    return CStringUtilities.getString(nameField.getValue(addr));
+  }
+
+  /** OopMap for frame; can return null if none available */
+  public ImmutableOopMapSet getOopMaps() {
+    Address value = oopMapsField.getValue(addr);
+    if (value == null) {
+      return null;
+    }
+    return new ImmutableOopMapSet(value);
+  }
+
   // Typing
   public boolean isBufferBlob()         { return false; }
+  public boolean isAOT()                { return false; }
+  public boolean isCompiled()           { return false; }
   public boolean isNMethod()            { return false; }
   public boolean isRuntimeStub()        { return false; }
   public boolean isDeoptimizationStub() { return false; }
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
   public boolean isAdapterBlob()        { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
   public boolean isJavaMethod()         { return false; }
   public boolean isNativeMethod()       { return false; }
   /** On-Stack Replacement method */
   public boolean isOSRMethod()          { return false; }
@@ -105,81 +163,32 @@ public class CodeBlob extends VMObject {
     return null;
   }
 
-  // Boundaries
-  public Address headerBegin() {
-    return addr;
-  }
-
-  public Address headerEnd() {
-    return addr.addOffsetTo(headerSizeField.getValue(addr));
-  }
-
-  // FIXME: add RelocInfo
-  // public RelocInfo relocationBegin();
-  // public RelocInfo relocationEnd();
-
-  public Address contentBegin() {
-    return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
-  }
-
-  public Address contentEnd() {
-    return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
-  }
-
-  public Address codeBegin() {
-    return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
-  }
-
-  public Address codeEnd() {
-    return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
-  }
-
-  public Address dataBegin() {
-    return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
-  }
-
-  public Address dataEnd() {
-    return headerBegin().addOffsetTo(sizeField.getValue(addr));
-  }
-
-  // Offsets
-  public int getRelocationOffset() { return (int) headerSizeField   .getValue(addr); }
-  public int getContentOffset()    { return (int) contentOffsetField.getValue(addr); }
-  public int getCodeOffset()       { return (int) codeOffsetField   .getValue(addr); }
-  public int getDataOffset()       { return (int) dataOffsetField   .getValue(addr); }
-
-  // Sizes
-  public int getSize()             { return (int) sizeField      .getValue(addr); }
-  public int getHeaderSize()       { return (int) headerSizeField.getValue(addr); }
-
   // FIXME: add getRelocationSize()
   public int getContentSize()      { return (int) contentEnd().minus(contentBegin()); }
   public int getCodeSize()         { return (int) codeEnd()   .minus(codeBegin()); }
   public int getDataSize()         { return (int) dataEnd()   .minus(dataBegin()); }
 
   // Containment
   public boolean blobContains(Address addr)    { return headerBegin() .lessThanOrEqual(addr) && dataEnd()   .greaterThan(addr); }
   // FIXME: add relocationContains
   public boolean contentContains(Address addr) { return contentBegin().lessThanOrEqual(addr) && contentEnd().greaterThan(addr); }
   public boolean codeContains(Address addr)    { return codeBegin()   .lessThanOrEqual(addr) && codeEnd()   .greaterThan(addr); }
   public boolean dataContains(Address addr)    { return dataBegin()   .lessThanOrEqual(addr) && dataEnd()   .greaterThan(addr); }
   public boolean contains(Address addr)        { return contentContains(addr); }
-  public boolean isFrameCompleteAt(Address a)  { return codeContains(a) && a.minus(codeBegin()) >= frameCompleteOffsetField.getValue(addr); }
+  public boolean isFrameCompleteAt(Address a)  { return codeContains(a) && a.minus(codeBegin()) >= getFrameCompleteOffset(); }
 
   // Reclamation support (really only used by the nmethods, but in order to get asserts to work
   // in the CodeCache they are defined virtual here)
   public boolean isZombie()     { return false; }
-  public boolean isLockedByVM() { return false; }
 
-  /** OopMap for frame; can return null if none available */
-  public ImmutableOopMapSet getOopMaps() {
-    Address oopMapsAddr = oopMapsField.getValue(addr);
-    if (oopMapsAddr == null) {
-      return null;
-    }
-    return new ImmutableOopMapSet(oopMapsAddr);
-  }
-  // FIXME: not yet implementable
-  // void set_oop_maps(ImmutableOopMapSet* p);
+  public boolean isLockedByVM() { return false; }
 
   public ImmutableOopMap getOopMapForReturnAddress(Address returnAddress, boolean debugging) {
     Address pc = returnAddress;
@@ -189,25 +198,14 @@ public class CodeBlob extends VMObject {
     return getOopMaps().findMapAtOffset(pc.minus(codeBegin()), debugging);
   }
 
-  // virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, void f(oop*)) { ShouldNotReachHere(); }
-  // FIXME;
-
   /** NOTE: this returns a size in BYTES in this system! */
   public long getFrameSize() {
-    return VM.getVM().getAddressSize() * frameSizeField.getValue(addr);
+    return VM.getVM().getAddressSize() * getFrameSizeWords();
   }
 
   // Returns true, if the next frame is responsible for GC'ing oops passed as arguments
   public boolean callerMustGCArguments() { return false; }
 
-  public String getName() {
-    return CStringUtilities.getString(nameField.getValue(addr));
-  }
-
-  // FIXME: NOT FINISHED
-  // FIXME: add more accessors
-
   public void print() {
     printOn(System.out);
   }
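
The SA rewrite above stops deriving contentBegin/codeBegin/codeEnd/dataEnd from header-relative offsets and reads them straight out of the target VM. For reference, a sketch of the C++ CodeBlob layout change it mirrors; the field names come from this commit's diff (see also the GEN_OFFS changes below), while the class body is abbreviated rather than the real HotSpot declaration:

    typedef unsigned char* address;

    class CodeBlob {
     protected:
      address _content_begin;  // was: int _content_offset, relative to the header
      address _code_begin;     // was: int _code_offset, relative to the header
      address _code_end;       // new: explicit end of the executable range
      address _data_end;
      int     _data_offset;    // data start stays header-relative
     public:
      address code_begin() const { return _code_begin; }
      address code_end()   const { return _code_end; }
      // With absolute begin/end pointers the code section no longer has to
      // sit at a fixed offset inside one contiguous blob allocation.
      bool code_contains(address addr) const {
        return _code_begin <= addr && addr < _code_end;
      }
    };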

View File

@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public abstract class CompiledMethod extends CodeBlob {
+  private static AddressField methodField;
+  private static AddressField deoptHandlerBeginField;
+  private static AddressField deoptMhHandlerBeginField;
+  private static AddressField scopesDataBeginField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("CompiledMethod");
+
+    methodField              = type.getAddressField("_method");
+    deoptHandlerBeginField   = type.getAddressField("_deopt_handler_begin");
+    deoptMhHandlerBeginField = type.getAddressField("_deopt_mh_handler_begin");
+    scopesDataBeginField     = type.getAddressField("_scopes_data_begin");
+  }
+
+  public CompiledMethod(Address addr) {
+    super(addr);
+  }
+
+  public Method getMethod() {
+    return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
+  }
+
+  public Address deoptHandlerBegin()   { return deoptHandlerBeginField.getValue(addr); }
+  public Address deoptMhHandlerBegin() { return deoptMhHandlerBeginField.getValue(addr); }
+  public Address scopesDataBegin()     { return scopesDataBeginField.getValue(addr); }
+
+  public static int getMethodOffset()  { return (int) methodField.getOffset(); }
+
+  @Override
+  public boolean isCompiled() {
+    return true;
+  }
+}
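
The new SA wrapper above reads four fields from the C++ CompiledMethod class that this commit introduces. A rough sketch of that C++ side (field names taken from the lookups above; everything else is abbreviated and hypothetical, not the full HotSpot declarations):

    typedef unsigned char* address;
    class Method;
    class CodeBlob { /* name, sizes, _code_begin/_code_end, oop maps, ... */ };

    // Deopt handler and scopes-data locations that nmethod previously kept as
    // header-relative offsets (_deoptimize_offset, _deoptimize_mh_offset,
    // _scopes_data_offset) move up into CompiledMethod as absolute addresses.
    class CompiledMethod : public CodeBlob {
     protected:
      Method* _method;
      address _deopt_handler_begin;
      address _deopt_mh_handler_begin;
      address _scopes_data_begin;
    };

    class nmethod : public CompiledMethod { /* a JIT-compiled Java method */ };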

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,15 +27,13 @@ package sun.jvm.hotspot.code;
 import java.io.*;
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
-public class NMethod extends CodeBlob {
+public class NMethod extends CompiledMethod {
   private static long          pcDescSize;
-  private static AddressField  methodField;
   /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
   private static CIntegerField entryBCIField;
   /** To support simple linked-list chaining of nmethods */
@@ -45,13 +43,10 @@ public class NMethod extends CodeBlob {
   /** Offsets for different nmethod parts */
   private static CIntegerField exceptionOffsetField;
-  private static CIntegerField deoptOffsetField;
-  private static CIntegerField deoptMhOffsetField;
   private static CIntegerField origPCOffsetField;
   private static CIntegerField stubOffsetField;
   private static CIntegerField oopsOffsetField;
   private static CIntegerField metadataOffsetField;
-  private static CIntegerField scopesDataOffsetField;
   private static CIntegerField scopesPCsOffsetField;
   private static CIntegerField dependenciesOffsetField;
   private static CIntegerField handlerTableOffsetField;
@@ -91,20 +86,16 @@ public class NMethod extends CodeBlob {
   private static void initialize(TypeDataBase db) {
     Type type = db.lookupType("nmethod");
 
-    methodField                 = type.getAddressField("_method");
     entryBCIField               = type.getCIntegerField("_entry_bci");
     osrLinkField                = type.getAddressField("_osr_link");
     scavengeRootLinkField       = type.getAddressField("_scavenge_root_link");
     scavengeRootStateField      = type.getJByteField("_scavenge_root_state");
     exceptionOffsetField        = type.getCIntegerField("_exception_offset");
-    deoptOffsetField            = type.getCIntegerField("_deoptimize_offset");
-    deoptMhOffsetField          = type.getCIntegerField("_deoptimize_mh_offset");
     origPCOffsetField           = type.getCIntegerField("_orig_pc_offset");
     stubOffsetField             = type.getCIntegerField("_stub_offset");
     oopsOffsetField             = type.getCIntegerField("_oops_offset");
     metadataOffsetField         = type.getCIntegerField("_metadata_offset");
-    scopesDataOffsetField       = type.getCIntegerField("_scopes_data_offset");
     scopesPCsOffsetField        = type.getCIntegerField("_scopes_pcs_offset");
     dependenciesOffsetField     = type.getCIntegerField("_dependencies_offset");
     handlerTableOffsetField     = type.getCIntegerField("_handler_table_offset");
@@ -123,16 +114,11 @@ public class NMethod extends CodeBlob {
     super(addr);
   }
 
   // Accessors
   public Address getAddress() {
     return addr;
   }
 
-  public Method getMethod() {
-    return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
-  }
-
   // Type info
   public boolean isNMethod()    { return true; }
   public boolean isJavaMethod() { return !getMethod().isNative(); }
@@ -145,15 +131,12 @@ public class NMethod extends CodeBlob {
   public Address instsBegin()          { return codeBegin(); }
   public Address instsEnd()            { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address exceptionBegin()      { return headerBegin().addOffsetTo(getExceptionOffset()); }
-  public Address deoptHandlerBegin()   { return headerBegin().addOffsetTo(getDeoptOffset()); }
-  public Address deoptMhHandlerBegin() { return headerBegin().addOffsetTo(getDeoptMhOffset()); }
   public Address stubBegin()           { return headerBegin().addOffsetTo(getStubOffset()); }
   public Address stubEnd()             { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsBegin()           { return headerBegin().addOffsetTo(getOopsOffset()); }
   public Address oopsEnd()             { return headerBegin().addOffsetTo(getMetadataOffset()); }
   public Address metadataBegin()       { return headerBegin().addOffsetTo(getMetadataOffset()); }
-  public Address metadataEnd()         { return headerBegin().addOffsetTo(getScopesDataOffset()); }
-  public Address scopesDataBegin()     { return headerBegin().addOffsetTo(getScopesDataOffset()); }
+  public Address metadataEnd()         { return scopesDataBegin(); }
   public Address scopesDataEnd()       { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
   public Address scopesPCsBegin()      { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
   public Address scopesPCsEnd()        { return headerBegin().addOffsetTo(getDependenciesOffset()); }
@@ -462,8 +445,6 @@ public class NMethod extends CodeBlob {
   public static int getVerifiedEntryPointOffset() { return (int) verifiedEntryPointField.getOffset(); }
   public static int getOSREntryPointOffset()      { return (int) osrEntryPointField.getOffset(); }
   public static int getEntryBCIOffset()           { return (int) entryBCIField.getOffset(); }
-  /** NOTE: renamed from "method_offset_in_bytes" */
-  public static int getMethodOffset()             { return (int) methodField.getOffset(); }
 
   public void print() {
     printOn(System.out);
@@ -541,12 +522,9 @@ public class NMethod extends CodeBlob {
   private int getEntryBCI()           { return (int) entryBCIField          .getValue(addr); }
   private int getExceptionOffset()    { return (int) exceptionOffsetField   .getValue(addr); }
-  private int getDeoptOffset()        { return (int) deoptOffsetField       .getValue(addr); }
-  private int getDeoptMhOffset()      { return (int) deoptMhOffsetField     .getValue(addr); }
   private int getStubOffset()         { return (int) stubOffsetField        .getValue(addr); }
   private int getOopsOffset()         { return (int) oopsOffsetField        .getValue(addr); }
   private int getMetadataOffset()     { return (int) metadataOffsetField    .getValue(addr); }
-  private int getScopesDataOffset()   { return (int) scopesDataOffsetField  .getValue(addr); }
   private int getScopesPCsOffset()    { return (int) scopesPCsOffsetField   .getValue(addr); }
   private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
   private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }

View File

@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+
+import sun.jvm.hotspot.compiler.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class RuntimeBlob extends CodeBlob {
+
+  // Only used by server compiler on x86; computed over in SA rather
+  // than relying on computation in target VM
+  private static final int NOT_YET_COMPUTED = -2;
+  private static final int UNDEFINED        = -1;
+  private int              linkOffset       = NOT_YET_COMPUTED;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    Type type = db.lookupType("RuntimeBlob");
+  }
+
+  public RuntimeBlob(Address addr) {
+    super(addr);
+  }
+}

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 
-public class RuntimeStub extends CodeBlob {
+public class RuntimeStub extends RuntimeBlob {
   private static CIntegerField callerMustGCArgumentsField;
 
   static {

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 
-public class SingletonBlob extends CodeBlob {
+public class SingletonBlob extends RuntimeBlob {
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {

View File

@@ -1230,7 +1230,7 @@ public class HotSpotVMConfig {
     @HotSpotVMField(name = "Method::_method_counters", type = "MethodCounters*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCountersOffset;
     @HotSpotVMField(name = "Method::_method_data", type = "MethodData*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodDataOffset;
     @HotSpotVMField(name = "Method::_from_compiled_entry", type = "address", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCompiledEntryOffset;
-    @HotSpotVMField(name = "Method::_code", type = "nmethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCodeOffset;
+    @HotSpotVMField(name = "Method::_code", type = "CompiledMethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCodeOffset;
 
     @HotSpotVMConstant(name = "Method::_jfr_towrite") @Stable public int methodFlagsJfrTowrite;
     @HotSpotVMConstant(name = "Method::_caller_sensitive") @Stable public int methodFlagsCallerSensitive;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -256,8 +256,9 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _name); GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size); GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset); GEN_OFFS(CodeBlob, _content_begin);
GEN_OFFS(CodeBlob, _code_offset); GEN_OFFS(CodeBlob, _code_begin);
GEN_OFFS(CodeBlob, _code_end);
GEN_OFFS(CodeBlob, _data_offset); GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _frame_size); GEN_OFFS(CodeBlob, _frame_size);
printf("\n"); printf("\n");
@ -265,10 +266,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(nmethod, _method); GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _dependencies_offset); GEN_OFFS(nmethod, _dependencies_offset);
GEN_OFFS(nmethod, _metadata_offset); GEN_OFFS(nmethod, _metadata_offset);
GEN_OFFS(nmethod, _scopes_data_offset); GEN_OFFS(nmethod, _scopes_data_begin);
GEN_OFFS(nmethod, _scopes_pcs_offset); GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset); GEN_OFFS(nmethod, _handler_table_offset);
GEN_OFFS(nmethod, _deoptimize_offset); GEN_OFFS(nmethod, _deopt_handler_begin);
GEN_OFFS(nmethod, _orig_pc_offset); GEN_OFFS(nmethod, _orig_pc_offset);
GEN_OFFS(PcDesc, _pc_offset); GEN_OFFS(PcDesc, _pc_offset);
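For context, a sketch of the GEN_OFFS idiom this generator relies on: it emits #define constants carrying field byte offsets so the dtrace helper library can read VM structures out of process. Simplified relative to the real macro, which also supports a second generator variant:

#include <cstdio>
#include <cstddef>

// Simplified GEN_OFFS: field names beginning with '_' make the "%s%s"
// concatenation come out as OFFSET_CodeBlob_name, OFFSET_CodeBlob_code_begin, ...
#define GEN_OFFS(Type, Field) \
  printf("#define OFFSET_%s%s %zu\n", #Type, #Field, offsetof(Type, Field))

struct CodeBlob { const char* _name; int _header_size; };  // abridged

int main() {
  GEN_OFFS(CodeBlob, _name);         // -> #define OFFSET_CodeBlob_name 0
  GEN_OFFS(CodeBlob, _header_size);  // -> 8 on LP64
  return 0;
}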

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -124,10 +124,10 @@ typedef struct Nmethod_t {
uint64_t pc_desc; uint64_t pc_desc;
int32_t orig_pc_offset; /* _orig_pc_offset */ int32_t orig_pc_offset; /* _orig_pc_offset */
int32_t instrs_beg; /* _code_offset */ uint64_t instrs_beg; /* _code_offset */
int32_t instrs_end; uint64_t instrs_end;
int32_t deopt_beg; /* _deoptimize_offset */ uint64_t deopt_beg; /* _deoptimize_offset */
int32_t scopes_data_beg; /* _scopes_data_offset */ uint64_t scopes_data_beg; /* _scopes_data_offset */
int32_t scopes_data_end; int32_t scopes_data_end;
int32_t metadata_beg; /* _metadata_offset */ int32_t metadata_beg; /* _metadata_offset */
int32_t metadata_end; int32_t metadata_end;
@ -617,11 +617,12 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: BEGIN \n"); fprintf(stderr, "\t nmethod_info: BEGIN \n");
/* Instructions */ /* Instructions */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32); err = read_pointer(J, base + OFFSET_VMStructEntryaddress, &vmp->address);
err = read_pointer(J, nm + OFFSET_CodeBlob_code_begin, &N->instrs_beg);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32); err = read_pointer(J, nm + OFFSET_CodeBlob_code_end, &N->instrs_end);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_deoptimize_offset, &N->deopt_beg, SZ32); err = read_pointer(J, nm + OFFSET_nmethod_deopt_handler_begin, &N->deopt_beg);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
@ -639,7 +640,7 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err); CHECK_FAIL(err);
/* scopes_data */ /* scopes_data */
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->scopes_data_beg, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->scopes_data_beg, POINTER_SIZE);
CHECK_FAIL(err); CHECK_FAIL(err);
if (debug > 2 ) { if (debug > 2 ) {
@ -868,7 +869,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32); err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
*real_pc = N->nm + N->instrs_beg + pc_offset; *real_pc = N->instrs_beg + pc_offset;
if (debug > 2) { if (debug > 2) {
fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n", fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n",
pc_offset, *real_pc); pc_offset, *real_pc);
@ -942,7 +943,7 @@ scope_desc_at(Nmethod_t *N, int32_t decode_offset, Vframe_t *vf)
fprintf(stderr, "\t\t scope_desc_at: BEGIN \n"); fprintf(stderr, "\t\t scope_desc_at: BEGIN \n");
} }
buffer = N->nm + N->scopes_data_beg + decode_offset; buffer = N->scopes_data_beg + decode_offset;
err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset); err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset);
CHECK_FAIL(err); CHECK_FAIL(err);
@ -1052,11 +1053,11 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err); CHECK_FAIL(err);
if (debug) { if (debug) {
fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n", fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n",
pc, N->nm + N->deopt_beg); pc, N->deopt_beg);
} }
/* check for a deoptimized frame */ /* check for a deoptimized frame */
if ( pc == N->nm + N->deopt_beg) { if ( pc == N->deopt_beg) {
uint64_t base; uint64_t base;
if (debug) { if (debug) {
fprintf(stderr, "name_for_nmethod: found deoptimized frame\n"); fprintf(stderr, "name_for_nmethod: found deoptimized frame\n");

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -251,8 +251,9 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _name); GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size); GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset); GEN_OFFS(CodeBlob, _content_begin);
GEN_OFFS(CodeBlob, _code_offset); GEN_OFFS(CodeBlob, _code_begin);
GEN_OFFS(CodeBlob, _code_end);
GEN_OFFS(CodeBlob, _data_offset); GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _frame_size); GEN_OFFS(CodeBlob, _frame_size);
printf("\n"); printf("\n");
@ -260,10 +261,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(nmethod, _method); GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _dependencies_offset); GEN_OFFS(nmethod, _dependencies_offset);
GEN_OFFS(nmethod, _metadata_offset); GEN_OFFS(nmethod, _metadata_offset);
GEN_OFFS(nmethod, _scopes_data_offset); GEN_OFFS(nmethod, _scopes_data_begin);
GEN_OFFS(nmethod, _scopes_pcs_offset); GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset); GEN_OFFS(nmethod, _handler_table_offset);
GEN_OFFS(nmethod, _deoptimize_offset); GEN_OFFS(nmethod, _deopt_handler_begin);
GEN_OFFS(nmethod, _orig_pc_offset); GEN_OFFS(nmethod, _orig_pc_offset);
GEN_OFFS(PcDesc, _pc_offset); GEN_OFFS(PcDesc, _pc_offset);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -124,10 +124,10 @@ typedef struct Nmethod_t {
uint64_t pc_desc; uint64_t pc_desc;
int32_t orig_pc_offset; /* _orig_pc_offset */ int32_t orig_pc_offset; /* _orig_pc_offset */
int32_t instrs_beg; /* _code_offset */ uint64_t instrs_beg; /* _code_offset */
int32_t instrs_end; uint64_t instrs_end;
int32_t deopt_beg; /* _deoptimize_offset */ uint64_t deopt_beg; /* _deoptimize_offset */
int32_t scopes_data_beg; /* _scopes_data_offset */ uint64_t scopes_data_beg; /* _scopes_data_begin */
int32_t scopes_data_end; int32_t scopes_data_end;
int32_t metadata_beg; /* _metadata_offset */ int32_t metadata_beg; /* _metadata_offset */
int32_t metadata_end; int32_t metadata_end;
@ -617,11 +617,11 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: BEGIN \n"); fprintf(stderr, "\t nmethod_info: BEGIN \n");
/* Instructions */ /* Instructions */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32); err = read_pointer(J, nm + OFFSET_CodeBlob_code_begin, &N->instrs_beg);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32); err = read_pointer(J, nm + OFFSET_CodeBlob_code_end, &N->instrs_end);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_deoptimize_offset, &N->deopt_beg, SZ32); err = read_pointer(J, nm + OFFSET_nmethod_deopt_handler_begin, &N->deopt_beg);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
@ -629,7 +629,7 @@ static int nmethod_info(Nmethod_t *N)
/* Metadata */ /* Metadata */
err = ps_pread(J->P, nm + OFFSET_nmethod_metadata_offset, &N->metadata_beg, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_metadata_offset, &N->metadata_beg, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->metadata_end, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->metadata_end, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
/* scopes_pcs */ /* scopes_pcs */
@ -639,7 +639,7 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err); CHECK_FAIL(err);
/* scopes_data */ /* scopes_data */
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->scopes_data_beg, SZ32); err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->scopes_data_beg, POINTER_SIZE);
CHECK_FAIL(err); CHECK_FAIL(err);
if (debug > 2 ) { if (debug > 2 ) {
@ -868,7 +868,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32); err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32);
CHECK_FAIL(err); CHECK_FAIL(err);
*real_pc = N->nm + N->instrs_beg + pc_offset; *real_pc = N->instrs_beg + pc_offset;
if (debug > 2) { if (debug > 2) {
fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n", fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n",
pc_offset, *real_pc); pc_offset, *real_pc);
@ -942,7 +942,7 @@ scope_desc_at(Nmethod_t *N, int32_t decode_offset, Vframe_t *vf)
fprintf(stderr, "\t\t scope_desc_at: BEGIN \n"); fprintf(stderr, "\t\t scope_desc_at: BEGIN \n");
} }
buffer = N->nm + N->scopes_data_beg + decode_offset; buffer = N->scopes_data_beg + decode_offset;
err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset); err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset);
CHECK_FAIL(err); CHECK_FAIL(err);
@ -1052,11 +1052,11 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err); CHECK_FAIL(err);
if (debug) { if (debug) {
fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n", fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n",
pc, N->nm + N->deopt_beg); pc, N->deopt_beg);
} }
/* check for a deoptimized frame */ /* check for a deoptimized frame */
if ( pc == N->nm + N->deopt_beg) { if ( pc == N->deopt_beg) {
uint64_t base; uint64_t base;
if (debug) { if (debug) {
fprintf(stderr, "name_for_nmethod: found deoptimized frame\n"); fprintf(stderr, "name_for_nmethod: found deoptimized frame\n");

View File

@ -390,7 +390,7 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// BugId 4454115: A read from a MappedByteBuffer can fault here if the // BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case. // underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and // We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning // continue at the next instruction after the faulting read. Returning
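The same replacement recurs in every platform signal handler below: an explicit is_nmethod() test plus a C-style cast becomes a single null-safe downcast. A minimal sketch of the pattern with stand-in classes (the real declarations are in codeBlob.hpp):

#include <cstddef>

struct CompiledMethod;

struct CodeBlob {
  virtual ~CodeBlob() {}
  virtual bool is_compiled() const { return false; }
  inline CompiledMethod* as_compiled_method_or_null();
};

struct CompiledMethod : CodeBlob {
  virtual bool is_compiled() const { return true; }
  bool has_unsafe_access() const { return false; }  // illustrative
};

inline CompiledMethod* CodeBlob::as_compiled_method_or_null() {
  return is_compiled() ? static_cast<CompiledMethod*>(this) : NULL;
}

// Usage, mirroring the handler above:
//   CompiledMethod* nm = cb->as_compiled_method_or_null();
//   if (nm != NULL && nm->has_unsafe_access()) { ... }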

View File

@ -582,7 +582,7 @@ JVM_handle_bsd_signal(int sig,
// here if the underlying file has been truncated. // here if the underlying file has been truncated.
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access(); stub = StubRoutines::handler_for_unsafe_access();
} }

View File

@ -385,7 +385,7 @@ JVM_handle_linux_signal(int sig,
// here if the underlying file has been truncated. // here if the underlying file has been truncated.
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
stub = handle_unsafe_access(thread, pc); stub = handle_unsafe_access(thread, pc);
} }

View File

@ -315,7 +315,7 @@ JVM_handle_linux_signal(int sig,
((NativeInstruction*)pc)->is_safepoint_poll() && ((NativeInstruction*)pc)->is_safepoint_poll() &&
CodeCache::contains((void*) pc) && CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != NULL) && ((cb = CodeCache::find_blob(pc)) != NULL) &&
cb->is_nmethod()) { cb->is_compiled()) {
if (TraceTraps) { if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
} }
@ -364,7 +364,7 @@ JVM_handle_linux_signal(int sig,
// BugId 4454115: A read from a MappedByteBuffer can fault here if the // BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case. // underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and // We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning // continue at the next instruction after the faulting read. Returning

View File

@ -438,7 +438,7 @@ inline static bool checkByteBuffer(address pc, address* stub) {
// here if the underlying file has been truncated. // here if the underlying file has been truncated.
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
*stub = StubRoutines::handler_for_unsafe_access(); *stub = StubRoutines::handler_for_unsafe_access();
return true; return true;

View File

@ -418,7 +418,7 @@ JVM_handle_linux_signal(int sig,
// here if the underlying file has been truncated. // here if the underlying file has been truncated.
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL; CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access(); stub = StubRoutines::handler_for_unsafe_access();
} }

View File

@ -478,7 +478,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// here if the underlying file has been truncated. // here if the underlying file has been truncated.
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access(); stub = StubRoutines::handler_for_unsafe_access();
} }

View File

@ -518,7 +518,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// Do not crash the VM in such a case. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc); CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
if (cb != NULL) { if (cb != NULL) {
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) { if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access(); stub = StubRoutines::handler_for_unsafe_access();
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -604,7 +604,7 @@ void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
csize_t CodeBuffer::total_offset_of(CodeSection* cs) const { csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
csize_t size_so_far = 0; csize_t size_so_far = 0;
for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) { for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
const CodeSection* cur_cs = code_section(n); const CodeSection* cur_cs = code_section(n);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -468,9 +468,11 @@ class CodeBuffer: public StackObj {
// construction. // construction.
void initialize(csize_t code_size, csize_t locs_size); void initialize(csize_t code_size, csize_t locs_size);
CodeSection* consts() { return &_consts; } CodeSection* consts() { return &_consts; }
CodeSection* insts() { return &_insts; } CodeSection* insts() { return &_insts; }
CodeSection* stubs() { return &_stubs; } CodeSection* stubs() { return &_stubs; }
const CodeSection* insts() const { return &_insts; }
// present sections in order; return NULL at end; consts is #0, etc. // present sections in order; return NULL at end; consts is #0, etc.
CodeSection* code_section(int n) { CodeSection* code_section(int n) {
@ -547,7 +549,7 @@ class CodeBuffer: public StackObj {
// Combined offset (relative to start of first section) of given // Combined offset (relative to start of first section) of given
// section, as eventually found in the final CodeBlob. // section, as eventually found in the final CodeBlob.
csize_t total_offset_of(CodeSection* cs) const; csize_t total_offset_of(const CodeSection* cs) const;
// allocated size of all relocation data, including index, rounded up // allocated size of all relocation data, including index, rounded up
csize_t total_relocation_size() const; csize_t total_relocation_size() const;
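The const overload matters because the new CodeBlobLayout constructor holds only a const CodeBuffer* yet needs the instruction-section offset. A sketch of the constraint (the class name is hypothetical, to avoid clashing with the real CodeBuffer):

class CodeSection {};

class CodeBufferSketch {
  CodeSection _insts;
public:
  CodeSection*       insts()       { return &_insts; }
  const CodeSection* insts() const { return &_insts; }  // new overload
  int total_offset_of(const CodeSection* cs) const {    // now const
    (void)cs; return 0;                                 // stubbed
  }
};

// Compiles only because both members have const-qualified forms:
static int code_offset_for(const CodeBufferSketch* cb) {
  return cb->total_offset_of(cb->insts());
}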

View File

@ -1055,14 +1055,14 @@ void ciEnv::register_method(ciMethod* target,
if (entry_bci == InvocationEntryBci) { if (entry_bci == InvocationEntryBci) {
if (TieredCompilation) { if (TieredCompilation) {
// If there is an old version we're done with it // If there is an old version we're done with it
nmethod* old = method->code(); CompiledMethod* old = method->code();
if (TraceMethodReplacement && old != NULL) { if (TraceMethodReplacement && old != NULL) {
ResourceMark rm; ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string(); char *method_name = method->name_and_sig_as_C_string();
tty->print_cr("Replacing method %s", method_name); tty->print_cr("Replacing method %s", method_name);
} }
if (old != NULL) { if (old != NULL) {
old->make_not_entrant(); old->make_not_used();
} }
} }
if (TraceNMethodInstalls) { if (TraceNMethodInstalls) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1115,7 +1115,7 @@ bool ciMethod::has_compiled_code() {
int ciMethod::comp_level() { int ciMethod::comp_level() {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;
nmethod* nm = get_Method()->code(); CompiledMethod* nm = get_Method()->code();
if (nm != NULL) return nm->comp_level(); if (nm != NULL) return nm->comp_level();
return 0; return 0;
} }
@ -1150,7 +1150,7 @@ int ciMethod::code_size_for_inlining() {
int ciMethod::instructions_size() { int ciMethod::instructions_size() {
if (_instructions_size == -1) { if (_instructions_size == -1) {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
nmethod* code = get_Method()->code(); CompiledMethod* code = get_Method()->code();
if (code != NULL && (code->comp_level() == CompLevel_full_optimization)) { if (code != NULL && (code->comp_level() == CompLevel_full_optimization)) {
_instructions_size = code->insts_end() - code->verified_entry_point(); _instructions_size = code->insts_end() - code->verified_entry_point();
} else { } else {
@ -1165,7 +1165,7 @@ int ciMethod::instructions_size() {
// ciMethod::log_nmethod_identity // ciMethod::log_nmethod_identity
void ciMethod::log_nmethod_identity(xmlStream* log) { void ciMethod::log_nmethod_identity(xmlStream* log) {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
nmethod* code = get_Method()->code(); CompiledMethod* code = get_Method()->code();
if (code != NULL) { if (code != NULL) {
code->log_identity(log); code->log_identity(log);
} }

View File

@ -546,7 +546,7 @@ class CompileReplay : public StackObj {
} }
} }
// Make sure the existence of a prior compile doesn't stop this one // Make sure the existence of a prior compile doesn't stop this one
nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); CompiledMethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
if (nm != NULL) { if (nm != NULL) {
nm->make_not_entrant(); nm->make_not_entrant();
} }

View File

@ -1651,7 +1651,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
} }
if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) { if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
// Clobber the first compile and force second tier compilation // Clobber the first compile and force second tier compilation
nmethod* nm = m->code(); CompiledMethod* nm = m->code();
if (nm != NULL && !m->is_method_handle_intrinsic()) { if (nm != NULL && !m->is_method_handle_intrinsic()) {
// Throw out the code so that the code cache doesn't fill up // Throw out the code so that the code cache doesn't fill up
nm->make_not_entrant(); nm->make_not_entrant();
@ -1670,7 +1670,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string()); tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
} }
nmethod* nm = m->code(); CompiledMethod* nm = m->code();
if (nm != NULL && !m->is_method_handle_intrinsic()) { if (nm != NULL && !m->is_method_handle_intrinsic()) {
// Throw out the code so that the code cache doesn't fill up // Throw out the code so that the code cache doesn't fill up
nm->make_not_entrant(); nm->make_not_entrant();

View File

@ -1798,7 +1798,7 @@ static void print_stack_element_to_stream(outputStream* st, Handle mirror, int m
// Neither sourcename nor linenumber // Neither sourcename nor linenumber
sprintf(buf + (int)strlen(buf), "Unknown Source)"); sprintf(buf + (int)strlen(buf), "Unknown Source)");
} }
nmethod* nm = method->code(); CompiledMethod* nm = method->code();
if (WizardMode && nm != NULL) { if (WizardMode && nm != NULL) {
sprintf(buf + (int)strlen(buf), "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm); sprintf(buf + (int)strlen(buf), "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm);
} }
@ -1920,7 +1920,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
int total_count = 0; int total_count = 0;
RegisterMap map(thread, false); RegisterMap map(thread, false);
int decode_offset = 0; int decode_offset = 0;
nmethod* nm = NULL; CompiledMethod* nm = NULL;
bool skip_fillInStackTrace_check = false; bool skip_fillInStackTrace_check = false;
bool skip_throwableInit_check = false; bool skip_throwableInit_check = false;
bool skip_hidden = !ShowHiddenFrames; bool skip_hidden = !ShowHiddenFrames;
@ -1948,10 +1948,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
// HMMM QQQ might be nice to have frame return nm as NULL if cb is non-NULL // HMMM QQQ might be nice to have frame return nm as NULL if cb is non-NULL
// but non nmethod // but non nmethod
fr = fr.sender(&map); fr = fr.sender(&map);
if (cb == NULL || !cb->is_nmethod()) { if (cb == NULL || !cb->is_compiled()) {
continue; continue;
} }
nm = (nmethod*)cb; nm = cb->as_compiled_method();
if (nm->method()->is_native()) { if (nm->method()->is_native()) {
method = nm->method(); method = nm->method();
bci = 0; bci = 0;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ MetadataOnStackMark::MetadataOnStackMark(bool redefinition_walk) {
if (redefinition_walk) { if (redefinition_walk) {
Threads::metadata_do(Metadata::mark_on_stack); Threads::metadata_do(Metadata::mark_on_stack);
CodeCache::alive_nmethods_do(nmethod::mark_on_stack); CodeCache::metadata_do(Metadata::mark_on_stack);
CompileBroker::mark_on_stack(); CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack); JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack); ThreadService::metadata_do(Metadata::mark_on_stack);
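Instead of marking whole nmethods alive on stack, the walk now pushes a per-Metadata callback through the code cache. A rough sketch of that shape, with simplified stand-in types:

#include <vector>

struct Metadata { bool on_stack; };
static void mark_on_stack(Metadata* m) { m->on_stack = true; }

struct CompiledMethodSketch {
  std::vector<Metadata*> metadata;  // Method*s etc. embedded in the code
  void metadata_do(void f(Metadata*)) {
    for (size_t i = 0; i < metadata.size(); i++) f(metadata[i]);
  }
};

static void code_cache_metadata_do(std::vector<CompiledMethodSketch*>& cache,
                                   void f(Metadata*)) {
  for (size_t i = 0; i < cache.size(); i++) cache[i]->metadata_do(f);
}
// e.g. code_cache_metadata_do(cache, mark_on_stack);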

View File

@ -65,12 +65,67 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
return size; return size;
} }
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_frame_size(frame_size),
_strings(CodeStrings()),
_oop_maps(oop_maps),
_caller_must_gc_arguments(caller_must_gc_arguments),
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
{
assert(layout.size() == round_to(layout.size(), oopSize), "unaligned size");
assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
assert(layout.relocation_size() == round_to(layout.relocation_size(), oopSize), "unaligned size");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_frame_size(frame_size),
_strings(CodeStrings()),
_caller_must_gc_arguments(caller_must_gc_arguments),
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
{
assert(_size == round_to(_size, oopSize), "unaligned size");
assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
assert(_data_offset <= _size, "codeBlob is too small");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
set_oop_maps(oop_maps);
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
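A worked example of the simple-layout arithmetic the new constructors consume (the offset-based CodeBlobLayout constructor appears later in codeBlob.hpp). The 64-byte code alignment here is illustrative; the real value depends on CodeEntryAlignment:

#include <cassert>

static int align_up(int x, int a) { return (x + a - 1) & ~(a - 1); }

int main() {
  const int header_size     = 96;    // subclass header, oop-aligned
  const int relocation_size = 40;    // already a multiple of oopSize
  const int size            = 1024;  // total blob size
  const int content_offset  = align_up(header_size + relocation_size, 64);
  assert(content_offset == 192);     // code_begin = start + 192
  const int data_offset     = size;  // simple blobs: everything is code
  assert(content_offset <= data_offset);  // data_end = start + size
  return 0;
}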
// Creates a simple CodeBlob. Sets up the size of the different regions. // Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) { RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
assert(size == round_to(size, oopSize), "unaligned size"); : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
assert(locs_size == round_to(locs_size, oopSize), "unaligned size"); assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
assert(header_size == round_to(header_size, oopSize), "unaligned size");
assert(!UseRelocIndex, "no space allocated for reloc index yet"); assert(!UseRelocIndex, "no space allocated for reloc index yet");
// Note: If UseRelocIndex is enabled, there needs to be (at least) one // Note: If UseRelocIndex is enabled, there needs to be (at least) one
@ -79,55 +134,31 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
// mentation is not easily understandable and thus it is not clear // mentation is not easily understandable and thus it is not clear
// what exactly the format is supposed to be. For now, we just turn // what exactly the format is supposed to be. For now, we just turn
// off the use of this table (gri 7/6/2000). // off the use of this table (gri 7/6/2000).
_name = name;
_size = size;
_frame_complete_offset = frame_complete;
_header_size = header_size;
_relocation_size = locs_size;
_content_offset = align_code_offset(header_size + _relocation_size);
_code_offset = _content_offset;
_data_offset = size;
_frame_size = 0;
set_oop_maps(NULL);
_strings = CodeStrings();
} }
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions, // Creates a RuntimeBlob from a CodeBuffer
// and copy code and relocation info. // and copy code and relocation info.
CodeBlob::CodeBlob( RuntimeBlob::RuntimeBlob(
const char* name, const char* name,
CodeBuffer* cb, CodeBuffer* cb,
int header_size, int header_size,
int size, int size,
int frame_complete, int frame_complete,
int frame_size, int frame_size,
OopMapSet* oop_maps OopMapSet* oop_maps,
) { bool caller_must_gc_arguments
assert(size == round_to(size, oopSize), "unaligned size"); ) : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
assert(header_size == round_to(header_size, oopSize), "unaligned size");
_name = name;
_size = size;
_frame_complete_offset = frame_complete;
_header_size = header_size;
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
_content_offset = align_code_offset(header_size + _relocation_size);
_code_offset = _content_offset + cb->total_offset_of(cb->insts());
_data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
assert(_data_offset <= size, "codeBlob is too small");
_strings = CodeStrings();
cb->copy_code_and_locs_to(this); cb->copy_code_and_locs_to(this);
set_oop_maps(oop_maps);
_frame_size = frame_size;
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
} }
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
}
_strings.free();
}
void CodeBlob::set_oop_maps(OopMapSet* p) { void CodeBlob::set_oop_maps(OopMapSet* p) {
// Danger Will Robinson! This method allocates a big // Danger Will Robinson! This method allocates a big
@ -140,7 +171,7 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
} }
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) { void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
// Do not hold the CodeCache lock during name formatting. // Do not hold the CodeCache lock during name formatting.
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub"); assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
@ -167,19 +198,9 @@ void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* nam
MemoryService::track_code_cache_memory_usage(); MemoryService::track_code_cache_memory_usage();
} }
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
}
_strings.free();
}
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) { const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
assert(oop_maps() != NULL, "nope"); assert(_oop_maps != NULL, "nope");
return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
} }
void CodeBlob::print_code() { void CodeBlob::print_code() {
@ -193,7 +214,7 @@ void CodeBlob::print_code() {
BufferBlob::BufferBlob(const char* name, int size) BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0) : RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{} {}
BufferBlob* BufferBlob::create(const char* name, int buffer_size) { BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
@ -203,7 +224,7 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
unsigned int size = sizeof(BufferBlob); unsigned int size = sizeof(BufferBlob);
CodeCacheExtensions::size_blob(name, &buffer_size); CodeCacheExtensions::size_blob(name, &buffer_size);
// align the size to CodeEntryAlignment // align the size to CodeEntryAlignment
size = align_code_offset(size); size = CodeBlob::align_code_offset(size);
size += round_to(buffer_size, oopSize); size += round_to(buffer_size, oopSize);
assert(name != NULL, "must provide a name"); assert(name != NULL, "must provide a name");
{ {
@ -218,14 +239,14 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb) BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL) : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{} {}
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) { BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
BufferBlob* blob = NULL; BufferBlob* blob = NULL;
unsigned int size = allocation_size(cb, sizeof(BufferBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
assert(name != NULL, "must provide a name"); assert(name != NULL, "must provide a name");
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -246,7 +267,7 @@ void BufferBlob::free(BufferBlob *blob) {
blob->flush(); blob->flush();
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free((CodeBlob*)blob); CodeCache::free((RuntimeBlob*)blob);
} }
// Track memory usage statistic after releasing CodeCache_lock // Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage(); MemoryService::track_code_cache_memory_usage();
@ -265,7 +286,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
AdapterBlob* blob = NULL; AdapterBlob* blob = NULL;
unsigned int size = allocation_size(cb, sizeof(AdapterBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb); blob = new (size) AdapterBlob(size, cb);
@ -287,7 +308,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
unsigned int size = sizeof(MethodHandlesAdapterBlob); unsigned int size = sizeof(MethodHandlesAdapterBlob);
CodeCacheExtensions::size_blob("MethodHandles adapters", &buffer_size); CodeCacheExtensions::size_blob("MethodHandles adapters", &buffer_size);
// align the size to CodeEntryAlignment // align the size to CodeEntryAlignment
size = align_code_offset(size); size = CodeBlob::align_code_offset(size);
size += round_to(buffer_size, oopSize); size += round_to(buffer_size, oopSize);
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -314,12 +335,10 @@ RuntimeStub::RuntimeStub(
OopMapSet* oop_maps, OopMapSet* oop_maps,
bool caller_must_gc_arguments bool caller_must_gc_arguments
) )
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps) : RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{ {
_caller_must_gc_arguments = caller_must_gc_arguments;
} }
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
CodeBuffer* cb, CodeBuffer* cb,
int frame_complete, int frame_complete,
@ -332,7 +351,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
if (!CodeCacheExtensions::skip_code_generation()) { if (!CodeCacheExtensions::skip_code_generation()) {
// bypass useless code generation // bypass useless code generation
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(RuntimeStub)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments); stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
} }
stub = (RuntimeStub*) CodeCacheExtensions::handle_generated_blob(stub, stub_name); stub = (RuntimeStub*) CodeCacheExtensions::handle_generated_blob(stub, stub_name);
@ -392,7 +411,7 @@ DeoptimizationBlob* DeoptimizationBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
blob = new (size) DeoptimizationBlob(cb, blob = new (size) DeoptimizationBlob(cb,
size, size,
oop_maps, oop_maps,
@ -431,7 +450,7 @@ UncommonTrapBlob* UncommonTrapBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size); blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
} }
@ -467,7 +486,7 @@ ExceptionBlob* ExceptionBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(ExceptionBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size); blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
} }
@ -502,7 +521,7 @@ SafepointBlob* SafepointBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(SafepointBlob)); unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size); blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
} }
@ -515,10 +534,6 @@ SafepointBlob* SafepointBlob::create(
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Verification and printing // Verification and printing
void CodeBlob::verify() {
ShouldNotReachHere();
}
void CodeBlob::print_on(outputStream* st) const { void CodeBlob::print_on(outputStream* st) const {
st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this)); st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
st->print_cr("Framesize: %d", _frame_size); st->print_cr("Framesize: %d", _frame_size);
@ -528,12 +543,16 @@ void CodeBlob::print_value_on(outputStream* st) const {
st->print_cr("[CodeBlob]"); st->print_cr("[CodeBlob]");
} }
void RuntimeBlob::verify() {
ShouldNotReachHere();
}
void BufferBlob::verify() { void BufferBlob::verify() {
// unimplemented // unimplemented
} }
void BufferBlob::print_on(outputStream* st) const { void BufferBlob::print_on(outputStream* st) const {
CodeBlob::print_on(st); RuntimeBlob::print_on(st);
print_value_on(st); print_value_on(st);
} }
@ -547,10 +566,10 @@ void RuntimeStub::verify() {
void RuntimeStub::print_on(outputStream* st) const { void RuntimeStub::print_on(outputStream* st) const {
ttyLocker ttyl; ttyLocker ttyl;
CodeBlob::print_on(st); RuntimeBlob::print_on(st);
st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this)); st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
st->print_cr("%s", name()); st->print_cr("%s", name());
Disassembler::decode((CodeBlob*)this, st); Disassembler::decode((RuntimeBlob*)this, st);
} }
void RuntimeStub::print_value_on(outputStream* st) const { void RuntimeStub::print_value_on(outputStream* st) const {
@ -563,9 +582,9 @@ void SingletonBlob::verify() {
void SingletonBlob::print_on(outputStream* st) const { void SingletonBlob::print_on(outputStream* st) const {
ttyLocker ttyl; ttyLocker ttyl;
CodeBlob::print_on(st); RuntimeBlob::print_on(st);
st->print_cr("%s", name()); st->print_cr("%s", name());
Disassembler::decode((CodeBlob*)this, st); Disassembler::decode((RuntimeBlob*)this, st);
} }
void SingletonBlob::print_value_on(outputStream* st) const { void SingletonBlob::print_value_on(outputStream* st) const {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -45,12 +45,14 @@ struct CodeBlobType {
// CodeBlob - superclass for all entries in the CodeCache. // CodeBlob - superclass for all entries in the CodeCache.
// //
// Suptypes are: // Subtypes are:
// nmethod : Compiled Java methods (include method that calls to native code) // CompiledMethod : Compiled Java methods (include method that calls to native code)
// RuntimeStub : Call to VM runtime methods // nmethod : JIT Compiled Java methods
// DeoptimizationBlob : Used for deoptimizatation // RuntimeBlob : Non-compiled method code; generated glue code
// ExceptionBlob : Used for stack unrolling // RuntimeStub : Call to VM runtime methods
// SafepointBlob : Used to handle illegal instruction exceptions // DeoptimizationBlob : Used for deoptimization
// ExceptionBlob : Used for stack unrolling
// SafepointBlob : Used to handle illegal instruction exceptions
// //
// //
// Layout: // Layout:
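Abridged, the hierarchy the revised comment describes looks like this (only the typing spine is shown; the real classes carry layout and lifecycle state):

class CodeBlob {
public:
  virtual ~CodeBlob() {}
  virtual bool is_compiled() const { return false; }
  virtual bool is_nmethod()  const { return false; }
};
class CompiledMethod : public CodeBlob {        // compiled Java methods
public:
  virtual bool is_compiled() const { return true; }
};
class nmethod : public CompiledMethod {         // JIT-compiled Java methods
public:
  virtual bool is_nmethod() const { return true; }
};
class RuntimeBlob : public CodeBlob {};         // stubs and glue code
// RuntimeStub derives from RuntimeBlob; DeoptimizationBlob, ExceptionBlob
// and SafepointBlob derive from it via SingletonBlob.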
@ -59,90 +61,79 @@ struct CodeBlobType {
// - content space // - content space
// - instruction space // - instruction space
// - data space // - data space
class DeoptimizationBlob;
class CodeBlobLayout;
class CodeBlob VALUE_OBJ_CLASS_SPEC { class CodeBlob VALUE_OBJ_CLASS_SPEC {
friend class VMStructs; friend class VMStructs;
friend class JVMCIVMStructs; friend class JVMCIVMStructs;
friend class CodeCacheDumper; friend class CodeCacheDumper;
private: protected:
const char* _name; const char* _name;
int _size; // total size of CodeBlob in bytes int _size; // total size of CodeBlob in bytes
int _header_size; // size of header (depends on subclass) int _header_size; // size of header (depends on subclass)
int _relocation_size; // size of relocation
int _content_offset; // offset to where content region begins (this includes consts, insts, stubs)
int _code_offset; // offset to where instructions region begins (this includes insts, stubs)
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
// not finished setting up their frame. Beware of pc's in // not finished setting up their frame. Beware of pc's in
// that range. There is a similar range(s) on returns // that range. There is a similar range(s) on returns
// which we don't detect. // which we don't detect.
int _data_offset; // offset to where data region begins int _data_offset; // offset to where data region begins
int _frame_size; // size of stack frame int _frame_size; // size of stack frame
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeStrings _strings;
public: address _code_begin;
address _code_end;
address _content_begin; // address to where content region begins (this includes consts, insts, stubs)
// address _content_end - not required, for all CodeBlobs _code_end == _content_end for now
address _data_end;
address _relocation_begin;
address _relocation_end;
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
bool _caller_must_gc_arguments;
CodeStrings _strings;
CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
// Returns the space needed for CodeBlob // Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size); static unsigned int allocation_size(CodeBuffer* cb, int header_size);
static unsigned int align_code_offset(int offset); static unsigned int align_code_offset(int offset);
// Creation
// a) simple CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
// b) full CodeBlob
CodeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps
);
// Deletion // Deletion
void flush(); virtual void flush();
// Typing // Typing
virtual bool is_buffer_blob() const { return false; } virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; } virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; } virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; } virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; } virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; } virtual bool is_exception_stub() const { return false; }
virtual bool is_safepoint_stub() const { return false; } virtual bool is_safepoint_stub() const { return false; }
virtual bool is_adapter_blob() const { return false; } virtual bool is_adapter_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; } virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_compiled_by_c2() const { return false; } virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; } virtual bool is_compiled_by_c1() const { return false; }
virtual bool is_compiled_by_jvmci() const { return false; } virtual bool is_compiled_by_jvmci() const { return false; }
// Casting // Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; } nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
CompiledMethod* as_compiled_method_or_null() { return is_compiled() ? (CompiledMethod*) this : NULL; }
CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; }
// Boundaries // Boundaries
address header_begin() const { return (address) this; } address header_begin() const { return (address) this; }
address header_end() const { return ((address) this) + _header_size; }; relocInfo* relocation_begin() const { return (relocInfo*) _relocation_begin; };
relocInfo* relocation_begin() const { return (relocInfo*) header_end(); }; relocInfo* relocation_end() const { return (relocInfo*) _relocation_end; }
relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); } address content_begin() const { return _content_begin; }
address content_begin() const { return (address) header_begin() + _content_offset; } address content_end() const { return _code_end; } // _code_end == _content_end is true for all types of blobs for now, it is also checked in the constructor
address content_end() const { return (address) header_begin() + _data_offset; } address code_begin() const { return _code_begin; }
address code_begin() const { return (address) header_begin() + _code_offset; } address code_end() const { return _code_end; }
address code_end() const { return (address) header_begin() + _data_offset; } address data_end() const { return _data_end; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
// Offsets
int relocation_offset() const { return _header_size; }
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
// Sizes // Sizes
int size() const { return _size; } int size() const { return _size; }
@ -150,17 +141,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); } int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int content_size() const { return content_end() - content_begin(); } int content_size() const { return content_end() - content_begin(); }
int code_size() const { return code_end() - code_begin(); } int code_size() const { return code_end() - code_begin(); }
int data_size() const { return data_end() - data_begin(); }
// Containment // Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); } bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool content_contains(address addr) const { return content_begin() <= addr && addr < content_end(); }
bool code_contains(address addr) const { return code_begin() <= addr && addr < code_end(); } bool code_contains(address addr) const { return code_begin() <= addr && addr < code_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); } bool contains(address addr) const { return content_begin() <= addr && addr < content_end(); }
bool contains(address addr) const { return content_contains(addr); } bool is_frame_complete_at(address addr) const { return code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
bool is_frame_complete_at(address addr) const { return code_contains(addr) &&
addr >= code_begin() + _frame_complete_offset; }
// CodeCache support: really only used by the nmethods, but in order to get // CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined // asserts and certain bookkeeping to work in the CodeCache they are defined
@ -178,29 +164,26 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
ImmutableOopMapSet* oop_maps() const { return _oop_maps; }
void set_oop_maps(OopMapSet* p);
const ImmutableOopMap* oop_map_for_return_address(address return_address);
- virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }
+ virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) = 0;
// Frame support
int frame_size() const { return _frame_size; }
void set_frame_size(int size) { _frame_size = size; }
// Returns true if the next frame is responsible for GC'ing oops passed as arguments
- virtual bool caller_must_gc_arguments(JavaThread* thread) const { return false; }
+ bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
// Naming
const char* name() const { return _name; }
void set_name(const char* name) { _name = name; }
// Debugging
- virtual void verify();
+ virtual void verify() = 0;
- void print() const { print_on(tty); }
+ virtual void print() const { print_on(tty); };
virtual void print_on(outputStream* st) const;
virtual void print_value_on(outputStream* st) const;
void print_code();
- // Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
- static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = "");
// Print the comment associated with offset on stream, if there is one
virtual void print_block_comment(outputStream* stream, address block_begin) const {
intptr_t offset = (intptr_t)(block_begin - code_begin());
@ -221,11 +204,142 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
}
};
class CodeBlobLayout : public StackObj {
private:
int _size;
int _header_size;
int _relocation_size;
int _content_offset;
int _code_offset;
int _data_offset;
address _code_begin;
address _code_end;
address _content_begin;
address _content_end;
address _data_end;
address _relocation_begin;
address _relocation_end;
public:
CodeBlobLayout(address code_begin, address code_end, address content_begin, address content_end, address data_end, address relocation_begin, address relocation_end) :
_size(0),
_header_size(0),
_relocation_size(0),
_content_offset(0),
_code_offset(0),
_data_offset(0),
_content_begin(content_begin),
_content_end(content_end),
_code_begin(code_begin),
_code_end(code_end),
_data_end(data_end),
_relocation_begin(relocation_begin),
_relocation_end(relocation_end)
{
}
CodeBlobLayout(const address start, int size, int header_size, int relocation_size, int data_offset) :
_size(size),
_header_size(header_size),
_relocation_size(relocation_size),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset),
_data_offset(data_offset)
{
assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
CodeBlobLayout(const address start, int size, int header_size, const CodeBuffer* cb) :
_size(size),
_header_size(header_size),
_relocation_size(round_to(cb->total_relocation_size(), oopSize)),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset + cb->total_offset_of(cb->insts())),
_data_offset(_content_offset + round_to(cb->total_content_size(), oopSize))
{
assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return _relocation_size; }
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
address code_begin() const { return _code_begin; }
address code_end() const { return _code_end; }
address data_end() const { return _data_end; }
address relocation_begin() const { return _relocation_begin; }
address relocation_end() const { return _relocation_end; }
address content_begin() const { return _content_begin; }
address content_end() const { return _content_end; }
};
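The size-based constructors above derive every boundary from a few inputs: the header, the word-aligned relocation section, and either a data offset or the CodeBuffer sizes. A standalone sketch of that arithmetic follows; the concrete sizes and the 64-byte alignment standing in for CodeBlob::align_code_offset are illustrative assumptions, not values from the patch.

    // Standalone sketch of the CodeBlobLayout offset arithmetic (not HotSpot code).
    #include <cassert>
    #include <cstdio>

    static int round_to(int x, int unit) { return (x + unit - 1) / unit * unit; }

    int main() {
      const int oopSize        = 8;    // word size on a 64-bit VM (assumed)
      const int code_alignment = 64;   // stand-in for CodeBlob::align_code_offset
      int header_size     = 24;
      int relocation_size = round_to(100, oopSize);  // relocations are word-aligned
      int content_size    = round_to(1000, oopSize);

      // Memory picture: [header][relocation][pad][content == code][data]
      int content_offset = round_to(header_size + relocation_size, code_alignment);
      int code_offset    = content_offset;           // code starts where content starts
      int data_offset    = content_offset + content_size;

      assert(relocation_size % oopSize == 0);        // mirrors the assert in the ctors
      printf("content@%d code@%d data@%d\n", content_offset, code_offset, data_offset);
      return 0;
    }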
class RuntimeBlob : public CodeBlob {
friend class VMStructs;
public:
// Creation
// a) simple CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
// b) full CodeBlob
RuntimeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments = false
);
// GC support
virtual bool is_alive() const = 0;
void verify();
// OopMap for frame
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }
// Debugging
void print() const { print_on(tty); }
virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
virtual void print_value_on(outputStream* st) const { CodeBlob::print_value_on(st); }
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static void trace_new_stub(RuntimeBlob* blob, const char* name1, const char* name2 = "");
};
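Taken together, this is the heart of the restructuring: CodeBlob becomes an abstract base, RuntimeBlob keeps the old default behaviour for stubs and singletons, and CompiledMethod (introduced further down) carries the method-related state. A minimal standalone model of the new shape, with stand-in bodies, just to show where the virtuals now live:

    // Toy model of the new hierarchy; names mirror the patch, bodies are stand-ins.
    #include <cstdio>

    struct CodeBlob {
      virtual ~CodeBlob() {}
      virtual void verify() = 0;                    // now pure virtual
      virtual bool is_compiled() const { return false; }
    };

    struct RuntimeBlob : CodeBlob {                 // stubs, buffers, singletons
      void verify() override { /* does nothing */ }
    };

    struct CompiledMethod : CodeBlob {              // nmethods and other compiled code
      void verify() override {}
      bool is_compiled() const override { return true; }
    };

    int main() {
      RuntimeBlob stub;
      CompiledMethod nm;
      CodeBlob* blobs[] = { &stub, &nm };
      for (CodeBlob* b : blobs)
        printf("is_compiled = %d\n", b->is_compiled());
      return 0;
    }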
class WhiteBox;
//----------------------------------------------------------------------------------------------------
// BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.
- class BufferBlob: public CodeBlob {
+ class BufferBlob: public RuntimeBlob {
friend class VMStructs;
friend class AdapterBlob;
friend class MethodHandlesAdapterBlob;
@ -293,11 +407,9 @@ public:
//----------------------------------------------------------------------------------------------------
// RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine
- class RuntimeStub: public CodeBlob {
+ class RuntimeStub: public RuntimeBlob {
friend class VMStructs;
private:
- bool _caller_must_gc_arguments;
// Creation support
RuntimeStub(
const char* name,
@ -325,10 +437,7 @@ class RuntimeStub: public CodeBlob {
// Typing
bool is_runtime_stub() const { return true; }
- // GC support
- bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
- address entry_point() { return code_begin(); }
+ address entry_point() const { return code_begin(); }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
@ -343,7 +452,7 @@ class RuntimeStub: public CodeBlob {
//----------------------------------------------------------------------------------------------------
// Super-class for all blobs that exist in only one instance. Implements default behaviour.
- class SingletonBlob: public CodeBlob {
+ class SingletonBlob: public RuntimeBlob {
friend class VMStructs;
protected:
@ -358,13 +467,15 @@ class SingletonBlob: public CodeBlob {
int frame_size,
OopMapSet* oop_maps
)
- : CodeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
+ : RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
{};
address entry_point() { return code_begin(); }
bool is_alive() const { return true; }
+ // GC/Verification support
+ void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
void verify(); // does nothing
void print_on(outputStream* st) const;
void print_value_on(outputStream* st) const;
@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -561,12 +561,12 @@ CodeBlob* CodeCache::find_blob(void* start) {
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
// NMT can walk the stack before code cache is created
- if (_heaps == NULL || _heaps->is_empty()) return NULL;
- FOR_ALL_HEAPS(heap) {
-   CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
-   if (result != NULL && result->blob_contains((address)start)) {
-     return result;
-   }
- }
+ if (_heaps != NULL && !_heaps->is_empty()) {
+   FOR_ALL_HEAPS(heap) {
+     CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
+     if (result != NULL && result->blob_contains((address)start)) {
+       return result;
+     }
+   }
+ }
return NULL;
@ -595,11 +595,11 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) {
}
}
- void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
+ void CodeCache::metadata_do(void f(Metadata* m)) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
while(iter.next_alive()) {
- f(iter.method());
+ iter.method()->metadata_do(f);
}
}
@ -614,7 +614,7 @@ int CodeCache::alignment_offset() {
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
iter.method()->do_unloading(is_alive, unloading_occurred);
}
@ -841,17 +841,18 @@ void CodeCache::gc_prologue() {
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
NOT_DEBUG(if (needs_cache_clean())) {
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
- nmethod* nm = iter.method();
- assert(!nm->is_unloaded(), "Tautology");
+ CompiledMethod* cm = iter.method();
+ assert(!cm->is_unloaded(), "Tautology");
DEBUG_ONLY(if (needs_cache_clean())) {
- nm->cleanup_inline_caches();
+ cm->cleanup_inline_caches();
}
- DEBUG_ONLY(nm->verify());
- DEBUG_ONLY(nm->verify_oop_relocations());
+ DEBUG_ONLY(cm->verify());
+ DEBUG_ONLY(cm->verify_oop_relocations());
}
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
@ -1036,7 +1037,7 @@ int CodeCache::number_of_nmethods_with_dependencies() {
void CodeCache::clear_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
iter.method()->clear_inline_caches();
}
@ -1083,6 +1084,11 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
return number_of_marked_CodeBlobs;
}
CompiledMethod* CodeCache::find_compiled(void* start) {
CodeBlob *cb = find_blob(start);
assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
return (CompiledMethod*)cb;
}
#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
@ -1094,16 +1100,16 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
for (int i = 0; i < old_methods->length(); i++) {
ResourceMark rm;
Method* old_method = old_methods->at(i);
- nmethod *nm = old_method->code();
+ CompiledMethod* nm = old_method->code();
if (nm != NULL) {
nm->mark_for_deoptimization();
number_of_marked_CodeBlobs++;
}
}
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
- nmethod* nm = iter.method();
+ CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization()) {
// ...Already marked in the previous pass; don't count it again.
} else if (nm->is_evol_dependent_on(dependee())) {
@ -1124,9 +1130,9 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
- nmethod* nm = iter.method();
+ CompiledMethod* nm = iter.method();
if (!nm->method()->is_method_handle_intrinsic()) {
nm->mark_for_deoptimization();
}
@ -1137,9 +1143,9 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
- nmethod* nm = iter.method();
+ CompiledMethod* nm = iter.method();
if (nm->is_dependent_on_method(dependee)) {
ResourceMark rm;
nm->mark_for_deoptimization();
@ -1152,9 +1158,9 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
void CodeCache::make_marked_nmethods_not_entrant() {
assert_locked_or_safepoint(CodeCache_lock);
- NMethodIterator iter;
+ CompiledMethodIterator iter;
while(iter.next_alive()) {
- nmethod* nm = iter.method();
+ CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization()) {
nm->make_not_entrant();
}
@ -1549,3 +1555,36 @@ void CodeCache::log_state(outputStream* st) {
blob_count(), nmethod_count(), adapter_count(),
unallocated_capacity());
}
// Initialize iterator to given compiled method
void CompiledMethodIterator::initialize(CompiledMethod* cm) {
_code_blob = (CodeBlob*)cm;
if (!SegmentedCodeCache) {
// Iterate over all CodeBlobs
_code_blob_type = CodeBlobType::All;
} else if (cm != NULL) {
_code_blob_type = CodeCache::get_code_blob_type(cm);
} else {
// Only iterate over method code heaps, starting with non-profiled
_code_blob_type = CodeBlobType::MethodNonProfiled;
}
}
// Advance iterator to the next compiled method in the current code heap
bool CompiledMethodIterator::next_compiled_method() {
// Get first method CodeBlob
if (_code_blob == NULL) {
_code_blob = CodeCache::first_blob(_code_blob_type);
if (_code_blob == NULL) {
return false;
} else if (_code_blob->is_nmethod()) {
return true;
}
}
// Search for next method CodeBlob
_code_blob = CodeCache::next_blob(_code_blob);
while (_code_blob != NULL && !_code_blob->is_compiled()) {
_code_blob = CodeCache::next_blob(_code_blob);
}
return _code_blob != NULL;
}
@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,6 +78,7 @@ class CodeCache : AllStatic {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class NMethodIterator;
+ friend class CompiledMethodIterator;
friend class WhiteBox;
friend class CodeCacheLoader;
private:
@ -134,12 +135,13 @@ class CodeCache : AllStatic {
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
- static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
+ static void metadata_do(void f(Metadata* m)); // iterates over metadata in alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
+ static CompiledMethod* find_compiled(void* start);
static int blob_count(); // Returns the total number of CodeBlobs in the cache
static int blob_count(int code_blob_type);
@ -207,8 +209,8 @@ class CodeCache : AllStatic {
static bool heap_available(int code_blob_type);
// Returns the CodeBlobType for the given nmethod
- static int get_code_blob_type(nmethod* nm) {
-   return get_code_heap(nm)->code_blob_type();
+ static int get_code_blob_type(CompiledMethod* cm) {
+   return get_code_heap(cm)->code_blob_type();
}
// Returns the CodeBlobType for the given compilation level
@ -337,4 +339,53 @@ private:
}
};
// Iterator to iterate over compiled methods in the CodeCache.
class CompiledMethodIterator : public StackObj {
private:
CodeBlob* _code_blob; // Current CodeBlob
int _code_blob_type; // Refers to current CodeHeap
public:
CompiledMethodIterator() {
initialize(NULL); // Set to NULL, initialized by first call to next()
}
CompiledMethodIterator(CompiledMethod* cm) {
initialize(cm);
}
// Advance iterator to next compiled method
bool next() {
assert_locked_or_safepoint(CodeCache_lock);
assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
bool result = next_compiled_method();
while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
// Advance to next code heap if segmented code cache
_code_blob_type++;
result = next_compiled_method();
}
return result;
}
// Advance iterator to next alive compiled method
bool next_alive() {
bool result = next();
while(result && !_code_blob->is_alive()) {
result = next();
}
return result;
}
bool end() const { return _code_blob == NULL; }
CompiledMethod* method() const { return (_code_blob != NULL) ? _code_blob->as_compiled_method() : NULL; }
private:
// Initialize iterator to given compiled method
void initialize(CompiledMethod* cm);
// Advance iterator to the next compiled method in the current code heap
bool next_compiled_method();
};
#endif // SHARE_VM_CODE_CODECACHE_HPP
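A typical (hypothetical) call site follows the same protocol as the existing NMethodIterator and matches the loops rewritten in codeCache.cpp above; it must run under the CodeCache_lock or at a safepoint, as next() asserts.

    // Hypothetical call site, mirroring the rewritten loops above.
    CompiledMethodIterator iter;
    while (iter.next_alive()) {            // skips blobs that are not alive
      CompiledMethod* cm = iter.method();  // nmethod or other compiled method
      cm->cleanup_inline_caches();
    }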
@ -103,7 +103,7 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
- assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+ assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
_ic_call->set_destination_mt_safe(entry_point);
}
@ -182,17 +182,17 @@ void CompiledIC::initialize_from_iter(RelocIterator* iter) {
}
}
- CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+ CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
: _ic_call(call)
{
address ic_call = _ic_call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
- assert(nm != NULL, "must pass nmethod");
- assert(nm->contains(ic_call), "must be in nmethod");
+ assert(cm != NULL, "must pass compiled method");
+ assert(cm->contains(ic_call), "must be in compiled method");
// Search for the ic_call at the given address.
- RelocIterator iter(nm, ic_call, ic_call+1);
+ RelocIterator iter(cm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
@ -205,10 +205,10 @@ CompiledIC::CompiledIC(RelocIterator* iter)
{
address ic_call = _ic_call->instruction_address();
- nmethod* nm = iter->code();
+ CompiledMethod* nm = iter->code();
assert(ic_call != NULL, "ic_call address must be set");
- assert(nm != NULL, "must pass nmethod");
- assert(nm->contains(ic_call), "must be in nmethod");
+ assert(nm != NULL, "must pass compiled method");
+ assert(nm->contains(ic_call), "must be in compiled method");
initialize_from_iter(iter);
}
@ -278,7 +278,7 @@ bool CompiledIC::is_call_to_compiled() const {
// method is guaranteed to still exist, since we only remove methods after all inline caches
// have been cleaned up
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
- bool is_monomorphic = (cb != NULL && cb->is_nmethod());
+ bool is_monomorphic = (cb != NULL && cb->is_compiled());
// Check that the cached_value is a klass for non-optimized monomorphic calls
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
// for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
@ -423,7 +423,7 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
- assert (cb->is_nmethod(), "must be compiled!");
+ assert (cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */
// This is MT safe if we come from a clean-cache and go through a
@ -469,9 +469,11 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
bool static_bound,
CompiledICInfo& info,
TRAPS) {
- nmethod* method_code = method->code();
+ CompiledMethod* method_code = method->code();
address entry = NULL;
if (method_code != NULL && method_code->is_in_use()) {
+ assert(method_code->is_compiled(), "must be compiled");
// Call to compiled code
if (static_bound || is_optimized) {
entry = method_code->verified_entry_point();
@ -520,6 +522,7 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
info.set_interpreter_entry(method()->get_c2i_entry(), method());
} else {
// Use icholder entry
+ assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
}
@ -557,7 +560,7 @@ void CompiledStaticCall::set_to_clean() {
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(this);
- assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+ assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());
@ -579,8 +582,8 @@ bool CompiledStaticCall::is_call_to_compiled() const {
bool CompiledStaticCall::is_call_to_interpreted() const {
// It is a call to interpreted code if it calls a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call
- nmethod* nm = CodeCache::find_nmethod(instruction_address());
- return nm->stub_contains(destination());
+ CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
+ return cm->stub_contains(destination());
}
void CompiledStaticCall::set(const StaticCallInfo& info) {
@ -612,7 +615,7 @@ void CompiledStaticCall::set(const StaticCallInfo& info) {
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, StaticCallInfo& info) {
- nmethod* m_code = m->code();
+ CompiledMethod* m_code = m->code();
info._callee = m;
if (m_code != NULL && m_code->is_in_use()) {
info._to_interpreter = false;
@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -135,7 +135,7 @@ class CompiledIC: public ResourceObj {
NativeMovConstReg* _value; // patchable value cell for this IC
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
- CompiledIC(nmethod* nm, NativeCall* ic_call);
+ CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
CompiledIC(RelocIterator* iter);
void initialize_from_iter(RelocIterator* iter);
@ -169,8 +169,8 @@ class CompiledIC: public ResourceObj {
public:
// conversion (machine PC to CompiledIC*)
- friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
- friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
+ friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
+ friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
@ -234,13 +234,13 @@ class CompiledIC: public ResourceObj {
void verify() PRODUCT_RETURN;
};
- inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
+ inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
c_ic->verify();
return c_ic;
}
- inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
+ inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
c_ic->verify();
return c_ic;
@ -0,0 +1,707 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
CompiledMethod::CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
void CompiledMethod::init_defaults() {
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
_has_wide_vectors = 0;
_unloading_clock = 0;
}
bool CompiledMethod::is_method_handle_return(address return_pc) {
if (!has_method_handle_invokes()) return false;
PcDesc* pd = pc_desc_at(return_pc);
if (pd == NULL)
return false;
return pd->is_method_handle_invoke();
}
// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
|| pc == (deopt_handler_begin() + NativeCall::instruction_size)
#endif
;
}
// Returns a string version of the method state.
const char* CompiledMethod::state() const {
int state = get_state();
switch (state) {
case in_use:
return "in use";
case not_used:
return "not_used";
case not_entrant:
return "not_entrant";
case zombie:
return "zombie";
case unloaded:
return "unloaded";
default:
fatal("unexpected method state: %d", state);
return NULL;
}
}
//-----------------------------------------------------------------------------
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
assert(new_entry != NULL,"Must be non null");
assert(new_entry->next() == NULL, "Must be null");
ExceptionCache *ec = exception_cache();
if (ec != NULL) {
new_entry->set_next(ec);
}
release_set_exception_cache(new_entry);
}
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
ExceptionCache* prev = NULL;
ExceptionCache* curr = exception_cache();
while (curr != NULL) {
ExceptionCache* next = curr->next();
Klass* ex_klass = curr->exception_type();
if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
if (prev == NULL) {
set_exception_cache(next);
} else {
prev->set_next(next);
}
delete curr;
// prev stays the same.
} else {
prev = curr;
}
curr = next;
}
}
// These are the public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
// We never grab a lock to read the exception cache, so we may
// have false negatives. This is okay, as it can only happen during
// the first few exception lookups for a given nmethod.
ExceptionCache* ec = exception_cache();
while (ec != NULL) {
address ret_val;
if ((ret_val = ec->match(exception,pc)) != NULL) {
return ret_val;
}
ec = ec->next();
}
return NULL;
}
void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
// There are potential race conditions during exception cache updates, so we
// must own the ExceptionCache_lock before doing ANY modifications. Because
// we don't lock during reads, it is possible to have several threads attempt
// to update the cache with the same data. We need to check for already inserted
// copies of the current data before adding it.
MutexLocker ml(ExceptionCache_lock);
ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
target_entry = new ExceptionCache(exception,pc,handler);
add_exception_cache_entry(target_entry);
}
}
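The pair of functions above relies on a read-lock-free, write-locked protocol: readers scan a singly linked list whose entries are only ever prepended and published with release semantics, while writers serialize on ExceptionCache_lock. A minimal standalone sketch of the same idea, using C++11 atomics as a stand-in for HotSpot's OrderAccess:

    // Minimal sketch, not HotSpot code: lock-free readers, serialized writers.
    #include <atomic>
    #include <cstdio>
    #include <mutex>

    struct Entry { int key; int value; Entry* next; };

    static std::atomic<Entry*> head{nullptr};
    static std::mutex write_lock;

    int* lookup(int key) {                        // may run concurrently, no lock
      for (Entry* e = head.load(std::memory_order_acquire); e != nullptr; e = e->next)
        if (e->key == key) return &e->value;
      return nullptr;                             // false negatives are tolerated
    }

    void insert(int key, int value) {             // writers serialize, as in add_handler_for_exception_and_pc
      std::lock_guard<std::mutex> g(write_lock);
      if (lookup(key) != nullptr) return;         // another thread raced us; keep theirs
      Entry* e = new Entry{key, value, head.load(std::memory_order_relaxed)};
      head.store(e, std::memory_order_release);   // publish a fully initialized entry
    }

    int main() {
      insert(1, 42);
      printf("%d\n", *lookup(1));
      return 0;
    }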
//-------------end of code for ExceptionCache--------------
// private method for handling exception cache
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
ExceptionCache* ec = exception_cache();
while (ec != NULL) {
if (ec->match_exception_with_space(exception)) {
return ec;
}
ec = ec->next();
}
return NULL;
}
bool CompiledMethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void CompiledMethod::verify_oop_relocations() {
// Ensure that the code matches the current oop values
RelocIterator iter(this, NULL, NULL);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (!reloc->oop_is_immediate()) {
reloc->verify_oop_relocation();
}
}
}
}
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
return new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
pd->return_oop());
}
void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (!is_in_use() && is_nmethod()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
// Find all calls in an nmethod and clear the ones that point to non-entrant,
// zombie and unloaded nmethods.
ResourceMark rm;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch(iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
if( cb != NULL && cb->is_compiled() ) {
CompiledMethod* nm = cb->as_compiled_method();
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
}
break;
}
case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
if( cb != NULL && cb->is_compiled() ) {
CompiledMethod* cm = cb->as_compiled_method();
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
csc->set_to_clean();
}
}
break;
}
}
}
}
int CompiledMethod::verify_icholder_relocations() {
ResourceMark rm;
int count = 0;
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
CompiledIC *ic = CompiledIC_at(&iter);
if (TraceCompiledIC) {
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
ic->print();
}
assert(ic->cached_icholder() != NULL, "must be non-NULL");
count++;
}
}
}
return count;
}
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
if (method() != NULL && !method()->is_native()) {
address pc = fr.pc();
SimpleScopeDesc ssd(this, pc);
Bytecode_invoke call(ssd.method(), ssd.bci());
bool has_receiver = call.has_receiver();
bool has_appendix = call.has_appendix();
Symbol* signature = call.signature();
// The method attached by JIT-compilers should be used, if present.
// Bytecode can be inaccurate in such case.
Method* callee = attached_method_before_pc(pc);
if (callee != NULL) {
has_receiver = !(callee->access_flags().is_static());
has_appendix = false;
signature = callee->signature();
}
fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
}
#endif // !SHARK
}
// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
if (fr->cb() == NULL) return NULL;
CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
if (cm != NULL && cm->is_deopt_pc(fr->pc()))
return cm->get_original_pc(fr);
return NULL;
}
Method* CompiledMethod::attached_method(address call_instr) {
assert(code_contains(call_instr), "not part of the nmethod");
RelocIterator iter(this, call_instr, call_instr + 1);
while (iter.next()) {
if (iter.addr() == call_instr) {
switch(iter.type()) {
case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
}
}
}
return NULL; // not found
}
Method* CompiledMethod::attached_method_before_pc(address pc) {
if (NativeCall::is_call_before(pc)) {
NativeCall* ncall = nativeCall_before(pc);
return attached_method(ncall->instruction_address());
}
return NULL; // not a call
}
void CompiledMethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
if (is_zombie()) {
return;
}
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
}
}
// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
assert_locked_or_safepoint(CompiledIC_lock);
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC* ic = CompiledIC_at(&iter);
ic->clear_ic_stub();
}
}
}
#ifdef ASSERT
class CheckClass : AllStatic {
static BoolObjectClosure* _is_alive;
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
Klass* klass = NULL;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(_is_alive), "must be alive");
}
public:
static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
_is_alive = is_alive;
nm->metadata_do(check_class);
}
};
// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
return;
}
} else {
ShouldNotReachHere();
}
}
}
ic->set_to_clean();
}
unsigned char CompiledMethod::_global_unloading_clock = 0;
void CompiledMethod::increase_unloading_clock() {
_global_unloading_clock++;
if (_global_unloading_clock == 0) {
// _nmethods are allocated with _unloading_clock == 0,
// so 0 is never used as a clock value.
_global_unloading_clock = 1;
}
}
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}
unsigned char CompiledMethod::unloading_clock() {
return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
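The clock is a single unsigned byte, so it wraps after 255 GC cycles; increase_unloading_clock skips 0 on wrap-around because 0 is the initial value of every method's private clock and therefore means "never processed". A standalone sketch of the invariant:

    // Standalone sketch of the unloading-clock trick (not HotSpot code).
    #include <cassert>

    static unsigned char global_clock = 0;

    static void increase_clock() {
      if (++global_clock == 0) global_clock = 1;  // skip 0 on 8-bit wrap-around
    }

    struct Method { unsigned char clock = 0; };   // freshly allocated => "never processed"

    int main() {
      Method fresh;
      for (int cycle = 0; cycle < 600; ++cycle) { // runs past two 8-bit wrap-arounds
        increase_clock();
        assert(global_clock != 0);                // 0 stays reserved, so a fresh method
        assert(fresh.clock != global_clock);      // is never taken as already processed
      }
      return 0;
    }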
// Processing of oop references should have been sufficient to keep
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
RelocIterator iter(this, low_boundary);
while (iter.next()) {
// static_stub_Relocations may have dangling references to
// Method*s so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
address static_call_addr = NULL;
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(&iter);
if (!cic->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
} else if (iter.type() == relocInfo::static_call_type) {
CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
if (!csc->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
}
if (static_call_addr != NULL) {
RelocIterator sciter(this, low_boundary);
while (sciter.next()) {
if (sciter.type() == relocInfo::static_stub_type &&
sciter.static_stub_reloc()->static_call() == static_call_addr) {
sciter.static_stub_reloc()->clear_inline_cache();
}
}
}
}
// Check that the metadata embedded in the nmethod is alive
CheckClass::do_check_class(is_alive, this);
#endif
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Exception cache
clean_exception_cache(is_alive);
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
// The remaining live cached oops will be traversed in the relocInfo::oop_type
// iteration below.
if (unloading_occurred) {
RelocIterator iter(this, low_boundary);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive);
}
}
}
if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
return;
}
#if INCLUDE_JVMCI
if (do_unloading_jvmci(is_alive, unloading_occurred)) {
return;
}
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
}
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL) {
if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
// The nmethod has not been processed yet.
return true;
}
// Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
ic->set_to_clean();
assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
}
}
return false;
}
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}
static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Exception cache
clean_exception_cache(is_alive);
bool postponed = false;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
case relocInfo::oop_type:
// handled by do_unloading_oops below
break;
case relocInfo::metadata_type:
break; // nothing to do.
}
}
if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
return postponed;
}
#if INCLUDE_JVMCI
if (do_unloading_jvmci(is_alive, unloading_occurred)) {
return postponed;
}
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
return postponed;
}
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie(),
"should not call follow on zombie nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
}
}
}
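do_unloading_parallel and do_unloading_parallel_postponed form a two-phase protocol: in phase one each worker cleans what it safely can and reports, via the boolean threaded through clean_if_nmethod_is_unloaded, whether it saw a callee whose owner had not yet been processed in this cycle; such call sites are postponed and revisited in phase two, once every method's unloading clock is up to date. A condensed standalone model of that control flow, under those assumptions:

    // Condensed model of the postponed-work handshake (not HotSpot code).
    #include <cstdio>
    #include <vector>

    struct Target   { bool processed_this_cycle; };
    struct CallSite { Target* callee; bool cleaned = false; };

    // Phase 1: clean what is safe; report true if the call site must be revisited.
    bool clean_or_postpone(CallSite& cs) {
      if (!cs.callee->processed_this_cycle) return true;  // postpone: owner not done yet
      cs.cleaned = true;
      return false;
    }

    int main() {
      Target late{false};
      std::vector<CallSite> sites = { {&late}, {&late} };

      bool postponed = false;
      for (CallSite& cs : sites) postponed |= clean_or_postpone(cs);  // phase 1

      late.processed_this_cycle = true;   // all workers have finished phase 1

      if (postponed)                      // phase 2: the postponed pass
        for (CallSite& cs : sites)
          if (!cs.cleaned) clean_or_postpone(cs);

      for (CallSite& cs : sites) printf("cleaned=%d\n", cs.cleaned);
      return 0;
    }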
@ -0,0 +1,391 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* _next;
address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
int count() { return OrderAccess::load_acquire(&_count); }
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next() { return _next; }
void set_next(ExceptionCache *ec) { _next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception);
address test_address(address addr);
bool add_address_and_handler(address addr, address handler);
};
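// [Editor's sketch, not part of the patch] How a reader might probe a
// chain of ExceptionCache nodes. Readers see at most count() entries via
// load_acquire, so test_address() never observes a slot before the
// writer's release_store makes it visible. Only the public methods
// declared above are assumed; the helper name is hypothetical.
static address probe_exception_caches(ExceptionCache* ec,
                                      Klass* exception_type, address pc) {
  for (; ec != NULL; ec = ec->next()) {
    if (ec->exception_type() == exception_type) {
      address handler = ec->test_address(pc);  // reads <= count() entries
      if (handler != NULL) return handler;     // cached pc -> handler hit
    }
  }
  return NULL;  // miss: caller computes the handler and may cache it
}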
class nmethod;
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently; without volatile,
// find_pc_desc_internal has returned wrong results, because a C++
// compiler (namely xlC12) may duplicate field accesses if the
// elements are not volatile.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = NULL); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
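// [Editor's sketch, not part of the patch] Why the elements must be
// volatile: each probe should load a slot exactly once; a non-volatile
// slot may be re-read by the compiler and yield two different values
// within one logical check. A hedged single-read idiom, with a
// hypothetical free function standing in for the real lookup:
static PcDesc* find_in_cache(PcDesc* volatile* slots, int size, int pc_offset) {
  for (int i = 0; i < size; i++) {
    PcDesc* d = slots[i];  // exactly one volatile load per slot
    if (d != NULL && d->pc_offset() == pc_offset) {
      return d;            // d is a stable local; no second read of the slot
    }
  }
  return NULL;
}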
class PcDescSearch {
private:
address _code_begin;
PcDesc* _lower;
PcDesc* _upper;
public:
PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
_code_begin(code), _lower(lower), _upper(upper)
{
}
address code_begin() const { return _code_begin; }
PcDesc* scopes_pcs_begin() const { return _lower; }
PcDesc* scopes_pcs_end() const { return _upper; }
};
class PcDescContainer VALUE_OBJ_CLASS_SPEC {
private:
PcDescCache _pc_desc_cache;
public:
PcDescContainer() {}
PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }
PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
address base_address = search.code_begin();
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != NULL && desc->pc_offset() == pc - base_address) {
return desc;
}
return find_pc_desc_internal(pc, approximate, search);
}
};
class CompiledMethod : public CodeBlob {
friend class VMStructs;
friend class NMethodSweeper;
void init_defaults();
protected:
enum MarkForDeoptimizationStatus {
not_marked,
deoptimize,
deoptimize_noupdate
};
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
bool _is_far_code; // Code is far from CodeCache.
// Have to use far call instructions to call it from code in CodeCache.
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
Method* _method;
address _scopes_data_begin;
// All deoptees will resume execution at the location described by
// this address.
address _deopt_handler_begin;
// All deoptees at a MethodHandle call site will resume execution
// at the location described by this address.
address _deopt_mh_handler_begin;
PcDescContainer _pc_desc_container;
ExceptionCache * volatile _exception_cache;
virtual void flush() = 0;
protected:
CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
virtual bool is_compiled() const { return true; }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
enum { in_use = 0, // executable nmethod
not_used = 1, // not entrant, but revivable
not_entrant = 2, // marked for deoptimization but activations may still exist,
// will be transformed to zombie when all activations are gone
zombie = 3, // no activations exist, nmethod is ready for purge
unloaded = 4 // there should be no activations, should not be called,
// will be transformed to zombie immediately
};
virtual AbstractCompiler* compiler() const = 0;
virtual bool is_in_use() const = 0;
virtual int comp_level() const = 0;
virtual int compile_id() const = 0;
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;
virtual bool make_not_used() = 0;
virtual bool make_not_entrant() = 0;
virtual bool make_entrant() = 0;
virtual address entry_point() const = 0;
virtual bool make_zombie() = 0;
virtual bool is_osr_method() const = 0;
virtual int osr_entry_bci() const = 0;
Method* method() const { return _method; }
virtual void print_pcs() = 0;
bool is_native_method() const { return _method != NULL && _method->is_native(); }
bool is_java_method() const { return _method != NULL && !_method->is_native(); }
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the given pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
void mark_for_deoptimization(bool inc_recompile_counts = true) {
_mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
return _mark_for_deoptimization_status != deoptimize_noupdate;
}
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
virtual oop oop_at(int index) const = 0;
virtual Metadata* metadata_at(int index) const = 0;
address scopes_data_begin() const { return _scopes_data_begin; }
virtual address scopes_data_end() const = 0;
int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }
virtual PcDesc* scopes_pcs_begin() const = 0;
virtual PcDesc* scopes_pcs_end() const = 0;
int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }
address insts_begin() const { return code_begin(); }
address insts_end() const { return stub_begin(); }
bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
int insts_size() const { return insts_end() - insts_begin(); }
virtual address consts_begin() const = 0;
virtual address consts_end() const = 0;
bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
int consts_size() const { return consts_end() - consts_begin(); }
virtual address stub_begin() const = 0;
virtual address stub_end() const = 0;
bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
int stub_size() const { return stub_end() - stub_begin(); }
virtual address handler_table_begin() const = 0;
virtual address handler_table_end() const = 0;
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
virtual address nul_chk_table_begin() const = 0;
virtual address nul_chk_table_end() const = 0;
bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
virtual oop* oop_addr_at(int index) const = 0;
virtual Metadata** metadata_addr_at(int index) const = 0;
virtual void set_original_pc(const frame* fr, address pc) = 0;
// Exception cache support
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// MethodHandle
bool is_method_handle_return(address return_pc);
address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }
address deopt_handler_begin() const { return _deopt_handler_begin; }
virtual address get_original_pc(const frame* fr) = 0;
// Deopt
// Return true if the PC is one we would expect if the frame is being deopted.
bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
bool is_deopt_entry(address pc);
virtual bool can_convert_to_zombie() = 0;
virtual const char* compile_kind() const = 0;
virtual int get_state() const = 0;
const char* state() const;
bool is_far_code() const { return _is_far_code; }
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
// implicit exceptions support
virtual address continuation_for_implicit_exception(address pc) { return NULL; }
static address get_deopt_original_pc(const frame* fr);
// Inline cache support
void cleanup_inline_caches(bool clean_all = false);
virtual void clear_inline_caches();
void clear_ic_stubs();
// Verify and count cached icholder relocations.
int verify_icholder_relocations();
void verify_oop_relocations();
virtual bool is_evol_dependent_on(Klass* dependee) = 0;
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
virtual bool is_dependent_on_method(Method* dependee) = 0;
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
virtual void metadata_do(void f(Metadata*)) = 0;
// GC support
void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
CompiledMethod* unloading_next() { return _unloading_next; }
static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1.
virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
static unsigned char global_unloading_clock() { return _global_unloading_clock; }
static void increase_unloading_clock();
void set_unloading_clock(unsigned char unloading_clock);
unsigned char unloading_clock();
protected:
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#if INCLUDE_JVMCI
virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#endif
private:
// GC support to help figure out if an nmethod has been
// cleaned/unloaded by the current GC.
static unsigned char _global_unloading_clock;
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
PcDesc* find_pc_desc(address pc, bool approximate) {
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
}
protected:
union {
// Used by G1 to chain nmethods.
CompiledMethod* _unloading_next;
// Used by non-G1 GCs to chain nmethods.
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
};
#endif //SHARE_VM_CODE_COMPILEDMETHOD_HPP
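// [Editor's sketch, not part of the patch] The caller-side pattern this
// header enables: code that previously tested is_nmethod() can handle
// any compiled method uniformly. Assumes only CodeCache::find_blob and
// as_compiled_method_or_null(), both used elsewhere in this commit
// (e.g. in RelocIterator::initialize below).
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* cm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (cm != NULL && cm->is_deopt_pc(pc)) {
  // pc is a deoptimization handler entry for this compiled method
}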


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -261,11 +261,11 @@ class MonitorValue: public ResourceObj {
class DebugInfoReadStream : public CompressedReadStream { class DebugInfoReadStream : public CompressedReadStream {
private: private:
const nmethod* _code; const CompiledMethod* _code;
const nmethod* code() const { return _code; } const CompiledMethod* code() const { return _code; }
GrowableArray<ScopeValue*>* _obj_pool; GrowableArray<ScopeValue*>* _obj_pool;
public: public:
DebugInfoReadStream(const nmethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) : DebugInfoReadStream(const CompiledMethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) :
CompressedReadStream(code->scopes_data_begin(), offset) { CompressedReadStream(code->scopes_data_begin(), offset) {
_code = code; _code = code;
_obj_pool = obj_pool; _obj_pool = obj_pool;


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -65,9 +65,9 @@ ExceptionHandlerTable::ExceptionHandlerTable(int initial_size) {
} }
ExceptionHandlerTable::ExceptionHandlerTable(const nmethod* nm) { ExceptionHandlerTable::ExceptionHandlerTable(const CompiledMethod* cm) {
_table = (HandlerTableEntry*)nm->handler_table_begin(); _table = (HandlerTableEntry*)cm->handler_table_begin();
_length = nm->handler_table_size() / sizeof(HandlerTableEntry); _length = cm->handler_table_size() / sizeof(HandlerTableEntry);
_size = 0; // no space allocated by ExceptionHandlerTable! _size = 0; // no space allocated by ExceptionHandlerTable!
} }
@ -98,9 +98,9 @@ void ExceptionHandlerTable::add_subtable(
} }
void ExceptionHandlerTable::copy_to(nmethod* nm) { void ExceptionHandlerTable::copy_to(CompiledMethod* cm) {
assert(size_in_bytes() == nm->handler_table_size(), "size of space allocated in nmethod incorrect"); assert(size_in_bytes() == cm->handler_table_size(), "size of space allocated in compiled method incorrect");
copy_bytes_to(nm->handler_table_begin()); copy_bytes_to(cm->handler_table_begin());
} }
void ExceptionHandlerTable::copy_bytes_to(address addr) { void ExceptionHandlerTable::copy_bytes_to(address addr) {


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,7 @@ class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
ExceptionHandlerTable(int initial_size = 8); ExceptionHandlerTable(int initial_size = 8);
// (run-time) construction from nmethod // (run-time) construction from nmethod
ExceptionHandlerTable(const nmethod* nm); ExceptionHandlerTable(const CompiledMethod* nm);
// (compile-time) add entries // (compile-time) add entries
void add_subtable( void add_subtable(
@ -115,7 +115,7 @@ class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
// nmethod support // nmethod support
int size_in_bytes() const { return round_to(_length * sizeof(HandlerTableEntry), oopSize); } int size_in_bytes() const { return round_to(_length * sizeof(HandlerTableEntry), oopSize); }
void copy_to(nmethod* nm); void copy_to(CompiledMethod* nm);
void copy_bytes_to(address addr); void copy_bytes_to(address addr);
// lookup // lookup


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -49,8 +49,8 @@ int InlineCacheBuffer::_pending_count = 0;
void ICStub::finalize() { void ICStub::finalize() {
if (!is_empty()) { if (!is_empty()) {
ResourceMark rm; ResourceMark rm;
CompiledIC *ic = CompiledIC_at(CodeCache::find_nmethod(ic_site()), ic_site()); CompiledIC *ic = CompiledIC_at(CodeCache::find_compiled(ic_site()), ic_site());
assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?"); assert(CodeCache::find_compiled(ic->instruction_address()) != NULL, "inline cache in non-compiled?");
assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer"); assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
ic->set_ic_destination_and_value(destination(), cached_value()); ic->set_ic_destination_and_value(destination(), cached_value());

File diff suppressed because it is too large.


@ -25,68 +25,11 @@
#ifndef SHARE_VM_CODE_NMETHOD_HPP #ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP #define SHARE_VM_CODE_NMETHOD_HPP
#include "code/codeBlob.hpp" #include "code/compiledMethod.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
class DepChange;
class DirectiveSet; class DirectiveSet;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* _next;
address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
int count() { return OrderAccess::load_acquire(&_count); }
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next() { return _next; }
void set_next(ExceptionCache *ec) { _next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception) ;
address test_address(address addr);
bool add_address_and_handler(address addr, address handler) ;
};
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently. find_pc_desc_internal has
// returned wrong results. C++ compiler (namely xlC12) may duplicate
// C++ field accesses if the elements are not volatile.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = NULL); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
// nmethods (native methods) are the compiled code versions of Java methods. // nmethods (native methods) are the compiled code versions of Java methods.
// //
// An nmethod contains: // An nmethod contains:
@ -108,26 +51,14 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
// [Implicit Null Pointer exception table] // [Implicit Null Pointer exception table]
// - implicit null table array // - implicit null table array
class DepChange; class nmethod : public CompiledMethod {
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class nmethod : public CodeBlob {
friend class VMStructs; friend class VMStructs;
friend class JVMCIVMStructs; friend class JVMCIVMStructs;
friend class NMethodSweeper; friend class NMethodSweeper;
friend class CodeCache; // scavengable oops friend class CodeCache; // scavengable oops
private: private:
// GC support to help figure out if an nmethod has been
// cleaned/unloaded by the current GC.
static unsigned char _global_unloading_clock;
// Shared fields for all nmethod's // Shared fields for all nmethod's
Method* _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id() jmethodID _jmethod_id; // Cache of method()->jmethod_id()
@ -140,13 +71,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods: // To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
union {
// Used by G1 to chain nmethods.
nmethod* _unloading_next;
// Used by non-G1 GCs to chain nmethods.
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
static nmethod* volatile _oops_do_mark_nmethods; static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link; nmethod* volatile _oops_do_mark_link;
@ -158,13 +82,7 @@ class nmethod : public CodeBlob {
address _osr_entry_point; // entry point for on stack replacement address _osr_entry_point; // entry point for on stack replacement
// Offsets for different nmethod parts // Offsets for different nmethod parts
int _exception_offset; int _exception_offset;
// All deoptee's will resume execution at this location described by
// this offset.
int _deoptimize_offset;
// All deoptee's at a MethodHandle call site will resume execution
// at this location described by this offset.
int _deoptimize_mh_offset;
// Offset of the unwind handler if it exists // Offset of the unwind handler if it exists
int _unwind_handler_offset; int _unwind_handler_offset;
@ -179,6 +97,8 @@ class nmethod : public CodeBlob {
int _nul_chk_table_offset; int _nul_chk_table_offset;
int _nmethod_end_offset; int _nmethod_end_offset;
int code_offset() const { return (address) code_begin() - header_begin(); }
// location in frame (offset for sp) that deopt can store the original // location in frame (offset for sp) that deopt can store the original
// pc during a deopt. // pc during a deopt.
int _orig_pc_offset; int _orig_pc_offset;
@ -189,27 +109,12 @@ class nmethod : public CodeBlob {
// protected by CodeCache_lock // protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
enum MarkForDeoptimizationStatus {
not_marked,
deoptimize,
deoptimize_noupdate };
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
// used by jvmti to track if an unload event has been posted for this nmethod. // used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported; bool _unload_reported;
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
// Protected by Patching_lock // Protected by Patching_lock
volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded} volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
#ifdef ASSERT #ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif #endif
@ -242,9 +147,6 @@ class nmethod : public CodeBlob {
// counter is decreased (by 1) while sweeping. // counter is decreased (by 1) while sweeping.
int _hotness_counter; int _hotness_counter;
ExceptionCache * volatile _exception_cache;
PcDescCache _pc_desc_cache;
// These are used for compiled synchronized native methods to // These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock so that we can // locate the owner and stack slot for the BasicLock so that we can
// properly revoke the bias of the owner if necessary. They are // properly revoke the bias of the owner if necessary. They are
@ -302,18 +204,21 @@ class nmethod : public CodeBlob {
// Returns true if this thread changed the state of the nmethod or // Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition. // false if another thread performed the transition.
bool make_not_entrant_or_zombie(unsigned int state); bool make_not_entrant_or_zombie(unsigned int state);
bool make_entrant() { Unimplemented(); return false; }
void inc_decompile_count(); void inc_decompile_count();
// Used to manipulate the exception cache
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// Inform external interfaces that a compiled method has been unloaded // Inform external interfaces that a compiled method has been unloaded
void post_compiled_method_unload(); void post_compiled_method_unload();
// Initialize fields to their default values // Initialize fields to their default values
void init_defaults(); void init_defaults();
// Offsets
int content_offset() const { return content_begin() - header_begin(); }
int data_offset() const { return _data_offset; }
address header_end() const { return (address) header_begin() + header_size(); }
public: public:
// create nmethod with entry_bci // create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method, static nmethod* new_nmethod(const methodHandle& method,
@ -334,7 +239,7 @@ class nmethod : public CodeBlob {
, Handle installed_code = Handle(), , Handle installed_code = Handle(),
Handle speculation_log = Handle() Handle speculation_log = Handle()
#endif #endif
); );
static nmethod* new_native_nmethod(const methodHandle& method, static nmethod* new_native_nmethod(const methodHandle& method,
int compile_id, int compile_id,
@ -347,13 +252,10 @@ class nmethod : public CodeBlob {
OopMapSet* oop_maps); OopMapSet* oop_maps);
// accessors // accessors
Method* method() const { return _method; }
AbstractCompiler* compiler() const { return _compiler; } AbstractCompiler* compiler() const { return _compiler; }
// type info // type info
bool is_nmethod() const { return true; } bool is_nmethod() const { return true; }
bool is_java_method() const { return !method()->is_native(); }
bool is_native_method() const { return method()->is_native(); }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; } bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
bool is_compiled_by_c1() const; bool is_compiled_by_c1() const;
@ -363,22 +265,17 @@ class nmethod : public CodeBlob {
// boundaries for different parts // boundaries for different parts
address consts_begin () const { return header_begin() + _consts_offset ; } address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return header_begin() + code_offset() ; } address consts_end () const { return code_begin() ; }
address insts_begin () const { return header_begin() + code_offset() ; }
address insts_end () const { return header_begin() + _stub_offset ; }
address stub_begin () const { return header_begin() + _stub_offset ; } address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _oops_offset ; } address stub_end () const { return header_begin() + _oops_offset ; }
address exception_begin () const { return header_begin() + _exception_offset ; } address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; } address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; } oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; } oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; } Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; } Metadata** metadata_end () const { return (Metadata**) _scopes_data_begin; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; } address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); } PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; } PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
@ -390,16 +287,9 @@ class nmethod : public CodeBlob {
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
// Sizes // Sizes
int consts_size () const { return consts_end () - consts_begin (); }
int insts_size () const { return insts_end () - insts_begin (); }
int stub_size () const { return stub_end () - stub_begin (); }
int oops_size () const { return (address) oops_end () - (address) oops_begin (); } int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); } int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); } int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; } int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; } int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
@ -411,15 +301,10 @@ class nmethod : public CodeBlob {
int hotness_counter() const { return _hotness_counter; } int hotness_counter() const { return _hotness_counter; }
// Containment // Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); } bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); } bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); } bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
// entry points // entry points
address entry_point() const { return _entry_point; } // normal entry point address entry_point() const { return _entry_point; } // normal entry point
@ -434,24 +319,11 @@ class nmethod : public CodeBlob {
// flag accessing and manipulation // flag accessing and manipulation
bool is_in_use() const { return _state == in_use; } bool is_in_use() const { return _state == in_use; }
bool is_alive() const { unsigned char s = _state; return s == in_use || s == not_entrant; } bool is_alive() const { unsigned char s = _state; return s < zombie; }
bool is_not_entrant() const { return _state == not_entrant; } bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; } bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; } bool is_unloaded() const { return _state == unloaded; }
// returns a string version of the nmethod state
const char* state() const {
switch(_state) {
case in_use: return "in use";
case not_entrant: return "not_entrant";
case zombie: return "zombie";
case unloaded: return "unloaded";
default:
fatal("unexpected nmethod state: %d", _state);
return NULL;
}
}
#if INCLUDE_RTM_OPT #if INCLUDE_RTM_OPT
// rtm state accessing and manipulating // rtm state accessing and manipulating
RTMState rtm_state() const { return _rtm_state; } RTMState rtm_state() const { return _rtm_state; }
@ -466,30 +338,15 @@ class nmethod : public CodeBlob {
assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant"); assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
return make_not_entrant_or_zombie(not_entrant); return make_not_entrant_or_zombie(not_entrant);
} }
bool make_not_used() { return make_not_entrant(); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); } bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported // used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; } bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; } void set_unload_reported() { _unload_reported = true; }
void set_unloading_next(nmethod* next) { _unloading_next = next; } int get_state() const {
nmethod* unloading_next() { return _unloading_next; } return _state;
static unsigned char global_unloading_clock() { return _global_unloading_clock; }
static void increase_unloading_clock();
void set_unloading_clock(unsigned char unloading_clock);
unsigned char unloading_clock();
bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
void mark_for_deoptimization(bool inc_recompile_counts = true) {
_mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
return _mark_for_deoptimization_status != deoptimize_noupdate;
} }
void make_unloaded(BoolObjectClosure* is_alive, oop cause); void make_unloaded(BoolObjectClosure* is_alive, oop cause);
@ -502,18 +359,6 @@ class nmethod : public CodeBlob {
_has_flushed_dependencies = 1; _has_flushed_dependencies = 1;
} }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
int comp_level() const { return _comp_level; } int comp_level() const { return _comp_level; }
// Support for oops in scopes and relocs: // Support for oops in scopes and relocs:
@ -538,9 +383,6 @@ class nmethod : public CodeBlob {
void copy_values(GrowableArray<jobject>* oops); void copy_values(GrowableArray<jobject>* oops);
void copy_values(GrowableArray<Metadata*>* metadata); void copy_values(GrowableArray<Metadata*>* metadata);
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
// Relocation support // Relocation support
private: private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates); void fix_oop_relocations(address begin, address end, bool initialize_immediates);
@ -549,10 +391,6 @@ private:
public: public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); } void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
void verify_oop_relocations();
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Scavengable oop support // Scavengable oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; } bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
@ -576,15 +414,6 @@ public:
long stack_traversal_mark() { return _stack_traversal_mark; } long stack_traversal_mark() { return _stack_traversal_mark; }
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; } void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
// Exception cache support
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
// implicit exceptions support // implicit exceptions support
address continuation_for_implicit_exception(address pc); address continuation_for_implicit_exception(address pc);
@ -595,24 +424,8 @@ public:
nmethod* osr_link() const { return _osr_link; } nmethod* osr_link() const { return _osr_link; }
void set_osr_link(nmethod *n) { _osr_link = n; } void set_osr_link(nmethod *n) { _osr_link = n; }
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
// Inline cache support
void clear_inline_caches();
void clear_ic_stubs();
void cleanup_inline_caches(bool clean_all = false);
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
// Verify calls to dead methods have been cleaned. // Verify calls to dead methods have been cleaned.
void verify_clean_inline_caches(); void verify_clean_inline_caches();
// Verify and count cached icholder relocations.
int verify_icholder_relocations();
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
// unlink and deallocate this nmethod // unlink and deallocate this nmethod
// Only NMethodSweeper class is expected to use this. NMethodSweeper is not // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
@ -653,20 +466,19 @@ public:
public: public:
#endif #endif
// GC support protected:
void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1. #if INCLUDE_JVMCI
bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred); virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred);
void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred); #endif
private: private:
bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
// Unload a nmethod if the *root object is dead. // Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred); bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred); bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
public: public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f) { oops_do(f, false); } void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool allow_zombie); void oops_do(OopClosure* f, bool allow_zombie);
bool detect_scavenge_root_oops(); bool detect_scavenge_root_oops();
@ -678,49 +490,20 @@ public:
static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; } static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
bool test_oops_do_mark() { return _oops_do_mark_link != NULL; } bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
private: private:
ScopeDesc* scope_desc_in(address begin, address end); ScopeDesc* scope_desc_in(address begin, address end);
address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); } address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
PcDesc* find_pc_desc_internal(address pc, bool approximate);
PcDesc* find_pc_desc(address pc, bool approximate) {
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
return desc;
}
return find_pc_desc_internal(pc, approximate);
}
public:
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the givne pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
public: public:
// copying of debugging information // copying of debugging information
void copy_scopes_pcs(PcDesc* pcs, int count); void copy_scopes_pcs(PcDesc* pcs, int count);
void copy_scopes_data(address buffer, int size); void copy_scopes_data(address buffer, int size);
// Deopt
// Return true is the PC is one would expect if the frame is being deopted.
bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
bool is_deopt_entry (address pc);
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
// Accessor/mutator for the original pc of a frame before a frame was deopted. // Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); } address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; } void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
static address get_deopt_original_pc(const frame* fr);
// MethodHandle
bool is_method_handle_return(address return_pc);
// jvmti support: // jvmti support:
void post_compiled_method_load_event(); void post_compiled_method_load_event();
jmethodID get_and_cache_jmethod_id(); jmethodID get_and_cache_jmethod_id();
@ -770,7 +553,7 @@ public:
// are numbered in an independent sequence if CICountOSR is true, // are numbered in an independent sequence if CICountOSR is true,
// and native method wrappers are also numbered independently if // and native method wrappers are also numbered independently if
// CICountNative is true. // CICountNative is true.
int compile_id() const { return _compile_id; } virtual int compile_id() const { return _compile_id; }
const char* compile_kind() const; const char* compile_kind() const;
// tells if any of this method's dependencies have been invalidated // tells if any of this method's dependencies have been invalidated
@ -789,7 +572,7 @@ public:
// Fast breakpoint support. Tells if this compiled method is // Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod // dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well. // corresponds to the given method as well.
bool is_dependent_on_method(Method* dependee); virtual bool is_dependent_on_method(Method* dependee);
// is it ok to patch at address? // is it ok to patch at address?
bool is_patchable_at(address instr_address); bool is_patchable_at(address instr_address);
@ -807,12 +590,7 @@ public:
static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); } static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
static int state_offset() { return offset_of(nmethod, _state); } static int state_offset() { return offset_of(nmethod, _state); }
// RedefineClasses support. Mark metadata in nmethods as on_stack so that virtual void metadata_do(void f(Metadata*));
// redefine classes doesn't purge it.
static void mark_on_stack(nmethod* nm) {
nm->metadata_do(Metadata::mark_on_stack);
}
void metadata_do(void f(Metadata*));
}; };
// Locks an nmethod so its code will not get removed and it will not // Locks an nmethod so its code will not get removed and it will not
@ -821,26 +599,43 @@ public:
// needs to be done, then lock_nmethod() is used directly to keep the // needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early. // generated code from being reused too early.
class nmethodLocker : public StackObj { class nmethodLocker : public StackObj {
nmethod* _nm; CompiledMethod* _nm;
public: public:
// note: nm can be NULL // note: nm can be NULL
// Only JvmtiDeferredEvent::compiled_method_unload_event() // Only JvmtiDeferredEvent::compiled_method_unload_event()
// should pass zombie_ok == true. // should pass zombie_ok == true.
static void lock_nmethod(nmethod* nm, bool zombie_ok = false); static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
static void unlock_nmethod(nmethod* nm); // (ditto) static void unlock_nmethod(CompiledMethod* nm); // (ditto)
nmethodLocker(address pc); // derive nm from pc nmethodLocker(address pc); // derive nm from pc
nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); } nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
nmethodLocker() { _nm = NULL; } nmethodLocker(CompiledMethod *nm) {
~nmethodLocker() { unlock_nmethod(_nm); } _nm = nm;
lock(_nm);
}
nmethod* code() { return _nm; } static void lock(CompiledMethod* method) {
void set_code(nmethod* new_nm) { if (method == NULL) return;
unlock_nmethod(_nm); // note: This works even if _nm==new_nm. lock_nmethod(method);
}
static void unlock(CompiledMethod* method) {
if (method == NULL) return;
unlock_nmethod(method);
}
nmethodLocker() { _nm = NULL; }
~nmethodLocker() {
unlock(_nm);
}
CompiledMethod* code() { return _nm; }
void set_code(CompiledMethod* new_nm) {
unlock(_nm); // note: This works even if _nm==new_nm.
_nm = new_nm; _nm = new_nm;
lock_nmethod(_nm); lock(_nm);
} }
}; };
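// [Editor's sketch, not part of the patch] Typical RAII use of the
// widened locker, assuming the pc-based constructor declared above;
// the scope body is illustrative only.
{
  nmethodLocker nml(pc);            // derives the CompiledMethod from pc
  CompiledMethod* cm = nml.code();  // held alive until the scope exits
  if (cm != NULL) {
    // ... safe to inspect cm's code and metadata here ...
  }
}                                   // destructor unlocks via unlock(_nm)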


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,11 @@ PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
_flags = 0; _flags = 0;
} }
address PcDesc::real_pc(const nmethod* code) const { address PcDesc::real_pc(const CompiledMethod* code) const {
return code->code_begin() + pc_offset(); return code->code_begin() + pc_offset();
} }
void PcDesc::print(nmethod* code) { void PcDesc::print(CompiledMethod* code) {
#ifndef PRODUCT #ifndef PRODUCT
ResourceMark rm; ResourceMark rm;
tty->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x bits=%x):", p2i(real_pc(code)), pc_offset(), _flags); tty->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x bits=%x):", p2i(real_pc(code)), pc_offset(), _flags);
@ -57,7 +57,7 @@ void PcDesc::print(nmethod* code) {
#endif #endif
} }
bool PcDesc::verify(nmethod* code) { bool PcDesc::verify(CompiledMethod* code) {
//Unimplemented(); //Unimplemented();
return true; return true;
} }


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
// PcDescs map a physical PC (given as offset from start of nmethod) to // PcDescs map a physical PC (given as offset from start of nmethod) to
// the corresponding source scope and byte code index. // the corresponding source scope and byte code index.
class nmethod; class CompiledMethod;
class PcDesc VALUE_OBJ_CLASS_SPEC { class PcDesc VALUE_OBJ_CLASS_SPEC {
friend class VMStructs; friend class VMStructs;
@ -91,10 +91,10 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); } void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); }
// Returns the real pc // Returns the real pc
address real_pc(const nmethod* code) const; address real_pc(const CompiledMethod* code) const;
void print(nmethod* code); void print(CompiledMethod* code);
bool verify(nmethod* code); bool verify(CompiledMethod* code);
}; };
#endif // SHARE_VM_CODE_PCDESC_HPP #endif // SHARE_VM_CODE_PCDESC_HPP


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -81,7 +81,6 @@ relocInfo* relocInfo::finish_prefix(short* prefix_limit) {
return (relocInfo*)prefix_limit; return (relocInfo*)prefix_limit;
} }
void relocInfo::set_type(relocType t) { void relocInfo::set_type(relocType t) {
int old_offset = addr_offset(); int old_offset = addr_offset();
int old_format = format(); int old_format = format();
@ -91,6 +90,9 @@ void relocInfo::set_type(relocType t) {
assert(format()==old_format, "sanity check"); assert(format()==old_format, "sanity check");
} }
nmethod* RelocIterator::code_as_nmethod() const {
return _code->as_nmethod();
}
void relocInfo::set_format(int f) { void relocInfo::set_format(int f) {
int old_offset = addr_offset(); int old_offset = addr_offset();
@ -121,13 +123,13 @@ void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, re
// ---------------------------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator // Implementation of RelocIterator
void RelocIterator::initialize(nmethod* nm, address begin, address limit) { void RelocIterator::initialize(CompiledMethod* nm, address begin, address limit) {
initialize_misc(); initialize_misc();
if (nm == NULL && begin != NULL) { if (nm == NULL && begin != NULL) {
// allow nmethod to be deduced from beginning address // allow nmethod to be deduced from beginning address
CodeBlob* cb = CodeCache::find_blob(begin); CodeBlob* cb = CodeCache::find_blob(begin);
nm = cb->as_nmethod_or_null(); nm = cb->as_compiled_method_or_null();
} }
assert(nm != NULL, "must be able to deduce nmethod from other arguments"); assert(nm != NULL, "must be able to deduce nmethod from other arguments");


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
class nmethod;
class CompiledMethod;
class Metadata; class Metadata;
class NativeMovConstReg; class NativeMovConstReg;
@ -539,7 +541,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end relocInfo* _end; // end marker; we're done iterating when _current == _end
nmethod* _code; // compiled method containing _addr CompiledMethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data short* _data; // pointer to the relocation's data
@ -570,13 +572,13 @@ class RelocIterator : public StackObj {
void initialize_misc(); void initialize_misc();
void initialize(nmethod* nm, address begin, address limit); void initialize(CompiledMethod* nm, address begin, address limit);
RelocIterator() { initialize_misc(); } RelocIterator() { initialize_misc(); }
public: public:
// constructor // constructor
RelocIterator(nmethod* nm, address begin = NULL, address limit = NULL); RelocIterator(CompiledMethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL); RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
// get next reloc info, return !eos // get next reloc info, return !eos
@ -611,7 +613,8 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); } relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; } int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; } address addr() const { return _addr; }
nmethod* code() const { return _code; } CompiledMethod* code() const { return _code; }
nmethod* code_as_nmethod() const;
short* data() const { return _data; } short* data() const { return _data; }
int datalen() const { return _datalen; } int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; } bool has_current() const { return _datalen >= 0; }
@ -810,9 +813,10 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
public: public:
// accessors which only make sense for a bound Relocation // accessors which only make sense for a bound Relocation
address addr() const { return binding()->addr(); } address addr() const { return binding()->addr(); }
nmethod* code() const { return binding()->code(); } CompiledMethod* code() const { return binding()->code(); }
bool addr_in_const() const { return binding()->addr_in_const(); } nmethod* code_as_nmethod() const { return binding()->code_as_nmethod(); }
bool addr_in_const() const { return binding()->addr_in_const(); }
protected: protected:
short* data() const { return binding()->data(); } short* data() const { return binding()->data(); }
int datalen() const { return binding()->datalen(); } int datalen() const { return binding()->datalen(); }
@ -1371,7 +1375,7 @@ inline name##_Relocation* RelocIterator::name##_reloc() { \
APPLY_TO_RELOCATIONS(EACH_CASE); APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE #undef EACH_CASE
inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) { inline RelocIterator::RelocIterator(CompiledMethod* nm, address begin, address limit) {
initialize(nm, begin, limit); initialize(nm, begin, limit);
} }
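
The header changes follow the same pattern: the iterator's _code field is widened to CompiledMethod*, and a narrowing code_as_nmethod() accessor is added for the few callers that still require an nmethod. A sketch of that shape, with hypothetical model classes in place of the real hierarchy:

#include <cassert>

// Hypothetical model of the widened iterator field; not HotSpot code.
struct CompiledMethod {
  virtual ~CompiledMethod() {}
  virtual bool is_nmethod() const { return false; }
};
struct nmethod : CompiledMethod {
  bool is_nmethod() const override { return true; }
};

struct RelocIteratorModel {
  CompiledMethod* _code;  // widened: any compiled method, not just nmethod

  explicit RelocIteratorModel(CompiledMethod* code) : _code(code) {}

  CompiledMethod* code() const { return _code; }

  // Narrowing accessor for callers that genuinely need an nmethod;
  // asserts instead of silently mis-casting.
  nmethod* code_as_nmethod() const {
    assert(_code == nullptr || _code->is_nmethod());
    return static_cast<nmethod*>(_code);
  }
};

int main() {
  nmethod nm;
  RelocIteratorModel it(&nm);
  return it.code_as_nmethod() == &nm ? 0 : 1;
}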

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp" #include "runtime/handles.inline.hpp"
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) { ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
_code = code; _code = code;
_decode_offset = decode_offset; _decode_offset = decode_offset;
_objects = decode_object_values(obj_decode_offset); _objects = decode_object_values(obj_decode_offset);
@ -40,7 +40,7 @@ ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offs
decode_body(); decode_body();
} }
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) { ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
_code = code; _code = code;
_decode_offset = decode_offset; _decode_offset = decode_offset;
_objects = decode_object_values(DebugInformationRecorder::serialized_null); _objects = decode_object_values(DebugInformationRecorder::serialized_null);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class SimpleScopeDesc : public StackObj {
int _bci; int _bci;
public: public:
SimpleScopeDesc(nmethod* code, address pc) { SimpleScopeDesc(CompiledMethod* code, address pc) {
PcDesc* pc_desc = code->pc_desc_at(pc); PcDesc* pc_desc = code->pc_desc_at(pc);
assert(pc_desc != NULL, "Must be able to find matching PcDesc"); assert(pc_desc != NULL, "Must be able to find matching PcDesc");
DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset()); DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
@ -60,12 +60,12 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj { class ScopeDesc : public ResourceObj {
public: public:
// Constructor // Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop); ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
// Calls above, giving default value of "serialized_null" to the // Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument. (We don't use a default argument to // "obj_decode_offset" argument. (We don't use a default argument to
// avoid a .hpp-.hpp dependency.) // avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop); ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
// JVM state // JVM state
Method* method() const { return _method; } Method* method() const { return _method; }
@ -110,7 +110,7 @@ class ScopeDesc : public ResourceObj {
GrowableArray<ScopeValue*>* _objects; GrowableArray<ScopeValue*>* _objects;
// Nmethod information // Nmethod information
const nmethod* _code; const CompiledMethod* _code;
// Decoding operations // Decoding operations
void decode_body(); void decode_body();

View File

@ -1075,10 +1075,10 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
if (osr_bci == InvocationEntryBci) { if (osr_bci == InvocationEntryBci) {
// standard compilation // standard compilation
nmethod* method_code = method->code(); CompiledMethod* method_code = method->code();
if (method_code != NULL) { if (method_code != NULL && method_code->is_nmethod()) {
if (compilation_is_complete(method, osr_bci, comp_level)) { if (compilation_is_complete(method, osr_bci, comp_level)) {
return method_code; return (nmethod*) method_code;
} }
} }
if (method->is_not_compilable(comp_level)) { if (method->is_not_compilable(comp_level)) {
@ -1184,7 +1184,12 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
// return requested nmethod // return requested nmethod
// We accept a higher level osr method // We accept a higher level osr method
if (osr_bci == InvocationEntryBci) { if (osr_bci == InvocationEntryBci) {
return method->code(); CompiledMethod* code = method->code();
if (code == NULL) {
return (nmethod*) code;
} else {
return code->as_nmethod_or_null();
}
} }
return method->lookup_osr_nmethod_for(osr_bci, comp_level, false); return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
} }
@ -1209,7 +1214,7 @@ bool CompileBroker::compilation_is_complete(const methodHandle& method,
if (method->is_not_compilable(comp_level)) { if (method->is_not_compilable(comp_level)) {
return true; return true;
} else { } else {
nmethod* result = method->code(); CompiledMethod* result = method->code();
if (result == NULL) return false; if (result == NULL) return false;
return comp_level == result->comp_level(); return comp_level == result->comp_level();
} }

View File

@ -135,7 +135,11 @@ AbstractCompiler* CompileTask::compiler() {
// //
nmethod* CompileTask::code() const { nmethod* CompileTask::code() const {
if (_code_handle == NULL) return NULL; if (_code_handle == NULL) return NULL;
return _code_handle->code(); CodeBlob *blob = _code_handle->code();
if (blob != NULL) {
return blob->as_nmethod();
}
return NULL;
} }
void CompileTask::set_code(nmethod* nm) { void CompileTask::set_code(nmethod* nm) {

View File

@ -3776,12 +3776,12 @@ private:
const uint _num_workers; const uint _num_workers;
// Variables used to claim nmethods. // Variables used to claim nmethods.
nmethod* _first_nmethod; CompiledMethod* _first_nmethod;
volatile nmethod* _claimed_nmethod; volatile CompiledMethod* _claimed_nmethod;
// The list of nmethods that need to be processed by the second pass. // The list of nmethods that need to be processed by the second pass.
volatile nmethod* _postponed_list; volatile CompiledMethod* _postponed_list;
volatile uint _num_entered_barrier; volatile uint _num_entered_barrier;
public: public:
G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
@ -3793,13 +3793,13 @@ private:
_postponed_list(NULL), _postponed_list(NULL),
_num_entered_barrier(0) _num_entered_barrier(0)
{ {
nmethod::increase_unloading_clock(); CompiledMethod::increase_unloading_clock();
// Get first alive nmethod // Get first alive nmethod
NMethodIterator iter = NMethodIterator(); CompiledMethodIterator iter = CompiledMethodIterator();
if(iter.next_alive()) { if(iter.next_alive()) {
_first_nmethod = iter.method(); _first_nmethod = iter.method();
} }
_claimed_nmethod = (volatile nmethod*)_first_nmethod; _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
} }
~G1CodeCacheUnloadingTask() { ~G1CodeCacheUnloadingTask() {
@ -3812,15 +3812,15 @@ private:
} }
private: private:
void add_to_postponed_list(nmethod* nm) { void add_to_postponed_list(CompiledMethod* nm) {
nmethod* old; CompiledMethod* old;
do { do {
old = (nmethod*)_postponed_list; old = (CompiledMethod*)_postponed_list;
nm->set_unloading_next(old); nm->set_unloading_next(old);
} while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old); } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
} }
void clean_nmethod(nmethod* nm) { void clean_nmethod(CompiledMethod* nm) {
bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
if (postponed) { if (postponed) {
@ -3830,24 +3830,24 @@ private:
// Mark that this thread has been cleaned/unloaded. // Mark that this thread has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not. // After this call, it will be safe to ask if this nmethod was unloaded or not.
nm->set_unloading_clock(nmethod::global_unloading_clock()); nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
} }
void clean_nmethod_postponed(nmethod* nm) { void clean_nmethod_postponed(CompiledMethod* nm) {
nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred); nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
} }
static const int MaxClaimNmethods = 16; static const int MaxClaimNmethods = 16;
void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) { void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
nmethod* first; CompiledMethod* first;
NMethodIterator last; CompiledMethodIterator last;
do { do {
*num_claimed_nmethods = 0; *num_claimed_nmethods = 0;
first = (nmethod*)_claimed_nmethod; first = (CompiledMethod*)_claimed_nmethod;
last = NMethodIterator(first); last = CompiledMethodIterator(first);
if (first != NULL) { if (first != NULL) {
@ -3860,22 +3860,22 @@ private:
} }
} }
} while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first); } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
} }
nmethod* claim_postponed_nmethod() { CompiledMethod* claim_postponed_nmethod() {
nmethod* claim; CompiledMethod* claim;
nmethod* next; CompiledMethod* next;
do { do {
claim = (nmethod*)_postponed_list; claim = (CompiledMethod*)_postponed_list;
if (claim == NULL) { if (claim == NULL) {
return NULL; return NULL;
} }
next = claim->unloading_next(); next = claim->unloading_next();
} while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim); } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
return claim; return claim;
} }
@ -3911,7 +3911,7 @@ private:
} }
int num_claimed_nmethods; int num_claimed_nmethods;
nmethod* claimed_nmethods[MaxClaimNmethods]; CompiledMethod* claimed_nmethods[MaxClaimNmethods];
while (true) { while (true) {
claim_nmethods(claimed_nmethods, &num_claimed_nmethods); claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
@ -3927,7 +3927,7 @@ private:
} }
void work_second_pass(uint worker_id) { void work_second_pass(uint worker_id) {
nmethod* nm; CompiledMethod* nm;
// Take care of postponed nmethods. // Take care of postponed nmethods.
while ((nm = claim_postponed_nmethod()) != NULL) { while ((nm = claim_postponed_nmethod()) != NULL) {
clean_nmethod_postponed(nm); clean_nmethod_postponed(nm);
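
The G1 unloading task retypes its work-distribution state but keeps the lock-free protocol: workers CAS methods onto and off a shared postponed list threaded through the methods themselves. A standalone sketch of that protocol using std::atomic (HotSpot itself uses Atomic::cmpxchg_ptr on volatile fields; all names here are stand-ins):

#include <atomic>
#include <cassert>

// Hypothetical node standing in for CompiledMethod; the real code threads
// the list through the method's unloading_next field.
struct Node {
  Node* next = nullptr;
};

static std::atomic<Node*> g_postponed{nullptr};

// Mirrors add_to_postponed_list(): CAS-push onto a shared singly-linked list.
void push_postponed(Node* n) {
  Node* old = g_postponed.load(std::memory_order_relaxed);
  do {
    n->next = old;
  } while (!g_postponed.compare_exchange_weak(old, n,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
}

// Mirrors claim_postponed_nmethod(): CAS-pop; returns nullptr when drained.
// Each node is claimed exactly once per unloading pass, so the classic
// ABA hazard of a CAS-popped list does not arise here.
Node* claim_postponed() {
  Node* claim = g_postponed.load(std::memory_order_acquire);
  Node* next;
  do {
    if (claim == nullptr) return nullptr;
    next = claim->next;
  } while (!g_postponed.compare_exchange_weak(claim, next,
                                              std::memory_order_acquire,
                                              std::memory_order_acquire));
  return claim;
}

int main() {
  Node a, b;
  push_postponed(&a);
  push_postponed(&b);
  assert(claim_postponed() == &b);
  assert(claim_postponed() == &a);
  assert(claim_postponed() == nullptr);
  return 0;
}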

View File

@ -992,7 +992,7 @@ C2V_VMENTRY(void, reprofile, (JNIEnv*, jobject, jobject jvmci_method))
} }
NOT_PRODUCT(method->set_compiled_invocation_count(0)); NOT_PRODUCT(method->set_compiled_invocation_count(0));
nmethod* code = method->code(); CompiledMethod* code = method->code();
if (code != NULL) { if (code != NULL) {
code->make_not_entrant(); code->make_not_entrant();
} }

View File

@ -546,7 +546,7 @@ JVMCIEnv::CodeInstallResult JVMCIEnv::register_method(
if (entry_bci == InvocationEntryBci) { if (entry_bci == InvocationEntryBci) {
if (TieredCompilation) { if (TieredCompilation) {
// If there is an old version we're done with it // If there is an old version we're done with it
nmethod* old = method->code(); CompiledMethod* old = method->code();
if (TraceMethodReplacement && old != NULL) { if (TraceMethodReplacement && old != NULL) {
ResourceMark rm; ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string(); char *method_name = method->name_and_sig_as_C_string();

View File

@ -220,15 +220,15 @@ extern void vm_exit(int code);
// been deoptimized. If that is the case we return the deopt blob // been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier // unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language. // because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm)) JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, CompiledMethod*& cm))
// Reset method handle flag. // Reset method handle flag.
thread->set_is_method_handle_return(false); thread->set_is_method_handle_return(false);
Handle exception(thread, ex); Handle exception(thread, ex);
nm = CodeCache::find_nmethod(pc); cm = CodeCache::find_compiled(pc);
assert(nm != NULL, "this is not a compiled method"); assert(cm != NULL, "this is not a compiled method");
// Adjust the pc as needed. // Adjust the pc as needed.
if (nm->is_deopt_pc(pc)) { if (cm->is_deopt_pc(pc)) {
RegisterMap map(thread, false); RegisterMap map(thread, false);
frame exception_frame = thread->last_frame().sender(&map); frame exception_frame = thread->last_frame().sender(&map);
// if the frame isn't deopted then pc must not correspond to the caller of last_frame // if the frame isn't deopted then pc must not correspond to the caller of last_frame
@ -275,10 +275,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
if (guard_pages_enabled) { if (guard_pages_enabled) {
address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
if (fast_continuation != NULL) { if (fast_continuation != NULL) {
// Set flag if return address is a method handle call site. // Set flag if return address is a method handle call site.
thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
return fast_continuation; return fast_continuation;
} }
} }
@ -299,7 +299,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
stringStream tempst; stringStream tempst;
tempst.print("compiled method <%s>\n" tempst.print("compiled method <%s>\n"
" at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT, " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
nm->method()->print_value_string(), p2i(pc), p2i(thread)); cm->method()->print_value_string(), p2i(pc), p2i(thread));
Exceptions::log_exception(exception, tempst); Exceptions::log_exception(exception, tempst);
} }
// for AbortVMOnException flag // for AbortVMOnException flag
@ -311,19 +311,19 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution. // normal bytecode execution.
thread->clear_exception_oop_and_pc(); thread->clear_exception_oop_and_pc();
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false); continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed // If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception()); thread->set_exception_oop(exception());
thread->set_exception_pc(pc); thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions // the exception cache is used only by non-implicit exceptions
if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) { if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation); cm->add_handler_for_exception_and_pc(exception, pc, continuation);
} }
} }
// Set flag if return address is a method handle call site. // Set flag if return address is a method handle call site.
thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
if (log_is_enabled(Info, exceptions)) { if (log_is_enabled(Info, exceptions)) {
ResourceMark rm; ResourceMark rm;
@ -345,18 +345,18 @@ address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
address pc = thread->exception_pc(); address pc = thread->exception_pc();
// Still in Java mode // Still in Java mode
DEBUG_ONLY(ResetNoHandleMark rnhm); DEBUG_ONLY(ResetNoHandleMark rnhm);
nmethod* nm = NULL; CompiledMethod* cm = NULL;
address continuation = NULL; address continuation = NULL;
{ {
// Enter VM mode by calling the helper // Enter VM mode by calling the helper
ResetNoHandleMark rnhm; ResetNoHandleMark rnhm;
continuation = exception_handler_for_pc_helper(thread, exception, pc, nm); continuation = exception_handler_for_pc_helper(thread, exception, pc, cm);
} }
// Back in JAVA, use no oops DON'T safepoint // Back in JAVA, use no oops DON'T safepoint
// Now check to see if the compiled method we were called from is now deoptimized. // Now check to see if the compiled method we were called from is now deoptimized.
// If so we must return to the deopt blob and deoptimize the nmethod // If so we must return to the deopt blob and deoptimize the nmethod
if (nm != NULL && caller_is_deopted()) { if (cm != NULL && caller_is_deopted()) {
continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
} }

View File

@ -186,7 +186,7 @@
nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _intrinsic_id, u2) \ nonstatic_field(Method, _intrinsic_id, u2) \
nonstatic_field(Method, _flags, u2) \ nonstatic_field(Method, _flags, u2) \
volatile_nonstatic_field(Method, _code, nmethod*) \ volatile_nonstatic_field(Method, _code, CompiledMethod*) \
volatile_nonstatic_field(Method, _from_compiled_entry, address) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \
\ \
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \ nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \

View File

@ -746,7 +746,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
// This function can be called more than once. We must make sure that we always // This function can be called more than once. We must make sure that we always
// use the latest registered method -> check if a stub already has been generated. // use the latest registered method -> check if a stub already has been generated.
// If so, we have to make it not_entrant. // If so, we have to make it not_entrant.
nmethod* nm = code(); // Put it into local variable to guard against concurrent updates CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates
if (nm != NULL) { if (nm != NULL) {
nm->make_not_entrant(); nm->make_not_entrant();
} }
@ -1046,12 +1046,12 @@ address Method::verified_code_entry() {
// Not inline to avoid circular ref. // Not inline to avoid circular ref.
bool Method::check_code() const { bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field. // cached in a register or local. There's a race on the value of the field.
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code); CompiledMethod *code = (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method()); return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
} }
// Install compiled code. Instantly it can execute. // Install compiled code. Instantly it can execute.
void Method::set_code(methodHandle mh, nmethod *code) { void Method::set_code(methodHandle mh, CompiledMethod *code) {
assert( code, "use clear_code to remove code" ); assert( code, "use clear_code to remove code" );
assert( mh->check_code(), "" ); assert( mh->check_code(), "" );
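
Method::_code is read and written concurrently, so the getter pairs an acquire load with the release publication done by set_code(). A standalone sketch of that pairing with std::atomic (HotSpot spells it OrderAccess::load_ptr_acquire on a volatile field; the types here are stand-ins):

#include <atomic>
#include <cstdio>

struct CompiledMethodModel {
  int comp_level;  // stand-in for state published before the pointer
};

struct MethodModel {
  // Compiler threads publish here; any thread may read concurrently.
  std::atomic<CompiledMethodModel*> _code{nullptr};

  void set_code(CompiledMethodModel* cm) {
    // Release: everything written to *cm happens-before a reader's acquire.
    _code.store(cm, std::memory_order_release);
  }
  CompiledMethodModel* code() const {
    return _code.load(std::memory_order_acquire);
  }
};

int main() {
  MethodModel m;
  CompiledMethodModel cm{4};
  m.set_code(&cm);
  if (CompiledMethodModel* c = m.code())
    std::printf("comp_level=%d\n", c->comp_level);
  return 0;
}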

View File

@ -58,6 +58,7 @@ class MethodCounters;
class ConstMethod; class ConstMethod;
class InlineTableSizes; class InlineTableSizes;
class KlassSizeStats; class KlassSizeStats;
class CompiledMethod;
class Method : public Metadata { class Method : public Metadata {
friend class VMStructs; friend class VMStructs;
@ -101,7 +102,7 @@ class Method : public Metadata {
// field can come and go. It can transition from NULL to not-null at any // field can come and go. It can transition from NULL to not-null at any
// time (whenever a compile completes). It can transition from not-null to // time (whenever a compile completes). It can transition from not-null to
// NULL only at safepoints (because of a de-opt). // NULL only at safepoints (because of a de-opt).
nmethod* volatile _code; // Points to the corresponding piece of native code CompiledMethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
// Constructor // Constructor
@ -431,9 +432,9 @@ class Method : public Metadata {
// nmethod/verified compiler entry // nmethod/verified compiler entry
address verified_code_entry(); address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref bool check_code() const; // Not inline to avoid circular ref
nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); }
void clear_code(); // Clear out any compiled code void clear_code(); // Clear out any compiled code
static void set_code(methodHandle mh, nmethod* code); static void set_code(methodHandle mh, CompiledMethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) { void set_adapter_entry(AdapterHandlerEntry* adapter) {
constMethod()->set_adapter_entry(adapter); constMethod()->set_adapter_entry(adapter);
} }

View File

@ -565,7 +565,7 @@ uint Compile::scratch_emit_size(const Node* n) {
relocInfo* locs_buf = scratch_locs_memory(); relocInfo* locs_buf = scratch_locs_memory();
address blob_begin = blob->content_begin(); address blob_begin = blob->content_begin();
address blob_end = (address)locs_buf; address blob_end = (address)locs_buf;
assert(blob->content_contains(blob_end), "sanity"); assert(blob->contains(blob_end), "sanity");
CodeBuffer buf(blob_begin, blob_end - blob_begin); CodeBuffer buf(blob_begin, blob_end - blob_begin);
buf.initialize_consts_size(_scratch_const_size); buf.initialize_consts_size(_scratch_const_size);
buf.initialize_stubs_size(MAX_stubs_size); buf.initialize_stubs_size(MAX_stubs_size);

View File

@ -1663,9 +1663,9 @@ static void trace_exception(outputStream* st, oop exception_oop, address excepti
exception_oop->print_value_on(&tempst); exception_oop->print_value_on(&tempst);
tempst.print(" in "); tempst.print(" in ");
CodeBlob* blob = CodeCache::find_blob(exception_pc); CodeBlob* blob = CodeCache::find_blob(exception_pc);
if (blob->is_nmethod()) { if (blob->is_compiled()) {
nmethod* nm = blob->as_nmethod_or_null(); CompiledMethod* cm = blob->as_compiled_method_or_null();
nm->method()->print_value_on(&tempst); cm->method()->print_value_on(&tempst);
} else if (blob->is_runtime_stub()) { } else if (blob->is_runtime_stub()) {
tempst.print("<runtime-stub>"); tempst.print("<runtime-stub>");
} else { } else {

View File

@ -483,9 +483,9 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
RegisterMap* reg_map = fst.register_map(); RegisterMap* reg_map = fst.register_map();
Deoptimization::deoptimize(t, *f, reg_map); Deoptimization::deoptimize(t, *f, reg_map);
if (_make_not_entrant) { if (_make_not_entrant) {
nmethod* nm = CodeCache::find_nmethod(f->pc()); CompiledMethod* cm = CodeCache::find_compiled(f->pc());
assert(nm != NULL, "sanity check"); assert(cm != NULL, "sanity check");
nm->make_not_entrant(); cm->make_not_entrant();
} }
++_result; ++_result;
} }
@ -533,7 +533,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j
CHECK_JNI_EXCEPTION_(env, JNI_FALSE); CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
MutexLockerEx mu(Compile_lock); MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
if (code == NULL) { if (code == NULL) {
return JNI_FALSE; return JNI_FALSE;
} }
@ -589,7 +589,7 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth
jmethodID jmid = reflected_method_to_jmid(thread, env, method); jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, CompLevel_none); CHECK_JNI_EXCEPTION_(env, CompLevel_none);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
return (code != NULL ? code->comp_level() : CompLevel_none); return (code != NULL ? code->comp_level() : CompLevel_none);
WB_END WB_END
@ -608,7 +608,7 @@ WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method); jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, InvocationEntryBci); CHECK_JNI_EXCEPTION_(env, InvocationEntryBci);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false); CompiledMethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci); return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
WB_END WB_END
@ -1093,7 +1093,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
jmethodID jmid = reflected_method_to_jmid(thread, env, method); jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, NULL); CHECK_JNI_EXCEPTION_(env, NULL);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code(); CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
jobjectArray result = NULL; jobjectArray result = NULL;
if (code == NULL) { if (code == NULL) {
return result; return result;

View File

@ -496,7 +496,7 @@ void AdvancedThresholdPolicy::submit_compile(const methodHandle& mh, int bci, Co
// Handle the invocation event. // Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, nmethod* nm, JavaThread* thread) { CompLevel level, CompiledMethod* nm, JavaThread* thread) {
if (should_create_mdo(mh(), level)) { if (should_create_mdo(mh(), level)) {
create_mdo(mh, thread); create_mdo(mh, thread);
} }
@ -511,7 +511,7 @@ void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, co
// Handle the back branch event. Notice that we can compile the method // Handle the back branch event. Notice that we can compile the method
// with a regular entry from here. // with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, void AdvancedThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
int bci, CompLevel level, nmethod* nm, JavaThread* thread) { int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
if (should_create_mdo(mh(), level)) { if (should_create_mdo(mh(), level)) {
create_mdo(mh, thread); create_mdo(mh, thread);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -216,9 +216,9 @@ protected:
virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
// event() from SimpleThresholdPolicy would call these. // event() from SimpleThresholdPolicy would call these.
virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
CompLevel level, nmethod* nm, JavaThread* thread); CompLevel level, CompiledMethod* nm, JavaThread* thread);
virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
int bci, CompLevel level, nmethod* nm, JavaThread* thread); int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread);
public: public:
AdvancedThresholdPolicy() : _start_time(0) { } AdvancedThresholdPolicy() : _start_time(0) { }
// Select task is called by CompileBroker. We should return a task or NULL. // Select task is called by CompileBroker. We should return a task or NULL.

View File

@ -379,7 +379,7 @@ bool NonTieredCompPolicy::is_mature(Method* method) {
} }
nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, nmethod* NonTieredCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci,
int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) { int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
assert(comp_level == CompLevel_none, "This should be only called from the interpreter"); assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci)); NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) { if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
@ -484,7 +484,7 @@ void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread
const char* comment = "count"; const char* comment = "count";
if (is_compilation_enabled() && can_be_compiled(m, comp_level)) { if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
nmethod* nm = m->code(); CompiledMethod* nm = m->code();
if (nm == NULL ) { if (nm == NULL ) {
CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread); CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
} }
@ -713,7 +713,7 @@ const char* StackWalkCompPolicy::shouldNotInline(const methodHandle& m) {
// note: we allow ik->is_abstract() // note: we allow ik->is_abstract()
if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized"); if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
if (m->is_native()) return (_msg = "native method"); if (m->is_native()) return (_msg = "native method");
nmethod* m_code = m->code(); CompiledMethod* m_code = m->code();
if (m_code != NULL && m_code->code_size() > InlineSmallCode) if (m_code != NULL && m_code->code_size() > InlineSmallCode)
return (_msg = "already compiled into a big method"); return (_msg = "already compiled into a big method");

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ public:
virtual int compiler_count(CompLevel comp_level) = 0; virtual int compiler_count(CompLevel comp_level) = 0;
// main notification entry, returns a pointer to an nmethod if the OSR is required, // returns NULL otherwise.
// returns NULL otherwise. // returns NULL otherwise.
virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) = 0; virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) = 0;
// safepoint() is called at the end of the safepoint // safepoint() is called at the end of the safepoint
virtual void do_safepoint_work() = 0; virtual void do_safepoint_work() = 0;
// reprofile request // reprofile request
@ -109,7 +109,7 @@ public:
virtual bool is_mature(Method* method); virtual bool is_mature(Method* method);
virtual void initialize(); virtual void initialize();
virtual CompileTask* select_task(CompileQueue* compile_queue); virtual CompileTask* select_task(CompileQueue* compile_queue);
virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread); virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
virtual void method_invocation_event(const methodHandle& m, JavaThread* thread) = 0; virtual void method_invocation_event(const methodHandle& m, JavaThread* thread) = 0;
virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) = 0; virtual void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) = 0;
}; };

View File

@ -168,9 +168,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// Now get the deoptee with a valid map // Now get the deoptee with a valid map
frame deoptee = stub_frame.sender(&map); frame deoptee = stub_frame.sender(&map);
// Set the deoptee nmethod // Set the deoptee nmethod
assert(thread->deopt_nmethod() == NULL, "Pending deopt!"); assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null()); CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
bool skip_internal = thread->deopt_nmethod() != NULL && !thread->deopt_nmethod()->compiler()->is_jvmci(); thread->set_deopt_compiled_method(cm);
bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
if (VerifyStack) { if (VerifyStack) {
thread->validate_frame_layout(); thread->validate_frame_layout();
@ -548,7 +549,7 @@ void Deoptimization::cleanup_deopt_info(JavaThread *thread,
delete thread->deopt_mark(); delete thread->deopt_mark();
thread->set_deopt_mark(NULL); thread->set_deopt_mark(NULL);
thread->set_deopt_nmethod(NULL); thread->set_deopt_compiled_method(NULL);
if (JvmtiExport::can_pop_frame()) { if (JvmtiExport::can_pop_frame()) {
@ -1292,14 +1293,14 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt
gather_statistics(reason, Action_none, Bytecodes::_illegal); gather_statistics(reason, Action_none, Bytecodes::_illegal);
if (LogCompilation && xtty != NULL) { if (LogCompilation && xtty != NULL) {
nmethod* nm = fr.cb()->as_nmethod_or_null(); CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
assert(nm != NULL, "only compiled methods can deopt"); assert(cm != NULL, "only compiled methods can deopt");
ttyLocker ttyl; ttyLocker ttyl;
xtty->begin_head("deoptimized thread='" UINTX_FORMAT "'", (uintx)thread->osthread()->thread_id()); xtty->begin_head("deoptimized thread='" UINTX_FORMAT "'", (uintx)thread->osthread()->thread_id());
nm->log_identity(xtty); cm->log_identity(xtty);
xtty->end_head(); xtty->end_head();
for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
xtty->begin_elem("jvms bci='%d'", sd->bci()); xtty->begin_elem("jvms bci='%d'", sd->bci());
xtty->method(sd->method()); xtty->method(sd->method());
xtty->end_elem(); xtty->end_elem();
@ -1480,7 +1481,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
vframe* vf = vframe::new_vframe(&fr, &reg_map, thread); vframe* vf = vframe::new_vframe(&fr, &reg_map, thread);
compiledVFrame* cvf = compiledVFrame::cast(vf); compiledVFrame* cvf = compiledVFrame::cast(vf);
nmethod* nm = cvf->code(); CompiledMethod* nm = cvf->code();
ScopeDesc* trap_scope = cvf->scope(); ScopeDesc* trap_scope = cvf->scope();
@ -1499,7 +1500,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
oop speculation = thread->pending_failed_speculation(); oop speculation = thread->pending_failed_speculation();
if (nm->is_compiled_by_jvmci()) { if (nm->is_compiled_by_jvmci()) {
if (speculation != NULL) { if (speculation != NULL) {
oop speculation_log = nm->speculation_log(); oop speculation_log = nm->as_nmethod()->speculation_log();
if (speculation_log != NULL) { if (speculation_log != NULL) {
if (TraceDeoptimization || TraceUncollectedSpeculations) { if (TraceDeoptimization || TraceUncollectedSpeculations) {
if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) { if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
@ -1615,19 +1616,21 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
nm->method()->print_short_name(tty); nm->method()->print_short_name(tty);
tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id()); tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id());
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
oop installedCode = nm->jvmci_installed_code(); if (nm->is_nmethod()) {
if (installedCode != NULL) { oop installedCode = nm->as_nmethod()->jvmci_installed_code();
oop installedCodeName = NULL; if (installedCode != NULL) {
if (installedCode->is_a(InstalledCode::klass())) { oop installedCodeName = NULL;
installedCodeName = InstalledCode::name(installedCode); if (installedCode->is_a(InstalledCode::klass())) {
installedCodeName = InstalledCode::name(installedCode);
}
if (installedCodeName != NULL) {
tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName));
} else {
tty->print(" (JVMCI: installed code has no name) ");
}
} else if (nm->is_compiled_by_jvmci()) {
tty->print(" (JVMCI: no installed code) ");
} }
if (installedCodeName != NULL) {
tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName));
} else {
tty->print(" (JVMCI: installed code has no name) ");
}
} else if (nm->is_compiled_by_jvmci()) {
tty->print(" (JVMCI: no installed code) ");
} }
#endif #endif
tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
@ -1867,7 +1870,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
// Assume that in new recompiled code the statistic could be different, // Assume that in new recompiled code the statistic could be different,
// for example, due to different inlining. // for example, due to different inlining.
if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) { UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
trap_mdo->atomic_set_rtm_state(ProfileRTM); trap_mdo->atomic_set_rtm_state(ProfileRTM);
} }
#endif #endif

View File

@ -538,11 +538,12 @@ class adapterNode : public ProfilerNode {
class runtimeStubNode : public ProfilerNode { class runtimeStubNode : public ProfilerNode {
private: private:
const CodeBlob* _stub; const RuntimeStub* _stub;
const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string. const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
public: public:
runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub), _symbol(name) { runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(NULL), _symbol(name) {
assert(stub->is_runtime_stub(), "wrong code blob"); assert(stub->is_runtime_stub(), "wrong code blob");
_stub = (RuntimeStub*) stub;
update(where); update(where);
} }
@ -550,7 +551,7 @@ class runtimeStubNode : public ProfilerNode {
bool runtimeStub_match(const CodeBlob* stub, const char* name) const { bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
assert(stub->is_runtime_stub(), "wrong code blob"); assert(stub->is_runtime_stub(), "wrong code blob");
return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() && return _stub->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
(_symbol == name); (_symbol == name);
} }
@ -571,7 +572,7 @@ class runtimeStubNode : public ProfilerNode {
} }
void print_method_on(outputStream* st) { void print_method_on(outputStream* st) {
st->print("%s", ((RuntimeStub*)_stub)->name()); st->print("%s", _stub->name());
print_symbol_on(st); print_symbol_on(st);
} }
@ -588,18 +589,18 @@ class unknown_compiledNode : public ProfilerNode {
public: public:
unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() { unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
if ( cb->is_buffer_blob() ) if ( cb->is_buffer_blob() )
_name = ((BufferBlob*)cb)->name(); _name = ((const BufferBlob*)cb)->name();
else else
_name = ((SingletonBlob*)cb)->name(); _name = ((const SingletonBlob*)cb)->name();
update(where); update(where);
} }
bool is_compiled() const { return true; } bool is_compiled() const { return true; }
bool unknown_compiled_match(const CodeBlob* cb) const { bool unknown_compiled_match(const CodeBlob* cb) const {
if ( cb->is_buffer_blob() ) if ( cb->is_buffer_blob() )
return !strcmp(((BufferBlob*)cb)->name(), _name); return !strcmp(((const BufferBlob*)cb)->name(), _name);
else else
return !strcmp(((SingletonBlob*)cb)->name(), _name); return !strcmp(((const SingletonBlob*)cb)->name(), _name);
} }
Method* method() { return NULL; } Method* method() { return NULL; }
@ -993,16 +994,15 @@ void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosi
CodeBlob* cb = fr.cb(); CodeBlob* cb = fr.cb();
// For runtime stubs, record as native rather than as compiled // For runtime stubs, record as native rather than as compiled
if (cb->is_runtime_stub()) { if (cb->is_runtime_stub()) {
RegisterMap map(thread, false); RegisterMap map(thread, false);
fr = fr.sender(&map); fr = fr.sender(&map);
cb = fr.cb(); cb = fr.cb();
localwhere = tp_native; localwhere = tp_native;
} }
Method* method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
(Method*)NULL;
Method* method = cb->is_compiled() ? cb->as_compiled_method()->method() : (Method*) NULL;
if (method == NULL) { if (method == NULL) {
if (cb->is_runtime_stub()) if (cb->is_runtime_stub())
runtime_stub_update(cb, name, localwhere); runtime_stub_update(cb, name, localwhere);
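
The profiler node now performs the runtime-stub downcast once, in the constructor, and keeps a typed RuntimeStub* field, so the accessors below lose their casts. A sketch of the pattern with hypothetical model classes:

#include <cassert>
#include <cstdio>

struct CodeBlobM {
  virtual ~CodeBlobM() {}
  virtual bool is_runtime_stub() const { return false; }
};
struct RuntimeStubM : CodeBlobM {
  bool is_runtime_stub() const override { return true; }
  const char* name() const { return "stub"; }
};

struct RuntimeStubNodeM {
  const RuntimeStubM* _stub;  // typed field: cast once, in the constructor

  explicit RuntimeStubNodeM(const CodeBlobM* blob) : _stub(nullptr) {
    assert(blob->is_runtime_stub() && "wrong code blob");
    _stub = static_cast<const RuntimeStubM*>(blob);
  }
  void print() const { std::printf("%s\n", _stub->name()); }  // no cast needed
};

int main() {
  RuntimeStubM s;
  RuntimeStubNodeM(&s).print();
  return 0;
}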

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -132,11 +132,11 @@ void RegisterMap::print() const {
address frame::raw_pc() const { address frame::raw_pc() const {
if (is_deoptimized_frame()) { if (is_deoptimized_frame()) {
nmethod* nm = cb()->as_nmethod_or_null(); CompiledMethod* cm = cb()->as_compiled_method_or_null();
if (nm->is_method_handle_return(pc())) if (cm->is_method_handle_return(pc()))
return nm->deopt_mh_handler_begin() - pc_return_offset; return cm->deopt_mh_handler_begin() - pc_return_offset;
else else
return nm->deopt_handler_begin() - pc_return_offset; return cm->deopt_handler_begin() - pc_return_offset;
} else { } else {
return (pc() - pc_return_offset); return (pc() - pc_return_offset);
} }
@ -183,8 +183,8 @@ bool frame::is_java_frame() const {
bool frame::is_compiled_frame() const { bool frame::is_compiled_frame() const {
if (_cb != NULL && if (_cb != NULL &&
_cb->is_nmethod() && _cb->is_compiled() &&
((nmethod*)_cb)->is_java_method()) { ((CompiledMethod*)_cb)->is_java_method()) {
return true; return true;
} }
return false; return false;
@ -228,8 +228,8 @@ JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) con
bool frame::should_be_deoptimized() const { bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized || if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false; !is_compiled_frame() ) return false;
assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod"); assert(_cb != NULL && _cb->is_compiled(), "must be an nmethod");
nmethod* nm = (nmethod *)_cb; CompiledMethod* nm = (CompiledMethod *)_cb;
if (TraceDependencies) { if (TraceDependencies) {
tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false"); tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
nm->print_value_on(tty); nm->print_value_on(tty);
@ -246,7 +246,7 @@ bool frame::should_be_deoptimized() const {
bool frame::can_be_deoptimized() const { bool frame::can_be_deoptimized() const {
if (!is_compiled_frame()) return false; if (!is_compiled_frame()) return false;
nmethod* nm = (nmethod*)_cb; CompiledMethod* nm = (CompiledMethod*)_cb;
if( !nm->can_be_deoptimized() ) if( !nm->can_be_deoptimized() )
return false; return false;
@ -256,8 +256,7 @@ bool frame::can_be_deoptimized() const {
void frame::deoptimize(JavaThread* thread) { void frame::deoptimize(JavaThread* thread) {
// Schedule deoptimization of an nmethod activation with this frame. // Schedule deoptimization of an nmethod activation with this frame.
assert(_cb != NULL && _cb->is_nmethod(), "must be"); assert(_cb != NULL && _cb->is_compiled(), "must be");
nmethod* nm = (nmethod*)_cb;
// This is a fix for register window patching race // This is a fix for register window patching race
if (NeedsDeoptSuspend && Thread::current() != thread) { if (NeedsDeoptSuspend && Thread::current() != thread) {
@ -316,12 +315,13 @@ void frame::deoptimize(JavaThread* thread) {
// If the call site is a MethodHandle call site use the MH deopt // If the call site is a MethodHandle call site use the MH deopt
// handler. // handler.
address deopt = nm->is_method_handle_return(pc()) ? CompiledMethod* cm = (CompiledMethod*) _cb;
nm->deopt_mh_handler_begin() : address deopt = cm->is_method_handle_return(pc()) ?
nm->deopt_handler_begin(); cm->deopt_mh_handler_begin() :
cm->deopt_handler_begin();
// Save the original pc before we patch in the new one // Save the original pc before we patch in the new one
nm->set_original_pc(this, pc()); cm->set_original_pc(this, pc());
patch_pc(thread, deopt); patch_pc(thread, deopt);
#ifdef ASSERT #ifdef ASSERT
@ -661,13 +661,16 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
} }
} else if (_cb->is_buffer_blob()) { } else if (_cb->is_buffer_blob()) {
st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
} else if (_cb->is_nmethod()) { } else if (_cb->is_compiled()) {
nmethod* nm = (nmethod*)_cb; CompiledMethod* cm = (CompiledMethod*)_cb;
Method* m = nm->method(); Method* m = cm->method();
if (m != NULL) { if (m != NULL) {
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); if (cm->is_nmethod()) {
if (nm->compiler() != NULL) { nmethod* nm = cm->as_nmethod();
st->print(" %s", nm->compiler()->name()); st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
if (nm->compiler() != NULL) {
st->print(" %s", nm->compiler()->name());
}
} }
m->name_and_sig_as_C_string(buf, buflen); m->name_and_sig_as_C_string(buf, buflen);
st->print(" %s", buf); st->print(" %s", buf);
@ -681,9 +684,12 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
st->print(" (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]", st->print(" (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+" INTPTR_FORMAT "]",
m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin()); m->code_size(), p2i(_pc), p2i(_cb->code_begin()), _pc - _cb->code_begin());
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
char* jvmciName = nm->jvmci_installed_code_name(buf, buflen); if (cm->is_nmethod()) {
if (jvmciName != NULL) { nmethod* nm = cm->as_nmethod();
st->print(" (%s)", jvmciName); char* jvmciName = nm->jvmci_installed_code_name(buf, buflen);
if (jvmciName != NULL) {
st->print(" (%s)", jvmciName);
}
} }
#endif #endif
} else { } else {
@ -1244,10 +1250,10 @@ void frame::describe(FrameValues& values, int frame_no) {
values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2); values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
} else if (is_compiled_frame()) { } else if (is_compiled_frame()) {
// For now just label the frame // For now just label the frame
nmethod* nm = cb()->as_nmethod_or_null(); CompiledMethod* cm = (CompiledMethod*)cb();
values.describe(-1, info_address, values.describe(-1, info_address,
FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no, FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
p2i(nm), nm->method()->name_and_sig_as_C_string(), p2i(cm), cm->method()->name_and_sig_as_C_string(),
(_deopt_state == is_deoptimized) ? (_deopt_state == is_deoptimized) ?
" (deoptimized)" : " (deoptimized)" :
((_deopt_state == unknown) ? " (state unknown)" : "")), ((_deopt_state == unknown) ? " (state unknown)" : "")),
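
frame::deoptimize() records the original return pc and then patches the return address to the (possibly MethodHandle-specific) deopt handler, which raw_pc() later reverses for deoptimized frames. A standalone sketch of that save-then-patch idea, with hypothetical names and a simplified one-slot frame:

#include <cassert>
#include <cstdint>

// Hypothetical model: one saved slot per frame; HotSpot stores the
// original pc in a dedicated stack slot of the deoptee frame.
struct FrameModel {
  uintptr_t return_pc;
  uintptr_t original_pc = 0;

  void deoptimize(uintptr_t deopt_handler, uintptr_t deopt_mh_handler,
                  bool mh_return) {
    original_pc = return_pc;                  // remember where to resume
    return_pc = mh_return ? deopt_mh_handler  // MethodHandle call site
                          : deopt_handler;    // regular call site
  }
  // Mirrors raw_pc(): report the pre-patch pc for a deoptimized frame.
  uintptr_t raw_pc() const { return original_pc ? original_pc : return_pc; }
};

int main() {
  FrameModel f{0x1234};
  f.deoptimize(0x2000, 0x3000, /*mh_return=*/false);
  assert(f.return_pc == 0x2000 && f.raw_pc() == 0x1234);
  return 0;
}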

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -127,7 +127,7 @@ int InterpretedRFrame::cost() const {
} }
int CompiledRFrame::cost() const { int CompiledRFrame::cost() const {
nmethod* nm = top_method()->code(); CompiledMethod* nm = top_method()->code();
if (nm != NULL) { if (nm != NULL) {
return nm->insts_size(); return nm->insts_size();
} else { } else {
@ -139,7 +139,7 @@ void CompiledRFrame::init() {
RegisterMap map(thread(), false); RegisterMap map(thread(), false);
vframe* vf = vframe::new_vframe(&_fr, &map, thread()); vframe* vf = vframe::new_vframe(&_fr, &map, thread());
assert(vf->is_compiled_frame(), "must be compiled"); assert(vf->is_compiled_frame(), "must be compiled");
_nm = compiledVFrame::cast(vf)->code(); _nm = compiledVFrame::cast(vf)->code()->as_nmethod();
vf = vf->top(); vf = vf->top();
_vf = javaVFrame::cast(vf); _vf = javaVFrame::cast(vf);
_method = CodeCache::find_nmethod(_fr.pc())->method(); _method = CodeCache::find_nmethod(_fr.pc())->method();

View File

@@ -1010,8 +1010,8 @@ void ThreadSafepointState::handle_polling_page_exception() {
   address real_return_addr = thread()->saved_exception_pc();
   CodeBlob *cb = CodeCache::find_blob(real_return_addr);
-  assert(cb != NULL && cb->is_nmethod(), "return address should be in nmethod");
-  nmethod* nm = (nmethod*)cb;
+  assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod");
+  CompiledMethod* nm = (CompiledMethod*)cb;
   // Find frame of caller
   frame stub_fr = thread()->last_frame();

View File

@@ -540,10 +540,10 @@ address SharedRuntime::get_poll_stub(address pc) {
   CodeBlob *cb = CodeCache::find_blob(pc);
   // Should be an nmethod
-  assert(cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
+  assert(cb && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
   // Look up the relocation information
-  assert(((nmethod*)cb)->is_at_poll_or_poll_return(pc),
+  assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
     "safepoint polling: type must be poll");
 #ifdef ASSERT
@@ -554,8 +554,8 @@ address SharedRuntime::get_poll_stub(address pc) {
 }
 #endif
-  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
-  bool has_wide_vectors = ((nmethod*)cb)->has_wide_vectors();
+  bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
+  bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
   if (at_poll_return) {
     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
            "polling page return stub not created yet");
@@ -630,22 +630,22 @@ JRT_END
 // ret_pc points into caller; we are returning caller's exception handler
 // for given exception
-address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
+address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception,
                                                     bool force_unwind, bool top_frame_only) {
-  assert(nm != NULL, "must exist");
+  assert(cm != NULL, "must exist");
   ResourceMark rm;
 #if INCLUDE_JVMCI
-  if (nm->is_compiled_by_jvmci()) {
+  if (cm->is_compiled_by_jvmci()) {
     // lookup exception handler for this pc
-    int catch_pco = ret_pc - nm->code_begin();
-    ExceptionHandlerTable table(nm);
+    int catch_pco = ret_pc - cm->code_begin();
+    ExceptionHandlerTable table(cm);
     HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
     if (t != NULL) {
-      return nm->code_begin() + t->pco();
+      return cm->code_begin() + t->pco();
     } else {
       // there is no exception handler for this pc => deoptimize
-      nm->make_not_entrant();
+      cm->make_not_entrant();
       // Use Deoptimization::deoptimize for all of its side-effects:
       // revoking biases of monitors, gathering traps statistics, logging...
@@ -662,6 +662,7 @@ address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc,
   }
 #endif // INCLUDE_JVMCI
+  nmethod* nm = cm->as_nmethod();
   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
   // determine handler bci, if any
   EXCEPTION_MARK;
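The shape of this function after the retype: JVMCI code resolves handlers through an explicit table on the broad CompiledMethod, while the classic path still narrows to the concrete nmethod for its ScopeDesc walk. A standalone sketch of that dispatch (all types and lookups are simplified stand-ins, not the HotSpot declarations):

#include <cstddef>

struct nmethod;
struct CompiledMethod {
  virtual bool is_compiled_by_jvmci() const { return false; }
  virtual nmethod* as_nmethod() { return NULL; }
  virtual ~CompiledMethod() {}
};
struct nmethod : public CompiledMethod {
  nmethod* as_nmethod() { return this; }
};

typedef unsigned char* address;

// Placeholder lookups; the real code consults an ExceptionHandlerTable
// resp. ScopeDesc data.
static address handler_from_table(CompiledMethod*, address ret_pc) { return ret_pc; }
static address handler_from_scopes(nmethod*, address ret_pc) { return ret_pc; }

static address compute_handler(CompiledMethod* cm, address ret_pc) {
  if (cm->is_compiled_by_jvmci()) {
    // pc-offset keyed table lookup; a miss deoptimizes the method
    return handler_from_table(cm, ret_pc);
  }
  // the classic path still needs the concrete nmethod type
  return handler_from_scopes(cm->as_nmethod(), ret_pc);
}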
@@ -797,7 +798,7 @@ void SharedRuntime::throw_StackOverflowError_common(JavaThread* thread, bool del
 }
 #if INCLUDE_JVMCI
-address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason) {
+address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason) {
   assert(deopt_reason > Deoptimization::Reason_none && deopt_reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
   thread->set_jvmci_implicit_exception_pc(pc);
   thread->set_pending_deoptimization(Deoptimization::make_trap_request((Deoptimization::DeoptReason)deopt_reason, Deoptimization::Action_reinterpret));
@@ -871,7 +872,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
         //   2. Inline-cache check in nmethod, or
         //   3. Implicit null exception in nmethod
-        if (!cb->is_nmethod()) {
+        if (!cb->is_compiled()) {
           bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
           if (!is_in_blob) {
             // Allow normal crash reporting to handle this
@@ -882,9 +883,9 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
           return StubRoutines::throw_NullPointerException_at_call_entry();
         }
-        // Otherwise, it's an nmethod.  Consult its exception handlers.
-        nmethod* nm = (nmethod*)cb;
-        if (nm->inlinecache_check_contains(pc)) {
+        // Otherwise, it's a compiled method.  Consult its exception handlers.
+        CompiledMethod* cm = (CompiledMethod*)cb;
+        if (cm->inlinecache_check_contains(pc)) {
           // exception happened inside inline-cache check code
           // => the nmethod is not yet active (i.e., the frame
           // is not set up yet) => use return address pushed by
@@ -893,7 +894,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
           return StubRoutines::throw_NullPointerException_at_call_entry();
         }
-        if (nm->method()->is_method_handle_intrinsic()) {
+        if (cm->method()->is_method_handle_intrinsic()) {
           // exception happened inside MH dispatch code, similar to a vtable stub
           Events::log_exception(thread, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
           return StubRoutines::throw_NullPointerException_at_call_entry();
@@ -903,15 +904,15 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
           _implicit_null_throws++;
 #endif
 #if INCLUDE_JVMCI
-          if (nm->is_compiled_by_jvmci() && nm->pc_desc_at(pc) != NULL) {
+          if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
             // If there's no PcDesc then we'll die way down inside of
             // deopt instead of just getting normal error reporting,
             // so only go there if it will succeed.
-            return deoptimize_for_implicit_exception(thread, pc, nm, Deoptimization::Reason_null_check);
+            return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_null_check);
           } else {
 #endif // INCLUDE_JVMCI
-            assert (nm->is_nmethod(), "Expect nmethod");
-            target_pc = nm->continuation_for_implicit_exception(pc);
+            assert (cm->is_nmethod(), "Expect nmethod");
+            target_pc = ((nmethod*)cm)->continuation_for_implicit_exception(pc);
 #if INCLUDE_JVMCI
           }
 #endif // INCLUDE_JVMCI
@@ -925,17 +926,17 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
       case IMPLICIT_DIVIDE_BY_ZERO: {
-        nmethod* nm = CodeCache::find_nmethod(pc);
-        guarantee(nm != NULL, "must have containing compiled method for implicit division-by-zero exceptions");
+        CompiledMethod* cm = CodeCache::find_compiled(pc);
+        guarantee(cm != NULL, "must have containing compiled method for implicit division-by-zero exceptions");
 #ifndef PRODUCT
         _implicit_div0_throws++;
 #endif
 #if INCLUDE_JVMCI
-        if (nm->is_compiled_by_jvmci() && nm->pc_desc_at(pc) != NULL) {
-          return deoptimize_for_implicit_exception(thread, pc, nm, Deoptimization::Reason_div0_check);
+        if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
+          return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_div0_check);
         } else {
 #endif // INCLUDE_JVMCI
-          target_pc = nm->continuation_for_implicit_exception(pc);
+          target_pc = cm->continuation_for_implicit_exception(pc);
 #if INCLUDE_JVMCI
         }
 #endif // INCLUDE_JVMCI
@@ -1084,14 +1085,14 @@ Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc,
 }
 methodHandle SharedRuntime::extract_attached_method(vframeStream& vfst) {
-  nmethod* caller_nm = vfst.nm();
-  nmethodLocker caller_lock(caller_nm);
+  CompiledMethod* caller = vfst.nm();
+  nmethodLocker caller_lock(caller);
   address pc = vfst.frame_pc();
   { // Get call instruction under lock because another thread may be busy patching it.
     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
-    return caller_nm->attached_method_before_pc(pc);
+    return caller->attached_method_before_pc(pc);
   }
   return NULL;
 }
@@ -1283,8 +1284,8 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
   frame caller_frame = thread->last_frame().sender(&cbl_map);
   CodeBlob* caller_cb = caller_frame.cb();
-  guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
-  nmethod* caller_nm = caller_cb->as_nmethod_or_null();
+  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
+  CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
   // make sure caller is not getting deoptimized
   // and removed before we are done with it.
@@ -1347,14 +1348,19 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
   // Make sure the callee nmethod does not get deoptimized and removed before
   // we are done patching the code.
-  nmethod* callee_nm = callee_method->code();
-  if (callee_nm != NULL && !callee_nm->is_in_use()) {
-    // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
-    callee_nm = NULL;
-  }
-  nmethodLocker nl_callee(callee_nm);
+  CompiledMethod* callee = callee_method->code();
+
+  if (callee != NULL) {
+    assert(callee->is_compiled(), "must be nmethod for patching");
+  }
+
+  if (callee != NULL && !callee->is_in_use()) {
+    // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
+    callee = NULL;
+  }
+  nmethodLocker nl_callee(callee);
 #ifdef ASSERT
-  address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
+  address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
 #endif
   if (is_virtual) {
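The new NULL check plus assert makes the patching precondition explicit: only code that is still in use may be patched in, and anything else falls back to the C2I adapter. A tiny standalone sketch of that guard (the struct is an invented stand-in with just what the guard needs):

#include <cstddef>

struct CompiledMethod {
  bool in_use;
  bool is_in_use() const { return in_use; }
};

// Returns the method to patch calls to, or NULL meaning "go through the
// C2I adapter instead": deoptimized or unloaded code must not be entered.
static CompiledMethod* patchable_callee(CompiledMethod* callee) {
  if (callee != NULL && !callee->is_in_use()) {
    callee = NULL;
  }
  return callee;
}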
@@ -1382,12 +1388,12 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
   // which may happen when multiply alive nmethod (tiered compilation)
   // will be supported.
   if (!callee_method->is_old() &&
-      (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
+      (callee == NULL || callee->is_in_use() && (callee_method->code() == callee))) {
 #ifdef ASSERT
     // We must not try to patch to jump to an already unloaded method.
     if (dest_entry_point != 0) {
       CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
-      assert((cb != NULL) && cb->is_nmethod() && (((nmethod*)cb) == callee_nm),
+      assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
              "should not call unloaded nmethod");
     }
 #endif
@@ -1582,8 +1588,9 @@ methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
   RegisterMap reg_map(thread, false);
   frame caller_frame = thread->last_frame().sender(&reg_map);
   CodeBlob* cb = caller_frame.cb();
-  if (cb->is_nmethod()) {
-    CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
+  CompiledMethod* caller_nm = cb->as_compiled_method_or_null();
+  if (cb->is_compiled()) {
+    CompiledIC* inline_cache = CompiledIC_before(((CompiledMethod*)cb), caller_frame.pc());
     bool should_be_mono = false;
     if (inline_cache->is_optimized()) {
       if (TraceCallFixup) {
@@ -1667,7 +1674,7 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
   // Check for static or virtual call
   bool is_static_call = false;
-  nmethod* caller_nm = CodeCache::find_nmethod(pc);
+  CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
   // Default call_addr is the location of the "basic" call.
   // Determine the address of the call we a reresolving. With
@@ -1802,12 +1809,12 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
   // ask me how I know this...
   CodeBlob* cb = CodeCache::find_blob(caller_pc);
-  if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+  if (!cb->is_compiled() || entry_point == moop->get_c2i_entry()) {
     return;
   }
   // The check above makes sure this is a nmethod.
-  nmethod* nm = cb->as_nmethod_or_null();
+  CompiledMethod* nm = cb->as_compiled_method_or_null();
   assert(nm, "must be");
   // Get the return PC for the passed caller PC.

View File

@@ -188,7 +188,7 @@ class SharedRuntime: AllStatic {
 #endif // INCLUDE_ALL_GCS
   // exception handling and implicit exceptions
-  static address compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
+  static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception,
                                               bool force_unwind, bool top_frame_only);
   enum ImplicitExceptionKind {
     IMPLICIT_NULL,
@@ -207,7 +207,7 @@ class SharedRuntime: AllStatic {
                                           address faulting_pc,
                                           ImplicitExceptionKind exception_kind);
 #if INCLUDE_JVMCI
-  static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, nmethod* nm, int deopt_reason);
+  static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason);
 #endif
   static void enable_stack_reserved_zone(JavaThread* thread);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -192,7 +192,7 @@ void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
} }
nmethod* SimpleThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee, nmethod* SimpleThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) { int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
if (comp_level == CompLevel_none && if (comp_level == CompLevel_none &&
JvmtiExport::can_post_interpreter_events() && JvmtiExport::can_post_interpreter_events() &&
thread->is_interp_only_mode()) { thread->is_interp_only_mode()) {
@ -392,7 +392,7 @@ CompLevel SimpleThresholdPolicy::loop_event(Method* method, CompLevel cur_level)
// Handle the invocation event. // Handle the invocation event.
void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, nmethod* nm, JavaThread* thread) { CompLevel level, CompiledMethod* nm, JavaThread* thread) {
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
CompLevel next_level = call_event(mh(), level); CompLevel next_level = call_event(mh(), level);
if (next_level != level) { if (next_level != level) {
@ -404,7 +404,7 @@ void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, cons
// Handle the back branch event. Notice that we can compile the method // Handle the back branch event. Notice that we can compile the method
// with a regular entry from here. // with a regular entry from here.
void SimpleThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, void SimpleThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
int bci, CompLevel level, nmethod* nm, JavaThread* thread) { int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
// If the method is already compiling, quickly bail out. // If the method is already compiling, quickly bail out.
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
// Use loop event as an opportunity to also check there's been // Use loop event as an opportunity to also check there's been

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -81,16 +81,16 @@ protected:
// Get a compilation level for a given method. // Get a compilation level for a given method.
static CompLevel comp_level(Method* method) { static CompLevel comp_level(Method* method) {
nmethod *nm = method->code(); CompiledMethod *nm = method->code();
if (nm != NULL && nm->is_in_use()) { if (nm != NULL && nm->is_in_use()) {
return (CompLevel)nm->comp_level(); return (CompLevel)nm->comp_level();
} }
return CompLevel_none; return CompLevel_none;
} }
virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
CompLevel level, nmethod* nm, JavaThread* thread); CompLevel level, CompiledMethod* nm, JavaThread* thread);
virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
int bci, CompLevel level, nmethod* nm, JavaThread* thread); int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread);
public: public:
SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { } SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
virtual int compiler_count(CompLevel comp_level) { virtual int compiler_count(CompLevel comp_level) {
@ -104,7 +104,7 @@ public:
virtual void disable_compilation(Method* method) { } virtual void disable_compilation(Method* method) { }
virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee,
int branch_bci, int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread); int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread);
// Select task is called by CompileBroker. We should return a task or NULL. // Select task is called by CompileBroker. We should return a task or NULL.
virtual CompileTask* select_task(CompileQueue* compile_queue); virtual CompileTask* select_task(CompileQueue* compile_queue);
// Tell the runtime if we think a given method is adequately profiled. // Tell the runtime if we think a given method is adequately profiled.

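Mirroring comp_level() above, a standalone sketch of the tiering query after the retype (simplified invented types, not the HotSpot declarations): the policy reads the level off whatever CompiledMethod is installed, nmethod or not.

#include <cstddef>

enum CompLevel { CompLevel_none = 0, CompLevel_simple = 1, CompLevel_full_optimization = 4 };

// Invented stand-in with just the two members the query touches.
struct CompiledMethod {
  bool _in_use;
  int  _comp_level;
  bool is_in_use() const { return _in_use; }
  int  comp_level() const { return _comp_level; }
};

static CompLevel comp_level(const CompiledMethod* code) {
  if (code != NULL && code->is_in_use()) {
    return (CompLevel)code->comp_level();   // level of the installed code
  }
  return CompLevel_none;                    // interpreter only
}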
View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -254,8 +254,8 @@ class StubRoutines: AllStatic {
(_code2 != NULL && _code2->blob_contains(addr)) ; (_code2 != NULL && _code2->blob_contains(addr)) ;
} }
static CodeBlob* code1() { return _code1; } static RuntimeBlob* code1() { return _code1; }
static CodeBlob* code2() { return _code2; } static RuntimeBlob* code2() { return _code2; }
// Debugging // Debugging
static jint verify_oop_count() { return _verify_oop_count; } static jint verify_oop_count() { return _verify_oop_count; }

View File

@@ -109,13 +109,13 @@ void NMethodSweeper::report_events() {
   }
 }
-void NMethodSweeper::record_sweep(nmethod* nm, int line) {
+void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
   if (_records != NULL) {
     _records[_sweep_index].traversal = _traversals;
-    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
+    _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->_stack_traversal_mark : 0;
     _records[_sweep_index].compile_id = nm->compile_id();
     _records[_sweep_index].kind = nm->compile_kind();
-    _records[_sweep_index].state = nm->_state;
+    _records[_sweep_index].state = nm->get_state();
     _records[_sweep_index].vep = nm->verified_entry_point();
     _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
@@ -134,7 +134,7 @@ void NMethodSweeper::init_sweeper_log() {
 #define SWEEP(nm)
 #endif
-NMethodIterator NMethodSweeper::_current;          // Current nmethod
+CompiledMethodIterator NMethodSweeper::_current;   // Current compiled method
 long NMethodSweeper::_traversals = 0;              // Stack scan count, also sweep ID.
 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
 long NMethodSweeper::_time_counter = 0;            // Virtual time used to periodically invoke sweeper
@@ -210,10 +210,17 @@ void NMethodSweeper::mark_active_nmethods() {
   _time_counter++;
   // Check for restart
-  assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
+  if (_current.method() != NULL) {
+    if (_current.method()->is_nmethod()) {
+      assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
   if (wait_for_stack_scanning()) {
     _seen = 0;
-    _current = NMethodIterator();
+    _current = CompiledMethodIterator();
     // Initialize to first nmethod
     _current.next();
     _traversals += 1;
@@ -415,14 +422,15 @@ void NMethodSweeper::sweep_code_cache() {
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
-      nmethod* nm = _current.method();
+      CompiledMethod* nm = _current.method();
       _current.next();
       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
         // Save information before potentially flushing the nmethod
-        int size = nm->total_size();
+        // Only flushing nmethods so size only matters for them.
+        int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
         bool is_c2_method = nm->is_compiled_by_c2();
         bool is_osr = nm->is_osr_method();
         int compile_id = nm->compile_id();
@@ -430,7 +438,7 @@ void NMethodSweeper::sweep_code_cache() {
         const char* state_before = nm->state();
         const char* state_after = "";
-        MethodStateChange type = process_nmethod(nm);
+        MethodStateChange type = process_compiled_method(nm);
         switch (type) {
           case Flushed:
             state_after = "flushed";
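A toy model of the iterator switch above (standalone C++, not HotSpot code): the sweep now walks CompiledMethods generally, while size accounting still applies only to nmethods, since only those are ever flushed.

#include <vector>

struct CompiledMethod {
  virtual bool is_nmethod() const { return false; }
  virtual ~CompiledMethod() {}
};
struct nmethod : public CompiledMethod {
  bool is_nmethod() const { return true; }
  int total_size() const { return 128; }   // placeholder size
};

// Models one sweep pass over the code cache: visit every compiled
// method, but count bytes only for nmethods.
static int sweep(const std::vector<CompiledMethod*>& code_cache) {
  int freed_memory = 0;
  for (size_t i = 0; i < code_cache.size(); i++) {
    CompiledMethod* cm = code_cache[i];
    int size = cm->is_nmethod() ? static_cast<nmethod*>(cm)->total_size() : 0;
    freed_memory += size;   // stand-in for process_compiled_method(cm)
  }
  return freed_memory;
}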
@@ -532,28 +540,28 @@ void NMethodSweeper::possibly_enable_sweeper() {
   }
 }
-class NMethodMarker: public StackObj {
+class CompiledMethodMarker: public StackObj {
  private:
   CodeCacheSweeperThread* _thread;
  public:
-  NMethodMarker(nmethod* nm) {
+  CompiledMethodMarker(CompiledMethod* cm) {
     JavaThread* current = JavaThread::current();
     assert (current->is_Code_cache_sweeper_thread(), "Must be");
     _thread = (CodeCacheSweeperThread*)current;
-    if (!nm->is_zombie() && !nm->is_unloaded()) {
+    if (!cm->is_zombie() && !cm->is_unloaded()) {
       // Only expose live nmethods for scanning
-      _thread->set_scanned_nmethod(nm);
+      _thread->set_scanned_compiled_method(cm);
     }
   }
-  ~NMethodMarker() {
-    _thread->set_scanned_nmethod(NULL);
+  ~CompiledMethodMarker() {
+    _thread->set_scanned_compiled_method(NULL);
   }
 };
-void NMethodSweeper::release_nmethod(nmethod* nm) {
+void NMethodSweeper::release_compiled_method(CompiledMethod* nm) {
   // Make sure the released nmethod is no longer referenced by the sweeper thread
   CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current();
-  thread->set_scanned_nmethod(NULL);
+  thread->set_scanned_compiled_method(NULL);
   // Clean up any CompiledICHolders
   {
@@ -571,98 +579,100 @@ void NMethodSweeper::release_nmethod(nmethod* nm) {
   nm->flush();
 }
-NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
-  assert(nm != NULL, "sanity");
+NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
+  assert(cm != NULL, "sanity");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
   MethodStateChange result = None;
   // Make sure this nmethod doesn't get unloaded during the scan,
   // since safepoints may happen during acquired below locks.
-  NMethodMarker nmm(nm);
-  SWEEP(nm);
+  CompiledMethodMarker nmm(cm);
+  SWEEP(cm);
   // Skip methods that are currently referenced by the VM
-  if (nm->is_locked_by_vm()) {
+  if (cm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
-    if (nm->is_alive()) {
+    if (cm->is_alive()) {
       // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
       MutexLocker cl(CompiledIC_lock);
-      nm->cleanup_inline_caches();
-      SWEEP(nm);
+      cm->cleanup_inline_caches();
+      SWEEP(cm);
     }
     return result;
   }
-  if (nm->is_zombie()) {
+  if (cm->is_zombie()) {
     // All inline caches that referred to this nmethod were cleaned in the
     // previous sweeper cycle. Now flush the nmethod from the code cache.
-    assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
-    release_nmethod(nm);
+    assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
+    release_compiled_method(cm);
     assert(result == None, "sanity");
     result = Flushed;
-  } else if (nm->is_not_entrant()) {
+  } else if (cm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
-    if (nm->can_convert_to_zombie()) {
+    if (cm->can_convert_to_zombie()) {
       // Clear ICStubs to prevent back patching stubs of zombie or flushed
       // nmethods during the next safepoint (see ICStub::finalize).
       {
         MutexLocker cl(CompiledIC_lock);
-        nm->clear_ic_stubs();
+        cm->clear_ic_stubs();
       }
       // Code cache state change is tracked in make_zombie()
-      nm->make_zombie();
-      SWEEP(nm);
+      cm->make_zombie();
+      SWEEP(cm);
       // The nmethod may have been locked by JVMTI after being made zombie (see
       // JvmtiDeferredEvent::compiled_method_unload_event()). If so, we cannot
       // flush the osr nmethod directly but have to wait for a later sweeper cycle.
-      if (nm->is_osr_method() && !nm->is_locked_by_vm()) {
+      if (cm->is_osr_method() && !cm->is_locked_by_vm()) {
         // No inline caches will ever point to osr methods, so we can just remove it.
         // Make sure that we unregistered the nmethod with the heap and flushed all
         // dependencies before removing the nmethod (done in make_zombie()).
-        assert(nm->is_zombie(), "nmethod must be unregistered");
-        release_nmethod(nm);
+        assert(cm->is_zombie(), "nmethod must be unregistered");
+        release_compiled_method(cm);
         assert(result == None, "sanity");
         result = Flushed;
       } else {
         assert(result == None, "sanity");
         result = MadeZombie;
-        assert(nm->is_zombie(), "nmethod must be zombie");
+        assert(cm->is_zombie(), "nmethod must be zombie");
       }
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
-      nm->cleanup_inline_caches();
-      SWEEP(nm);
+      cm->cleanup_inline_caches();
+      SWEEP(cm);
     }
-  } else if (nm->is_unloaded()) {
+  } else if (cm->is_unloaded()) {
     // Code is unloaded, so there are no activations on the stack.
     // Convert the nmethod to zombie or flush it directly in the OSR case.
     {
       // Clean ICs of unloaded nmethods as well because they may reference other
       // unloaded nmethods that may be flushed earlier in the sweeper cycle.
       MutexLocker cl(CompiledIC_lock);
-      nm->cleanup_inline_caches();
+      cm->cleanup_inline_caches();
     }
-    if (nm->is_osr_method()) {
-      SWEEP(nm);
+    if (cm->is_osr_method()) {
+      SWEEP(cm);
       // No inline caches will ever point to osr methods, so we can just remove it
-      release_nmethod(nm);
+      release_compiled_method(cm);
       assert(result == None, "sanity");
       result = Flushed;
     } else {
       // Code cache state change is tracked in make_zombie()
-      nm->make_zombie();
-      SWEEP(nm);
+      cm->make_zombie();
+      SWEEP(cm);
       assert(result == None, "sanity");
       result = MadeZombie;
     }
   } else {
-    possibly_flush(nm);
+    if (cm->is_nmethod()) {
+      possibly_flush((nmethod*)cm);
+    }
     // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
     MutexLocker cl(CompiledIC_lock);
-    nm->cleanup_inline_caches();
-    SWEEP(nm);
+    cm->cleanup_inline_caches();
+    SWEEP(cm);
   }
   return result;
 }

View File

@@ -66,7 +66,7 @@ class NMethodSweeper : public AllStatic {
   static long _total_nof_code_cache_sweeps;  // Total number of full sweeps of the code cache
   static long _time_counter;                 // Virtual time used to periodically invoke sweeper
   static long _last_sweep;                   // Value of _time_counter when the last sweep happened
-  static NMethodIterator _current;           // Current nmethod
+  static CompiledMethodIterator _current;    // Current compiled method
   static int  _seen;                         // Nof. nmethod we have currently processed in current pass of CodeCache
   static volatile int  _sweep_started;       // Flag to control conc sweeper
@@ -88,8 +88,8 @@ class NMethodSweeper : public AllStatic {
   static Monitor* _stat_lock;
-  static MethodStateChange process_nmethod(nmethod *nm);
-  static void release_nmethod(nmethod* nm);
+  static MethodStateChange process_compiled_method(CompiledMethod *nm);
+  static void release_compiled_method(CompiledMethod* nm);
   static void init_sweeper_log() NOT_DEBUG_RETURN;
   static bool wait_for_stack_scanning();
@@ -107,9 +107,8 @@ class NMethodSweeper : public AllStatic {
 #ifdef ASSERT
-  static bool is_sweeping(nmethod* which) { return _current.method() == which; }
   // Keep track of sweeper activity in the ring buffer
-  static void record_sweep(nmethod* nm, int line);
+  static void record_sweep(CompiledMethod* nm, int line);
   static void report_events(int id, address entry);
   static void report_events();
 #endif

View File

@@ -1432,7 +1432,7 @@ void JavaThread::initialize() {
   set_vframe_array_last(NULL);
   set_deferred_locals(NULL);
   set_deopt_mark(NULL);
-  set_deopt_nmethod(NULL);
+  set_deopt_compiled_method(NULL);
   clear_must_deopt_id();
   set_monitor_chunks(NULL);
   set_next(NULL);
@@ -3300,26 +3300,26 @@ bool CompilerThread::can_call_java() const {
 // Create sweeper thread
 CodeCacheSweeperThread::CodeCacheSweeperThread()
 : JavaThread(&sweeper_thread_entry) {
-  _scanned_nmethod = NULL;
+  _scanned_compiled_method = NULL;
 }
 void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
-  if (_scanned_nmethod != NULL && cf != NULL) {
+  if (_scanned_compiled_method != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
     // process it here to make sure it isn't unloaded in the middle of
     // a scan.
-    cf->do_code_blob(_scanned_nmethod);
+    cf->do_code_blob(_scanned_compiled_method);
   }
 }
 void CodeCacheSweeperThread::nmethods_do(CodeBlobClosure* cf) {
   JavaThread::nmethods_do(cf);
-  if (_scanned_nmethod != NULL && cf != NULL) {
+  if (_scanned_compiled_method != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
     // process it here to make sure it isn't unloaded in the middle of
    // a scan.
-    cf->do_code_blob(_scanned_nmethod);
+    cf->do_code_blob(_scanned_compiled_method);
   }
 }
@@ -4353,7 +4353,7 @@ void Threads::nmethods_do(CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
     // This is used by the code cache sweeper to mark nmethods that are active
     // on the stack of a Java thread. Ignore the sweeper thread itself to avoid
-    // marking CodeCacheSweeperThread::_scanned_nmethod as active.
+    // marking CodeCacheSweeperThread::_scanned_compiled_method as active.
     if(!p->is_Code_cache_sweeper_thread()) {
       p->nmethods_do(cf);
     }

View File

@@ -820,7 +820,7 @@ class JavaThread: public Thread {
   intptr_t* _must_deopt_id;                    // id of frame that needs to be deopted once we
                                                // transition out of native
-  nmethod* _deopt_nmethod;                     // nmethod that is currently being deoptimized
+  CompiledMethod* _deopt_nmethod;              // CompiledMethod that is currently being deoptimized
   vframeArray* _vframe_array_head;             // Holds the heap of the active vframeArrays
   vframeArray* _vframe_array_last;             // Holds last vFrameArray we popped
   // Because deoptimization is lazy we must save jvmti requests to set locals
@@ -1298,8 +1298,8 @@ class JavaThread: public Thread {
   void set_must_deopt_id(intptr_t* id)         { _must_deopt_id = id; }
   void clear_must_deopt_id()                   { _must_deopt_id = NULL; }
-  void set_deopt_nmethod(nmethod* nm)          { _deopt_nmethod = nm;   }
-  nmethod* deopt_nmethod()                     { return _deopt_nmethod; }
+  void set_deopt_compiled_method(CompiledMethod* nm) { _deopt_nmethod = nm;   }
+  CompiledMethod* deopt_compiled_method()            { return _deopt_nmethod; }
   Method*    callee_target() const             { return _callee_target; }
   void set_callee_target  (Method* x)          { _callee_target   = x; }
@@ -1980,13 +1980,13 @@ inline CompilerThread* JavaThread::as_CompilerThread() {
 // Dedicated thread to sweep the code cache
 class CodeCacheSweeperThread : public JavaThread {
-  nmethod*        _scanned_nmethod;         // nmethod being scanned by the sweeper
+  CompiledMethod* _scanned_compiled_method; // nmethod being scanned by the sweeper
  public:
   CodeCacheSweeperThread();
   // Track the nmethod currently being scanned by the sweeper
-  void set_scanned_nmethod(nmethod* nm) {
-    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
-    _scanned_nmethod = nm;
+  void set_scanned_compiled_method(CompiledMethod* cm) {
+    assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
+    _scanned_compiled_method = cm;
   }
   // Hide sweeper thread from external view.
@@ -1994,7 +1994,7 @@ class CodeCacheSweeperThread : public JavaThread {
   bool is_Code_cache_sweeper_thread() const { return true; }
-  // Prevent GC from unloading _scanned_nmethod
+  // Prevent GC from unloading _scanned_compiled_method
   void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   void nmethods_do(CodeBlobClosure* cf);
 };

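The assert in set_scanned_compiled_method() encodes a set-then-reset discipline, and the sweeper wraps it in a stack object so the reset cannot be forgotten across safepoints. A minimal self-contained sketch of that RAII pattern (names simplified, not the HotSpot classes):

#include <cassert>
#include <cstddef>

struct CompiledMethod;   // opaque here

struct SweeperThread {
  CompiledMethod* _scanned;
  SweeperThread() : _scanned(NULL) {}
  void set_scanned(CompiledMethod* cm) {
    // should reset to NULL before writing a new value
    assert(_scanned == NULL || cm == NULL);
    _scanned = cm;
  }
};

// Exposes the method to GC scans for exactly the lifetime of the scope.
class ScanMarker {
  SweeperThread* _thread;
 public:
  ScanMarker(SweeperThread* t, CompiledMethod* cm) : _thread(t) {
    _thread->set_scanned(cm);
  }
  ~ScanMarker() { _thread->set_scanned(NULL); }
};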
View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -67,8 +67,8 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea
// Compiled frame // Compiled frame
CodeBlob* cb = f->cb(); CodeBlob* cb = f->cb();
if (cb != NULL) { if (cb != NULL) {
if (cb->is_nmethod()) { if (cb->is_compiled()) {
nmethod* nm = (nmethod*)cb; CompiledMethod* nm = (CompiledMethod*)cb;
return new compiledVFrame(f, reg_map, thread, nm); return new compiledVFrame(f, reg_map, thread, nm);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -66,9 +66,9 @@ class vframe: public ResourceObj {
// Accessors // Accessors
frame fr() const { return _fr; } frame fr() const { return _fr; }
CodeBlob* cb() const { return _fr.cb(); } CodeBlob* cb() const { return _fr.cb(); }
nmethod* nm() const { CompiledMethod* nm() const {
assert( cb() != NULL && cb()->is_nmethod(), "usage"); assert( cb() != NULL && cb()->is_compiled(), "usage");
return (nmethod*) cb(); return (CompiledMethod*) cb();
} }
// ???? Does this need to be a copy? // ???? Does this need to be a copy?
@ -326,9 +326,9 @@ class vframeStreamCommon : StackObj {
} }
CodeBlob* cb() const { return _frame.cb(); } CodeBlob* cb() const { return _frame.cb(); }
nmethod* nm() const { CompiledMethod* nm() const {
assert( cb() != NULL && cb()->is_nmethod(), "usage"); assert( cb() != NULL && cb()->is_compiled(), "usage");
return (nmethod*) cb(); return (CompiledMethod*) cb();
} }
// Frame type // Frame type
@ -449,7 +449,7 @@ inline bool vframeStreamCommon::fill_from_frame() {
// Compiled frame // Compiled frame
if (cb() != NULL && cb()->is_nmethod()) { if (cb() != NULL && cb()->is_compiled()) {
if (nm()->is_native_method()) { if (nm()->is_native_method()) {
// Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick. // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
fill_from_compiled_native_frame(); fill_from_compiled_native_frame();

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -206,8 +206,8 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
// in which case bcp should point to the monitorenter since it is within the exception's range. // in which case bcp should point to the monitorenter since it is within the exception's range.
assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame"); assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
assert(thread->deopt_nmethod() != NULL, "nmethod should be known"); assert(thread->deopt_compiled_method() != NULL, "compiled method should be known");
guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() && guarantee(!(thread->deopt_compiled_method()->is_compiled_by_c2() &&
*bcp == Bytecodes::_monitorenter && *bcp == Bytecodes::_monitorenter &&
exec_mode == Deoptimization::Unpack_exception), exec_mode == Deoptimization::Unpack_exception),
"shouldn't get exception during monitorenter"); "shouldn't get exception during monitorenter");

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -196,7 +196,7 @@ BasicLock* compiledVFrame::resolve_monitor_lock(Location location) const {
GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const { GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
// Natives has no scope // Natives has no scope
if (scope() == NULL) { if (scope() == NULL) {
nmethod* nm = code(); CompiledMethod* nm = code();
Method* method = nm->method(); Method* method = nm->method();
assert(method->is_native(), ""); assert(method->is_native(), "");
if (!method->is_synchronized()) { if (!method->is_synchronized()) {
@ -240,13 +240,13 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const {
} }
compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm) compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, CompiledMethod* nm)
: javaVFrame(fr, reg_map, thread) { : javaVFrame(fr, reg_map, thread) {
_scope = NULL; _scope = NULL;
// Compiled method (native stub or Java code) // Compiled method (native stub or Java code)
// native wrappers have no scope data, it is implied // native wrappers have no scope data, it is implied
if (!nm->is_native_method()) { if (!nm->is_compiled() || !nm->as_compiled_method()->is_native_method()) {
_scope = nm->scope_desc_at(_fr.pc()); _scope = nm->scope_desc_at(_fr.pc());
} }
} }
@ -264,15 +264,15 @@ bool compiledVFrame::is_top() const {
} }
nmethod* compiledVFrame::code() const { CompiledMethod* compiledVFrame::code() const {
return CodeCache::find_nmethod(_fr.pc()); return CodeCache::find_compiled(_fr.pc());
} }
Method* compiledVFrame::method() const { Method* compiledVFrame::method() const {
if (scope() == NULL) { if (scope() == NULL) {
// native nmethods have no scope the method is implied // native nmethods have no scope the method is implied
nmethod* nm = code(); nmethod* nm = code()->as_nmethod();
assert(nm->is_native_method(), "must be native"); assert(nm->is_native_method(), "must be native");
return nm->method(); return nm->method();
} }
@ -289,7 +289,7 @@ int compiledVFrame::bci() const {
int compiledVFrame::raw_bci() const { int compiledVFrame::raw_bci() const {
if (scope() == NULL) { if (scope() == NULL) {
// native nmethods have no scope the method/bci is implied // native nmethods have no scope the method/bci is implied
nmethod* nm = code(); nmethod* nm = code()->as_nmethod();
assert(nm->is_native_method(), "must be native"); assert(nm->is_native_method(), "must be native");
return 0; return 0;
} }
@ -299,7 +299,7 @@ int compiledVFrame::raw_bci() const {
bool compiledVFrame::should_reexecute() const { bool compiledVFrame::should_reexecute() const {
if (scope() == NULL) { if (scope() == NULL) {
// native nmethods have no scope the method/bci is implied // native nmethods have no scope the method/bci is implied
nmethod* nm = code(); nmethod* nm = code()->as_nmethod();
assert(nm->is_native_method(), "must be native"); assert(nm->is_native_method(), "must be native");
return false; return false;
} }
@ -310,7 +310,7 @@ vframe* compiledVFrame::sender() const {
const frame f = fr(); const frame f = fr();
if (scope() == NULL) { if (scope() == NULL) {
// native nmethods have no scope the method/bci is implied // native nmethods have no scope the method/bci is implied
nmethod* nm = code(); nmethod* nm = code()->as_nmethod();
assert(nm->is_native_method(), "must be native"); assert(nm->is_native_method(), "must be native");
return vframe::sender(); return vframe::sender();
} else { } else {

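The recurring scope() == NULL pattern above is the contract for native wrappers: they carry no ScopeDesc, so method and bci are implied by the code itself. A condensed sketch of that convention (minimal invented types, not the HotSpot declarations):

#include <cassert>
#include <cstddef>

struct ScopeDesc { int bci; };

struct nmethod {
  bool       _native;
  ScopeDesc* _scope;   // NULL for native wrappers
  bool is_native_method() const { return _native; }
  ScopeDesc* scope_at_pc() const { return _scope; }
};

// bci of a compiled frame: native wrappers have no scope, so the bci
// is implied (0); everything else reads it from the ScopeDesc.
static int raw_bci(const nmethod* nm) {
  ScopeDesc* scope = nm->scope_at_pc();
  if (scope == NULL) {
    assert(nm->is_native_method());
    return 0;
  }
  return scope->bci;
}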
View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -52,13 +52,13 @@ class compiledVFrame: public javaVFrame {
public: public:
// Constructors // Constructors
compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, nmethod* nm); compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, CompiledMethod* nm);
// Update a local in a compiled frame. Update happens when deopt occurs // Update a local in a compiled frame. Update happens when deopt occurs
void update_local(BasicType type, int index, jvalue value); void update_local(BasicType type, int index, jvalue value);
// Returns the active nmethod // Returns the active nmethod
nmethod* code() const; CompiledMethod* code() const;
// Returns the scopeDesc // Returns the scopeDesc
ScopeDesc* scope() const { return _scope; } ScopeDesc* scope() const { return _scope; }

View File

@ -398,7 +398,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
nonstatic_field(Method, _intrinsic_id, u2) \ nonstatic_field(Method, _intrinsic_id, u2) \
nonstatic_field(Method, _flags, u2) \ nonstatic_field(Method, _flags, u2) \
nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \
volatile_nonstatic_field(Method, _code, nmethod*) \ volatile_nonstatic_field(Method, _code, CompiledMethod*) \
nonstatic_field(Method, _i2i_entry, address) \ nonstatic_field(Method, _i2i_entry, address) \
volatile_nonstatic_field(Method, _from_compiled_entry, address) \ volatile_nonstatic_field(Method, _from_compiled_entry, address) \
volatile_nonstatic_field(Method, _from_interpreted_entry, address) \ volatile_nonstatic_field(Method, _from_interpreted_entry, address) \
@@ -915,40 +915,47 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
 /* CodeBlobs (NOTE: incomplete, but only a little) */ \
 /***************************************************/ \
 \
 nonstatic_field(CodeBlob, _name, const char*) \
 nonstatic_field(CodeBlob, _size, int) \
 nonstatic_field(CodeBlob, _header_size, int) \
-nonstatic_field(CodeBlob, _relocation_size, int) \
-nonstatic_field(CodeBlob, _content_offset, int) \
-nonstatic_field(CodeBlob, _code_offset, int) \
 nonstatic_field(CodeBlob, _frame_complete_offset, int) \
 nonstatic_field(CodeBlob, _data_offset, int) \
 nonstatic_field(CodeBlob, _frame_size, int) \
 nonstatic_field(CodeBlob, _oop_maps, ImmutableOopMapSet*) \
+nonstatic_field(CodeBlob, _code_begin, address) \
+nonstatic_field(CodeBlob, _code_end, address) \
+nonstatic_field(CodeBlob, _content_begin, address) \
+nonstatic_field(CodeBlob, _data_end, address) \
 \
 nonstatic_field(DeoptimizationBlob, _unpack_offset, int) \
 \
 nonstatic_field(RuntimeStub, _caller_must_gc_arguments, bool) \
 \
+/********************************************************/ \
+/* CompiledMethod (NOTE: incomplete, but only a little) */ \
+/********************************************************/ \
+\
+nonstatic_field(CompiledMethod, _method, Method*) \
+volatile_nonstatic_field(CompiledMethod, _exception_cache, ExceptionCache*) \
+nonstatic_field(CompiledMethod, _scopes_data_begin, address) \
+nonstatic_field(CompiledMethod, _deopt_handler_begin, address) \
+nonstatic_field(CompiledMethod, _deopt_mh_handler_begin, address) \
+\
 /**************************************************/ \
 /* NMethods (NOTE: incomplete, but only a little) */ \
 /**************************************************/ \
 \
-nonstatic_field(nmethod, _method, Method*) \
 nonstatic_field(nmethod, _entry_bci, int) \
 nonstatic_field(nmethod, _osr_link, nmethod*) \
 nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
 nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
 nonstatic_field(nmethod, _state, volatile unsigned char) \
 nonstatic_field(nmethod, _exception_offset, int) \
-nonstatic_field(nmethod, _deoptimize_offset, int) \
-nonstatic_field(nmethod, _deoptimize_mh_offset, int) \
 nonstatic_field(nmethod, _orig_pc_offset, int) \
 nonstatic_field(nmethod, _stub_offset, int) \
 nonstatic_field(nmethod, _consts_offset, int) \
 nonstatic_field(nmethod, _oops_offset, int) \
 nonstatic_field(nmethod, _metadata_offset, int) \
-nonstatic_field(nmethod, _scopes_data_offset, int) \
 nonstatic_field(nmethod, _scopes_pcs_offset, int) \
 nonstatic_field(nmethod, _dependencies_offset, int) \
 nonstatic_field(nmethod, _handler_table_offset, int) \
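The CompiledMethod entries added above describe state hoisted out of nmethod; the matching removals (_method, the two deopt offsets, _scopes_data_offset, and _exception_cache in the next hunk) make the move visible. A rough C++ sketch of the members those entries imply, assuming only the names and types listed here; the real class declares far more, and the sketch class name is illustrative:

typedef unsigned char* address;  // HotSpot's address type
class Method;
class ExceptionCache;

// Sketch only: offsets relative to the blob become absolute begin
// addresses, which is what lets the code sections be non-continuous.
class CompiledMethodSketch {
  Method* _method;                            // the Java method this code implements
  ExceptionCache* volatile _exception_cache;  // updated concurrently, hence volatile
  address _scopes_data_begin;                 // replaces nmethod's _scopes_data_offset
  address _deopt_handler_begin;               // replaces _deoptimize_offset
  address _deopt_mh_handler_begin;            // replaces _deoptimize_mh_offset
};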
@@ -961,7 +968,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
 nonstatic_field(nmethod, _stack_traversal_mark, long) \
 nonstatic_field(nmethod, _compile_id, int) \
 nonstatic_field(nmethod, _comp_level, int) \
-volatile_nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \
 \
 unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \
 \
@@ -1744,16 +1750,18 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
 declare_toplevel_type(SharedRuntime) \
 \
 declare_toplevel_type(CodeBlob) \
-declare_type(BufferBlob, CodeBlob) \
+declare_type(RuntimeBlob, CodeBlob) \
+declare_type(BufferBlob, RuntimeBlob) \
 declare_type(AdapterBlob, BufferBlob) \
 declare_type(MethodHandlesAdapterBlob, BufferBlob) \
-declare_type(nmethod, CodeBlob) \
-declare_type(RuntimeStub, CodeBlob) \
-declare_type(SingletonBlob, CodeBlob) \
+declare_type(CompiledMethod, CodeBlob) \
+declare_type(nmethod, CompiledMethod) \
+declare_type(RuntimeStub, RuntimeBlob) \
+declare_type(SingletonBlob, RuntimeBlob) \
 declare_type(SafepointBlob, SingletonBlob) \
 declare_type(DeoptimizationBlob, SingletonBlob) \
 declare_c2_type(ExceptionBlob, SingletonBlob) \
-declare_c2_type(UncommonTrapBlob, CodeBlob) \
+declare_c2_type(UncommonTrapBlob, RuntimeBlob) \
 \
 /***************************************/ \
 /* PcDesc and other compiled code info */ \
@@ -2236,6 +2244,7 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
 declare_toplevel_type(BreakpointInfo) \
 declare_toplevel_type(BreakpointInfo*) \
 declare_toplevel_type(CodeBlob*) \
+declare_toplevel_type(RuntimeBlob*) \
 declare_toplevel_type(CompressedWriteStream*) \
 declare_toplevel_type(ConstantPoolCacheEntry) \
 declare_toplevel_type(elapsedTimer) \
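Read together, the declare_type changes above define the reshaped blob hierarchy: RuntimeBlob and CompiledMethod become intermediate layers under CodeBlob, and nmethod now derives from CompiledMethod rather than from CodeBlob directly. A minimal C++ sketch of the relationships, using only names from the declarations above (the real HotSpot classes of course carry state and behavior):

// Hierarchy implied by the declare_type entries; bodies elided.
class CodeBlob { };
class RuntimeBlob : public CodeBlob { };        // VM-generated blobs
class CompiledMethod : public CodeBlob { };     // common base for compiled code
class nmethod : public CompiledMethod { };      // a JIT-compiled Java method
class BufferBlob : public RuntimeBlob { };      // AdapterBlob, MethodHandlesAdapterBlob
class RuntimeStub : public RuntimeBlob { };
class SingletonBlob : public RuntimeBlob { };   // SafepointBlob, DeoptimizationBlob, ...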


@@ -1003,7 +1003,9 @@ class VM_Operation;
 class VMOperationQueue;
 class CodeBlob;
+class CompiledMethod;
 class nmethod;
+class RuntimeBlob;
 class OSRAdapter;
 class I2CAdapter;
 class C2IAdapter;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@ public class InitialAndMaxUsageTest
             Asserts.assertEQ(initialUsage, 0L, "Unexpected initial usage");
         }
         ArrayList<Long> blobs = new ArrayList<>();
-        long minAllocationUnit = CodeCacheUtils.MIN_ALLOCATION - headerSize;
+        long minAllocationUnit = Math.max(0, CodeCacheUtils.MIN_ALLOCATION - headerSize);
         /* now filling code cache with large-sized allocation first, since
            lots of small allocations takes too much time, so, just a small
            optimization */
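The Math.max(0, ...) clamp here, which recurs in the two threshold tests below, guards the size arithmetic: the guard suggests that with the reworked blob layout the per-blob header can reach or exceed CodeCacheUtils.MIN_ALLOCATION, so the unclamped subtraction could go negative and request an impossible blob size. A standalone sketch of the arithmetic (function and parameter names are illustrative, not from the test library):

#include <algorithm>
#include <cstdint>

// Clamp the usable payload size to zero when the header alone
// consumes the whole minimum allocation unit.
int64_t allocation_unit(int64_t min_allocation, int64_t header_size) {
    return std::max<int64_t>(0, min_allocation - header_size);
}

For example, allocation_unit(1024, 1280) yields 0 where the unclamped expression produced -256.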


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ public class UsageThresholdIncreasedTest
     protected void runTest() {
         long headerSize = CodeCacheUtils.getHeaderSize(btype);
-        long allocationUnit = CodeCacheUtils.MIN_ALLOCATION - headerSize;
+        long allocationUnit = Math.max(0, CodeCacheUtils.MIN_ALLOCATION - headerSize);
         MemoryPoolMXBean bean = btype.getMemoryPool();
         long initialCount = bean.getUsageThresholdCount();
         long initialSize = bean.getUsage().getUsed();


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,9 +58,12 @@ public class UsageThresholdNotExceededTest
         MemoryPoolMXBean bean = btype.getMemoryPool();
         long initialThresholdCount = bean.getUsageThresholdCount();
         long initialUsage = bean.getUsage().getUsed();
         bean.setUsageThreshold(initialUsage + 1 + CodeCacheUtils.MIN_ALLOCATION);
-        CodeCacheUtils.WB.allocateCodeBlob(CodeCacheUtils.MIN_ALLOCATION
-                - CodeCacheUtils.getHeaderSize(btype), btype.id);
+        long size = CodeCacheUtils.getHeaderSize(btype);
+        CodeCacheUtils.WB.allocateCodeBlob(Math.max(0, CodeCacheUtils.MIN_ALLOCATION
+                - size), btype.id);
         // a gc cycle triggers usage threshold recalculation
         CodeCacheUtils.WB.fullGC();
         CodeCacheUtils.assertEQorGTE(btype, bean.getUsageThresholdCount(), initialThresholdCount,