Merge
commit 5dc9f56544
@@ -825,17 +825,6 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
 
-  // The following routine generates a subroutine to throw an asynchronous
-  // UnknownError when an unsafe access gets a fault that could not be
-  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
-  //
-  address generate_handler_for_unsafe_access() {
-    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
-    address start = __ function_entry();
-    __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
-    return start;
-  }
-
 #if !defined(PRODUCT)
   // Wrapper which calls oopDesc::is_oop_or_null()
   // Only called by MacroAssembler::verify_oop
@@ -3111,8 +3100,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
 
-    StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
-
     // support for verify_oop (must happen after universe_init)
     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
 
@@ -64,20 +64,6 @@ static const Register& Lstub_temp = L2;
 // -------------------------------------------------------------------------------------------------------------------------
 // Stub Code definitions
 
-static address handle_unsafe_access() {
-  JavaThread* thread = JavaThread::current();
-  address pc  = thread->saved_exception_pc();
-  address npc = thread->saved_exception_npc();
-  // pc is the instruction which we must emulate
-  // doing a no-op is fine: return garbage from the load
-
-  // request an async exception
-  thread->set_pending_unsafe_access_error();
-
-  // return address of next instruction to execute
-  return npc;
-}
-
 class StubGenerator: public StubCodeGenerator {
  private:
 
@@ -746,62 +732,6 @@ class StubGenerator: public StubCodeGenerator {
   Label _atomic_add_stub; // called from other stubs
 
-
-  //------------------------------------------------------------------------------------------------------------------------
-  // The following routine generates a subroutine to throw an asynchronous
-  // UnknownError when an unsafe access gets a fault that could not be
-  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
-  //
-  // Arguments :
-  //
-  //      trapping PC: O7
-  //
-  // Results:
-  //      posts an asynchronous exception, skips the trapping instruction
-  //
-
-  address generate_handler_for_unsafe_access() {
-    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
-    address start = __ pc();
-
-    const int preserve_register_words = (64 * 2);
-    Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
-
-    Register Lthread = L7_thread_cache;
-    int i;
-
-    __ save_frame(0);
-    __ mov(G1, L1);
-    __ mov(G2, L2);
-    __ mov(G3, L3);
-    __ mov(G4, L4);
-    __ mov(G5, L5);
-    for (i = 0; i < 64; i += 2) {
-      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
-    }
-
-    address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
-    BLOCK_COMMENT("call handle_unsafe_access");
-    __ call(entry_point, relocInfo::runtime_call_type);
-    __ delayed()->nop();
-
-    __ mov(L1, G1);
-    __ mov(L2, G2);
-    __ mov(L3, G3);
-    __ mov(L4, G4);
-    __ mov(L5, G5);
-    for (i = 0; i < 64; i += 2) {
-      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
-    }
-
-    __ verify_thread();
-
-    __ jmp(O0, 0);
-    __ delayed()->restore();
-
-    return start;
-  }
-
-
   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
   // Arguments :
   //
@@ -5218,9 +5148,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
 
-    StubRoutines::_handler_for_unsafe_access_entry =
-      generate_handler_for_unsafe_access();
-
     // support for verify_oop (must happen after universe_init)
     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,21 +63,6 @@ const int FPU_CNTRL_WRD_MASK = 0xFFFF;
 // -------------------------------------------------------------------------------------------------------------------------
 // Stub Code definitions
 
-static address handle_unsafe_access() {
-  JavaThread* thread = JavaThread::current();
-  address pc = thread->saved_exception_pc();
-  // pc is the instruction which we must emulate
-  // doing a no-op is fine: return garbage from the load
-  // therefore, compute npc
-  address npc = Assembler::locate_next_instruction(pc);
-
-  // request an async exception
-  thread->set_pending_unsafe_access_error();
-
-  // return address of next instruction to execute
-  return npc;
-}
-
 class StubGenerator: public StubCodeGenerator {
  private:
 
@@ -623,27 +608,6 @@ class StubGenerator: public StubCodeGenerator {
   }
 
-
-  //---------------------------------------------------------------------------
-  // The following routine generates a subroutine to throw an asynchronous
-  // UnknownError when an unsafe access gets a fault that could not be
-  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
-  address generate_handler_for_unsafe_access() {
-    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
-    address start = __ pc();
-
-    __ push(0);   // hole for return address-to-be
-    __ pusha();   // push registers
-    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
-    BLOCK_COMMENT("call handle_unsafe_access");
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
-    __ movptr(next_pc, rax);  // stuff next address
-    __ popa();
-    __ ret(0);    // jump to next address
-
-    return start;
-  }
-
 
   //----------------------------------------------------------------------------------------------------
   // Non-destructive plausibility checks for oops
 
@@ -3865,9 +3829,6 @@ class StubGenerator: public StubCodeGenerator {
     // These are currently used by Solaris/Intel
     StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
 
-    StubRoutines::_handler_for_unsafe_access_entry =
-      generate_handler_for_unsafe_access();
-
     // platform dependent
     create_control_words();
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,21 +61,6 @@ const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
 
 // Stub Code definitions
 
-static address handle_unsafe_access() {
-  JavaThread* thread = JavaThread::current();
-  address pc = thread->saved_exception_pc();
-  // pc is the instruction which we must emulate
-  // doing a no-op is fine: return garbage from the load
-  // therefore, compute npc
-  address npc = Assembler::locate_next_instruction(pc);
-
-  // request an async exception
-  thread->set_pending_unsafe_access_error();
-
-  // return address of next instruction to execute
-  return npc;
-}
-
 class StubGenerator: public StubCodeGenerator {
  private:
 
@@ -989,32 +974,6 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }
 
-  // The following routine generates a subroutine to throw an
-  // asynchronous UnknownError when an unsafe access gets a fault that
-  // could not be reasonably prevented by the programmer. (Example:
-  // SIGBUS/OBJERR.)
-  address generate_handler_for_unsafe_access() {
-    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
-    address start = __ pc();
-
-    __ push(0);   // hole for return address-to-be
-    __ pusha();   // push registers
-    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
-
-    // FIXME: this probably needs alignment logic
-
-    __ subptr(rsp, frame::arg_reg_save_area_bytes);
-    BLOCK_COMMENT("call handle_unsafe_access");
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
-    __ addptr(rsp, frame::arg_reg_save_area_bytes);
-
-    __ movptr(next_pc, rax);  // stuff next address
-    __ popa();
-    __ ret(0);    // jump to next address
-
-    return start;
-  }
-
   // Non-destructive plausibility checks for oops
   //
   // Arguments:
@@ -5139,9 +5098,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
     StubRoutines::_fence_entry = generate_orderaccess_fence();
 
-    StubRoutines::_handler_for_unsafe_access_entry =
-      generate_handler_for_unsafe_access();
-
     // platform dependent
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
     StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -261,10 +261,6 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub();
     StubRoutines::_fence_entry = ShouldNotCallThisStub();
-
-    // amd64 does this here, sparc does it in generate_all()
-    StubRoutines::_handler_for_unsafe_access_entry =
-      ShouldNotCallThisStub();
   }
 
   void generate_all() {
@@ -392,11 +392,9 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
         CompiledMethod* nm = cb->as_compiled_method_or_null();
         if (nm != NULL && nm->has_unsafe_access()) {
-          // We don't really need a stub here! Just set the pending exeption and
-          // continue at the next instruction after the faulting read. Returning
-          // garbage from this read is ok.
-          thread->set_pending_unsafe_access_error();
-          os::Aix::ucontext_set_pc(uc, pc + 4);
+          address next_pc = pc + 4;
+          next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
+          os::Aix::ucontext_set_pc(uc, next_pc);
           return 1;
         }
       }
@@ -415,11 +413,9 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
     }
     else if (thread->thread_state() == _thread_in_vm &&
              sig == SIGBUS && thread->doing_unsafe_access()) {
-      // We don't really need a stub here! Just set the pending exeption and
-      // continue at the next instruction after the faulting read. Returning
-      // garbage from this read is ok.
-      thread->set_pending_unsafe_access_error();
-      os::Aix::ucontext_set_pc(uc, pc + 4);
+      address next_pc = pc + 4;
+      next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
+      os::Aix::ucontext_set_pc(uc, next_pc);
       return 1;
     }
   }
@@ -584,7 +584,8 @@ JVM_handle_bsd_signal(int sig,
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
         CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
         if (nm != NULL && nm->has_unsafe_access()) {
-          stub = StubRoutines::handler_for_unsafe_access();
+          address next_pc = Assembler::locate_next_instruction(pc);
+          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
         }
       }
       else
@@ -655,7 +656,8 @@ JVM_handle_bsd_signal(int sig,
   } else if (thread->thread_state() == _thread_in_vm &&
              sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
              thread->doing_unsafe_access()) {
-    stub = StubRoutines::handler_for_unsafe_access();
+    address next_pc = Assembler::locate_next_instruction(pc);
+    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
   }
 
   // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
@@ -226,23 +226,6 @@ extern "C" void FetchNPFI () ;
 extern "C" void FetchNResume () ;
 #endif
 
-// An operation in Unsafe has faulted. We're going to return to the
-// instruction after the faulting load or store. We also set
-// pending_unsafe_access_error so that at some point in the future our
-// user will get a helpful message.
-static address handle_unsafe_access(JavaThread* thread, address pc) {
-  // pc is the instruction which we must emulate
-  // doing a no-op is fine: return garbage from the load
-  // therefore, compute npc
-  address npc = pc + NativeCall::instruction_size;
-
-  // request an async exception
-  thread->set_pending_unsafe_access_error();
-
-  // return address of next instruction to execute
-  return npc;
-}
-
 extern "C" JNIEXPORT int
 JVM_handle_linux_signal(int sig,
                         siginfo_t* info,
@@ -387,7 +370,8 @@ JVM_handle_linux_signal(int sig,
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
         CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
         if (nm != NULL && nm->has_unsafe_access()) {
-          stub = handle_unsafe_access(thread, pc);
+          address next_pc = pc + NativeCall::instruction_size;
+          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
         }
       }
       else
@@ -408,7 +392,8 @@ JVM_handle_linux_signal(int sig,
   } else if (thread->thread_state() == _thread_in_vm &&
              sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
              thread->doing_unsafe_access()) {
-    stub = handle_unsafe_access(thread, pc);
+    address next_pc = pc + NativeCall::instruction_size;
+    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
   }
 
   // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
@@ -366,11 +366,9 @@ JVM_handle_linux_signal(int sig,
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
         CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
         if (nm != NULL && nm->has_unsafe_access()) {
-          // We don't really need a stub here! Just set the pending exeption and
-          // continue at the next instruction after the faulting read. Returning
-          // garbage from this read is ok.
-          thread->set_pending_unsafe_access_error();
-          os::Linux::ucontext_set_pc(uc, pc + 4);
+          address next_pc = pc + 4;
+          next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
+          os::Linux::ucontext_set_pc(uc, next_pc);
           return true;
         }
       }
@@ -385,10 +383,8 @@ JVM_handle_linux_signal(int sig,
     }
     else if (thread->thread_state() == _thread_in_vm &&
              sig == SIGBUS && thread->doing_unsafe_access()) {
-      // We don't really need a stub here! Just set the pending exeption and
-      // continue at the next instruction after the faulting read. Returning
-      // garbage from this read is ok.
-      thread->set_pending_unsafe_access_error();
+      address next_pc = pc + 4;
+      next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
       os::Linux::ucontext_set_pc(uc, pc + 4);
       return true;
     }
@@ -433,14 +433,14 @@ inline static bool checkPollingPage(address pc, address fault, address* stub) {
   return false;
 }
 
-inline static bool checkByteBuffer(address pc, address* stub) {
+inline static bool checkByteBuffer(address pc, address npc, address* stub) {
   // BugId 4454115: A read from a MappedByteBuffer can fault
   // here if the underlying file has been truncated.
   // Do not crash the VM in such a case.
   CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
   CompiledMethod* nm = cb->as_compiled_method_or_null();
   if (nm != NULL && nm->has_unsafe_access()) {
-    *stub = StubRoutines::handler_for_unsafe_access();
+    *stub = SharedRuntime::handle_unsafe_access(thread, npc);
     return true;
   }
   return false;
@@ -613,7 +613,7 @@ JVM_handle_linux_signal(int sig,
     if (sig == SIGBUS &&
         thread->thread_state() == _thread_in_vm &&
         thread->doing_unsafe_access()) {
-      stub = StubRoutines::handler_for_unsafe_access();
+      stub = SharedRuntime::handle_unsafe_access(thread, npc);
     }
 
     if (thread->thread_state() == _thread_in_Java) {
@@ -625,7 +625,7 @@ JVM_handle_linux_signal(int sig,
         break;
       }
 
-      if ((sig == SIGBUS) && checkByteBuffer(pc, &stub)) {
+      if ((sig == SIGBUS) && checkByteBuffer(pc, npc, &stub)) {
         break;
       }
 
@@ -420,7 +420,8 @@ JVM_handle_linux_signal(int sig,
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
         CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
         if (nm != NULL && nm->has_unsafe_access()) {
-          stub = StubRoutines::handler_for_unsafe_access();
+          address next_pc = Assembler::locate_next_instruction(pc);
+          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
         }
       }
       else
@@ -469,7 +470,8 @@ JVM_handle_linux_signal(int sig,
   } else if (thread->thread_state() == _thread_in_vm &&
              sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
             thread->doing_unsafe_access()) {
-    stub = StubRoutines::handler_for_unsafe_access();
+    address next_pc = Assembler::locate_next_instruction(pc);
+    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
   }
 
   // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
@@ -441,7 +441,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
 
     if (thread->thread_state() == _thread_in_vm) {
       if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
-        stub = StubRoutines::handler_for_unsafe_access();
+        stub = SharedRuntime::handle_unsafe_access(thread, npc);
       }
     }
 
@@ -480,7 +480,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
       CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
       CompiledMethod* nm = cb->as_compiled_method_or_null();
       if (nm != NULL && nm->has_unsafe_access()) {
-        stub = StubRoutines::handler_for_unsafe_access();
+        stub = SharedRuntime::handle_unsafe_access(thread, npc);
       }
     }
 
@@ -503,7 +503,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
 
   if (thread->thread_state() == _thread_in_vm) {
     if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
-      stub = StubRoutines::handler_for_unsafe_access();
+      address next_pc = Assembler::locate_next_instruction(pc);
+      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
     }
   }
 
@@ -520,7 +521,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
       if (cb != NULL) {
         CompiledMethod* nm = cb->as_compiled_method_or_null();
         if (nm != NULL && nm->has_unsafe_access()) {
-          stub = StubRoutines::handler_for_unsafe_access();
+          address next_pc = Assembler::locate_next_instruction(pc);
+          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
         }
       }
     }
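Note: every port-specific handler_for_unsafe_access stub and local handle_unsafe_access() helper removed above is replaced by a single call into shared runtime code. The body of SharedRuntime::handle_unsafe_access() is not part of the hunks shown here; judging from the per-platform code being deleted, a minimal sketch of what it presumably does is:

// Sketch only, an assumed shape; the real implementation is not shown in this diff.
address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
  // The faulting Unsafe load/store is simply skipped; returning garbage is fine.
  // Arm an asynchronous UnknownError on the Java thread ...
  thread->set_pending_unsafe_access_error();
  // ... and resume at the instruction following the fault.
  return next_pc;
}

Each signal handler now computes next_pc in a platform-specific way (pc + 4 on PPC, pc + NativeCall::instruction_size on AArch64, Assembler::locate_next_instruction(pc) on x86, the saved npc on SPARC) and either writes it back into the ucontext or returns it as the continuation stub.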
@@ -198,20 +198,6 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
   case vmIntrinsics::_putLongVolatile:
   case vmIntrinsics::_putFloatVolatile:
   case vmIntrinsics::_putDoubleVolatile:
-  case vmIntrinsics::_getByte_raw:
-  case vmIntrinsics::_getShort_raw:
-  case vmIntrinsics::_getChar_raw:
-  case vmIntrinsics::_getInt_raw:
-  case vmIntrinsics::_getLong_raw:
-  case vmIntrinsics::_getFloat_raw:
-  case vmIntrinsics::_getDouble_raw:
-  case vmIntrinsics::_putByte_raw:
-  case vmIntrinsics::_putShort_raw:
-  case vmIntrinsics::_putChar_raw:
-  case vmIntrinsics::_putInt_raw:
-  case vmIntrinsics::_putLong_raw:
-  case vmIntrinsics::_putFloat_raw:
-  case vmIntrinsics::_putDouble_raw:
   case vmIntrinsics::_getShortUnaligned:
   case vmIntrinsics::_getCharUnaligned:
   case vmIntrinsics::_getIntUnaligned:
@@ -3465,20 +3465,6 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee) {
   case vmIntrinsics::_putLongVolatile : append_unsafe_put_obj(callee, T_LONG, true); return;
   case vmIntrinsics::_putFloatVolatile : append_unsafe_put_obj(callee, T_FLOAT, true); return;
   case vmIntrinsics::_putDoubleVolatile : append_unsafe_put_obj(callee, T_DOUBLE, true); return;
-  case vmIntrinsics::_getByte_raw : append_unsafe_get_raw(callee, T_BYTE ); return;
-  case vmIntrinsics::_getShort_raw : append_unsafe_get_raw(callee, T_SHORT ); return;
-  case vmIntrinsics::_getChar_raw : append_unsafe_get_raw(callee, T_CHAR ); return;
-  case vmIntrinsics::_getInt_raw : append_unsafe_get_raw(callee, T_INT ); return;
-  case vmIntrinsics::_getLong_raw : append_unsafe_get_raw(callee, T_LONG ); return;
-  case vmIntrinsics::_getFloat_raw : append_unsafe_get_raw(callee, T_FLOAT ); return;
-  case vmIntrinsics::_getDouble_raw : append_unsafe_get_raw(callee, T_DOUBLE); return;
-  case vmIntrinsics::_putByte_raw : append_unsafe_put_raw(callee, T_BYTE ); return;
-  case vmIntrinsics::_putShort_raw : append_unsafe_put_raw(callee, T_SHORT ); return;
-  case vmIntrinsics::_putChar_raw : append_unsafe_put_raw(callee, T_CHAR ); return;
-  case vmIntrinsics::_putInt_raw : append_unsafe_put_raw(callee, T_INT ); return;
-  case vmIntrinsics::_putLong_raw : append_unsafe_put_raw(callee, T_LONG ); return;
-  case vmIntrinsics::_putFloat_raw : append_unsafe_put_raw(callee, T_FLOAT ); return;
-  case vmIntrinsics::_putDouble_raw : append_unsafe_put_raw(callee, T_DOUBLE); return;
   case vmIntrinsics::_compareAndSwapLong:
   case vmIntrinsics::_compareAndSwapInt:
   case vmIntrinsics::_compareAndSwapObject: append_unsafe_CAS(callee); return;
@@ -580,20 +580,6 @@ bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method) {
   case vmIntrinsics::_putLongOpaque:
   case vmIntrinsics::_putFloatOpaque:
   case vmIntrinsics::_putDoubleOpaque:
-  case vmIntrinsics::_getByte_raw:
-  case vmIntrinsics::_getShort_raw:
-  case vmIntrinsics::_getChar_raw:
-  case vmIntrinsics::_getInt_raw:
-  case vmIntrinsics::_getLong_raw:
-  case vmIntrinsics::_getFloat_raw:
-  case vmIntrinsics::_getDouble_raw:
-  case vmIntrinsics::_putByte_raw:
-  case vmIntrinsics::_putShort_raw:
-  case vmIntrinsics::_putChar_raw:
-  case vmIntrinsics::_putInt_raw:
-  case vmIntrinsics::_putLong_raw:
-  case vmIntrinsics::_putFloat_raw:
-  case vmIntrinsics::_putDouble_raw:
   case vmIntrinsics::_getAndAddInt:
   case vmIntrinsics::_getAndAddLong:
   case vmIntrinsics::_getAndSetInt:
@@ -634,8 +620,6 @@ bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method) {
   case vmIntrinsics::_putIntUnaligned:
   case vmIntrinsics::_putLongUnaligned:
   case vmIntrinsics::_allocateInstance:
-  case vmIntrinsics::_getAddress_raw:
-  case vmIntrinsics::_putAddress_raw:
     if (!InlineUnsafeOps || !UseUnalignedAccesses) return true;
     break;
   case vmIntrinsics::_hashCode:
@@ -1232,43 +1232,6 @@
   do_intrinsic(_putIntUnaligned, jdk_internal_misc_Unsafe, putIntUnaligned_name, putInt_signature, F_R) \
   do_intrinsic(_putLongUnaligned, jdk_internal_misc_Unsafe, putLongUnaligned_name, putLong_signature, F_R) \
   \
-  /* %%% these are redundant except perhaps for getAddress, but Unsafe has native methods for them */ \
-  do_signature(getByte_raw_signature, "(J)B") \
-  do_signature(putByte_raw_signature, "(JB)V") \
-  do_signature(getShort_raw_signature, "(J)S") \
-  do_signature(putShort_raw_signature, "(JS)V") \
-  do_signature(getChar_raw_signature, "(J)C") \
-  do_signature(putChar_raw_signature, "(JC)V") \
-  do_signature(putInt_raw_signature, "(JI)V") \
-  do_alias(getLong_raw_signature, /*(J)J*/ long_long_signature) \
-  do_alias(putLong_raw_signature, /*(JJ)V*/ long_long_void_signature) \
-  do_signature(getFloat_raw_signature, "(J)F") \
-  do_signature(putFloat_raw_signature, "(JF)V") \
-  do_alias(getDouble_raw_signature, /*(J)D*/ long_double_signature) \
-  do_signature(putDouble_raw_signature, "(JD)V") \
-  do_alias(getAddress_raw_signature, /*(J)J*/ long_long_signature) \
-  do_alias(putAddress_raw_signature, /*(JJ)V*/ long_long_void_signature) \
-  \
-  do_name( getAddress_name, "getAddress") \
-  do_name( putAddress_name, "putAddress") \
-  \
-  do_intrinsic(_getByte_raw, jdk_internal_misc_Unsafe, getByte_name, getByte_raw_signature, F_R) \
-  do_intrinsic(_getShort_raw, jdk_internal_misc_Unsafe, getShort_name, getShort_raw_signature, F_R) \
-  do_intrinsic(_getChar_raw, jdk_internal_misc_Unsafe, getChar_name, getChar_raw_signature, F_R) \
-  do_intrinsic(_getInt_raw, jdk_internal_misc_Unsafe, getInt_name, long_int_signature, F_R) \
-  do_intrinsic(_getLong_raw, jdk_internal_misc_Unsafe, getLong_name, getLong_raw_signature, F_R) \
-  do_intrinsic(_getFloat_raw, jdk_internal_misc_Unsafe, getFloat_name, getFloat_raw_signature, F_R) \
-  do_intrinsic(_getDouble_raw, jdk_internal_misc_Unsafe, getDouble_name, getDouble_raw_signature, F_R) \
-  do_intrinsic(_getAddress_raw, jdk_internal_misc_Unsafe, getAddress_name, getAddress_raw_signature, F_R) \
-  do_intrinsic(_putByte_raw, jdk_internal_misc_Unsafe, putByte_name, putByte_raw_signature, F_R) \
-  do_intrinsic(_putShort_raw, jdk_internal_misc_Unsafe, putShort_name, putShort_raw_signature, F_R) \
-  do_intrinsic(_putChar_raw, jdk_internal_misc_Unsafe, putChar_name, putChar_raw_signature, F_R) \
-  do_intrinsic(_putInt_raw, jdk_internal_misc_Unsafe, putInt_name, putInt_raw_signature, F_R) \
-  do_intrinsic(_putLong_raw, jdk_internal_misc_Unsafe, putLong_name, putLong_raw_signature, F_R) \
-  do_intrinsic(_putFloat_raw, jdk_internal_misc_Unsafe, putFloat_name, putFloat_raw_signature, F_R) \
-  do_intrinsic(_putDouble_raw, jdk_internal_misc_Unsafe, putDouble_name, putDouble_raw_signature, F_R) \
-  do_intrinsic(_putAddress_raw, jdk_internal_misc_Unsafe, putAddress_name, putAddress_raw_signature, F_R) \
-  \
   do_signature(compareAndSwapObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \
   do_signature(compareAndExchangeObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") \
   do_signature(compareAndSwapLong_signature, "(Ljava/lang/Object;JJJ)Z") \
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
 #include "precompiled.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/gcTaskThread.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcId.hpp"
+#include "gc/shared/workerManager.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
@@ -34,6 +34,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/os.hpp"
 
 //
 // GCTask
@@ -372,10 +373,28 @@ SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {
 GCTaskManager::GCTaskManager(uint workers) :
   _workers(workers),
   _active_workers(0),
-  _idle_workers(0) {
+  _idle_workers(0),
+  _created_workers(0) {
   initialize();
 }
 
+GCTaskThread* GCTaskManager::install_worker(uint t) {
+  GCTaskThread* new_worker = GCTaskThread::create(this, t, _processor_assignment[t]);
+  set_thread(t, new_worker);
+  return new_worker;
+}
+
+void GCTaskManager::add_workers(bool initializing) {
+  os::ThreadType worker_type = os::pgc_thread;
+  _created_workers = WorkerManager::add_workers(this,
+                                                _active_workers,
+                                                (uint) _workers,
+                                                _created_workers,
+                                                worker_type,
+                                                initializing);
+  _active_workers = MIN2(_created_workers, _active_workers);
+}
+
 void GCTaskManager::initialize() {
   if (TraceGCTaskManager) {
     tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
@@ -394,28 +413,30 @@ void GCTaskManager::initialize() {
   // Set up worker threads.
   //   Distribute the workers among the available processors,
   //   unless we were told not to, or if the os doesn't want to.
-  uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
+  _processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
   if (!BindGCTaskThreadsToCPUs ||
-      !os::distribute_processes(workers(), processor_assignment)) {
+      !os::distribute_processes(workers(), _processor_assignment)) {
     for (uint a = 0; a < workers(); a += 1) {
-      processor_assignment[a] = sentinel_worker();
+      _processor_assignment[a] = sentinel_worker();
     }
   }
 
   _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
-  for (uint t = 0; t < workers(); t += 1) {
-    set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
+  _active_workers = ParallelGCThreads;
+  if (UseDynamicNumberOfGCThreads && !FLAG_IS_CMDLINE(ParallelGCThreads)) {
+    _active_workers = 1U;
   }
+
   Log(gc, task, thread) log;
   if (log.is_trace()) {
     ResourceMark rm;
     outputStream* out = log.trace_stream();
     out->print("GCTaskManager::initialize: distribution:");
     for (uint t = 0; t < workers(); t += 1) {
-      out->print(" %u", processor_assignment[t]);
+      out->print(" %u", _processor_assignment[t]);
     }
     out->cr();
   }
-  FREE_C_HEAP_ARRAY(uint, processor_assignment);
   }
   reset_busy_workers();
   set_unblocked();
@@ -426,9 +447,8 @@ void GCTaskManager::initialize() {
   reset_completed_tasks();
   reset_barriers();
   reset_emptied_queue();
-  for (uint s = 0; s < workers(); s += 1) {
-    thread(s)->start();
-  }
+  add_workers(true);
 }
 
 GCTaskManager::~GCTaskManager() {
@@ -437,13 +457,17 @@ GCTaskManager::~GCTaskManager() {
   NoopGCTask::destroy(_noop_task);
   _noop_task = NULL;
   if (_thread != NULL) {
-    for (uint i = 0; i < workers(); i += 1) {
+    for (uint i = 0; i < created_workers(); i += 1) {
       GCTaskThread::destroy(thread(i));
       set_thread(i, NULL);
     }
     FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
     _thread = NULL;
   }
+  if (_processor_assignment != NULL) {
+    FREE_C_HEAP_ARRAY(uint, _processor_assignment);
+    _processor_assignment = NULL;
+  }
   if (_resource_flag != NULL) {
     FREE_C_HEAP_ARRAY(bool, _resource_flag);
     _resource_flag = NULL;
@@ -470,6 +494,9 @@ void GCTaskManager::set_active_gang() {
          "all_workers_active() is incorrect: "
          "active %d ParallelGCThreads %u", active_workers(),
          ParallelGCThreads);
+  _active_workers = MIN2(_active_workers, _workers);
+  // "add_workers" does not guarantee any additional workers
+  add_workers(false);
   log_trace(gc, task)("GCTaskManager::set_active_gang(): "
                       "all_workers_active() %d workers %d "
                       "active %d ParallelGCThreads %u",
@@ -499,7 +526,7 @@ void GCTaskManager::task_idle_workers() {
       // is starting). Try later to release enough idle_workers
       // to allow the desired number of active_workers.
       more_inactive_workers =
-        workers() - active_workers() - idle_workers();
+        created_workers() - active_workers() - idle_workers();
       if (more_inactive_workers < 0) {
         int reduced_active_workers = active_workers() + more_inactive_workers;
         set_active_workers(reduced_active_workers);
@@ -507,7 +534,7 @@ void GCTaskManager::task_idle_workers() {
     }
     log_trace(gc, task)("JT: %d workers %d active %d idle %d more %d",
                         Threads::number_of_non_daemon_threads(),
-                        workers(),
+                        created_workers(),
                         active_workers(),
                         idle_workers(),
                         more_inactive_workers);
@@ -517,7 +544,7 @@ void GCTaskManager::task_idle_workers() {
       q->enqueue(IdleGCTask::create_on_c_heap());
       increment_idle_workers();
     }
-    assert(workers() == active_workers() + idle_workers(),
+    assert(created_workers() == active_workers() + idle_workers(),
            "total workers should equal active + inactive");
     add_list(q);
     // GCTaskQueue* q was created in a ResourceArea so a
@@ -539,14 +566,15 @@ void GCTaskManager::print_task_time_stamps() {
   if (!log_is_enabled(Debug, gc, task, time)) {
     return;
   }
-  for(uint i=0; i<ParallelGCThreads; i++) {
+  uint num_thr = created_workers();
+  for(uint i=0; i < num_thr; i++) {
     GCTaskThread* t = thread(i);
     t->print_task_time_stamps();
   }
 }
 
 void GCTaskManager::print_threads_on(outputStream* st) {
-  uint num_thr = workers();
+  uint num_thr = created_workers();
   for (uint i = 0; i < num_thr; i++) {
     thread(i)->print_on(st);
     st->cr();
@@ -555,19 +583,20 @@ void GCTaskManager::print_threads_on(outputStream* st) {
 
 void GCTaskManager::threads_do(ThreadClosure* tc) {
   assert(tc != NULL, "Null ThreadClosure");
-  uint num_thr = workers();
+  uint num_thr = created_workers();
   for (uint i = 0; i < num_thr; i++) {
     tc->do_thread(thread(i));
   }
 }
 
 GCTaskThread* GCTaskManager::thread(uint which) {
-  assert(which < workers(), "index out of bounds");
+  assert(which < created_workers(), "index out of bounds");
   assert(_thread[which] != NULL, "shouldn't have null thread");
   return _thread[which];
 }
 
 void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
+  // "_created_workers" may not have been updated yet so use workers()
   assert(which < workers(), "index out of bounds");
   assert(value != NULL, "shouldn't have null thread");
   _thread[which] = value;
@@ -728,7 +757,7 @@ uint GCTaskManager::decrement_busy_workers() {
 
 void GCTaskManager::release_all_resources() {
   // If you want this to be done atomically, do it in a WaitForBarrierGCTask.
-  for (uint i = 0; i < workers(); i += 1) {
+  for (uint i = 0; i < created_workers(); i += 1) {
     set_resource_flag(i, true);
   }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -370,6 +370,7 @@ private:
   Monitor* _monitor;                 // Notification of changes.
   SynchronizedGCTaskQueue* _queue;   // Queue of tasks.
   GCTaskThread** _thread;            // Array of worker threads.
+  uint _created_workers;             // Number of workers created.
   uint _active_workers;              // Number of active workers.
   uint _busy_workers;                // Number of busy workers.
   uint _blocking_worker;             // The worker that's blocking.
@@ -381,6 +382,8 @@ private:
   NoopGCTask* _noop_task;            // The NoopGCTask instance.
   WaitHelper _wait_helper;           // Used by inactive worker
   volatile uint _idle_workers;       // Number of idled workers
+  uint* _processor_assignment;       // Worker to cpu mappings. May
+                                     //   be used lazily
  public:
   // Factory create and destroy methods.
   static GCTaskManager* create(uint workers) {
@@ -546,6 +549,13 @@ protected:
   uint active_workers() const {
     return _active_workers;
   }
+  uint created_workers() const {
+    return _created_workers;
+  }
+  // Create a GC worker and install into GCTaskManager
+  GCTaskThread* install_worker(uint worker_id);
+  // Add GC workers as needed.
+  void add_workers(bool initializing);
 };
 
 //
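Note: the GCTaskManager changes above move ParallelGC worker creation from eager (all threads built and started in initialize()) to lazy (threads added on demand through add_workers()), with _created_workers tracking how many actually exist. A condensed view of the resulting lifecycle, pieced together from the hunks above and abbreviated for illustration only:

// Condensed illustration; bodies abbreviated relative to the real code in this change.
GCTaskManager::GCTaskManager(uint workers)
  : _workers(workers), _active_workers(0), _idle_workers(0), _created_workers(0) {
  initialize();                    // sets _active_workers, then calls add_workers(true)
}

void GCTaskManager::set_active_gang() {
  // ... existing bookkeeping ...
  _active_workers = MIN2(_active_workers, _workers);
  add_workers(false);              // create any workers that do not exist yet
}

GCTaskManager::~GCTaskManager() {
  for (uint i = 0; i < created_workers(); i += 1) {  // bounded by threads actually created
    GCTaskThread::destroy(thread(i));
    set_thread(i, NULL);
  }
}

All other loops over the thread array (threads_do, print_threads_on, task_idle_workers, release_all_resources) switch from workers() to created_workers() for the same reason.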
@@ -44,9 +44,6 @@ GCTaskThread::GCTaskThread(GCTaskManager* manager,
   _time_stamps(NULL),
   _time_stamp_index(0)
 {
-  if (!os::create_thread(this, os::pgc_thread))
-    vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GC thread. Out of system resources.");
-
   set_id(which);
   set_name("ParGC Thread#%d", which);
 }
@@ -57,10 +54,6 @@ GCTaskThread::~GCTaskThread() {
   }
 }
 
-void GCTaskThread::start() {
-  os::start_thread(this);
-}
-
 GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
   guarantee(index < GCTaskTimeStampEntries, "increase GCTaskTimeStampEntries");
   if (_time_stamps == NULL) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,13 +48,13 @@ private:
 
   bool _is_working;                  // True if participating in GC tasks
 
- public:
   // Factory create and destroy methods.
   static GCTaskThread* create(GCTaskManager* manager,
                               uint which,
                               uint processor_id) {
     return new GCTaskThread(manager, which, processor_id);
   }
+ public:
   static void destroy(GCTaskThread* manager) {
     if (manager != NULL) {
       delete manager;
@@ -65,8 +65,6 @@ private:
     return true;
   }
   virtual void run();
-  // Methods.
-  void start();
 
   void print_task_time_stamps();
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@ -130,10 +130,7 @@ uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
|
|||||||
uintx max_active_workers =
|
uintx max_active_workers =
|
||||||
MAX2(active_workers_by_JT, active_workers_by_heap_size);
|
MAX2(active_workers_by_JT, active_workers_by_heap_size);
|
||||||
|
|
||||||
// Limit the number of workers to the the number created,
|
new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
|
||||||
// (workers()).
|
|
||||||
new_active_workers = MIN2(max_active_workers,
|
|
||||||
(uintx) total_workers);
|
|
||||||
|
|
||||||
// Increase GC workers instantly but decrease them more
|
// Increase GC workers instantly but decrease them more
|
||||||
// slowly.
|
// slowly.
|
||||||
@ -167,7 +164,7 @@ uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
|
|||||||
"Jiggled active workers too much");
|
"Jiggled active workers too much");
|
||||||
}
|
}
|
||||||
|
|
||||||
log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
|
log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
|
||||||
"active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
|
"active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
|
||||||
"prev_active_workers: " UINTX_FORMAT "\n"
|
"prev_active_workers: " UINTX_FORMAT "\n"
|
||||||
" active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
|
" active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
hotspot/src/share/vm/gc/shared/workerManager.hpp (new file, 77 lines)
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
+#define SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
+
+#include "gc/shared/adaptiveSizePolicy.hpp"
+
+class WorkerManager : public AllStatic {
+ public:
+  // Create additional workers as needed.
+  //   active_workers - number of workers being requested for an upcoming
+  //     parallel task.
+  //   total_workers - total number of workers.  This is the maximum
+  //     number possible.
+  //   created_workers - number of workers already created.  This maybe
+  //     less than, equal to, or greater than active workers.  If greater than
+  //     or equal to active_workers, nothing is done.
+  //   worker_type - type of thread.
+  //   initializing - true if this is called to get the initial number of
+  //     GC workers.
+  // If initializing is true, do a vm exit if the workers cannot be created.
+  // The initializing = true case is for JVM start up and failing to
+  // create all the worker at start should considered a problem so exit.
+  // If initializing = false, there are already some number of worker
+  // threads and a failure would not be optimal but should not be fatal.
+  template <class WorkerType>
+  static uint add_workers (WorkerType* holder,
+                           uint active_workers,
+                           uint total_workers,
+                           uint created_workers,
+                           os::ThreadType worker_type,
+                           bool initializing) {
+    uint start = created_workers;
+    uint end = MIN2(active_workers, total_workers);
+    for (uint worker_id = start; worker_id < end; worker_id += 1) {
+      WorkerThread* new_worker = holder->install_worker(worker_id);
+      assert(new_worker != NULL, "Failed to allocate GangWorker");
+      if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
+        if(initializing) {
+          vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
+                                "Cannot create worker GC thread. Out of system resources.");
+        }
+      }
+      created_workers++;
+      os::start_thread(new_worker);
+    }
+
+    log_trace(gc, task)("AdaptiveSizePolicy::add_workers() : "
+                        "active_workers: %u created_workers: %u",
+                        active_workers, created_workers);
+
+    return created_workers;
+  }
+};
+#endif // SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
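The add_workers template above asks its holder for nothing more than install_worker(worker_id) and returns the updated created count. A standalone sketch of that contract with hypothetical names (plain C++ standing in for os::create_thread/os::start_thread), showing that only the workers in [created_workers, MIN2(active_workers, total_workers)) are created on each call:

#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

struct Worker {
  unsigned id;
  explicit Worker(unsigned i) : id(i) {}
};

// The "holder": it only has to allocate and register a worker for an id.
struct Gang {
  std::vector<std::unique_ptr<Worker>> workers;
  Worker* install_worker(unsigned id) {
    workers.push_back(std::make_unique<Worker>(id));
    return workers.back().get();
  }
};

// Mirrors the shape of WorkerManager::add_workers: create workers lazily and
// return the new created count so the caller can record it.
template <class Holder>
unsigned add_workers(Holder* holder, unsigned active, unsigned total, unsigned created) {
  unsigned end = std::min(active, total);
  for (unsigned id = created; id < end; id++) {
    Worker* w = holder->install_worker(id);  // real code then creates/starts an OS thread
    if (w == nullptr) return created;        // non-fatal failure: report what exists
    created++;
  }
  std::printf("requested %u, created so far %u\n", active, created);
  return created;
}

int main() {
  Gang gang;
  unsigned created = add_workers(&gang, 2u, 8u, 0u);   // creates workers 0 and 1
  created = add_workers(&gang, 5u, 8u, created);        // a later request adds only 2..4
  created = add_workers(&gang, 3u, 8u, created);        // nothing to do: 5 already exist
  return created == 5 ? 0 : 1;
}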
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/workgroup.hpp"
+#include "gc/shared/workerManager.hpp"
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/atomic.inline.hpp"
@@ -35,37 +36,45 @@
 // Definitions of WorkGang methods.
 
 // The current implementation will exit if the allocation
-// of any worker fails. Still, return a boolean so that
-// a future implementation can possibly do a partial
-// initialization of the workers and report such to the
-// caller.
-bool AbstractWorkGang::initialize_workers() {
+// of any worker fails.
+void AbstractWorkGang::initialize_workers() {
   log_develop_trace(gc, workgang)("Constructing work gang %s with %u threads", name(), total_workers());
   _workers = NEW_C_HEAP_ARRAY(AbstractGangWorker*, total_workers(), mtInternal);
   if (_workers == NULL) {
     vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GangWorker array.");
-    return false;
   }
 
+  _active_workers = ParallelGCThreads;
+  if (UseDynamicNumberOfGCThreads && !FLAG_IS_CMDLINE(ParallelGCThreads)) {
+    _active_workers = 1U;
+  }
+
+  add_workers(true);
+}
+
+AbstractGangWorker* AbstractWorkGang::install_worker(uint worker_id) {
+  AbstractGangWorker* new_worker = allocate_worker(worker_id);
+  set_thread(worker_id, new_worker);
+  return new_worker;
+}
+
+void AbstractWorkGang::add_workers(bool initializing) {
+
   os::ThreadType worker_type;
   if (are_ConcurrentGC_threads()) {
     worker_type = os::cgc_thread;
   } else {
     worker_type = os::pgc_thread;
   }
-  for (uint worker = 0; worker < total_workers(); worker += 1) {
-    AbstractGangWorker* new_worker = allocate_worker(worker);
-    assert(new_worker != NULL, "Failed to allocate GangWorker");
-    _workers[worker] = new_worker;
-    if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
-      vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
-                            "Cannot create worker GC thread. Out of system resources.");
-      return false;
-    }
-    if (!DisableStartThread) {
-      os::start_thread(new_worker);
-    }
-  }
-  return true;
+  _created_workers = WorkerManager::add_workers(this,
+                                                _active_workers,
+                                                _total_workers,
+                                                _created_workers,
+                                                worker_type,
+                                                initializing);
+  _active_workers = MIN2(_created_workers, _active_workers);
 }
 
 AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
@@ -79,7 +88,7 @@ AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
 }
 
 void AbstractWorkGang::print_worker_threads_on(outputStream* st) const {
-  uint workers = total_workers();
+  uint workers = created_workers();
   for (uint i = 0; i < workers; i++) {
     worker(i)->print_on(st);
     st->cr();
@@ -88,7 +97,7 @@ void AbstractWorkGang::print_worker_threads_on(outputStream* st) const {
 
 void AbstractWorkGang::threads_do(ThreadClosure* tc) const {
   assert(tc != NULL, "Null ThreadClosure");
-  uint workers = total_workers();
+  uint workers = created_workers();
   for (uint i = 0; i < workers; i++) {
     tc->do_thread(worker(i));
   }
@@ -112,6 +112,8 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
   uint _total_workers;
   // The currently active workers in this gang.
   uint _active_workers;
+  // The count of created workers in the gang.
+  uint _created_workers;
   // Printing support.
   const char* _name;
 
@@ -120,23 +122,32 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
   const bool _are_GC_task_threads;
   const bool _are_ConcurrentGC_threads;
 
+  void set_thread(uint worker_id, AbstractGangWorker* worker) {
+    _workers[worker_id] = worker;
+  }
+
  public:
   AbstractWorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads) :
       _name(name),
       _total_workers(workers),
       _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers),
+      _created_workers(0),
       _are_GC_task_threads(are_GC_task_threads),
       _are_ConcurrentGC_threads(are_ConcurrentGC_threads)
   { }
 
   // Initialize workers in the gang.  Return true if initialization succeeded.
-  bool initialize_workers();
+  void initialize_workers();
 
   bool are_GC_task_threads() const { return _are_GC_task_threads; }
   bool are_ConcurrentGC_threads() const { return _are_ConcurrentGC_threads; }
 
   uint total_workers() const { return _total_workers; }
 
+  uint created_workers() const {
+    return _created_workers;
+  }
+
   virtual uint active_workers() const {
     assert(_active_workers <= _total_workers,
            "_active_workers: %u > _total_workers: %u", _active_workers, _total_workers);
@@ -144,22 +155,29 @@ class AbstractWorkGang : public CHeapObj<mtInternal> {
            "Unless dynamic should use total workers");
     return _active_workers;
   }
 
   void set_active_workers(uint v) {
     assert(v <= _total_workers,
            "Trying to set more workers active than there are");
     _active_workers = MIN2(v, _total_workers);
+    add_workers(false /* exit_on_failure */);
     assert(v != 0, "Trying to set active workers to 0");
-    _active_workers = MAX2(1U, _active_workers);
     assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers,
            "Unless dynamic should use total workers");
     log_info(gc, task)("GC Workers: using %d out of %d", _active_workers, _total_workers);
   }
 
+  // Add GC workers as needed.
+  void add_workers(bool initializing);
+
   // Return the Ith worker.
   AbstractGangWorker* worker(uint i) const;
 
   void threads_do(ThreadClosure* tc) const;
 
+  // Create a GC worker and install it into the work gang.
+  virtual AbstractGangWorker* install_worker(uint which);
+
   // Debugging.
   const char* name() const { return _name; }
 
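set_active_workers() now calls add_workers() instead of assuming all total_workers threads already exist, so raising the active count creates only the missing workers and the active count is then clamped to what was actually created. A toy model of those three counters (hypothetical names, no real threads):

#include <algorithm>
#include <cstdio>

struct Gang {
  unsigned total_workers   = 8;
  unsigned active_workers  = 1;
  unsigned created_workers = 0;

  // Stands in for AbstractWorkGang::add_workers(): create only the missing workers.
  void add_workers() {
    unsigned end = std::min(active_workers, total_workers);
    while (created_workers < end) {
      created_workers++;            // real code installs a worker and starts an OS thread here
    }
    active_workers = std::min(created_workers, active_workers);
  }

  // Raising the active count triggers lazy creation instead of assuming all
  // total_workers threads were started up front.
  void set_active_workers(unsigned v) {
    active_workers = std::min(v, total_workers);
    add_workers();
    std::printf("using %u out of %u (created %u)\n",
                active_workers, total_workers, created_workers);
  }
};

int main() {
  Gang g;
  g.set_active_workers(3);   // creates workers 0..2
  g.set_active_workers(6);   // creates only workers 3..5
  g.set_active_workers(2);   // nothing new to create
  return 0;
}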
@@ -409,22 +409,6 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
   case vmIntrinsics::_putLong:
   case vmIntrinsics::_putFloat:
   case vmIntrinsics::_putDouble:
-  case vmIntrinsics::_getByte_raw:
-  case vmIntrinsics::_getShort_raw:
-  case vmIntrinsics::_getChar_raw:
-  case vmIntrinsics::_getInt_raw:
-  case vmIntrinsics::_getLong_raw:
-  case vmIntrinsics::_getFloat_raw:
-  case vmIntrinsics::_getDouble_raw:
-  case vmIntrinsics::_getAddress_raw:
-  case vmIntrinsics::_putByte_raw:
-  case vmIntrinsics::_putShort_raw:
-  case vmIntrinsics::_putChar_raw:
-  case vmIntrinsics::_putInt_raw:
-  case vmIntrinsics::_putLong_raw:
-  case vmIntrinsics::_putFloat_raw:
-  case vmIntrinsics::_putDouble_raw:
-  case vmIntrinsics::_putAddress_raw:
   case vmIntrinsics::_getObjectVolatile:
   case vmIntrinsics::_getBooleanVolatile:
   case vmIntrinsics::_getByteVolatile:
@@ -93,7 +93,7 @@ class LibraryCallKit : public GraphKit {
   Node* _result;      // the result node, if any
   int   _reexecute_sp; // the stack pointer when bytecode needs to be reexecuted
 
-  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
+  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type);
 
  public:
   LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
@@ -247,7 +247,7 @@ class LibraryCallKit : public GraphKit {
   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
 
   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
-  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
+  bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_newArray(bool uninitialized);
@@ -475,7 +475,6 @@ bool LibraryCallKit::try_to_inline(int predicate) {
   // Handle symbolic names for otherwise undistinguished boolean switches:
   const bool is_store = true;
   const bool is_compress = true;
-  const bool is_native_ptr = true;
   const bool is_static = true;
   const bool is_volatile = true;
 
@@ -555,113 +554,95 @@ bool LibraryCallKit::try_to_inline(int predicate) {
   case vmIntrinsics::_inflateStringC:
   case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
 
-  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Relaxed, false);
-  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Relaxed, false);
-  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Relaxed, false);
-  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Relaxed, false);
-  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Relaxed, false);
-  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Relaxed, false);
-  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Relaxed, false);
-  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Relaxed, false);
-  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Relaxed, false);
+  case vmIntrinsics::_getObject: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
+  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
+  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
+  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
+  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
+  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
+  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
+  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
+  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
 
-  case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Relaxed, false);
-  case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Relaxed, false);
-  case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Relaxed, false);
-  case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Relaxed, false);
-  case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Relaxed, false);
-  case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Relaxed, false);
-  case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Relaxed, false);
-  case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Relaxed, false);
-  case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Relaxed, false);
+  case vmIntrinsics::_putObject: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
+  case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
+  case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
+  case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
+  case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
+  case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
+  case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
+  case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
+  case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
 
-  case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, Relaxed, false);
-  case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, Relaxed, false);
-  case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, Relaxed, false);
-  case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, Relaxed, false);
-  case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, Relaxed, false);
-  case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, Relaxed, false);
-  case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, Relaxed, false);
-  case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, Relaxed, false);
+  case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
+  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
+  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
+  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
+  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
+  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
+  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
+  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
+  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);
 
-  case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, Relaxed, false);
-  case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, Relaxed, false);
-  case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, Relaxed, false);
-  case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, Relaxed, false);
-  case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, Relaxed, false);
-  case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, Relaxed, false);
-  case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, Relaxed, false);
-  case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, Relaxed, false);
+  case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access( is_store, T_OBJECT, Volatile, false);
+  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access( is_store, T_BOOLEAN, Volatile, false);
+  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access( is_store, T_BYTE, Volatile, false);
+  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access( is_store, T_SHORT, Volatile, false);
+  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access( is_store, T_CHAR, Volatile, false);
+  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access( is_store, T_INT, Volatile, false);
+  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access( is_store, T_LONG, Volatile, false);
+  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access( is_store, T_FLOAT, Volatile, false);
+  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access( is_store, T_DOUBLE, Volatile, false);
 
-  case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Volatile, false);
-  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Volatile, false);
-  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Volatile, false);
-  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Volatile, false);
-  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Volatile, false);
-  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Volatile, false);
-  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Volatile, false);
-  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Volatile, false);
-  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Volatile, false);
+  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, true);
+  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, true);
+  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_store, T_INT, Relaxed, true);
+  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_store, T_LONG, Relaxed, true);
 
-  case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Volatile, false);
-  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Volatile, false);
-  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Volatile, false);
-  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Volatile, false);
-  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Volatile, false);
-  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Volatile, false);
-  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Volatile, false);
-  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Volatile, false);
-  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Volatile, false);
+  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access( is_store, T_SHORT, Relaxed, true);
+  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access( is_store, T_CHAR, Relaxed, true);
+  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access( is_store, T_INT, Relaxed, true);
+  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access( is_store, T_LONG, Relaxed, true);
 
-  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Relaxed, true);
-  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Relaxed, true);
-  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Relaxed, true);
-  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Relaxed, true);
+  case vmIntrinsics::_getObjectAcquire: return inline_unsafe_access(!is_store, T_OBJECT, Acquire, false);
+  case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
+  case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_store, T_BYTE, Acquire, false);
+  case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_store, T_SHORT, Acquire, false);
+  case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_store, T_CHAR, Acquire, false);
+  case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_store, T_INT, Acquire, false);
+  case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_store, T_LONG, Acquire, false);
+  case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_store, T_FLOAT, Acquire, false);
+  case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_store, T_DOUBLE, Acquire, false);
 
-  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Relaxed, true);
-  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Relaxed, true);
-  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Relaxed, true);
-  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Relaxed, true);
+  case vmIntrinsics::_putObjectRelease: return inline_unsafe_access( is_store, T_OBJECT, Release, false);
+  case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access( is_store, T_BOOLEAN, Release, false);
+  case vmIntrinsics::_putByteRelease: return inline_unsafe_access( is_store, T_BYTE, Release, false);
+  case vmIntrinsics::_putShortRelease: return inline_unsafe_access( is_store, T_SHORT, Release, false);
+  case vmIntrinsics::_putCharRelease: return inline_unsafe_access( is_store, T_CHAR, Release, false);
+  case vmIntrinsics::_putIntRelease: return inline_unsafe_access( is_store, T_INT, Release, false);
+  case vmIntrinsics::_putLongRelease: return inline_unsafe_access( is_store, T_LONG, Release, false);
+  case vmIntrinsics::_putFloatRelease: return inline_unsafe_access( is_store, T_FLOAT, Release, false);
+  case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access( is_store, T_DOUBLE, Release, false);
 
-  case vmIntrinsics::_getObjectAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Acquire, false);
-  case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Acquire, false);
-  case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Acquire, false);
-  case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Acquire, false);
-  case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Acquire, false);
-  case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Acquire, false);
-  case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Acquire, false);
-  case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Acquire, false);
-  case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Acquire, false);
+  case vmIntrinsics::_getObjectOpaque: return inline_unsafe_access(!is_store, T_OBJECT, Opaque, false);
+  case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
+  case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_store, T_BYTE, Opaque, false);
+  case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_store, T_SHORT, Opaque, false);
+  case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_store, T_CHAR, Opaque, false);
+  case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_store, T_INT, Opaque, false);
+  case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_store, T_LONG, Opaque, false);
+  case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_store, T_FLOAT, Opaque, false);
+  case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_store, T_DOUBLE, Opaque, false);
 
-  case vmIntrinsics::_putObjectRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Release, false);
-  case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Release, false);
-  case vmIntrinsics::_putByteRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Release, false);
-  case vmIntrinsics::_putShortRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Release, false);
-  case vmIntrinsics::_putCharRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Release, false);
-  case vmIntrinsics::_putIntRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Release, false);
-  case vmIntrinsics::_putLongRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Release, false);
-  case vmIntrinsics::_putFloatRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Release, false);
-  case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Release, false);
-
-  case vmIntrinsics::_getObjectOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, Opaque, false);
-  case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Opaque, false);
-  case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, Opaque, false);
-  case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, Opaque, false);
-  case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, Opaque, false);
-  case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, Opaque, false);
-  case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, Opaque, false);
-  case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, Opaque, false);
-  case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, Opaque, false);
-
-  case vmIntrinsics::_putObjectOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, Opaque, false);
-  case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, Opaque, false);
-  case vmIntrinsics::_putByteOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, Opaque, false);
-  case vmIntrinsics::_putShortOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, Opaque, false);
-  case vmIntrinsics::_putCharOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, Opaque, false);
-  case vmIntrinsics::_putIntOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, Opaque, false);
-  case vmIntrinsics::_putLongOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, Opaque, false);
-  case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, Opaque, false);
-  case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, Opaque, false);
+  case vmIntrinsics::_putObjectOpaque: return inline_unsafe_access( is_store, T_OBJECT, Opaque, false);
+  case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access( is_store, T_BOOLEAN, Opaque, false);
+  case vmIntrinsics::_putByteOpaque: return inline_unsafe_access( is_store, T_BYTE, Opaque, false);
+  case vmIntrinsics::_putShortOpaque: return inline_unsafe_access( is_store, T_SHORT, Opaque, false);
+  case vmIntrinsics::_putCharOpaque: return inline_unsafe_access( is_store, T_CHAR, Opaque, false);
+  case vmIntrinsics::_putIntOpaque: return inline_unsafe_access( is_store, T_INT, Opaque, false);
+  case vmIntrinsics::_putLongOpaque: return inline_unsafe_access( is_store, T_LONG, Opaque, false);
+  case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access( is_store, T_FLOAT, Opaque, false);
+  case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access( is_store, T_DOUBLE, Opaque, false);
 
   case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
   case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap, Volatile);
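Every Unsafe get/put in the rewritten table dispatches through one inline_unsafe_access call parameterized by AccessKind (Relaxed, Opaque, Volatile, Acquire, Release). As a rough analogy only, and not HotSpot code, the load-side kinds line up with C++ memory orders roughly as follows; Release is the store-side counterpart of Acquire:

#include <atomic>

enum class AccessKind { Relaxed, Opaque, Volatile, Acquire, Release };

// Loose mapping for loads; Opaque is "at least a relaxed atomic access",
// and Release applies to stores only, so it has no load order of its own.
std::memory_order load_order(AccessKind k) {
  switch (k) {
    case AccessKind::Relaxed:  return std::memory_order_relaxed;  // plain access
    case AccessKind::Opaque:   return std::memory_order_relaxed;  // atomic, but unordered
    case AccessKind::Acquire:  return std::memory_order_acquire;
    case AccessKind::Volatile: return std::memory_order_seq_cst;
    default:                   return std::memory_order_seq_cst;  // Release: store-side only
  }
}

int main() {
  std::atomic<int> field{41};
  int v = field.load(load_order(AccessKind::Acquire)) + 1;
  field.store(v, std::memory_order_release);   // the Release counterpart on the store side
  return 0;
}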
@@ -2196,8 +2177,6 @@ bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
 
 //----------------------------inline_unsafe_access----------------------------
 
-const static BasicType T_ADDRESS_HOLDER = T_LONG;
-
 // Helper that guards and inserts a pre-barrier.
 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
                                         Node* pre_val, bool need_mem_bar) {
@@ -2298,13 +2277,12 @@ void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
 }
 
 
-const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
+const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
   // Attempt to infer a sharper value type from the offset and base type.
   ciKlass* sharpened_klass = NULL;
 
   // See if it is an instance field, with an object type.
   if (alias_type->field() != NULL) {
-    assert(!is_native_ptr, "native pointer op cannot use a java address");
     if (alias_type->field()->type()->is_klass()) {
       sharpened_klass = alias_type->field()->type()->as_klass();
     }
@@ -2337,7 +2315,7 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_
   return NULL;
 }
 
-bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
+bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
   if (callee()->is_static()) return false;  // caller must have the capability!
   guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
   guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
@@ -2352,31 +2330,17 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
     if (!is_store) {
       // Object getObject(Object base, int/long offset), etc.
       BasicType rtype = sig->return_type()->basic_type();
-      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
-        rtype = T_ADDRESS;  // it is really a C void*
       assert(rtype == type, "getter must return the expected value");
-      if (!is_native_ptr) {
-        assert(sig->count() == 2, "oop getter has 2 arguments");
-        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
-        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
-      } else {
-        assert(sig->count() == 1, "native getter has 1 argument");
-        assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
-      }
+      assert(sig->count() == 2, "oop getter has 2 arguments");
+      assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
+      assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
     } else {
       // void putObject(Object base, int/long offset, Object x), etc.
       assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
-      if (!is_native_ptr) {
-        assert(sig->count() == 3, "oop putter has 3 arguments");
-        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
-        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
-      } else {
-        assert(sig->count() == 2, "native putter has 2 arguments");
-        assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
-      }
+      assert(sig->count() == 3, "oop putter has 3 arguments");
+      assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
+      assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
       BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
-      if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
-        vtype = T_ADDRESS;  // it is really a C void*
       assert(vtype == type, "putter must accept the expected value");
     }
 #endif // ASSERT
@@ -2393,27 +2357,22 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
   Node* offset = top();
   Node* val;
 
-  if (!is_native_ptr) {
-    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
-    Node* base = argument(1);  // type: oop
-    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    offset = argument(2);  // type: long
-    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
-    // to be plain byte offsets, which are also the same as those accepted
-    // by oopDesc::field_base.
-    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
-           "fieldOffset must be byte-scaled");
-    // 32-bit machines ignore the high half!
-    offset = ConvL2X(offset);
-    adr = make_unsafe_address(base, offset);
-    if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
-      heap_base_oop = base;
-    }
-    val = is_store ? argument(4) : NULL;
-  } else {
-    Node* ptr = argument(1);  // type: long
-    ptr = ConvL2X(ptr);  // adjust Java long to machine word
-    adr = make_unsafe_address(NULL, ptr);
-    val = is_store ? argument(3) : NULL;
-  }
+  // The base is either a Java object or a value produced by Unsafe.staticFieldBase
+  Node* base = argument(1);  // type: oop
+  // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
+  offset = argument(2);  // type: long
+  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
+  // to be plain byte offsets, which are also the same as those accepted
+  // by oopDesc::field_base.
+  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
+         "fieldOffset must be byte-scaled");
+  // 32-bit machines ignore the high half!
+  offset = ConvL2X(offset);
+  adr = make_unsafe_address(base, offset);
+  if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
+    heap_base_oop = base;
+  }
+  val = is_store ? argument(4) : NULL;
 
   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
 
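With is_native_ptr gone, the hunk above leaves a single base-plus-offset address computation; the old else branch shows how a raw access was already just a null base with an absolute address. A self-contained sketch of that addressing idea (hypothetical helpers, ordinary C++ rather than C2 IR nodes):

#include <cstdint>
#include <cstring>

// One addressing rule for both cases: a heap access passes a non-null base and
// a byte offset, a raw memory access passes a null base and an absolute address.
static const void* resolve(const void* base, std::uint64_t offset) {
  return reinterpret_cast<const void*>(reinterpret_cast<std::uintptr_t>(base) +
                                       static_cast<std::uintptr_t>(offset));
}

static std::int32_t get_int(const void* base, std::uint64_t offset) {
  std::int32_t v;
  std::memcpy(&v, resolve(base, offset), sizeof(v));  // memcpy sidesteps alignment/aliasing issues
  return v;
}

int main() {
  std::int32_t field = 42;
  std::uint64_t raw = static_cast<std::uint64_t>(reinterpret_cast<std::uintptr_t>(&field));
  std::int32_t a = get_int(&field, 0);     // "heap" form: base plus offset
  std::int32_t b = get_int(nullptr, raw);  // "raw" form: null base, absolute address
  return (a == 42 && b == 42) ? 0 : 1;
}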
@@ -2494,11 +2453,11 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
   // SATB log buffer using the pre-barrier mechanism.
   // Also we need to add memory barrier to prevent commoning reads
   // from this field across safepoint since GC can change its value.
-  bool need_read_barrier = !is_native_ptr && !is_store &&
+  bool need_read_barrier = !is_store &&
                            offset != top() && heap_base_oop != top();
 
   if (!is_store && type == T_OBJECT) {
-    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
+    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
     if (tjp != NULL) {
       value_type = tjp;
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,44 +37,44 @@
 #include "utilities/globalDefinitions.hpp"
 
 // setup and cleanup actions
-void StackWalkAnchor::setup_magic_on_entry(objArrayHandle frames_array) {
+void JavaFrameStream::setup_magic_on_entry(objArrayHandle frames_array) {
   frames_array->obj_at_put(magic_pos, _thread->threadObj());
   _anchor = address_value();
   assert(check_magic(frames_array), "invalid magic");
 }
 
-bool StackWalkAnchor::check_magic(objArrayHandle frames_array) {
+bool JavaFrameStream::check_magic(objArrayHandle frames_array) {
   oop m1 = frames_array->obj_at(magic_pos);
   jlong m2 = _anchor;
   if (m1 == _thread->threadObj() && m2 == address_value()) return true;
   return false;
 }
 
-bool StackWalkAnchor::cleanup_magic_on_exit(objArrayHandle frames_array) {
+bool JavaFrameStream::cleanup_magic_on_exit(objArrayHandle frames_array) {
   bool ok = check_magic(frames_array);
   frames_array->obj_at_put(magic_pos, NULL);
   _anchor = 0L;
   return ok;
 }
 
-// Returns StackWalkAnchor for the current stack being traversed.
+// Returns JavaFrameStream for the current stack being traversed.
 //
 // Parameters:
 //  thread        Current Java thread.
 //  magic         Magic value used for each stack walking
 //  frames_array  User-supplied buffers. The 0th element is reserved
-//                to this StackWalkAnchor to use
+//                to this JavaFrameStream to use
 //
-StackWalkAnchor* StackWalkAnchor::from_current(JavaThread* thread, jlong magic,
-                                               objArrayHandle frames_array)
+JavaFrameStream* JavaFrameStream::from_current(JavaThread* thread, jlong magic,
                                                objArrayHandle frames_array)
 {
   assert(thread != NULL && thread->is_Java_thread(), "");
   oop m1 = frames_array->obj_at(magic_pos);
   if (m1 != thread->threadObj()) return NULL;
   if (magic == 0L) return NULL;
-  StackWalkAnchor* anchor = (StackWalkAnchor*) (intptr_t) magic;
-  if (!anchor->is_valid_in(thread, frames_array)) return NULL;
-  return anchor;
+  JavaFrameStream* stream = (JavaFrameStream*) (intptr_t) magic;
+  if (!stream->is_valid_in(thread, frames_array)) return NULL;
+  return stream;
 }
 
 // Unpacks one or more frames into user-supplied buffers.
|
|||||||
// In other words, do not leave any stale data in the vfst.
|
// In other words, do not leave any stale data in the vfst.
|
||||||
//
|
//
|
||||||
// Parameters:
|
// Parameters:
|
||||||
// mode Restrict which frames to be decoded.
|
// mode Restrict which frames to be decoded.
|
||||||
// vfst vFrameStream.
|
// JavaFrameStream stream of javaVFrames
|
||||||
// max_nframes Maximum number of frames to be filled.
|
// max_nframes Maximum number of frames to be filled.
|
||||||
// start_index Start index to the user-supplied buffers.
|
// start_index Start index to the user-supplied buffers.
|
||||||
// frames_array Buffer to store Class or StackFrame in, starting at start_index.
|
// frames_array Buffer to store Class or StackFrame in, starting at start_index.
|
||||||
// frames array is a Class<?>[] array when only getting caller
|
// frames array is a Class<?>[] array when only getting caller
|
||||||
// reference, and a StackFrameInfo[] array (or derivative)
|
// reference, and a StackFrameInfo[] array (or derivative)
|
||||||
// otherwise. It should never be null.
|
// otherwise. It should never be null.
|
||||||
// end_index End index to the user-supplied buffers with unpacked frames.
|
// end_index End index to the user-supplied buffers with unpacked frames.
|
||||||
//
|
//
|
||||||
// Returns the number of frames whose information was transferred into the buffers.
|
// Returns the number of frames whose information was transferred into the buffers.
|
||||||
//
|
//
|
||||||
int StackWalk::fill_in_frames(jlong mode, vframeStream& vfst,
|
int StackWalk::fill_in_frames(jlong mode, JavaFrameStream& stream,
|
||||||
int max_nframes, int start_index,
|
int max_nframes, int start_index,
|
||||||
objArrayHandle frames_array,
|
objArrayHandle frames_array,
|
||||||
int& end_index, TRAPS) {
|
int& end_index, TRAPS) {
|
||||||
@ -108,9 +108,9 @@ int StackWalk::fill_in_frames(jlong mode, vframeStream& vfst,
|
|||||||
assert(start_index + max_nframes <= frames_array->length(), "oob");
|
assert(start_index + max_nframes <= frames_array->length(), "oob");
|
||||||
|
|
||||||
int frames_decoded = 0;
|
int frames_decoded = 0;
|
||||||
for (; !vfst.at_end(); vfst.next()) {
|
for (; !stream.at_end(); stream.next()) {
|
||||||
Method* method = vfst.method();
|
Method* method = stream.method();
|
||||||
int bci = vfst.bci();
|
int bci = stream.bci();
|
||||||
|
|
||||||
if (method == NULL) continue;
|
if (method == NULL) continue;
|
||||||
if (!ShowHiddenFrames && StackWalk::skip_hidden_frames(mode)) {
|
if (!ShowHiddenFrames && StackWalk::skip_hidden_frames(mode)) {
|
||||||
@ -133,7 +133,7 @@ int StackWalk::fill_in_frames(jlong mode, vframeStream& vfst,
|
|||||||
if (live_frame_info(mode)) {
|
if (live_frame_info(mode)) {
|
||||||
assert (use_frames_array(mode), "Bad mode for get live frame");
|
assert (use_frames_array(mode), "Bad mode for get live frame");
|
||||||
Handle stackFrame(frames_array->obj_at(index));
|
Handle stackFrame(frames_array->obj_at(index));
|
||||||
fill_live_stackframe(stackFrame, method, bci, vfst.java_frame(), CHECK_0);
|
fill_live_stackframe(stackFrame, method, bci, stream.java_frame(), CHECK_0);
|
||||||
} else if (need_method_info(mode)) {
|
} else if (need_method_info(mode)) {
|
||||||
assert (use_frames_array(mode), "Bad mode for get stack frame");
|
assert (use_frames_array(mode), "Bad mode for get stack frame");
|
||||||
Handle stackFrame(frames_array->obj_at(index));
|
Handle stackFrame(frames_array->obj_at(index));
|
||||||
@ -294,6 +294,7 @@ oop StackWalk::walk(Handle stackStream, jlong mode,
|
|||||||
int skip_frames, int frame_count, int start_index,
|
int skip_frames, int frame_count, int start_index,
|
||||||
objArrayHandle frames_array,
|
objArrayHandle frames_array,
|
||||||
TRAPS) {
|
TRAPS) {
|
||||||
|
ResourceMark rm(THREAD);
|
||||||
JavaThread* jt = (JavaThread*)THREAD;
|
JavaThread* jt = (JavaThread*)THREAD;
|
||||||
if (TraceStackWalk) {
|
if (TraceStackWalk) {
|
||||||
tty->print_cr("Start walking: mode " JLONG_FORMAT " skip %d frames batch size %d",
|
tty->print_cr("Start walking: mode " JLONG_FORMAT " skip %d frames batch size %d",
|
||||||
@ -309,41 +310,39 @@ oop StackWalk::walk(Handle stackStream, jlong mode,
|
|||||||
|
|
||||||
methodHandle m_doStackWalk(THREAD, Universe::do_stack_walk_method());
|
methodHandle m_doStackWalk(THREAD, Universe::do_stack_walk_method());
|
||||||
|
|
||||||
// Open up a traversable stream onto my stack.
|
// Setup traversal onto my stack.
|
||||||
// This stream will be made available by *reference* to the inner Java call.
|
RegisterMap regMap(jt, true);
|
||||||
StackWalkAnchor anchor(jt);
|
JavaFrameStream stream(jt, ®Map);
|
||||||
vframeStream& vfst = anchor.vframe_stream();
|
|
||||||
|
|
||||||
{
|
{
|
||||||
while (!vfst.at_end()) {
|
while (!stream.at_end()) {
|
||||||
InstanceKlass* ik = vfst.method()->method_holder();
|
InstanceKlass* ik = stream.method()->method_holder();
|
||||||
if (ik != stackWalker_klass &&
|
if (ik != stackWalker_klass &&
|
||||||
ik != abstractStackWalker_klass && ik->super() != abstractStackWalker_klass) {
|
ik != abstractStackWalker_klass && ik->super() != abstractStackWalker_klass) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TraceStackWalk) {
|
if (TraceStackWalk) {
|
||||||
tty->print(" skip "); vfst.method()->print_short_name(); tty->print("\n");
|
tty->print(" skip "); stream.method()->print_short_name(); tty->print("\n");
|
||||||
}
|
}
|
||||||
vfst.next();
|
stream.next();
|
||||||
}
|
}
|
||||||
|
|
||||||
// stack frame has been traversed individually and resume stack walk
|
// stack frame has been traversed individually and resume stack walk
|
||||||
// from the stack frame at depth == skip_frames.
|
// from the stack frame at depth == skip_frames.
|
||||||
for (int n=0; n < skip_frames && !vfst.at_end(); vfst.next(), n++) {
|
for (int n=0; n < skip_frames && !stream.at_end(); stream.next(), n++) {
|
||||||
if (TraceStackWalk) {
|
if (TraceStackWalk) {
|
||||||
tty->print(" skip "); vfst.method()->print_short_name();
|
tty->print(" skip "); stream.method()->print_short_name();
|
||||||
tty->print_cr(" frame id: " PTR_FORMAT " pc: " PTR_FORMAT,
|
tty->print_cr(" frame id: " PTR_FORMAT " pc: " PTR_FORMAT,
|
||||||
p2i(vfst.frame_id()), p2i(vfst.frame_pc()));
|
p2i(stream.java_frame()->fr().id()),
|
||||||
|
p2i(stream.java_frame()->fr().pc()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Method* pointer in the vfst has a very short shelf life. Grab it now.
|
|
||||||
int end_index = start_index;
|
int end_index = start_index;
|
||||||
int numFrames = 0;
|
int numFrames = 0;
|
||||||
if (!vfst.at_end()) {
|
if (!stream.at_end()) {
|
||||||
numFrames = fill_in_frames(mode, vfst, frame_count, start_index,
|
numFrames = fill_in_frames(mode, stream, frame_count, start_index,
|
||||||
frames_array, end_index, CHECK_NULL);
|
frames_array, end_index, CHECK_NULL);
|
||||||
if (numFrames < 1) {
|
if (numFrames < 1) {
|
||||||
THROW_MSG_(vmSymbols::java_lang_InternalError(), "stack walk: decode failed", NULL);
|
THROW_MSG_(vmSymbols::java_lang_InternalError(), "stack walk: decode failed", NULL);
|
||||||
@ -356,19 +355,19 @@ oop StackWalk::walk(Handle stackStream, jlong mode,
|
|||||||
// When JVM_CallStackWalk returns, it invalidates the stack stream.
|
// When JVM_CallStackWalk returns, it invalidates the stack stream.
|
||||||
JavaValue result(T_OBJECT);
|
JavaValue result(T_OBJECT);
|
||||||
JavaCallArguments args(stackStream);
|
JavaCallArguments args(stackStream);
|
||||||
args.push_long(anchor.address_value());
|
args.push_long(stream.address_value());
|
||||||
args.push_int(skip_frames);
|
args.push_int(skip_frames);
|
||||||
args.push_int(frame_count);
|
args.push_int(frame_count);
|
||||||
args.push_int(start_index);
|
args.push_int(start_index);
|
||||||
args.push_int(end_index);
|
args.push_int(end_index);
|
||||||
|
|
||||||
// Link the thread and vframe stream into the callee-visible object
|
// Link the thread and vframe stream into the callee-visible object
|
||||||
anchor.setup_magic_on_entry(frames_array);
|
stream.setup_magic_on_entry(frames_array);
|
||||||
|
|
||||||
JavaCalls::call(&result, m_doStackWalk, &args, THREAD);
|
JavaCalls::call(&result, m_doStackWalk, &args, THREAD);
|
||||||
|
|
||||||
// Do this before anything else happens, to disable any lingering stream objects
|
// Do this before anything else happens, to disable any lingering stream objects
|
||||||
bool ok = anchor.cleanup_magic_on_exit(frames_array);
|
bool ok = stream.cleanup_magic_on_exit(frames_array);
|
||||||
|
|
||||||
// Throw pending exception if we must
|
// Throw pending exception if we must
|
||||||
(void) (CHECK_NULL);
|
(void) (CHECK_NULL);
|
||||||
@ -379,7 +378,6 @@ oop StackWalk::walk(Handle stackStream, jlong mode,
|
|||||||
|
|
||||||
// Return normally
|
// Return normally
|
||||||
return (oop)result.get_jobject();
|
return (oop)result.get_jobject();
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Walk the next batch of stack frames
|
// Walk the next batch of stack frames
|
||||||
@ -400,8 +398,8 @@ jint StackWalk::moreFrames(Handle stackStream, jlong mode, jlong magic,
|
|||||||
TRAPS)
|
TRAPS)
|
||||||
{
|
{
|
||||||
JavaThread* jt = (JavaThread*)THREAD;
|
JavaThread* jt = (JavaThread*)THREAD;
|
||||||
StackWalkAnchor* existing_anchor = StackWalkAnchor::from_current(jt, magic, frames_array);
|
JavaFrameStream* existing_stream = JavaFrameStream::from_current(jt, magic, frames_array);
|
||||||
if (existing_anchor == NULL) {
|
if (existing_stream == NULL) {
|
||||||
THROW_MSG_(vmSymbols::java_lang_InternalError(), "doStackWalk: corrupted buffers", 0L);
|
THROW_MSG_(vmSymbols::java_lang_InternalError(), "doStackWalk: corrupted buffers", 0L);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -410,8 +408,8 @@ jint StackWalk::moreFrames(Handle stackStream, jlong mode, jlong magic,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (TraceStackWalk) {
|
if (TraceStackWalk) {
|
||||||
tty->print_cr("StackWalk::moreFrames frame_count %d existing_anchor " PTR_FORMAT " start %d frames %d",
|
tty->print_cr("StackWalk::moreFrames frame_count %d existing_stream " PTR_FORMAT " start %d frames %d",
|
||||||
frame_count, p2i(existing_anchor), start_index, frames_array->length());
|
frame_count, p2i(existing_stream), start_index, frames_array->length());
|
||||||
}
|
}
|
||||||
int end_index = start_index;
|
int end_index = start_index;
|
||||||
if (frame_count <= 0) {
|
if (frame_count <= 0) {
|
||||||
@ -421,12 +419,11 @@ jint StackWalk::moreFrames(Handle stackStream, jlong mode, jlong magic,
|
|||||||
int count = frame_count + start_index;
|
int count = frame_count + start_index;
|
||||||
assert (frames_array->length() >= count, "not enough space in buffers");
|
assert (frames_array->length() >= count, "not enough space in buffers");
|
||||||
|
|
||||||
StackWalkAnchor& anchor = (*existing_anchor);
|
JavaFrameStream& stream = (*existing_stream);
|
||||||
vframeStream& vfst = anchor.vframe_stream();
|
if (!stream.at_end()) {
|
||||||
if (!vfst.at_end()) {
|
stream.next(); // advance past the last frame decoded in previous batch
|
||||||
vfst.next(); // this was the last frame decoded in the previous batch
|
if (!stream.at_end()) {
|
||||||
if (!vfst.at_end()) {
|
int n = fill_in_frames(mode, stream, frame_count, start_index,
|
||||||
int n = fill_in_frames(mode, vfst, frame_count, start_index,
|
|
||||||
frames_array, end_index, CHECK_0);
|
frames_array, end_index, CHECK_0);
|
||||||
if (n < 1) {
|
if (n < 1) {
|
||||||
THROW_MSG_(vmSymbols::java_lang_InternalError(), "doStackWalk: later decode failed", 0L);
|
THROW_MSG_(vmSymbols::java_lang_InternalError(), "doStackWalk: later decode failed", 0L);
|
||||||
|
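The batching contract above is easy to miss: fill_in_frames() stops with the stream still positioned on the last frame it decoded, which is why moreFrames() must call stream.next() before decoding the next batch. A minimal standalone sketch of that contract (FrameCursor and fill_batch are illustrative stand-ins, not HotSpot code):

    #include <cstdio>
    #include <vector>

    // The cursor is left positioned ON the last frame handed out, so the
    // next batch must advance past it first.
    struct FrameCursor {
        const std::vector<int>* frames;   // stand-in for the Java stack
        size_t pos;
        explicit FrameCursor(const std::vector<int>* f) : frames(f), pos(0) {}
        bool at_end() const  { return pos >= frames->size(); }
        void next()          { ++pos; }
        int  current() const { return (*frames)[pos]; }
    };

    // Decodes up to 'max' frames into 'out' and returns the count. When the
    // batch fills up the loop breaks before next() runs, so the cursor still
    // points at the last decoded frame -- the same shape as fill_in_frames().
    static int fill_batch(FrameCursor& c, int max, std::vector<int>& out) {
        int decoded = 0;
        for (; !c.at_end(); c.next()) {
            out.push_back(c.current());
            if (++decoded >= max) break;
        }
        return decoded;
    }

    int main() {
        std::vector<int> stack = {1, 2, 3, 4, 5};
        FrameCursor c(&stack);
        std::vector<int> out;
        fill_batch(c, 2, out);                  // first batch: frames 1, 2
        if (!c.at_end()) c.next();              // advance past frame 2, as moreFrames() does
        if (!c.at_end()) fill_batch(c, 2, out); // next batch: frames 3, 4
        std::printf("%zu frames decoded\n", out.size());
    }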
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2015, 2016 Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -29,21 +29,31 @@
|
|||||||
#include "oops/oop.hpp"
|
#include "oops/oop.hpp"
|
||||||
#include "runtime/vframe.hpp"
|
#include "runtime/vframe.hpp"
|
||||||
|
|
||||||
class StackWalkAnchor : public StackObj {
|
//
|
||||||
|
// JavaFrameStream is used by StackWalker to iterate through Java stack frames
|
||||||
|
// on the given JavaThread.
|
||||||
|
//
|
||||||
|
class JavaFrameStream : public StackObj {
|
||||||
private:
|
private:
|
||||||
enum {
|
enum {
|
||||||
magic_pos = 0
|
magic_pos = 0
|
||||||
};
|
};
|
||||||
|
|
||||||
JavaThread* _thread;
|
JavaThread* _thread;
|
||||||
vframeStream _vfst;
|
javaVFrame* _jvf;
|
||||||
jlong _anchor;
|
jlong _anchor;
|
||||||
public:
|
public:
|
||||||
StackWalkAnchor(JavaThread* thread)
|
JavaFrameStream(JavaThread* thread, RegisterMap* rm)
|
||||||
: _thread(thread), _vfst(thread), _anchor(0L) {}
|
: _thread(thread), _anchor(0L) {
|
||||||
|
_jvf = _thread->last_java_vframe(rm);
|
||||||
|
}
|
||||||
|
|
||||||
vframeStream& vframe_stream() { return _vfst; }
|
javaVFrame* java_frame() { return _jvf; }
|
||||||
JavaThread* thread() { return _thread; }
|
void next() { _jvf = _jvf->java_sender(); }
|
||||||
|
bool at_end() { return _jvf == NULL; }
|
||||||
|
|
||||||
|
Method* method() { return _jvf->method(); }
|
||||||
|
int bci() { return _jvf->bci(); }
|
||||||
|
|
||||||
void setup_magic_on_entry(objArrayHandle frames_array);
|
void setup_magic_on_entry(objArrayHandle frames_array);
|
||||||
bool check_magic(objArrayHandle frames_array);
|
bool check_magic(objArrayHandle frames_array);
|
||||||
@ -57,12 +67,12 @@ public:
|
|||||||
return (jlong) castable_address(this);
|
return (jlong) castable_address(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
static StackWalkAnchor* from_current(JavaThread* thread, jlong anchor, objArrayHandle frames_array);
|
static JavaFrameStream* from_current(JavaThread* thread, jlong magic, objArrayHandle frames_array);
|
||||||
};
|
};
|
||||||
|
|
||||||
class StackWalk : public AllStatic {
|
class StackWalk : public AllStatic {
|
||||||
private:
|
private:
|
||||||
static int fill_in_frames(jlong mode, vframeStream& vfst,
|
static int fill_in_frames(jlong mode, JavaFrameStream& stream,
|
||||||
int max_nframes, int start_index,
|
int max_nframes, int start_index,
|
||||||
objArrayHandle frames_array,
|
objArrayHandle frames_array,
|
||||||
int& end_index, TRAPS);
|
int& end_index, TRAPS);
|
||||||
|
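JavaFrameStream replaces the vframeStream wrapper with a thin cursor over javaVFrames: it is constructed from the thread's last Java vframe and then driven with at_end()/next(), where next() simply follows java_sender(). A standalone sketch of that iteration pattern, with mock types standing in for the HotSpot ones:

    #include <cstdio>

    // MockFrame/MockFrameStream are illustrative only: keep a pointer to the
    // current frame and hop to the caller until there is none, mirroring
    // _jvf = _jvf->java_sender().
    struct MockFrame {
        const char* name;
        MockFrame*  sender;   // caller frame; NULL at the bottom of the stack
    };

    class MockFrameStream {
        MockFrame* _cur;
    public:
        explicit MockFrameStream(MockFrame* top) : _cur(top) {}
        bool at_end() const        { return _cur == nullptr; }
        void next()                { _cur = _cur->sender; }
        const char* method() const { return _cur->name; }
    };

    int main() {
        MockFrame bottom = {"userCode", nullptr};
        MockFrame middle = {"StackWalker.walk", &bottom};
        MockFrame top    = {"AbstractStackWalker.doStackWalk", &middle};
        for (MockFrameStream s(&top); !s.at_end(); s.next()) {
            std::printf("frame: %s\n", s.method());
        }
    }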
@ -131,38 +131,137 @@ jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
///// Data in the Java heap.
|
///// Data read/writes on the Java heap and in native (off-heap) memory
|
||||||
|
|
||||||
#define truncate_jboolean(x) ((x) & 1)
|
/**
|
||||||
#define truncate_jbyte(x) (x)
|
* Helper class for accessing memory.
|
||||||
#define truncate_jshort(x) (x)
|
*
|
||||||
#define truncate_jchar(x) (x)
|
* Normalizes values and wraps accesses in
|
||||||
#define truncate_jint(x) (x)
|
* JavaThread::doing_unsafe_access() if needed.
|
||||||
#define truncate_jlong(x) (x)
|
*/
|
||||||
#define truncate_jfloat(x) (x)
|
class MemoryAccess : StackObj {
|
||||||
#define truncate_jdouble(x) (x)
|
JavaThread* _thread;
|
||||||
|
jobject _obj;
|
||||||
|
jlong _offset;
|
||||||
|
|
||||||
#define GET_FIELD(obj, offset, type_name, v) \
|
// Resolves and returns the address of the memory access
|
||||||
oop p = JNIHandles::resolve(obj); \
|
void* addr() {
|
||||||
type_name v = *(type_name*)index_oop_from_field_offset_long(p, offset)
|
return index_oop_from_field_offset_long(JNIHandles::resolve(_obj), _offset);
|
||||||
|
}
|
||||||
|
|
||||||
#define SET_FIELD(obj, offset, type_name, x) \
|
template <typename T>
|
||||||
oop p = JNIHandles::resolve(obj); \
|
T normalize(T x) {
|
||||||
*(type_name*)index_oop_from_field_offset_long(p, offset) = truncate_##type_name(x)
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
#define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
|
jboolean normalize(jboolean x) {
|
||||||
oop p = JNIHandles::resolve(obj); \
|
return x & 1;
|
||||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { \
|
}
|
||||||
OrderAccess::fence(); \
|
|
||||||
} \
|
|
||||||
volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
|
|
||||||
|
|
||||||
#define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
|
/**
|
||||||
oop p = JNIHandles::resolve(obj); \
|
* Helper class to wrap memory accesses in JavaThread::doing_unsafe_access()
|
||||||
OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), truncate_##type_name(x));
|
*/
|
||||||
|
class GuardUnsafeAccess {
|
||||||
|
JavaThread* _thread;
|
||||||
|
bool _active;
|
||||||
|
|
||||||
|
public:
|
||||||
|
GuardUnsafeAccess(JavaThread* thread, jobject _obj) : _thread(thread) {
|
||||||
|
if (JNIHandles::resolve(_obj) == NULL) {
|
||||||
|
// native/off-heap access which may raise SIGBUS if accessing
|
||||||
|
// memory mapped file data in a region of the file which has
|
||||||
|
// been truncated and is now invalid
|
||||||
|
_thread->set_doing_unsafe_access(true);
|
||||||
|
_active = true;
|
||||||
|
} else {
|
||||||
|
_active = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
~GuardUnsafeAccess() {
|
||||||
|
if (_active) {
|
||||||
|
_thread->set_doing_unsafe_access(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
MemoryAccess(JavaThread* thread, jobject obj, jlong offset)
|
||||||
|
: _thread(thread), _obj(obj), _offset(offset) {
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
T get() {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
T* p = (T*)addr();
|
||||||
|
|
||||||
|
T x = *p;
|
||||||
|
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void put(T x) {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
T* p = (T*)addr();
|
||||||
|
|
||||||
|
*p = normalize(x);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
// Get/SetObject must be special-cased, since it works with handles.
|
template <typename T>
|
||||||
|
T get_volatile() {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
T* p = (T*)addr();
|
||||||
|
|
||||||
|
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||||
|
OrderAccess::fence();
|
||||||
|
}
|
||||||
|
|
||||||
|
T x = OrderAccess::load_acquire((volatile T*)p);
|
||||||
|
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void put_volatile(T x) {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
T* p = (T*)addr();
|
||||||
|
|
||||||
|
OrderAccess::release_store_fence((volatile T*)p, normalize(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef SUPPORTS_NATIVE_CX8
|
||||||
|
jlong get_jlong_locked() {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
|
||||||
|
|
||||||
|
jlong* p = (jlong*)addr();
|
||||||
|
|
||||||
|
jlong x = Atomic::load(p);
|
||||||
|
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
|
void put_jlong_locked(jlong x) {
|
||||||
|
GuardUnsafeAccess guard(_thread, _obj);
|
||||||
|
|
||||||
|
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
|
||||||
|
|
||||||
|
jlong* p = (jlong*)addr();
|
||||||
|
|
||||||
|
Atomic::store(normalize(x), p);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get/PutObject must be special-cased, since it works with handles.
|
||||||
|
|
||||||
// These functions allow a null base pointer with an arbitrary address.
|
// These functions allow a null base pointer with an arbitrary address.
|
||||||
// But if the base pointer is non-null, the offset should make some sense.
|
// But if the base pointer is non-null, the offset should make some sense.
|
||||||
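Two ideas carry the new MemoryAccess helper: GuardUnsafeAccess brackets only off-heap accesses (the ones that can take a SIGBUS from a truncated memory-mapped file) with JavaThread::doing_unsafe_access(), and normalize() masks jboolean stores down to 0 or 1. A standalone sketch of both, using a thread_local flag and plain C++ types rather than the HotSpot classes:

    #include <cstdint>
    #include <cstdio>

    static thread_local bool doing_unsafe_access = false;

    // RAII guard: the flag is set only for off-heap accesses and is cleared
    // automatically when the guard goes out of scope.
    class Guard {
        bool _active;
    public:
        explicit Guard(bool off_heap) : _active(off_heap) {
            if (_active) doing_unsafe_access = true;
        }
        ~Guard() {
            if (_active) doing_unsafe_access = false;
        }
    };

    template <typename T>
    static T normalize(T x)             { return x; }
    static uint8_t normalize(uint8_t x) { return x & 1; }   // jboolean-style

    template <typename T>
    static void put(void* addr, T x, bool off_heap) {
        Guard guard(off_heap);                  // set/cleared around the single store
        *static_cast<T*>(addr) = normalize(x);
    }

    int main() {
        uint8_t flag = 0;
        put<uint8_t>(&flag, 5, /*off_heap=*/false);   // stored as 1, not 5
        std::printf("flag = %d\n", flag);
    }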
@ -208,7 +307,7 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObject(JNIEnv *env, jobject unsafe, jobject obj,
|
|||||||
return ret;
|
return ret;
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
|
UNSAFE_ENTRY(void, Unsafe_PutObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
|
||||||
oop x = JNIHandles::resolve(x_h);
|
oop x = JNIHandles::resolve(x_h);
|
||||||
oop p = JNIHandles::resolve(obj);
|
oop p = JNIHandles::resolve(obj);
|
||||||
|
|
||||||
@ -236,7 +335,7 @@ UNSAFE_ENTRY(jobject, Unsafe_GetObjectVolatile(JNIEnv *env, jobject unsafe, jobj
|
|||||||
return JNIHandles::make_local(env, v);
|
return JNIHandles::make_local(env, v);
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
|
UNSAFE_ENTRY(void, Unsafe_PutObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
|
||||||
oop x = JNIHandles::resolve(x_h);
|
oop x = JNIHandles::resolve(x_h);
|
||||||
oop p = JNIHandles::resolve(obj);
|
oop p = JNIHandles::resolve(obj);
|
||||||
void* addr = index_oop_from_field_offset_long(p, offset);
|
void* addr = index_oop_from_field_offset_long(p, offset);
|
||||||
@ -301,25 +400,17 @@ UNSAFE_ENTRY(jlong, Unsafe_GetKlassPointer(JNIEnv *env, jobject unsafe, jobject
|
|||||||
|
|
||||||
UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
|
UNSAFE_ENTRY(jlong, Unsafe_GetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
|
||||||
if (VM_Version::supports_cx8()) {
|
if (VM_Version::supports_cx8()) {
|
||||||
GET_FIELD_VOLATILE(obj, offset, jlong, v);
|
return MemoryAccess(thread, obj, offset).get_volatile<jlong>();
|
||||||
return v;
|
|
||||||
} else {
|
} else {
|
||||||
Handle p (THREAD, JNIHandles::resolve(obj));
|
return MemoryAccess(thread, obj, offset).get_jlong_locked();
|
||||||
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
|
|
||||||
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
|
|
||||||
jlong value = Atomic::load(addr);
|
|
||||||
return value;
|
|
||||||
}
|
}
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
UNSAFE_ENTRY(void, Unsafe_SetLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
|
UNSAFE_ENTRY(void, Unsafe_PutLongVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
|
||||||
if (VM_Version::supports_cx8()) {
|
if (VM_Version::supports_cx8()) {
|
||||||
SET_FIELD_VOLATILE(obj, offset, jlong, x);
|
MemoryAccess(thread, obj, offset).put_volatile<jlong>(x);
|
||||||
} else {
|
} else {
|
||||||
Handle p (THREAD, JNIHandles::resolve(obj));
|
MemoryAccess(thread, obj, offset).put_jlong_locked(x);
|
||||||
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
|
|
||||||
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
|
|
||||||
Atomic::store(x, addr);
|
|
||||||
}
|
}
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
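On hardware without an atomic 64-bit compare-and-exchange (VM_Version::supports_cx8() returns false), the volatile jlong accessors fall back to get_jlong_locked()/put_jlong_locked(), which serialize every access through UnsafeJlong_lock so a reader never observes a torn value. A standalone sketch of the same fallback using a std::mutex in place of the HotSpot lock:

    #include <cstdint>
    #include <mutex>

    // One global lock funnels every 64-bit access when the CPU cannot do it
    // atomically, so concurrent get/put pairs stay consistent.
    static std::mutex long_access_lock;

    int64_t get_long_locked(const int64_t* p) {
        std::lock_guard<std::mutex> guard(long_access_lock);
        return *p;
    }

    void put_long_locked(int64_t* p, int64_t x) {
        std::lock_guard<std::mutex> guard(long_access_lock);
        *p = x;
    }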
@ -337,15 +428,14 @@ UNSAFE_LEAF(jint, Unsafe_unalignedAccess0(JNIEnv *env, jobject unsafe)) {
|
|||||||
return UseUnalignedAccesses;
|
return UseUnalignedAccesses;
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
#define DEFINE_GETSETOOP(java_type, Type) \
|
#define DEFINE_GETSETOOP(java_type, Type) \
|
||||||
\
|
\
|
||||||
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
|
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
|
||||||
GET_FIELD(obj, offset, java_type, v); \
|
return MemoryAccess(thread, obj, offset).get<java_type>(); \
|
||||||
return v; \
|
|
||||||
} UNSAFE_END \
|
} UNSAFE_END \
|
||||||
\
|
\
|
||||||
UNSAFE_ENTRY(void, Unsafe_Set##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
|
UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
|
||||||
SET_FIELD(obj, offset, java_type, x); \
|
MemoryAccess(thread, obj, offset).put<java_type>(x); \
|
||||||
} UNSAFE_END \
|
} UNSAFE_END \
|
||||||
\
|
\
|
||||||
// END DEFINE_GETSETOOP.
|
// END DEFINE_GETSETOOP.
|
||||||
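For reference, this is what DEFINE_GETSETOOP(jint, Int) expands to after the change, hand-expanded from the macro body above; the UNSAFE_ENTRY/UNSAFE_END wrappers and the implicit 'thread' variable come from elsewhere in unsafe.cpp, so this is an illustration rather than standalone code:

    UNSAFE_ENTRY(jint, Unsafe_GetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) {
      return MemoryAccess(thread, obj, offset).get<jint>();
    } UNSAFE_END

    UNSAFE_ENTRY(void, Unsafe_PutInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
      MemoryAccess(thread, obj, offset).put<jint>(x);
    } UNSAFE_END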
@ -364,12 +454,11 @@ DEFINE_GETSETOOP(jdouble, Double);
|
|||||||
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
|
#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
|
||||||
\
|
\
|
||||||
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
|
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
|
||||||
GET_FIELD_VOLATILE(obj, offset, java_type, v); \
|
return MemoryAccess(thread, obj, offset).get_volatile<java_type>(); \
|
||||||
return v; \
|
|
||||||
} UNSAFE_END \
|
} UNSAFE_END \
|
||||||
\
|
\
|
||||||
UNSAFE_ENTRY(void, Unsafe_Set##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
|
UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
|
||||||
SET_FIELD_VOLATILE(obj, offset, java_type, x); \
|
MemoryAccess(thread, obj, offset).put_volatile<java_type>(x); \
|
||||||
} UNSAFE_END \
|
} UNSAFE_END \
|
||||||
\
|
\
|
||||||
// END DEFINE_GETSETOOP_VOLATILE.
|
// END DEFINE_GETSETOOP_VOLATILE.
|
||||||
@ -400,98 +489,6 @@ UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
|
|||||||
OrderAccess::fence();
|
OrderAccess::fence();
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
////// Data in the C heap.
|
|
||||||
|
|
||||||
// Note: These do not throw NullPointerException for bad pointers.
|
|
||||||
// They just crash. Only a oop base pointer can generate a NullPointerException.
|
|
||||||
//
|
|
||||||
#define DEFINE_GETSETNATIVE(java_type, Type, native_type) \
|
|
||||||
\
|
|
||||||
UNSAFE_ENTRY(java_type, Unsafe_GetNative##Type(JNIEnv *env, jobject unsafe, jlong addr)) { \
|
|
||||||
void* p = addr_from_java(addr); \
|
|
||||||
JavaThread* t = JavaThread::current(); \
|
|
||||||
t->set_doing_unsafe_access(true); \
|
|
||||||
java_type x = *(volatile native_type*)p; \
|
|
||||||
t->set_doing_unsafe_access(false); \
|
|
||||||
return x; \
|
|
||||||
} UNSAFE_END \
|
|
||||||
\
|
|
||||||
UNSAFE_ENTRY(void, Unsafe_SetNative##Type(JNIEnv *env, jobject unsafe, jlong addr, java_type x)) { \
|
|
||||||
JavaThread* t = JavaThread::current(); \
|
|
||||||
t->set_doing_unsafe_access(true); \
|
|
||||||
void* p = addr_from_java(addr); \
|
|
||||||
*(volatile native_type*)p = x; \
|
|
||||||
t->set_doing_unsafe_access(false); \
|
|
||||||
} UNSAFE_END \
|
|
||||||
\
|
|
||||||
// END DEFINE_GETSETNATIVE.
|
|
||||||
|
|
||||||
DEFINE_GETSETNATIVE(jbyte, Byte, signed char)
|
|
||||||
DEFINE_GETSETNATIVE(jshort, Short, signed short);
|
|
||||||
DEFINE_GETSETNATIVE(jchar, Char, unsigned short);
|
|
||||||
DEFINE_GETSETNATIVE(jint, Int, jint);
|
|
||||||
// no long -- handled specially
|
|
||||||
DEFINE_GETSETNATIVE(jfloat, Float, float);
|
|
||||||
DEFINE_GETSETNATIVE(jdouble, Double, double);
|
|
||||||
|
|
||||||
#undef DEFINE_GETSETNATIVE
|
|
||||||
|
|
||||||
UNSAFE_ENTRY(jlong, Unsafe_GetNativeLong(JNIEnv *env, jobject unsafe, jlong addr)) {
|
|
||||||
JavaThread* t = JavaThread::current();
|
|
||||||
// We do it this way to avoid problems with access to heap using 64
|
|
||||||
// bit loads, as jlong in heap could be not 64-bit aligned, and on
|
|
||||||
// some CPUs (SPARC) it leads to SIGBUS.
|
|
||||||
t->set_doing_unsafe_access(true);
|
|
||||||
void* p = addr_from_java(addr);
|
|
||||||
jlong x;
|
|
||||||
|
|
||||||
if (is_ptr_aligned(p, sizeof(jlong)) == 0) {
|
|
||||||
// jlong is aligned, do a volatile access
|
|
||||||
x = *(volatile jlong*)p;
|
|
||||||
} else {
|
|
||||||
jlong_accessor acc;
|
|
||||||
acc.words[0] = ((volatile jint*)p)[0];
|
|
||||||
acc.words[1] = ((volatile jint*)p)[1];
|
|
||||||
x = acc.long_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
t->set_doing_unsafe_access(false);
|
|
||||||
|
|
||||||
return x;
|
|
||||||
} UNSAFE_END
|
|
||||||
|
|
||||||
UNSAFE_ENTRY(void, Unsafe_SetNativeLong(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
|
|
||||||
JavaThread* t = JavaThread::current();
|
|
||||||
// see comment for Unsafe_GetNativeLong
|
|
||||||
t->set_doing_unsafe_access(true);
|
|
||||||
void* p = addr_from_java(addr);
|
|
||||||
|
|
||||||
if (is_ptr_aligned(p, sizeof(jlong))) {
|
|
||||||
// jlong is aligned, do a volatile access
|
|
||||||
*(volatile jlong*)p = x;
|
|
||||||
} else {
|
|
||||||
jlong_accessor acc;
|
|
||||||
acc.long_value = x;
|
|
||||||
((volatile jint*)p)[0] = acc.words[0];
|
|
||||||
((volatile jint*)p)[1] = acc.words[1];
|
|
||||||
}
|
|
||||||
|
|
||||||
t->set_doing_unsafe_access(false);
|
|
||||||
} UNSAFE_END
|
|
||||||
|
|
||||||
|
|
||||||
UNSAFE_LEAF(jlong, Unsafe_GetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr)) {
|
|
||||||
void* p = addr_from_java(addr);
|
|
||||||
|
|
||||||
return addr_to_java(*(void**)p);
|
|
||||||
} UNSAFE_END
|
|
||||||
|
|
||||||
UNSAFE_LEAF(void, Unsafe_SetNativeAddress(JNIEnv *env, jobject unsafe, jlong addr, jlong x)) {
|
|
||||||
void* p = addr_from_java(addr);
|
|
||||||
*(void**)p = addr_from_java(x);
|
|
||||||
} UNSAFE_END
|
|
||||||
|
|
||||||
|
|
||||||
////// Allocation requests
|
////// Allocation requests
|
||||||
|
|
||||||
UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
|
UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
|
||||||
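The removed Unsafe_GetNativeLong/Unsafe_SetNativeLong pair carried one non-obvious trick: on strict-alignment CPUs such as SPARC a misaligned 64-bit load raises SIGBUS, so an unaligned jlong was read as two 32-bit halves through the jlong_accessor union. A standalone sketch of that trick (SplitLong and the function name are illustrative, and it assumes, as the original did, that the pointer is at least 4-byte aligned):

    #include <cstdint>
    #include <cstdio>

    // If the pointer is 8-byte aligned do one 64-bit load; otherwise read two
    // 32-bit halves so a strict-alignment CPU does not fault.
    union SplitLong {
        int64_t whole;
        int32_t half[2];
    };

    int64_t read_long_maybe_unaligned(const void* p) {
        if ((reinterpret_cast<uintptr_t>(p) & (sizeof(int64_t) - 1)) == 0) {
            return *static_cast<const volatile int64_t*>(p);   // aligned: single load
        }
        SplitLong acc;
        acc.half[0] = static_cast<const volatile int32_t*>(p)[0];
        acc.half[1] = static_cast<const volatile int32_t*>(p)[1];
        return acc.whole;
    }

    int main() {
        int64_t value = 0x0123456789abcdefLL;
        std::printf("%llx\n", (unsigned long long)read_long_maybe_unaligned(&value));
    }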
@ -980,8 +977,8 @@ UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, job
|
|||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
|
|
||||||
UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
|
UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
|
||||||
Handle p (THREAD, JNIHandles::resolve(obj));
|
Handle p(THREAD, JNIHandles::resolve(obj));
|
||||||
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
|
jlong* addr = (jlong*)index_oop_from_field_offset_long(p(), offset);
|
||||||
|
|
||||||
#ifdef SUPPORTS_NATIVE_CX8
|
#ifdef SUPPORTS_NATIVE_CX8
|
||||||
return (jlong)(Atomic::cmpxchg(x, addr, e));
|
return (jlong)(Atomic::cmpxchg(x, addr, e));
|
||||||
@ -1017,7 +1014,7 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe,
|
|||||||
|
|
||||||
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
|
UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
|
||||||
oop p = JNIHandles::resolve(obj);
|
oop p = JNIHandles::resolve(obj);
|
||||||
jint* addr = (jint *) index_oop_from_field_offset_long(p, offset);
|
jint* addr = (jint *)index_oop_from_field_offset_long(p, offset);
|
||||||
|
|
||||||
return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
|
return (jint)(Atomic::cmpxchg(x, addr, e)) == e;
|
||||||
} UNSAFE_END
|
} UNSAFE_END
|
||||||
@ -1143,20 +1140,16 @@ UNSAFE_ENTRY(jint, Unsafe_GetLoadAverage0(JNIEnv *env, jobject unsafe, jdoubleAr
|
|||||||
|
|
||||||
#define DECLARE_GETPUTOOP(Type, Desc) \
|
#define DECLARE_GETPUTOOP(Type, Desc) \
|
||||||
{CC "get" #Type, CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type)}, \
|
{CC "get" #Type, CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type)}, \
|
||||||
{CC "put" #Type, CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Set##Type)}, \
|
{CC "put" #Type, CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type)}, \
|
||||||
{CC "get" #Type "Volatile", CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type##Volatile)}, \
|
{CC "get" #Type "Volatile", CC "(" OBJ "J)" #Desc, FN_PTR(Unsafe_Get##Type##Volatile)}, \
|
||||||
{CC "put" #Type "Volatile", CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Set##Type##Volatile)}
|
{CC "put" #Type "Volatile", CC "(" OBJ "J" #Desc ")V", FN_PTR(Unsafe_Put##Type##Volatile)}
|
||||||
|
|
||||||
|
|
||||||
#define DECLARE_GETPUTNATIVE(Byte, B) \
|
|
||||||
{CC "get" #Byte, CC "(" ADR ")" #B, FN_PTR(Unsafe_GetNative##Byte)}, \
|
|
||||||
{CC "put" #Byte, CC "(" ADR#B ")V", FN_PTR(Unsafe_SetNative##Byte)}
|
|
||||||
|
|
||||||
static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
|
static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
|
||||||
{CC "getObject", CC "(" OBJ "J)" OBJ "", FN_PTR(Unsafe_GetObject)},
|
{CC "getObject", CC "(" OBJ "J)" OBJ "", FN_PTR(Unsafe_GetObject)},
|
||||||
{CC "putObject", CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_SetObject)},
|
{CC "putObject", CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_PutObject)},
|
||||||
{CC "getObjectVolatile",CC "(" OBJ "J)" OBJ "", FN_PTR(Unsafe_GetObjectVolatile)},
|
{CC "getObjectVolatile",CC "(" OBJ "J)" OBJ "", FN_PTR(Unsafe_GetObjectVolatile)},
|
||||||
{CC "putObjectVolatile",CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_SetObjectVolatile)},
|
{CC "putObjectVolatile",CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_PutObjectVolatile)},
|
||||||
|
|
||||||
{CC "getUncompressedObject", CC "(" ADR ")" OBJ, FN_PTR(Unsafe_GetUncompressedObject)},
|
{CC "getUncompressedObject", CC "(" ADR ")" OBJ, FN_PTR(Unsafe_GetUncompressedObject)},
|
||||||
{CC "getJavaMirror", CC "(" ADR ")" CLS, FN_PTR(Unsafe_GetJavaMirror)},
|
{CC "getJavaMirror", CC "(" ADR ")" CLS, FN_PTR(Unsafe_GetJavaMirror)},
|
||||||
@ -1171,17 +1164,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
|
|||||||
DECLARE_GETPUTOOP(Float, F),
|
DECLARE_GETPUTOOP(Float, F),
|
||||||
DECLARE_GETPUTOOP(Double, D),
|
DECLARE_GETPUTOOP(Double, D),
|
||||||
|
|
||||||
DECLARE_GETPUTNATIVE(Byte, B),
|
|
||||||
DECLARE_GETPUTNATIVE(Short, S),
|
|
||||||
DECLARE_GETPUTNATIVE(Char, C),
|
|
||||||
DECLARE_GETPUTNATIVE(Int, I),
|
|
||||||
DECLARE_GETPUTNATIVE(Long, J),
|
|
||||||
DECLARE_GETPUTNATIVE(Float, F),
|
|
||||||
DECLARE_GETPUTNATIVE(Double, D),
|
|
||||||
|
|
||||||
{CC "getAddress", CC "(" ADR ")" ADR, FN_PTR(Unsafe_GetNativeAddress)},
|
|
||||||
{CC "putAddress", CC "(" ADR "" ADR ")V", FN_PTR(Unsafe_SetNativeAddress)},
|
|
||||||
|
|
||||||
{CC "allocateMemory0", CC "(J)" ADR, FN_PTR(Unsafe_AllocateMemory0)},
|
{CC "allocateMemory0", CC "(J)" ADR, FN_PTR(Unsafe_AllocateMemory0)},
|
||||||
{CC "reallocateMemory0", CC "(" ADR "J)" ADR, FN_PTR(Unsafe_ReallocateMemory0)},
|
{CC "reallocateMemory0", CC "(" ADR "J)" ADR, FN_PTR(Unsafe_ReallocateMemory0)},
|
||||||
{CC "freeMemory0", CC "(" ADR ")V", FN_PTR(Unsafe_FreeMemory0)},
|
{CC "freeMemory0", CC "(" ADR ")V", FN_PTR(Unsafe_FreeMemory0)},
|
||||||
@ -1239,7 +1221,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
|
|||||||
#undef DAC_Args
|
#undef DAC_Args
|
||||||
|
|
||||||
#undef DECLARE_GETPUTOOP
|
#undef DECLARE_GETPUTOOP
|
||||||
#undef DECLARE_GETPUTNATIVE
|
|
||||||
|
|
||||||
|
|
||||||
// This function is exported, used by NativeLookup.
|
// This function is exported, used by NativeLookup.
|
||||||
|
@ -1762,6 +1762,21 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
|
|||||||
return callee_method;
|
return callee_method;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
|
||||||
|
// The faulting unsafe accesses should be changed to throw the error
|
||||||
|
// synchronously instead. Meanwhile the faulting instruction will be
|
||||||
|
// skipped over (effectively turning it into a no-op) and an
|
||||||
|
// asynchronous exception will be raised which the thread will
|
||||||
|
// handle at a later point. If the instruction is a load it will
|
||||||
|
// return garbage.
|
||||||
|
|
||||||
|
// Request an async exception.
|
||||||
|
thread->set_pending_unsafe_access_error();
|
||||||
|
|
||||||
|
// Return address of next instruction to execute.
|
||||||
|
return next_pc;
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
|
void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
|
||||||
const BasicType* sig_bt,
|
const BasicType* sig_bt,
|
||||||
|
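How the new SharedRuntime::handle_unsafe_access() gets called is not part of the hunks shown here; the sketch below is a hypothetical caller, written as comments only, of the kind of per-platform fault handler that would use it to skip the faulting instruction and let the asynchronous error surface later:

    // Hypothetical caller (not taken from this change): a platform fault
    // handler that has already determined the SIGBUS/SIGSEGV came from an
    // Unsafe access (thread->doing_unsafe_access() is set) asks SharedRuntime
    // for the resume address and patches the saved PC.
    //
    //   if (thread->doing_unsafe_access()) {
    //     address next_pc = ...;   // platform-specific: instruction after the fault
    //     stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    //   }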
@ -522,6 +522,8 @@ class SharedRuntime: AllStatic {
|
|||||||
static address handle_wrong_method_abstract(JavaThread* thread);
|
static address handle_wrong_method_abstract(JavaThread* thread);
|
||||||
static address handle_wrong_method_ic_miss(JavaThread* thread);
|
static address handle_wrong_method_ic_miss(JavaThread* thread);
|
||||||
|
|
||||||
|
static address handle_unsafe_access(JavaThread* thread, address next_pc);
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
|
||||||
// Collect and print inline cache miss statistics
|
// Collect and print inline cache miss statistics
|
||||||
|
@ -55,7 +55,6 @@ address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
|
|||||||
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
|
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
|
||||||
address StubRoutines::_throw_StackOverflowError_entry = NULL;
|
address StubRoutines::_throw_StackOverflowError_entry = NULL;
|
||||||
address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
|
address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
|
||||||
address StubRoutines::_handler_for_unsafe_access_entry = NULL;
|
|
||||||
jint StubRoutines::_verify_oop_count = 0;
|
jint StubRoutines::_verify_oop_count = 0;
|
||||||
address StubRoutines::_verify_oop_subroutine_entry = NULL;
|
address StubRoutines::_verify_oop_subroutine_entry = NULL;
|
||||||
address StubRoutines::_atomic_xchg_entry = NULL;
|
address StubRoutines::_atomic_xchg_entry = NULL;
|
||||||
|
@ -111,7 +111,6 @@ class StubRoutines: AllStatic {
|
|||||||
static address _throw_NullPointerException_at_call_entry;
|
static address _throw_NullPointerException_at_call_entry;
|
||||||
static address _throw_StackOverflowError_entry;
|
static address _throw_StackOverflowError_entry;
|
||||||
static address _throw_delayed_StackOverflowError_entry;
|
static address _throw_delayed_StackOverflowError_entry;
|
||||||
static address _handler_for_unsafe_access_entry;
|
|
||||||
|
|
||||||
static address _atomic_xchg_entry;
|
static address _atomic_xchg_entry;
|
||||||
static address _atomic_xchg_ptr_entry;
|
static address _atomic_xchg_ptr_entry;
|
||||||
@ -288,10 +287,6 @@ class StubRoutines: AllStatic {
|
|||||||
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
|
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
|
||||||
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
|
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
|
||||||
|
|
||||||
// Exceptions during unsafe access - should throw Java exception rather
|
|
||||||
// than crash.
|
|
||||||
static address handler_for_unsafe_access() { return _handler_for_unsafe_access_entry; }
|
|
||||||
|
|
||||||
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
|
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
|
||||||
static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; }
|
static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; }
|
||||||
static address atomic_store_entry() { return _atomic_store_entry; }
|
static address atomic_store_entry() { return _atomic_store_entry; }
|
||||||
|
@ -317,14 +317,6 @@ class vframeStreamCommon : StackObj {
|
|||||||
intptr_t* frame_id() const { return _frame.id(); }
|
intptr_t* frame_id() const { return _frame.id(); }
|
||||||
address frame_pc() const { return _frame.pc(); }
|
address frame_pc() const { return _frame.pc(); }
|
||||||
|
|
||||||
javaVFrame* java_frame() {
|
|
||||||
vframe* vf = vframe::new_vframe(&_frame, &_reg_map, _thread);
|
|
||||||
if (vf->is_java_frame()) {
|
|
||||||
return (javaVFrame*)vf;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
CodeBlob* cb() const { return _frame.cb(); }
|
CodeBlob* cb() const { return _frame.cb(); }
|
||||||
CompiledMethod* nm() const {
|
CompiledMethod* nm() const {
|
||||||
assert( cb() != NULL && cb()->is_compiled(), "usage");
|
assert( cb() != NULL && cb()->is_compiled(), "usage");
|
||||||
|
@ -290,8 +290,8 @@ const char* VMError::_current_step_info;
|
|||||||
|
|
||||||
void VMError::report(outputStream* st, bool _verbose) {
|
void VMError::report(outputStream* st, bool _verbose) {
|
||||||
|
|
||||||
# define BEGIN if (_current_step == 0) { _current_step = 1;
|
# define BEGIN if (_current_step == 0) { _current_step = __LINE__;
|
||||||
# define STEP(n, s) } if (_current_step < n) { _current_step = n; _current_step_info = s;
|
# define STEP(s) } if (_current_step < __LINE__) { _current_step = __LINE__; _current_step_info = s;
|
||||||
# define END }
|
# define END }
|
||||||
|
|
||||||
// don't allocate large buffer on stack
|
// don't allocate large buffer on stack
|
||||||
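The reworked BEGIN/STEP/END macros key each step on __LINE__ instead of a hand-maintained number, so steps can be inserted or reordered without renumbering and a report that is re-entered after a secondary crash resumes after the last step it attempted. A simplified standalone sketch of the same technique (not the HotSpot macros, which also record _current_step_info):

    #include <cstdio>

    static int current_step = 0;

    // Each STEP runs only if its source line is beyond the last completed
    // step, then records its own line as the new high-water mark.
    #define BEGIN   if (current_step == 0) { current_step = __LINE__;
    #define STEP(s) } if (current_step < __LINE__) { current_step = __LINE__; std::puts(s);
    #define END     }

    void report() {
      BEGIN
      STEP("printing fatal error message")
      STEP("printing current thread and pid")
      STEP("printing end marker")
      END
    }

    int main() {
      report();   // prints all three steps; a re-entrant call would skip completed ones
    }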
@ -299,7 +299,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
|
|
||||||
BEGIN
|
BEGIN
|
||||||
|
|
||||||
STEP(10, "(printing fatal error message)")
|
STEP("printing fatal error message")
|
||||||
|
|
||||||
st->print_cr("#");
|
st->print_cr("#");
|
||||||
if (should_report_bug(_id)) {
|
if (should_report_bug(_id)) {
|
||||||
@ -314,21 +314,21 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
|
|
||||||
// test secondary error handling. Test it twice, to test that resetting
|
// test secondary error handling. Test it twice, to test that resetting
|
||||||
// error handler after a secondary crash works.
|
// error handler after a secondary crash works.
|
||||||
STEP(20, "(test secondary crash 1)")
|
STEP("test secondary crash 1")
|
||||||
if (_verbose && TestCrashInErrorHandler != 0) {
|
if (_verbose && TestCrashInErrorHandler != 0) {
|
||||||
st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
|
st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
|
||||||
TestCrashInErrorHandler);
|
TestCrashInErrorHandler);
|
||||||
controlled_crash(TestCrashInErrorHandler);
|
controlled_crash(TestCrashInErrorHandler);
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(30, "(test secondary crash 2)")
|
STEP("test secondary crash 2")
|
||||||
if (_verbose && TestCrashInErrorHandler != 0) {
|
if (_verbose && TestCrashInErrorHandler != 0) {
|
||||||
st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
|
st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
|
||||||
TestCrashInErrorHandler);
|
TestCrashInErrorHandler);
|
||||||
controlled_crash(TestCrashInErrorHandler);
|
controlled_crash(TestCrashInErrorHandler);
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(40, "(test safefetch in error handler)")
|
STEP("test safefetch in error handler")
|
||||||
// test whether it is safe to use SafeFetch32 in Crash Handler. Test twice
|
// test whether it is safe to use SafeFetch32 in Crash Handler. Test twice
|
||||||
// to test that resetting the signal handler works correctly.
|
// to test that resetting the signal handler works correctly.
|
||||||
if (_verbose && TestSafeFetchInErrorHandler) {
|
if (_verbose && TestSafeFetchInErrorHandler) {
|
||||||
@ -349,7 +349,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
#endif // PRODUCT
|
#endif // PRODUCT
|
||||||
|
|
||||||
STEP(50, "(printing type of error)")
|
STEP("printing type of error")
|
||||||
|
|
||||||
switch(_id) {
|
switch(_id) {
|
||||||
case OOM_MALLOC_ERROR:
|
case OOM_MALLOC_ERROR:
|
||||||
@ -384,7 +384,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(60, "(printing exception/signal name)")
|
STEP("printing exception/signal name")
|
||||||
|
|
||||||
st->print_cr("#");
|
st->print_cr("#");
|
||||||
st->print("# ");
|
st->print("# ");
|
||||||
@ -414,14 +414,14 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(70, "(printing current thread and pid)")
|
STEP("printing current thread and pid")
|
||||||
|
|
||||||
// process id, thread id
|
// process id, thread id
|
||||||
st->print(", pid=%d", os::current_process_id());
|
st->print(", pid=%d", os::current_process_id());
|
||||||
st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
|
st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
|
||||||
st->cr();
|
st->cr();
|
||||||
|
|
||||||
STEP(80, "(printing error message)")
|
STEP("printing error message")
|
||||||
|
|
||||||
if (should_report_bug(_id)) { // already printed the message.
|
if (should_report_bug(_id)) { // already printed the message.
|
||||||
// error message
|
// error message
|
||||||
@ -432,11 +432,11 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(90, "(printing Java version string)")
|
STEP("printing Java version string")
|
||||||
|
|
||||||
report_vm_version(st, buf, sizeof(buf));
|
report_vm_version(st, buf, sizeof(buf));
|
||||||
|
|
||||||
STEP(100, "(printing problematic frame)")
|
STEP("printing problematic frame")
|
||||||
|
|
||||||
// Print current frame if we have a context (i.e. it's a crash)
|
// Print current frame if we have a context (i.e. it's a crash)
|
||||||
if (_context) {
|
if (_context) {
|
||||||
@ -448,7 +448,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->print_cr("#");
|
st->print_cr("#");
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(110, "(printing core file information)")
|
STEP("printing core file information")
|
||||||
st->print("# ");
|
st->print("# ");
|
||||||
if (CreateCoredumpOnCrash) {
|
if (CreateCoredumpOnCrash) {
|
||||||
if (coredump_status) {
|
if (coredump_status) {
|
||||||
@ -462,13 +462,13 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
st->print_cr("#");
|
st->print_cr("#");
|
||||||
|
|
||||||
STEP(120, "(printing bug submit message)")
|
STEP("printing bug submit message")
|
||||||
|
|
||||||
if (should_report_bug(_id) && _verbose) {
|
if (should_report_bug(_id) && _verbose) {
|
||||||
print_bug_submit_message(st, _thread);
|
print_bug_submit_message(st, _thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(130, "(printing summary)" )
|
STEP("printing summary")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
st->cr();
|
st->cr();
|
||||||
@ -476,7 +476,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(140, "(printing VM option summary)" )
|
STEP("printing VM option summary")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
// VM options
|
// VM options
|
||||||
@ -484,20 +484,20 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(150, "(printing summary machine and OS info)")
|
STEP("printing summary machine and OS info")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
os::print_summary_info(st, buf, sizeof(buf));
|
os::print_summary_info(st, buf, sizeof(buf));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
STEP(160, "(printing date and time)" )
|
STEP("printing date and time")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
os::print_date_and_time(st, buf, sizeof(buf));
|
os::print_date_and_time(st, buf, sizeof(buf));
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(170, "(printing thread)" )
|
STEP("printing thread")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
st->cr();
|
st->cr();
|
||||||
@ -505,7 +505,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(180, "(printing current thread)" )
|
STEP("printing current thread")
|
||||||
|
|
||||||
// current thread
|
// current thread
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
@ -519,7 +519,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(190, "(printing current compile task)" )
|
STEP("printing current compile task")
|
||||||
|
|
||||||
if (_verbose && _thread && _thread->is_Compiler_thread()) {
|
if (_verbose && _thread && _thread->is_Compiler_thread()) {
|
||||||
CompilerThread* t = (CompilerThread*)_thread;
|
CompilerThread* t = (CompilerThread*)_thread;
|
||||||
@ -532,7 +532,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
STEP(200, "(printing stack bounds)" )
|
STEP("printing stack bounds")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
st->print("Stack: ");
|
st->print("Stack: ");
|
||||||
@ -563,7 +563,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(210, "(printing native stack)" )
|
STEP("printing native stack")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
|
if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
|
||||||
@ -577,13 +577,13 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(220, "(printing Java stack)" )
|
STEP("printing Java stack")
|
||||||
|
|
||||||
if (_verbose && _thread && _thread->is_Java_thread()) {
|
if (_verbose && _thread && _thread->is_Java_thread()) {
|
||||||
print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
|
print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(230, "(printing target Java thread stack)" )
|
STEP("printing target Java thread stack")
|
||||||
|
|
||||||
// printing Java thread stack trace if it is involved in GC crash
|
// printing Java thread stack trace if it is involved in GC crash
|
||||||
if (_verbose && _thread && (_thread->is_Named_thread())) {
|
if (_verbose && _thread && (_thread->is_Named_thread())) {
|
||||||
@ -594,7 +594,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(240, "(printing siginfo)" )
|
STEP("printing siginfo")
|
||||||
|
|
||||||
// signal no, signal code, address that caused the fault
|
// signal no, signal code, address that caused the fault
|
||||||
if (_verbose && _siginfo) {
|
if (_verbose && _siginfo) {
|
||||||
@ -603,7 +603,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(245, "(CDS archive access warning)" )
|
STEP("CDS archive access warning")
|
||||||
|
|
||||||
// Print an explicit hint if we crashed on access to the CDS archive.
|
// Print an explicit hint if we crashed on access to the CDS archive.
|
||||||
if (_verbose && _siginfo) {
|
if (_verbose && _siginfo) {
|
||||||
@ -611,7 +611,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(250, "(printing register info)")
|
STEP("printing register info")
|
||||||
|
|
||||||
// decode register contents if possible
|
// decode register contents if possible
|
||||||
if (_verbose && _context && Universe::is_fully_initialized()) {
|
if (_verbose && _context && Universe::is_fully_initialized()) {
|
||||||
@ -619,7 +619,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(260, "(printing registers, top of stack, instructions near pc)")
|
STEP("printing registers, top of stack, instructions near pc")
|
||||||
|
|
||||||
// registers, top of stack, instructions near pc
|
// registers, top of stack, instructions near pc
|
||||||
if (_verbose && _context) {
|
if (_verbose && _context) {
|
||||||
@ -627,7 +627,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(265, "(printing code blob if possible)")
|
STEP("printing code blob if possible")
|
||||||
|
|
||||||
if (_verbose && _context) {
|
if (_verbose && _context) {
|
||||||
CodeBlob* cb = CodeCache::find_blob(_pc);
|
CodeBlob* cb = CodeCache::find_blob(_pc);
|
||||||
@ -652,7 +652,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(270, "(printing VM operation)" )
|
STEP("printing VM operation")
|
||||||
|
|
||||||
if (_verbose && _thread && _thread->is_VM_thread()) {
|
if (_verbose && _thread && _thread->is_VM_thread()) {
|
||||||
VMThread* t = (VMThread*)_thread;
|
VMThread* t = (VMThread*)_thread;
|
||||||
@ -664,7 +664,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(280, "(printing process)" )
|
STEP("printing process")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
st->cr();
|
st->cr();
|
||||||
@ -672,7 +672,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(290, "(printing all threads)" )
|
STEP("printing all threads")
|
||||||
|
|
||||||
// all threads
|
// all threads
|
||||||
if (_verbose && _thread) {
|
if (_verbose && _thread) {
|
||||||
@ -680,7 +680,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(300, "(printing VM state)" )
|
STEP("printing VM state")
|
||||||
|
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
// Safepoint state
|
// Safepoint state
|
||||||
@ -702,7 +702,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(310, "(printing owned locks on error)" )
|
STEP("printing owned locks on error")
|
||||||
|
|
||||||
// mutexes/monitors that currently have an owner
|
// mutexes/monitors that currently have an owner
|
||||||
if (_verbose) {
|
if (_verbose) {
|
||||||
@ -710,7 +710,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(320, "(printing number of OutOfMemoryError and StackOverflow exceptions)")
|
STEP("printing number of OutOfMemoryError and StackOverflow exceptions")
|
||||||
|
|
||||||
if (_verbose && Exceptions::has_exception_counts()) {
|
if (_verbose && Exceptions::has_exception_counts()) {
|
||||||
st->print_cr("OutOfMemory and StackOverflow Exception counts:");
|
st->print_cr("OutOfMemory and StackOverflow Exception counts:");
|
||||||
@ -718,7 +718,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(330, "(printing compressed oops mode")
|
STEP("printing compressed oops mode")
|
||||||
|
|
||||||
if (_verbose && UseCompressedOops) {
|
if (_verbose && UseCompressedOops) {
|
||||||
Universe::print_compressed_oops_mode(st);
|
Universe::print_compressed_oops_mode(st);
|
||||||
@ -728,7 +728,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(340, "(printing heap information)" )
|
STEP("printing heap information")
|
||||||
|
|
||||||
if (_verbose && Universe::is_fully_initialized()) {
|
if (_verbose && Universe::is_fully_initialized()) {
|
||||||
Universe::heap()->print_on_error(st);
|
Universe::heap()->print_on_error(st);
|
||||||
@ -737,7 +737,7 @@ void VMError::report(outputStream* st, bool _verbose) {
|
|||||||
st->cr();
|
st->cr();
|
||||||
}
|
}
|
||||||
|
|
||||||
STEP(350, "(printing code cache information)" )
|
STEP("printing code cache information")
|
||||||
|
|
||||||
if (_verbose && Universe::is_fully_initialized()) {
|
if (_verbose && Universe::is_fully_initialized()) {
|
||||||
  // print code cache information before vm abort

@ -745,14 +745,14 @@ void VMError::report(outputStream* st, bool _verbose) {
     st->cr();
   }

-  STEP(360, "(printing ring buffers)" )
+  STEP("printing ring buffers")

   if (_verbose) {
     Events::print_all(st);
     st->cr();
   }

-  STEP(370, "(printing dynamic libraries)" )
+  STEP("printing dynamic libraries")

   if (_verbose) {
     // dynamic libraries, or memory map
@ -760,7 +760,7 @@ void VMError::report(outputStream* st, bool _verbose) {
     st->cr();
   }

-  STEP(380, "(printing VM options)" )
+  STEP("printing VM options")

   if (_verbose) {
     // VM options
@ -768,40 +768,40 @@ void VMError::report(outputStream* st, bool _verbose) {
     st->cr();
   }

-  STEP(390, "(printing warning if internal testing API used)" )
+  STEP("printing warning if internal testing API used")

   if (WhiteBox::used()) {
     st->print_cr("Unsupported internal testing APIs have been used.");
     st->cr();
   }

-  STEP(395, "(printing log configuration)")
+  STEP("printing log configuration")
   if (_verbose){
     st->print_cr("Logging:");
     LogConfiguration::describe_current_configuration(st);
     st->cr();
   }

-  STEP(400, "(printing all environment variables)" )
+  STEP("printing all environment variables")

   if (_verbose) {
     os::print_environment_variables(st, env_list);
     st->cr();
   }

-  STEP(410, "(printing signal handlers)" )
+  STEP("printing signal handlers")

   if (_verbose) {
     os::print_signal_handlers(st, buf, sizeof(buf));
     st->cr();
   }

-  STEP(420, "(Native Memory Tracking)" )
+  STEP("Native Memory Tracking")
   if (_verbose) {
     MemTracker::error_report(st);
   }

-  STEP(430, "(printing system)" )
+  STEP("printing system")

   if (_verbose) {
     st->cr();
@ -809,27 +809,27 @@ void VMError::report(outputStream* st, bool _verbose) {
     st->cr();
   }

-  STEP(440, "(printing OS information)" )
+  STEP("printing OS information")

   if (_verbose) {
     os::print_os_info(st);
     st->cr();
   }

-  STEP(450, "(printing CPU info)" )
+  STEP("printing CPU info")
   if (_verbose) {
     os::print_cpu_info(st, buf, sizeof(buf));
     st->cr();
   }

-  STEP(460, "(printing memory info)" )
+  STEP("printing memory info")

   if (_verbose) {
     os::print_memory_info(st);
     st->cr();
   }

-  STEP(470, "(printing internal vm info)" )
+  STEP("printing internal vm info")

   if (_verbose) {
     st->print_cr("vm_info: %s", Abstract_VM_Version::internal_vm_info_string());
@ -837,7 +837,7 @@ void VMError::report(outputStream* st, bool _verbose) {
   }

   // print a defined marker to show that error handling finished correctly.
-  STEP(480, "(printing end marker)" )
+  STEP("printing end marker")

   if (_verbose) {
     st->print_cr("END.");
@ -858,35 +858,35 @@ void VMError::print_vm_info(outputStream* st) {
   char buf[O_BUFLEN];
   report_vm_version(st, buf, sizeof(buf));

-  // STEP("(printing summary)")
+  // STEP("printing summary")

   st->cr();
   st->print_cr("--------------- S U M M A R Y ------------");
   st->cr();

-  // STEP("(printing VM option summary)")
+  // STEP("printing VM option summary")

   // VM options
   Arguments::print_summary_on(st);
   st->cr();

-  // STEP("(printing summary machine and OS info)")
+  // STEP("printing summary machine and OS info")

   os::print_summary_info(st, buf, sizeof(buf));

-  // STEP("(printing date and time)")
+  // STEP("printing date and time")

   os::print_date_and_time(st, buf, sizeof(buf));

-  // Skip: STEP("(printing thread)")
+  // Skip: STEP("printing thread")

-  // STEP("(printing process)")
+  // STEP("printing process")

   st->cr();
   st->print_cr("--------------- P R O C E S S ---------------");
   st->cr();

-  // STEP("(printing number of OutOfMemoryError and StackOverflow exceptions)")
+  // STEP("printing number of OutOfMemoryError and StackOverflow exceptions")

   if (Exceptions::has_exception_counts()) {
     st->print_cr("OutOfMemory and StackOverflow Exception counts:");
@ -894,7 +894,7 @@ void VMError::print_vm_info(outputStream* st) {
     st->cr();
   }

-  // STEP("(printing compressed oops mode")
+  // STEP("printing compressed oops mode")

   if (UseCompressedOops) {
     Universe::print_compressed_oops_mode(st);
@ -904,7 +904,7 @@ void VMError::print_vm_info(outputStream* st) {
     st->cr();
   }

-  // STEP("(printing heap information)")
+  // STEP("printing heap information")

   if (Universe::is_fully_initialized()) {
     Universe::heap()->print_on_error(st);
@ -913,7 +913,7 @@ void VMError::print_vm_info(outputStream* st) {
     st->cr();
   }

-  // STEP("(printing code cache information)")
+  // STEP("printing code cache information")

   if (Universe::is_fully_initialized()) {
     // print code cache information before vm abort
@ -921,77 +921,77 @@ void VMError::print_vm_info(outputStream* st) {
     st->cr();
   }

-  // STEP("(printing ring buffers)")
+  // STEP("printing ring buffers")

   Events::print_all(st);
   st->cr();

-  // STEP("(printing dynamic libraries)")
+  // STEP("printing dynamic libraries")

   // dynamic libraries, or memory map
   os::print_dll_info(st);
   st->cr();

-  // STEP("(printing VM options)")
+  // STEP("printing VM options")

   // VM options
   Arguments::print_on(st);
   st->cr();

-  // STEP("(printing warning if internal testing API used)")
+  // STEP("printing warning if internal testing API used")

   if (WhiteBox::used()) {
     st->print_cr("Unsupported internal testing APIs have been used.");
     st->cr();
   }

-  // STEP("(printing log configuration)")
+  // STEP("printing log configuration")
   st->print_cr("Logging:");
   LogConfiguration::describe(st);
   st->cr();

-  // STEP("(printing all environment variables)")
+  // STEP("printing all environment variables")

   os::print_environment_variables(st, env_list);
   st->cr();

-  // STEP("(printing signal handlers)")
+  // STEP("printing signal handlers")

   os::print_signal_handlers(st, buf, sizeof(buf));
   st->cr();

-  // STEP("(Native Memory Tracking)")
+  // STEP("Native Memory Tracking")

   MemTracker::error_report(st);

-  // STEP("(printing system)")
+  // STEP("printing system")

   st->cr();
   st->print_cr("--------------- S Y S T E M ---------------");
   st->cr();

-  // STEP("(printing OS information)")
+  // STEP("printing OS information")

   os::print_os_info(st);
   st->cr();

-  // STEP("(printing CPU info)")
+  // STEP("printing CPU info")

   os::print_cpu_info(st, buf, sizeof(buf));
   st->cr();

-  // STEP("(printing memory info)")
+  // STEP("printing memory info")

   os::print_memory_info(st);
   st->cr();

-  // STEP("(printing internal vm info)")
+  // STEP("printing internal vm info")

   st->print_cr("vm_info: %s", Abstract_VM_Version::internal_vm_info_string());
   st->cr();

   // print a defined marker to show that error handling finished correctly.
-  // STEP("(printing end marker)")
+  // STEP("printing end marker")

   st->print_cr("END.");
 }
@ -1190,7 +1190,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt
   }

   jio_snprintf(buffer, sizeof(buffer),
-              "[error occurred during error reporting %s, id 0x%x]",
+              "[error occurred during error reporting (%s), id 0x%x]",
               _current_step_info, _id);
   if (log.is_open()) {
     log.cr();
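The hunks above drop the hand-maintained numeric ids from the reporting steps (STEP(360, "(printing ring buffers)") becomes STEP("printing ring buffers")), and the report_and_die message format wraps the now purely textual step info in parentheses. Below is a minimal, self-contained sketch of how a string-only STEP macro can still order steps and resume after a crash, keying each step on __LINE__ instead of an explicit number. The BEGIN/STEP/END names and the bookkeeping shown are assumptions for illustration, not the verbatim HotSpot definitions.

// Illustrative sketch only (assumed names: BEGIN/STEP/END, _current_step,
// _current_step_info); not the verbatim HotSpot macros. The idea: __LINE__
// supplies a unique, monotonically increasing id per step, so explicit
// numbers (360, 370, ...) no longer need to be maintained by hand.
#include <cstdio>

static int         _current_step      = 0;
static const char* _current_step_info = "";

#define BEGIN   if (_current_step == 0) { _current_step = __LINE__;
#define STEP(s) } if (_current_step < __LINE__) { \
                  _current_step = __LINE__; _current_step_info = s;
#define END     }

static void report() {
  BEGIN

  STEP("printing ring buffers")
    printf("step: %s\n", _current_step_info);   // step body runs at most once

  STEP("printing end marker")
    printf("step: %s\n", _current_step_info);

  END
}

int main() {
  report();   // a second call skips steps that already ran, which is how a
              // reporter can resume after crashing inside a step
  printf("[error occurred during error reporting (%s), id 0x%x]\n",
         _current_step_info, 0);                // mirrors the new message format
  return 0;
}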
@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8136421
+ * @ignore 8155216
  * @requires (os.simpleArch == "x64" | os.simpleArch == "sparcv9" | os.simpleArch == "aarch64")
  * @library / /testlibrary /test/lib/
  * @library ../common/patches
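This second hunk edits a jtreg test header (the filename is not shown in this view). Adding the @ignore tag makes jtreg skip the test by default; the trailing value, 8155216 here, conventionally records the bug id that tracks why the test is excluded.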