8353174: Clean up thread register handling after 32-bit x86 removal

Reviewed-by: cslucas, kvn, vlivanov
Aleksey Shipilev 2025-04-09 07:28:47 +00:00
parent 0f70aae1cc
commit 6df34c361e
15 changed files with 135 additions and 217 deletions
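
With the 32-bit port gone, x86-64 HotSpot can always rely on r15 holding the current JavaThread, so the explicit thread-register parameters threaded through the assembler helpers carry no information. The change below applies one pattern over and over: drop the parameter and pin the register inside the helper. A minimal standalone C++ sketch of that pattern (the types, names and printed "assembly" are illustrative only, not HotSpot code):

    #include <cstdio>

    struct Register { const char* name; };
    static const Register r15_thread{"r15"};

    // Before the cleanup: every call site passed the thread register explicitly,
    // even though on x86-64 it could only ever be r15_thread.
    void clinit_barrier_old(Register klass, Register thread) {
      std::printf("cmp %s, [%s + init_thread_offset]\n", thread.name, klass.name);
    }

    // After the cleanup: the thread register is implied by the helper itself.
    void clinit_barrier_new(Register klass) {
      const Register thread = r15_thread;  // always the current JavaThread on x86-64
      std::printf("cmp %s, [%s + init_thread_offset]\n", thread.name, klass.name);
    }

    int main() {
      Register klass{"rscratch1"};
      clinit_barrier_old(klass, r15_thread);  // old shape: redundant argument
      clinit_barrier_new(klass);              // new shape: one parameter fewer
    }

The same signature trim shows up below for safepoint_poll, lightweight_lock/unlock, resolve_jobject, get_vm_result and friends.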

@ -326,7 +326,7 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
Register klass = rscratch1;
__ mov_metadata(klass, method->holder()->constant_encoding());
__ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
@ -482,7 +482,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
code_stub->set_safepoint_offset(__ offset());
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub->entry(), r15_thread, true /* at_return */, true /* in_nmethod */);
__ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
__ ret(0);
}

@ -62,8 +62,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
}
if (LockingMode == LM_LIGHTWEIGHT) {
const Register thread = r15_thread;
lightweight_lock(disp_hdr, obj, hdr, thread, tmp, slow_case);
lightweight_lock(disp_hdr, obj, hdr, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
// Load object header
@ -128,7 +127,7 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
lightweight_unlock(obj, disp_hdr, hdr, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to

@ -51,7 +51,7 @@
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
// setup registers
const Register thread = r15_thread; // is callee-saved register (Visual C++ calling conventions)
const Register thread = r15_thread;
assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
assert(args_size >= 0, "illegal args_size");
@ -66,11 +66,11 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
int call_offset = -1;
if (!align_stack) {
set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
set_last_Java_frame(noreg, rbp, nullptr, rscratch1);
} else {
address the_pc = pc();
call_offset = offset();
set_last_Java_frame(thread, noreg, rbp, the_pc, rscratch1);
set_last_Java_frame(noreg, rbp, the_pc, rscratch1);
andptr(rsp, -(StackAlignmentInBytes)); // Align stack
}
@ -84,7 +84,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
guarantee(thread != rax, "change this code");
push(rax);
{ Label L;
get_thread(rax);
get_thread_slow(rax);
cmpptr(thread, rax);
jcc(Assembler::equal, L);
int3();
@ -93,7 +93,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
}
pop(rax);
#endif
reset_last_Java_frame(thread, true);
reset_last_Java_frame(true);
// check for pending exceptions
{ Label L;
@ -120,10 +120,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
}
// get oop results if there are any and reset the values in the thread
if (oop_result1->is_valid()) {
get_vm_result(oop_result1, thread);
get_vm_result(oop_result1);
}
if (metadata_result->is_valid()) {
get_vm_result_2(metadata_result, thread);
get_vm_result_2(metadata_result);
}
assert(call_offset >= 0, "Should be set");
@ -715,8 +715,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
const Register thread = r15_thread;
// No need to worry about dummy
__ mov(c_rarg0, thread);
__ set_last_Java_frame(thread, noreg, rbp, nullptr, rscratch1);
__ set_last_Java_frame(noreg, rbp, nullptr, rscratch1);
// do the call
__ call(RuntimeAddress(target));
OopMapSet* oop_maps = new OopMapSet();
@ -726,7 +725,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
guarantee(thread != rax, "change this code");
__ push(rax);
{ Label L;
__ get_thread(rax);
__ get_thread_slow(rax);
__ cmpptr(thread, rax);
__ jcc(Assembler::equal, L);
__ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
@ -734,7 +733,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
}
__ pop(rax);
#endif
__ reset_last_Java_frame(thread, true);
__ reset_last_Java_frame(true);
// check for pending exceptions
{ Label L;

@ -291,7 +291,7 @@ void DowncallLinker::StubGenerator::generate() {
Assembler::StoreLoad | Assembler::StoreStore));
}
__ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, false /* in_nmethod */);
__ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);
@ -305,7 +305,7 @@ void DowncallLinker::StubGenerator::generate() {
__ jcc(Assembler::equal, L_reguard);
__ bind(L_after_reguard);
__ reset_last_Java_frame(r15_thread, true);
__ reset_last_Java_frame(true);
__ block_comment("} thread native2java");
}

@ -296,7 +296,6 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
}
void InterpreterMacroAssembler::call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
@ -319,7 +318,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
}
#endif /* ASSERT */
// super call
MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
MacroAssembler::call_VM_base(oop_result, last_java_sp,
entry_point, number_of_arguments,
check_exceptions);
// interpreter specific
@ -379,7 +378,7 @@ void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
}
}
void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
void InterpreterMacroAssembler::check_and_handle_popframe() {
if (JvmtiExport::can_pop_frame()) {
Label L;
// Initiate popframe handling only if it is not already being
@ -389,7 +388,7 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
// This method is only called just after the call into the vm in
// call_VM_base, so the arg registers are available.
Register pop_cond = c_rarg0;
movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
movl(pop_cond, Address(r15_thread, JavaThread::popframe_condition_offset()));
testl(pop_cond, JavaThread::popframe_pending_bit);
jcc(Assembler::zero, L);
testl(pop_cond, JavaThread::popframe_processing_bit);
@ -430,7 +429,7 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
}
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
void InterpreterMacroAssembler::check_and_handle_earlyret() {
if (JvmtiExport::can_force_early_return()) {
Label L;
Register tmp = c_rarg0;
@ -810,13 +809,13 @@ void InterpreterMacroAssembler::remove_activation(
// the stack, will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, rthread, true /* at_return */, false /* in_nmethod */);
safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
jmp(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
reset_last_Java_frame(rthread, true);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
@ -1031,8 +1030,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
}
if (LockingMode == LM_LIGHTWEIGHT) {
const Register thread = r15_thread;
lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case);
lightweight_lock(lock_reg, obj_reg, swap_reg, tmp_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
movl(swap_reg, 1);
@ -1141,7 +1139,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,

@ -42,7 +42,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
protected:
virtual void call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
@ -58,8 +57,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void jump_to_entry(address entry);
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
void load_earlyret_value(TosState state);

@ -724,17 +724,6 @@ void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
}
}
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
reset_last_Java_frame(r15_thread, clear_fp);
}
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
address last_java_pc,
Register rscratch) {
set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch);
}
static void pass_arg0(MacroAssembler* masm, Register arg) {
if (c_rarg0 != arg ) {
masm->mov(c_rarg0, arg);
@ -1487,8 +1476,7 @@ void MacroAssembler::call_VM(Register oop_result,
address entry_point,
int number_of_arguments,
bool check_exceptions) {
Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}
void MacroAssembler::call_VM(Register oop_result,
@ -1533,8 +1521,7 @@ void MacroAssembler::super_call_VM(Register oop_result,
address entry_point,
int number_of_arguments,
bool check_exceptions) {
Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}
void MacroAssembler::super_call_VM(Register oop_result,
@ -1575,31 +1562,22 @@ void MacroAssembler::super_call_VM(Register oop_result,
}
void MacroAssembler::call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions) {
// determine java_thread register
if (!java_thread->is_valid()) {
#ifdef _LP64
java_thread = r15_thread;
#else
java_thread = rdi;
get_thread(java_thread);
#endif // LP64
}
Register java_thread = r15_thread;
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// debugging support
assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// r12 is the heapbase.
LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT
assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
@ -1607,53 +1585,42 @@ void MacroAssembler::call_VM_base(Register oop_result,
// push java thread (becomes first argument of C function)
NOT_LP64(push(java_thread); number_of_arguments++);
LP64_ONLY(mov(c_rarg0, r15_thread));
mov(c_rarg0, r15_thread);
// set last Java frame before call
assert(last_java_sp != rbp, "can't use ebp/rbp");
// Only interpreter should have to set fp
set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1);
set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);
// do the call, remove parameters
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
// restore the thread (cannot use the pushed argument since arguments
// may be overwritten by C code generated by an optimizing compiler);
// however can use the register value directly if it is callee saved.
if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
// rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
guarantee(java_thread != rax, "change this code");
push(rax);
{ Label L;
get_thread(rax);
cmpptr(java_thread, rax);
jcc(Assembler::equal, L);
STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
bind(L);
}
pop(rax);
#endif
} else {
get_thread(java_thread);
// Check that thread register is not clobbered.
guarantee(java_thread != rax, "change this code");
push(rax);
{ Label L;
get_thread_slow(rax);
cmpptr(java_thread, rax);
jcc(Assembler::equal, L);
STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
bind(L);
}
pop(rax);
#endif
// reset last Java frame
// Only interpreter should have to clear fp
reset_last_Java_frame(java_thread, true);
reset_last_Java_frame(true);
// C++ interp handles this in the interpreter
check_and_handle_popframe(java_thread);
check_and_handle_earlyret(java_thread);
check_and_handle_popframe();
check_and_handle_earlyret();
if (check_exceptions) {
// check for pending exceptions (java_thread is set upon return)
cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
jump_cc(Assembler::notEqual,
RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
// This used to conditionally jump to forward_exception however it is
// possible if we relocate that the branch will not reach. So we must jump
// around so we can always reach
@ -1662,36 +1629,24 @@ void MacroAssembler::call_VM_base(Register oop_result,
jcc(Assembler::equal, ok);
jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
bind(ok);
#endif // LP64
}
// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
get_vm_result(oop_result, java_thread);
get_vm_result(oop_result);
}
}
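
Stripped of the register bookkeeping, call_VM_base follows a fixed protocol: record the last Java frame, pass the thread as the first C argument, call the entry point, clear the frame anchor, let the interpreter react to PopFrame/ForceEarlyReturn requests, forward any pending exception, and fetch the oop result published in the thread. A condensed standalone model of that sequence (all types and helpers here are illustrative stand-ins, not the HotSpot API):

    // Illustrative stand-in for the JavaThread fields touched by call_VM_base.
    struct ThreadModel {
      void* pending_exception = nullptr;
      void* vm_result         = nullptr;
    };

    // Models the control flow of call_VM_base; the real code emits assembly for
    // every step and jumps to forward_exception_entry() when an exception is set.
    void* call_VM_base_model(ThreadModel& thread,
                             void (*entry_point)(ThreadModel*),
                             bool check_exceptions) {
      // 1. set_last_Java_frame: record sp/fp/pc so the VM can walk this thread
      // 2. the thread itself is the first C argument of the runtime entry point
      entry_point(&thread);
      // 3. reset_last_Java_frame: clear the anchor again
      // 4. check_and_handle_popframe / check_and_handle_earlyret (interpreter only)
      if (check_exceptions && thread.pending_exception != nullptr) {
        return nullptr;  // stands in for the jump to the forward-exception stub
      }
      // 5. get_vm_result: fetch and clear the oop published by the runtime call
      void* oop_result = thread.vm_result;
      thread.vm_result = nullptr;
      return oop_result;
    }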
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
// Calculate the value for last_Java_sp somewhat subtle.
// call_VM does an intermediate call which places a return address on
// the stack just under the stack pointer as the user finished with it.
// This allows use to retrieve last_Java_pc from last_Java_sp[-1].
// Calculate the value for last_Java_sp
// somewhat subtle. call_VM does an intermediate call
// which places a return address on the stack just under the
// stack pointer as the user finished with it. This allows
// use to retrieve last_Java_pc from last_Java_sp[-1].
// On 32bit we then have to push additional args on the stack to accomplish
// the actual requested call. On 64bit call_VM only can use register args
// so the only extra space is the return address that call_VM created.
// This hopefully explains the calculations here.
#ifdef _LP64
// We've pushed one address, correct last_Java_sp
lea(rax, Address(rsp, wordSize));
#else
lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64
call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
}
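
The lea(rax, Address(rsp, wordSize)) above is the whole trick the comment describes: after the intermediate call, rsp points at the pushed return address, so the caller's last_Java_sp is one word above it and last_Java_pc can later be recovered as last_Java_sp[-1]. A small standalone model of that arithmetic (the stack contents and the fake return pc are made up for illustration):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const std::size_t wordSize = 8;            // x86-64 word size
      std::uint64_t stack[4] = {};
      std::uint64_t* caller_sp = &stack[2];      // sp before the intermediate call

      // The intermediate call pushes a return address one slot below caller_sp.
      const std::uint64_t return_pc = 0x1234;
      std::uint64_t* rsp = caller_sp - 1;
      *rsp = return_pc;

      // call_VM_helper: lea(rax, Address(rsp, wordSize)) computes last_Java_sp.
      std::uint64_t* last_Java_sp = reinterpret_cast<std::uint64_t*>(
          reinterpret_cast<std::uintptr_t>(rsp) + wordSize);
      assert(last_Java_sp == caller_sp);

      // last_Java_pc is recoverable as last_Java_sp[-1].
      assert(last_Java_sp[-1] == return_pc);
      return 0;
    }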
// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
@ -1768,21 +1723,21 @@ void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Reg
MacroAssembler::call_VM_leaf_base(entry_point, 4);
}
void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
void MacroAssembler::get_vm_result(Register oop_result) {
movptr(oop_result, Address(r15_thread, JavaThread::vm_result_offset()));
movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
verify_oop_msg(oop_result, "broken oop in call_VM_base");
}
void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
void MacroAssembler::get_vm_result_2(Register metadata_result) {
movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_2_offset()));
movptr(Address(r15_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
void MacroAssembler::check_and_handle_earlyret() {
}
void MacroAssembler::check_and_handle_popframe(Register java_thread) {
void MacroAssembler::check_and_handle_popframe() {
}
void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
@ -3170,20 +3125,16 @@ void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
}
#endif
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
get_thread(java_thread);
}
void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // determine java_thread register
// we must set sp to zero to clear frame
movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
// must clear fp, so that compiled frames are not confused; it is
// possible that we need it only for debugging
if (clear_fp) {
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
vzeroupper();
}
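
The single-argument reset_last_Java_frame is now the only version: it clears the frame-anchor words directly off r15_thread. As plain data manipulation it amounts to the following (the struct mirrors the last_Java_sp/fp/pc offsets used above, but is only an illustrative model):

    #include <cstdint>

    // Illustrative model of the JavaThread frame-anchor fields.
    struct FrameAnchorModel {
      std::uintptr_t last_Java_sp;
      std::uintptr_t last_Java_fp;
      std::uintptr_t last_Java_pc;
    };

    void reset_last_Java_frame_model(FrameAnchorModel& anchor, bool clear_fp) {
      anchor.last_Java_sp = 0;   // clearing sp marks the thread as having no last Java frame
      if (clear_fp) {
        anchor.last_Java_fp = 0; // only the interpreter needs to clear fp
      }
      anchor.last_Java_pc = 0;   // pc may have been set by make_walkable(), so always clear it
    }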
@ -3202,15 +3153,15 @@ void MacroAssembler::save_rax(Register tmp) {
else if (tmp != rax) mov(tmp, rax);
}
void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) {
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
if (at_return) {
// Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
// we may safely use rsp instead to perform the stack watermark check.
cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset()));
cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
jcc(Assembler::above, slow_path);
return;
}
testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
}
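
safepoint_poll likewise reads the polling word straight off r15_thread now. The two flavors it emits correspond to the branch above: at a return site the frame's stack pointer (or rbp for interpreted frames) is compared against the stack watermark stored in the polling word, otherwise only the poll bit is tested. A small model of that decision (the constant and layout are illustrative; only the control flow mirrors the assembly):

    #include <cstdint>

    constexpr std::uintptr_t poll_bit = 1;   // illustrative; SafepointMechanism::poll_bit()

    // Returns true when the generated code would jump to slow_path.
    bool needs_slow_path(std::uintptr_t polling_word, std::uintptr_t sp_or_fp, bool at_return) {
      if (at_return) {
        // Stack-watermark poll: slow path when the returning frame lies above
        // the watermark (cmpptr + jcc(Assembler::above, slow_path)).
        return sp_or_fp > polling_word;
      }
      // Ordinary poll: armed when the poll bit is set in the polling word
      // (testb + jcc(Assembler::notZero, slow_path)).
      return (polling_word & poll_bit) != 0;
    }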
@ -3219,44 +3170,36 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
Register last_java_sp,
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
address last_java_pc,
Register rscratch) {
vzeroupper();
// determine java_thread register
if (!java_thread->is_valid()) {
java_thread = rdi;
get_thread(java_thread);
}
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// last_java_fp is optional
if (last_java_fp->is_valid()) {
movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
}
// last_java_pc is optional
if (last_java_pc != nullptr) {
Address java_pc(java_thread,
Address java_pc(r15_thread,
JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
lea(java_pc, InternalAddress(last_java_pc), rscratch);
}
movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
#ifdef _LP64
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
Label &L,
Register scratch) {
lea(scratch, L);
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch);
set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, nullptr, scratch);
set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch);
}
#endif
void MacroAssembler::shlptr(Register dst, int imm8) {
LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
@ -4111,8 +4054,8 @@ void MacroAssembler::clear_jobject_tag(Register possibly_non_local) {
}
void MacroAssembler::resolve_jobject(Register value,
Register thread,
Register tmp) {
Register thread = r15_thread;
assert_different_registers(value, thread, tmp);
Label done, tagged, weak_tagged;
testptr(value, value);
@ -4144,8 +4087,8 @@ void MacroAssembler::resolve_jobject(Register value,
}
void MacroAssembler::resolve_global_jobject(Register value,
Register thread,
Register tmp) {
Register thread = r15_thread;
assert_different_registers(value, thread, tmp);
Label done;
@ -5476,7 +5419,7 @@ void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
#endif // LP64
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) {
assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required");
Label L_fallthrough;
@ -5492,7 +5435,7 @@ void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fa
jcc(Assembler::equal, *L_fast_path);
// Fast path check: current thread is initializer thread
cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset()));
cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset()));
if (L_slow_path == &L_fallthrough) {
jcc(Assembler::equal, *L_fast_path);
bind(*L_slow_path);
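
The barrier itself is unchanged apart from reading the current thread from r15: the fast path is taken when the class is fully initialized, or when the current thread is the one running the class initializer (so <clinit> may re-enter methods of its own class). A sketch of that decision (the enum and struct are illustrative models, not the InstanceKlass layout):

    // Illustrative model of the two fast-path checks emitted by clinit_barrier.
    enum class InitState { being_initialized, fully_initialized /* ... */ };

    struct KlassModel {
      InitState   init_state;
      const void* init_thread;   // thread currently running <clinit>, if any
    };

    bool clinit_barrier_fast_path(const KlassModel& klass, const void* current_thread) {
      if (klass.init_state == InitState::fully_initialized) {
        return true;                               // first check: class is ready
      }
      // Second check: cmpptr(r15_thread, Address(klass, init_thread_offset()))
      return klass.init_thread == current_thread;  // the initializer thread may proceed
    }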
@ -10746,33 +10689,29 @@ Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond)
}
// This is simply a call to Thread::current()
void MacroAssembler::get_thread(Register thread) {
void MacroAssembler::get_thread_slow(Register thread) {
if (thread != rax) {
push(rax);
}
LP64_ONLY(push(rdi);)
LP64_ONLY(push(rsi);)
push(rdi);
push(rsi);
push(rdx);
push(rcx);
#ifdef _LP64
push(r8);
push(r9);
push(r10);
push(r11);
#endif
MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);
#ifdef _LP64
pop(r11);
pop(r10);
pop(r9);
pop(r8);
#endif
pop(rcx);
pop(rdx);
LP64_ONLY(pop(rsi);)
LP64_ONLY(pop(rdi);)
pop(rsi);
pop(rdi);
if (thread != rax) {
mov(thread, rax);
pop(rax);
@ -10801,7 +10740,9 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) {
Register thread = r15_thread;
assert(reg_rax == rax, "");
assert_different_registers(basic_lock, obj, reg_rax, thread, tmp);
@ -10855,7 +10796,9 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
// reg_rax: rax
// thread: the thread
// tmp: a temporary register
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) {
Register thread = r15_thread;
assert(reg_rax == rax, "");
assert_different_registers(obj, reg_rax, thread, tmp);

@ -59,13 +59,10 @@ class MacroAssembler: public Assembler {
// may customize this version by overriding it for its purposes (e.g., to save/restore
// additional registers when doing a VM call).
//
// If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base
// returns the register which contains the thread upon return. If a thread register has been
// specified, the return value will correspond to that register. If no last_java_sp is specified
// (noreg) than rsp will be used instead.
// call_VM_base returns the register which contains the thread upon return.
// If no last_java_sp is specified (noreg) then rsp will be used instead.
virtual void call_VM_base( // returns the register containing the thread upon return
Register oop_result, // where an oop-result ends up if any; use noreg otherwise
Register java_thread, // the thread if computed before ; use noreg otherwise
Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
address entry_point, // the entry point
int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
@ -85,8 +82,8 @@ class MacroAssembler: public Assembler {
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
virtual void check_and_handle_popframe();
virtual void check_and_handle_earlyret();
Address as_Address(AddressLiteral adr);
Address as_Address(ArrayAddress adr, Register rscratch);
@ -224,9 +221,10 @@ class MacroAssembler: public Assembler {
void enter();
void leave();
// Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
// The pointer will be loaded into the thread register.
void get_thread(Register thread);
// Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
// The pointer will be loaded into the thread register. This is a slow version that does a native call.
// Normally, the JavaThread pointer is available in r15_thread; use that where possible.
void get_thread_slow(Register thread);
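
The rename from get_thread to get_thread_slow makes the cost explicit: it is a genuine C call to Thread::current(), so the generated code has to spill every caller-saved register around it, and it is only worth using where r15 cannot be trusted yet (for example the Win64 stub argument shuffling and the assertion checks seen elsewhere in this change). A rough standalone model of why the save/restore bracket is needed (Thread_current and the register list are illustrative stand-ins, not the real runtime entry):

    // Hypothetical stand-in for Thread::current(); defined here only so the
    // sketch is self-contained.
    static void* Thread_current() {
      static int marker;
      return &marker;
    }

    // Models why get_thread_slow() brackets the call with pushes and pops: a real
    // C call may clobber every caller-saved register, so anything live in rax,
    // rcx, rdx, rsi, rdi and r8-r11 must be preserved manually.
    void* get_thread_slow_model() {
      // ... push caller-saved registers ...
      void* thread = Thread_current();   // result is returned in rax
      // ... pop caller-saved registers in reverse order ...
      return thread;                     // moved into the requested register if it is not rax
    }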
#ifdef _LP64
// Support for argument shuffling
@ -291,8 +289,8 @@ class MacroAssembler: public Assembler {
Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions = true);
void get_vm_result (Register oop_result, Register thread);
void get_vm_result_2(Register metadata_result, Register thread);
void get_vm_result (Register oop_result);
void get_vm_result_2(Register metadata_result);
// These always tightly bind to MacroAssembler::call_VM_base
// bypassing the virtual implementation
@ -323,35 +321,22 @@ class MacroAssembler: public Assembler {
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
// last Java Frame (fills frame anchor)
void set_last_Java_frame(Register thread,
Register last_java_sp,
Register last_java_fp,
address last_java_pc,
Register rscratch);
// thread in the default location (r15_thread on 64bit)
void set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
address last_java_pc,
Register rscratch);
#ifdef _LP64
void set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
Label &last_java_pc,
Register scratch);
#endif
void reset_last_Java_frame(Register thread, bool clear_fp);
// thread in the default location (r15_thread on 64bit)
void reset_last_Java_frame(bool clear_fp);
// jobjects
void clear_jobject_tag(Register possibly_non_local);
void resolve_jobject(Register value, Register thread, Register tmp);
void resolve_global_jobject(Register value, Register thread, Register tmp);
void resolve_jobject(Register value, Register tmp);
void resolve_global_jobject(Register value, Register tmp);
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
void c2bool(Register x);
@ -762,7 +747,6 @@ public:
Label& L_success);
void clinit_barrier(Register klass,
Register thread,
Label* L_fast_path = nullptr,
Label* L_slow_path = nullptr);
@ -837,7 +821,7 @@ public:
// Check for reserved stack access in method being exited (for JIT)
void reserved_stack_check();
void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
void verify_tlab();
@ -2247,8 +2231,8 @@ public:
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
#ifdef _LP64
void save_legacy_gprs();

@ -1104,7 +1104,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Register klass = rscratch1;
__ load_method_holder(klass, method);
__ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
@ -2003,7 +2003,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label L_skip_barrier;
Register klass = r10;
__ mov_metadata(klass, method->method_holder()); // InstanceKlass*
__ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
@ -2280,7 +2280,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ inc_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
__ lightweight_lock(lock_reg, obj_reg, swap_reg, rscratch1, slow_path_lock);
}
// Slow path will re-enter here
@ -2340,7 +2340,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label Continue;
Label slow_path;
__ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
__ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue);
@ -2431,7 +2431,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
__ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
}
// slow path re-enters here
@ -2456,7 +2456,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Unbox oop result, e.g. JNIHandles::resolve value.
if (is_reference_type(ret_type)) {
__ resolve_jobject(rax /* value */,
r15_thread /* thread */,
rcx /* tmp */);
}
@ -3234,7 +3233,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
__ jcc(Assembler::notEqual, pending);
// get the returned Method*
__ get_vm_result_2(rbx, r15_thread);
__ get_vm_result_2(rbx);
__ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
__ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
@ -3661,7 +3660,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
__ reset_last_Java_frame(true);
// rax is jobject handle result, unpack and process it through a barrier.
__ resolve_global_jobject(rax, r15_thread, c_rarg0);
__ resolve_global_jobject(rax, c_rarg0);
__ leave();
__ ret(0);

@ -339,7 +339,7 @@ address StubGenerator::generate_call_stub(address& return_address) {
__ jcc(Assembler::equal, L1);
__ stop("StubRoutines::call_stub: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx);
__ get_thread_slow(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::call_stub: r15_thread is modified by call");
@ -426,7 +426,7 @@ address StubGenerator::generate_catch_exception() {
__ jcc(Assembler::equal, L1);
__ stop("StubRoutines::catch_exception: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx);
__ get_thread_slow(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::catch_exception: r15_thread is modified by call");
@ -1313,7 +1313,7 @@ void StubGenerator::setup_arg_regs_using_thread(int nargs) {
__ mov(rax, r9); // r9 is also saved_r15
}
__ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
__ get_thread(r15_thread);
__ get_thread_slow(r15_thread);
assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
"unexpected argument registers");
__ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
@ -1337,7 +1337,7 @@ void StubGenerator::restore_arg_regs_using_thread() {
assert(_regs_in_thread, "wrong call to restore_arg_regs");
const Register saved_r15 = r9;
#ifdef _WIN64
__ get_thread(r15_thread);
__ get_thread_slow(r15_thread);
__ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
__ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
__ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
@ -3974,7 +3974,7 @@ address StubGenerator::generate_upcall_stub_load_target() {
StubCodeMark mark(this, stub_id);
address start = __ pc();
__ resolve_global_jobject(j_rarg0, r15_thread, rscratch1);
__ resolve_global_jobject(j_rarg0, rscratch1);
// Load target method from receiver
__ load_heap_oop(rbx, Address(j_rarg0, java_lang_invoke_MethodHandle::form_offset()), rscratch1);
__ load_heap_oop(rbx, Address(rbx, java_lang_invoke_LambdaForm::vmentry_offset()), rscratch1);

@ -2476,7 +2476,7 @@ address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *e
#ifdef ASSERT
Label L2;
__ get_thread(r14);
__ get_thread_slow(r14);
__ cmpptr(r15_thread, r14);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::call_stub: r15_thread is modified by call");

@ -204,10 +204,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
}
if (JvmtiExport::can_pop_frame()) {
__ check_and_handle_popframe(r15_thread);
__ check_and_handle_popframe();
}
if (JvmtiExport::can_force_early_return()) {
__ check_and_handle_earlyret(r15_thread);
__ check_and_handle_earlyret();
}
__ dispatch_next(state, step);
@ -991,7 +991,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label Continue;
Label slow_path;
__ safepoint_poll(slow_path, thread, true /* at_return */, false /* in_nmethod */);
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
__ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
__ jcc(Assembler::equal, Continue);
@ -1034,7 +1034,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
}
// reset_last_Java_frame
__ reset_last_Java_frame(thread, true);
__ reset_last_Java_frame(true);
if (CheckJNICalls) {
// clear_pending_jni_exception_check
@ -1057,7 +1057,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ pop(ltos);
// Unbox oop result, e.g. JNIHandles::resolve value.
__ resolve_jobject(rax /* value */,
thread /* thread */,
t /* tmp */);
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded
@ -1495,7 +1494,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
__ reset_last_Java_frame(thread, true);
__ reset_last_Java_frame(true);
// Restore the last_sp and null it out
__ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
@ -1548,7 +1547,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
__ get_vm_result(rax, thread);
__ get_vm_result(rax);
// In between activations - previous activation type unknown yet
// compute continuation point - the continuation point expects the

@ -190,7 +190,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
// c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
Label slow_path;
__ safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@ -234,7 +234,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
// r13: senderSP must preserved for slow path, set SP to it on fast path
Label slow_path;
__ safepoint_poll(slow_path, r15_thread, false /* at_return */, false /* in_nmethod */);
__ safepoint_poll(slow_path, false /* at_return */, false /* in_nmethod */);
// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.

@ -478,7 +478,7 @@ void TemplateTable::condy_helper(Label& Done) {
const Register rarg = c_rarg1;
__ movl(rarg, (int)bytecode());
call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
__ get_vm_result_2(flags, r15_thread);
__ get_vm_result_2(flags);
// VMr = obj = base address to find primitive value to push
// VMr2 = flags = (tos, off) using format of CPCE::_flags
__ movl(off, flags);
@ -2263,12 +2263,10 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
const Register method = temp;
const Register klass = temp;
const Register thread = r15_thread;
assert(thread != noreg, "x86_32 not supported");
__ movptr(method, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
__ load_method_holder(klass, method);
__ clinit_barrier(klass, thread, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
__ clinit_barrier(klass, nullptr /*L_fast_path*/, &L_clinit_barrier_slow);
}
}
@ -3572,7 +3570,7 @@ void TemplateTable::_new() {
// make sure klass is initialized
// init_state needs acquire, but x86 is TSO, and so we are already good.
assert(VM_Version::supports_fast_class_init_checks(), "must support fast class initialization checks");
__ clinit_barrier(rcx, r15_thread, nullptr /*L_fast_path*/, &slow_case);
__ clinit_barrier(rcx, nullptr /*L_fast_path*/, &slow_case);
// get instance_size in InstanceKlass (scaled to a count of bytes)
__ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
@ -3712,7 +3710,7 @@ void TemplateTable::checkcast() {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
// vm_result_2 has metadata result
__ get_vm_result_2(rax, r15_thread);
__ get_vm_result_2(rax);
__ pop_ptr(rdx); // restore receiver
__ jmpb(resolved);
@ -3767,9 +3765,9 @@ void TemplateTable::instanceof() {
__ push(atos); // save receiver for result, and for GC
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
// vm_result_2 has metadata result
__ get_vm_result_2(rax, r15_thread);
// vm_result_2 has metadata result
__ get_vm_result_2(rax);
__ pop_ptr(rdx); // restore receiver
__ verify_oop(rdx);

@ -845,7 +845,7 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Register klass = rscratch1;
__ mov_metadata(klass, C->method()->holder()->constant_encoding());
__ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
__ clinit_barrier(klass, &L_skip_barrier /*L_fast_path*/);
__ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
@ -943,7 +943,7 @@ void MachEpilogNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
code_stub = &stub->entry();
}
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub, r15_thread, true /* at_return */, true /* in_nmethod */);
__ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
}
}