8205336: Modularize allocations in assembler
Reviewed-by: aph, eosterlund
parent 9c47d8db3f
commit 3ac6f8d3b9
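
The change moves the TLAB and eden fast-path allocation code (and the allocated-bytes bookkeeping) out of MacroAssembler and into BarrierSetAssembler on aarch64 and x86, so a collector's barrier set can supply or override how inline allocation is emitted; the old MacroAssembler entry points remain but now only delegate, and callers pass the thread register explicitly on x86. A minimal standalone C++ sketch of that delegation pattern (simplified stand-in classes, not the actual HotSpot sources):

// Illustrative sketch only: simplified stand-ins for MacroAssembler and
// BarrierSetAssembler showing the delegation this commit introduces.
#include <iostream>

class MacroAssembler;  // forward declaration

class BarrierSetAssembler {
public:
  // Default inline-allocation fast path; a GC-specific assembler may override it.
  virtual void tlab_allocate(MacroAssembler* masm /*, registers, slow_case */) {
    std::cout << "BarrierSetAssembler emits the default TLAB fast path\n";
  }
  virtual ~BarrierSetAssembler() {}
};

// Stand-in for BarrierSet::barrier_set()->barrier_set_assembler().
BarrierSetAssembler* barrier_set_assembler() {
  static BarrierSetAssembler bs;
  return &bs;
}

class MacroAssembler {
public:
  // After this commit the MacroAssembler entry point only delegates.
  void tlab_allocate(/* registers, slow_case */) {
    BarrierSetAssembler* bs = barrier_set_assembler();
    bs->tlab_allocate(this);
  }
};

int main() {
  MacroAssembler masm;
  masm.tlab_allocate();  // dispatches through the barrier set assembler
  return 0;
}
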
@@ -166,7 +166,6 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

@@ -722,7 +722,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

        __ eden_allocate(obj, obj_size, 0, t1, slow_path);
        __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

        __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
        __ verify_oop(obj);
@@ -823,7 +822,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

        __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
        __ incr_allocated_bytes(rthread, arr_size, 0, rscratch1);

        __ initialize_header(obj, klass, length, t1, t2);
        __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));

@@ -24,7 +24,9 @@

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

@@ -121,3 +123,109 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0)); // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      unsigned long offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP. add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}
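
For reference, the eden fast path emitted above is a bump-pointer allocation with a retry loop: load the shared heap top, compute the new end, branch to the slow path on address wrap-around or when the heap end would be exceeded, then try to publish the new top (ldaxr/stlxr here, lock cmpxchg on x86) and retry if another thread won the race. A rough portable-C++ equivalent of that logic (illustrative sketch only, not HotSpot code):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Bump-pointer allocation over a shared region, mirroring the eden fast path:
// the shared 'top' is advanced with a compare-and-swap; a null return stands
// in for the jump to the runtime slow path.
struct SharedRegion {
  std::atomic<uintptr_t> top;   // current allocation pointer (heap top)
  uintptr_t end;                // end of the region (heap end)

  void* allocate(size_t size_in_bytes) {
    uintptr_t obj = top.load(std::memory_order_acquire);
    for (;;) {
      uintptr_t new_top = obj + size_in_bytes;
      if (new_top < obj) return nullptr;   // wrapped around high memory -> slow case
      if (new_top > end) return nullptr;   // past the heap end -> slow case
      // Publish the new top. On failure 'obj' is reloaded with the current
      // value and we retry, like the stlxr/cbnzw (or lock cmpxchg) loop.
      if (top.compare_exchange_weak(obj, new_top,
                                    std::memory_order_acq_rel,
                                    std::memory_order_acquire)) {
        return reinterpret_cast<void*>(obj);
      }
    }
  }
};

A caller would treat the null return as the cue to take the slow path (GC, or falling back to a TLAB refill).
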
@@ -30,6 +30,11 @@
#include "oops/access.hpp"

class BarrierSetAssembler: public CHeapObj<mtGC> {
private:
  void incr_allocated_bytes(MacroAssembler* masm,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

public:
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register addr, Register count, RegSet saved_regs) {}
@@ -46,6 +51,22 @@ public:
  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                             Register obj, Register tmp, Label& slowpath);

  virtual void tlab_allocate(MacroAssembler* masm,
                             Register obj,                // result: pointer to object after successful allocation
                             Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
                             int con_size_in_bytes,       // object size in bytes if known at compile time
                             Register t1,                 // temp register
                             Register t2,                 // temp register
                             Label& slow_case             // continuation point if fast allocation fails
  );

  void eden_allocate(MacroAssembler* masm,
                     Register obj,                // result: pointer to object after successful allocation
                     Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
                     int con_size_in_bytes,       // object size in bytes if known at compile time
                     Register t1,                 // temp register
                     Label& slow_case             // continuation point if fast allocation fails
  );
  virtual void barrier_stubs_init() {}
};

@@ -2440,24 +2440,6 @@ ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)

#undef ATOMIC_XCHG

void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
    thread = rthread;
  }
  assert(t1->is_valid(), "need temp reg");

  ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    add(t1, t1, var_size_in_bytes);
  } else {
    add(t1, t1, con_size_in_bytes);
  }
  str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

@@ -4085,30 +4067,18 @@ void MacroAssembler::tlab_allocate(Register obj,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

  // verify_tlab();

  ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes));
  }
  ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  cmp(end, rscratch1);
  br(Assembler::HI, slow_case);

  // update the tlab top pointer
  str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->eden_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
}

// Zero words; len is in bytes
@@ -4173,61 +4143,6 @@ void MacroAssembler::zero_memory(Register addr, Register len, Register t1) {
  cbnz(len, loop);
}

// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    bind(retry);
    {
      unsigned long offset;
      adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP. add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      add(rscratch1, rscratch1, offset);
      ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    cmp(end, obj);
    br(Assembler::LO, slow_case);

    cmp(end, heap_end);
    br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    stlxr(rscratch2, end, rscratch1);
    cbnzw(rscratch2, retry);
  }
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {

@@ -866,10 +866,6 @@ public:
  void zero_memory(Register addr, Register len, Register t1);
  void verify_tlab();

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,

@@ -3562,7 +3562,6 @@ void TemplateTable::_new() {
    // r3: instance size in bytes
    if (allow_shared_alloc) {
      __ eden_allocate(r0, r3, 0, r10, slow_case);
      __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
    }
  }

@@ -139,10 +139,9 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
    tlab_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
    eden_allocate(noreg, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  }
}

@@ -1056,8 +1056,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        // get the instance size (size is positive so movl is fine for 64bit)
        __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

        __ eden_allocate(obj, obj_size, 0, t1, slow_path);
        __ incr_allocated_bytes(thread, obj_size, 0);
        __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);

        __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
        __ verify_oop(obj);
@@ -1155,12 +1154,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
        __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

        __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size

        // Using t2 for non 64-bit.
        const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
        NOT_LP64(__ get_thread(thread));
        __ incr_allocated_bytes(thread, arr_size, 0);
        __ eden_allocate(thread, obj, arr_size, 0, t1, slow_path); // preserves arr_size

        __ initialize_header(obj, klass, length, t1, t2);
        __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));

@@ -24,8 +24,10 @@

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

@@ -213,3 +215,110 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re
  __ clear_jweak_tag(obj);
  __ movptr(obj, Address(obj, 0));
}

void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}

// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    __ bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    __ movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    __ cmpptr(end, obj);
    __ jcc(Assembler::below, slow_case);
    __ cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    __ jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    __ locked_cmpxchgptr(end, heap_top);
    __ jcc(Assembler::notEqual, retry);
    incr_allocated_bytes(masm, thread, var_size_in_bytes, con_size_in_bytes, thread->is_valid() ? noreg : t1);
  }
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register thread,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  __ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}
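
The tlab_allocate variant above needs no atomic update because a TLAB is owned by a single thread: load tlab_top, compute the end, compare it against tlab_end, and store the new top. A rough standalone sketch of that thread-local fast path (illustrative only, not HotSpot code):

#include <cstddef>
#include <cstdint>

// Thread-local bump allocation mirroring the TLAB fast path: no CAS is needed
// because only the owning thread allocates from its TLAB; a null return stands
// in for branching to the slow case (TLAB refill or shared-eden allocation).
struct ThreadLocalBuffer {
  uintptr_t top;   // analogous to JavaThread::tlab_top_offset()
  uintptr_t end;   // analogous to JavaThread::tlab_end_offset()

  void* allocate(size_t size_in_bytes) {
    uintptr_t obj = top;
    uintptr_t new_top = obj + size_in_bytes;
    if (new_top > end) return nullptr;  // does not fit -> slow case
    top = new_top;                      // update the tlab top pointer
    return reinterpret_cast<void*>(obj);
  }
};
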
@@ -32,7 +32,12 @@
class InterpreterMacroAssembler;

class BarrierSetAssembler: public CHeapObj<mtGC> {
protected:
private:
  void incr_allocated_bytes(MacroAssembler* masm, Register thread,
                            Register var_size_in_bytes,
                            int con_size_in_bytes,
                            Register t1);

public:
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register src, Register dst, Register count) {}
@@ -60,6 +65,19 @@ public:
  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                             Register obj, Register tmp, Label& slowpath);

  virtual void tlab_allocate(MacroAssembler* masm,
                             Register thread, Register obj,
                             Register var_size_in_bytes,
                             int con_size_in_bytes,
                             Register t1, Register t2,
                             Label& slow_case);
  virtual void eden_allocate(MacroAssembler* masm,
                             Register thread, Register obj,
                             Register var_size_in_bytes,
                             int con_size_in_bytes,
                             Register t1,
                             Label& slow_case);

  virtual void barrier_stubs_init() {}
};

@@ -2959,40 +2959,6 @@ void MacroAssembler::empty_FPU_stack() {
#endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI


// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    cmpptr(end, obj);
    jcc(Assembler::below, slow_case);
    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    locked_cmpxchgptr(end, heap_top);
    jcc(Assembler::notEqual, retry);
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
@@ -5310,38 +5276,24 @@ void MacroAssembler::testptr(Register dst, Register src) {
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

  verify_tlab();

  NOT_LP64(get_thread(thread));

  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->eden_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
}

// Preserves the contents of address, destroys the contents length_in_bytes and temp.
@@ -5400,36 +5352,6 @@ void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int
  bind(done);
}

void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.

@@ -504,6 +504,7 @@ class MacroAssembler: public Assembler {

  // allocation
  void eden_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
@@ -511,6 +512,7 @@ class MacroAssembler: public Assembler {
    Label& slow_case            // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
@@ -520,10 +522,6 @@ class MacroAssembler: public Assembler {
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,

@@ -4013,7 +4013,7 @@ void TemplateTable::_new() {
#endif // _LP64

  if (UseTLAB) {
    __ tlab_allocate(rax, rdx, 0, rcx, rbx, slow_case);
    __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
    if (ZeroTLAB) {
      // the fields have been already cleared
      __ jmp(initialize_header);
@@ -4025,10 +4025,7 @@ void TemplateTable::_new() {
  // Allocation in the shared Eden, if allowed.
  //
  // rdx: instance size in bytes
  if (allow_shared_alloc) {
    __ eden_allocate(rax, rdx, 0, rbx, slow_case);
    __ incr_allocated_bytes(thread, rdx, 0);
  }
    __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
  }

  // If UseTLAB or allow_shared_alloc are true, the object is created above and