8202776: Modularize GC allocations in runtime
Reviewed-by: eosterlund, shade
This commit is contained in:
parent fcfd1c85dd
commit 26b8ea76f5
@@ -365,20 +365,32 @@ void CollectedHeap::check_for_valid_allocation_state() {
 }
 #endif
 
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
+                                          bool* gc_overhead_limit_was_exceeded, TRAPS) {
+  if (UseTLAB) {
+    HeapWord* result = allocate_from_tlab(klass, size, THREAD);
+    if (result != NULL) {
+      return result;
+    }
+  }
+  return Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
+}
+
+HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
+  ThreadLocalAllocBuffer& tlab = THREAD->tlab();
 
   // Retain tlab and allocate object in shared space if
   // the amount free in the tlab is too large to discard.
-  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
-    thread->tlab().record_slow_allocation(size);
+  if (tlab.free() > tlab.refill_waste_limit()) {
+    tlab.record_slow_allocation(size);
     return NULL;
   }
 
   // Discard tlab and allocate a new one.
   // To minimize fragmentation, the last TLAB may be smaller than the rest.
-  size_t new_tlab_size = thread->tlab().compute_size(size);
+  size_t new_tlab_size = tlab.compute_size(size);
 
-  thread->tlab().clear_before_allocation();
+  tlab.clear_before_allocation();
 
   if (new_tlab_size == 0) {
     return NULL;
@@ -397,7 +409,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
          p2i(obj), min_tlab_size, new_tlab_size);
 
-  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
+  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
 
   if (ZeroTLAB) {
     // ..and clear it.
@@ -412,7 +424,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 #endif // ASSERT
   }
-  thread->tlab().fill(obj, obj + size, actual_tlab_size);
+  tlab.fill(obj, obj + size, actual_tlab_size);
   return obj;
 }
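The slow path above keeps TLAB's retain-vs-discard heuristic: if the buffer still has more free space than its refill-waste limit, it is kept and this one allocation is routed to the shared heap; otherwise it is retired and a new TLAB is requested. A minimal standalone sketch of that decision (Tlab here is a simplified stand-in, not HotSpot's ThreadLocalAllocBuffer):

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-in for a thread-local allocation buffer.
struct Tlab {
  std::size_t free_words;          // words still unused in the buffer
  std::size_t refill_waste_limit;  // max words we accept wasting on a refill
};

// True when the TLAB should be retired and refilled; false when it is
// kept and the current allocation falls through to the shared heap.
bool should_retire(const Tlab& tlab) {
  return tlab.free_words <= tlab.refill_waste_limit;
}

int main() {
  Tlab roomy       = {1024, 64};  // lots of room left: keep it
  Tlab nearly_full = {16, 64};    // little room left: retire and refill
  std::printf("%d %d\n", should_retire(roomy), should_retire(nearly_full));
  return 0;
}
```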
@@ -141,8 +141,15 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   virtual void resize_all_tlabs();
 
   // Allocate from the current thread's TLAB, with broken-out slow path.
-  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
-  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);
+  inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
+  static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
+
+  // Raw memory allocation facilities
+  // The obj and array allocate methods are covers for these methods.
+  // mem_allocate() should never be
+  // called to allocate TLABs, only individual objects.
+  virtual HeapWord* mem_allocate(size_t size,
+                                 bool* gc_overhead_limit_was_exceeded) = 0;
 
   // Allocate an uninitialized block of the given size, or returns NULL if
   // this is impossible.
@@ -309,12 +316,12 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
   inline static oop class_allocate(Klass* klass, int size, TRAPS);
 
-  // Raw memory allocation facilities
-  // The obj and array allocate methods are covers for these methods.
-  // mem_allocate() should never be
-  // called to allocate TLABs, only individual objects.
-  virtual HeapWord* mem_allocate(size_t size,
-                                 bool* gc_overhead_limit_was_exceeded) = 0;
+  // Raw memory allocation. This may or may not use TLAB allocations to satisfy the
+  // allocation. A GC implementation may override this function to satisfy the allocation
+  // in any way. But the default is to try a TLAB allocation, and otherwise perform
+  // mem_allocate.
+  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
+                                     bool* gc_overhead_limit_was_exceeded, TRAPS);
 
   // Utilities for turning raw memory into filler objects.
   //
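The new comment documents obj_allocate_raw() as a per-GC override point. A hedged sketch of what an override could look like (MyHeap is hypothetical and not part of this change; only the signature comes from the declaration above, and the fragment assumes HotSpot-internal headers rather than being a standalone program):

```cpp
// Hypothetical collector overriding the new allocation hook.
class MyHeap : public CollectedHeap {
public:
  // Matches the virtual declared in collectedHeap.hpp above.
  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
                                     bool* gc_overhead_limit_was_exceeded,
                                     TRAPS) {
    // Skip the default try-TLAB-then-mem_allocate policy and satisfy
    // the allocation directly from this heap's own allocator.
    return mem_allocate(size, gc_overhead_limit_was_exceeded);
  }
  // ... remaining pure virtuals of CollectedHeap omitted ...
};
```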
@@ -137,18 +137,10 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {
     return NULL;  // caller does a CHECK_0 too
   }
 
-  HeapWord* result = NULL;
-  if (UseTLAB) {
-    result = allocate_from_tlab(klass, THREAD, size);
-    if (result != NULL) {
-      assert(!HAS_PENDING_EXCEPTION,
-             "Unexpected exception, will result in uninitialized storage");
-      return result;
-    }
-  }
   bool gc_overhead_limit_was_exceeded = false;
-  result = Universe::heap()->mem_allocate(size,
-                                          &gc_overhead_limit_was_exceeded);
+  CollectedHeap* heap = Universe::heap();
+  HeapWord* result = heap->obj_allocate_raw(klass, size, &gc_overhead_limit_was_exceeded, THREAD);
+
   if (result != NULL) {
     NOT_PRODUCT(Universe::heap()->
                 check_for_non_bad_heap_word_value(result, size));
@@ -161,7 +153,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {
     return result;
   }
 
-
   if (!gc_overhead_limit_was_exceeded) {
     // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
     report_java_out_of_memory("Java heap space");
@@ -193,15 +184,18 @@ HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
   return obj;
 }
 
-HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, size_t size, TRAPS) {
   assert(UseTLAB, "should use UseTLAB");
 
-  HeapWord* obj = thread->tlab().allocate(size);
+  HeapWord* obj = THREAD->tlab().allocate(size);
   if (obj != NULL) {
     return obj;
   }
   // Otherwise...
-  return allocate_from_tlab_slow(klass, thread, size);
+  obj = allocate_from_tlab_slow(klass, size, THREAD);
+  assert(obj == NULL || !HAS_PENDING_EXCEPTION,
+         "Unexpected exception, will result in uninitialized storage");
+  return obj;
 }
 
 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
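Taken together, the shared runtime allocation path now funnels through a single GC-overridable call. A condensed sketch of the resulting flow (not verbatim HotSpot code; error handling and debug checks omitted):

```cpp
// common_mem_allocate_noinit(), reduced to its new core.
HeapWord* common_mem_allocate_noinit_sketch(Klass* klass, size_t size, TRAPS) {
  bool gc_overhead_limit_was_exceeded = false;
  CollectedHeap* heap = Universe::heap();
  // Default obj_allocate_raw(): TLAB fast path, allocate_from_tlab_slow()
  // on miss, then mem_allocate() if the TLAB cannot satisfy the request.
  return heap->obj_allocate_raw(klass, size,
                                &gc_overhead_limit_was_exceeded, THREAD);
}
```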