6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr
commit 000b184507 (parent 26f6b1692a)
@@ -2954,7 +2954,7 @@ public:
         // The object has been either evacuated or is dead. Fill it with a
         // dummy object.
         MemRegion mr((HeapWord*)obj, obj->size());
-        SharedHeap::fill_region_with_object(mr);
+        CollectedHeap::fill_with_object(mr);
         _cm->clearRangeBothMaps(mr);
       }
     }

@@ -3225,7 +3225,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
     // Otherwise, try to claim it.
     block = r->par_allocate(free_words);
   } while (block == NULL);
-  SharedHeap::fill_region_with_object(MemRegion(block, free_words));
+  fill_with_object(block, free_words);
 }
 
 #define use_local_bitmaps 1

@@ -3619,9 +3619,8 @@ public:
       guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
                 "should contain whole object");
       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    }
-    else {
-      SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
       add_to_undo_waste(word_sz);
     }
   }

@@ -102,7 +102,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
     HeapWord* tmp = hr->allocate(sz);
     assert(tmp != NULL, "Humongous allocation failure");
     MemRegion mr = MemRegion(tmp, sz);
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(mr);
     hr->declare_filled_region_to_BOT(mr);
     if (i == first) {
       first_hr->set_startsHumongous();

@@ -51,14 +51,14 @@ void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
   if (_retained) {
     // If the buffer had been retained shorten the previous filler object.
     assert(_retained_filler.end() <= _top, "INVARIANT");
-    SharedHeap::fill_region_with_object(_retained_filler);
+    CollectedHeap::fill_with_object(_retained_filler);
     // Wasted space book-keeping, otherwise (normally) done in invalidate()
     _wasted += _retained_filler.word_size();
     _retained = false;
   }
   assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
   if (_top < _hard_end) {
-    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
+    CollectedHeap::fill_with_object(_top, _hard_end);
     if (!retain) {
       invalidate();
     } else {

@@ -155,7 +155,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
 // modifying the _next_threshold state in the BOT.
 void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                      bool contig) {
-  SharedHeap::fill_region_with_object(mr);
+  CollectedHeap::fill_with_object(mr);
   if (contig) {
     _bt.alloc_block(mr.start(), mr.end());
   } else {

@@ -171,7 +171,7 @@ HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
          "or else _true_end should be equal to _hard_end");
   assert(_retained, "or else _true_end should be equal to _hard_end");
   assert(_retained_filler.end() <= _top, "INVARIANT");
-  SharedHeap::fill_region_with_object(_retained_filler);
+  CollectedHeap::fill_with_object(_retained_filler);
   if (_top < _hard_end) {
     fill_region_with_block(MemRegion(_top, _hard_end), true);
   }

@@ -316,11 +316,9 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
     while (_top <= chunk_boundary) {
       assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
              "Consequence of last card handling above.");
-      MemRegion chunk_portion(chunk_boundary, _hard_end);
-      _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
-                                        chunk_portion.end());
-      SharedHeap::fill_region_with_object(chunk_portion);
-      _hard_end = chunk_portion.start();
+      _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
+      CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
+      _hard_end = chunk_boundary;
       chunk_boundary -= ChunkSizeInWords;
     }
     _end = _hard_end - AlignmentReserve;

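Note: the retire() loop above walks the buffer tail one BOT chunk at a time, filling [chunk_boundary, _hard_end) and then sliding both pointers down. A standalone sketch of that descending walk, not from the patch; CHUNK is a hypothetical stand-in for ChunkSizeInWords and word offsets stand in for HeapWord pointers:

#include <utility>
#include <vector>

const long CHUNK = 64;  // hypothetical stand-in for ChunkSizeInWords

// Returns each [start, end) range that would be filled, walking down one
// chunk at a time exactly like the rewritten loop in
// ParGCAllocBufferWithBOT::retire().
std::vector<std::pair<long, long> >
chunk_fills(long top, long chunk_boundary, long hard_end) {
  std::vector<std::pair<long, long> > fills;
  while (top <= chunk_boundary) {
    fills.push_back(std::make_pair(chunk_boundary, hard_end));  // fill_with_object
    hard_end = chunk_boundary;   // the filled chunk becomes the new end
    chunk_boundary -= CHUNK;     // step down to the previous boundary
  }
  return fills;
}
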
@@ -201,7 +201,7 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
            "Should contain whole object.");
     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
 

@@ -389,7 +389,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // full GC.
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
+  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();
 

@@ -416,16 +416,14 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
 
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
 
-  // If the unused part of the old gen cannot be filled, skip
-  // absorbing eden.
-  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
-    return false;
-  }
-
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false; // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }
 
   // Take the live data from eden and set both top and end in the old gen to

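Note: the guard added above exists because a gap can only be made parsable by overlaying a filler object, and no filler is smaller than one aligned object header. A minimal sketch of the decision, not from the patch; MIN_FILL_WORDS is a hypothetical stand-in for CollectedHeap::min_fill_size():

#include <cstddef>

const size_t MIN_FILL_WORDS = 2;  // hypothetical stand-in for min_fill_size()

// Mirrors the early-out added to absorb_live_data_from_eden(): an empty gap
// needs no filler, and a non-empty gap below the minimum cannot be filled.
bool can_absorb(size_t unused_words) {
  if (unused_words == 0) return true;
  return unused_words >= MIN_FILL_WORDS;
}
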
@@ -441,9 +439,8 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
 
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }
 
   // Could update the promoted average here, but it is not typically updated at

@@ -275,22 +275,9 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                             HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t aligned_min_int_array_size =
-      align_object_size(typeArrayOopDesc::header_size(T_INT));
-    if (deadlength >= aligned_min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
-             "deadspace too big for Arrayoop");
-      typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int) deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int) deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {

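Note: the deleted block above sized a filler int array by hand; fill_with_object() now owns that arithmetic. A standalone model of the length computation, not from the patch, under assumed sizes (8-byte heap words, 4-byte jint, a 4-word aligned array header; the VM derives the real values from typeArrayOopDesc::header_size(T_INT) and object alignment):

#include <cassert>
#include <cstddef>

const size_t HEAP_WORD_SIZE  = 8;  // assumed LP64 heap word
const size_t JINT_SIZE       = 4;
const size_t ARRAY_HDR_WORDS = 4;  // assumed aligned int-array header size

// Length a filler int array needs so header + payload covers 'dead_words'.
size_t filler_array_length(size_t dead_words) {
  assert(dead_words >= ARRAY_HDR_WORDS);
  return (dead_words - ARRAY_HDR_WORDS) * (HEAP_WORD_SIZE / JINT_SIZE);
}
// Example: a 100-word gap needs (100 - 4) * 2 = 192 elements.
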
@@ -1308,8 +1308,7 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
   }
 #endif // #ifdef _LP64
 
-  MemRegion region(obj_beg, obj_len);
-  SharedHeap::fill_region_with_object(region);
+  gc_heap()->fill_with_object(obj_beg, obj_len);
   _mark_bitmap.mark_obj(obj_beg, obj_len);
   _summary_data.add_obj(obj_beg, obj_len);
   assert(start_array(id) != NULL, "sanity");

@@ -1807,9 +1806,14 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
 
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
+
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false; // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }
 
   // Take the live data from eden and set both top and end in the old gen to

@@ -1825,9 +1829,8 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
 
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }
 
   // Could update the promoted average here, but it is not typically updated at

@@ -1324,31 +1324,28 @@ inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
   oop(addr)->update_contents(compaction_manager());
 }
 
-class FillClosure: public ParMarkBitMapClosure {
-public:
+class FillClosure: public ParMarkBitMapClosure
+{
+public:
   FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
-    _space_id(space_id),
-    _start_array(PSParallelCompact::start_array(space_id)) {
-    assert(_space_id == PSParallelCompact::perm_space_id ||
-           _space_id == PSParallelCompact::old_space_id,
+    _start_array(PSParallelCompact::start_array(space_id))
+  {
+    assert(space_id == PSParallelCompact::perm_space_id ||
+           space_id == PSParallelCompact::old_space_id,
            "cannot use FillClosure in the young gen");
     assert(bitmap() != NULL, "need a bitmap");
     assert(_start_array != NULL, "need a start array");
   }
 
-  void fill_region(HeapWord* addr, size_t size) {
-    MemRegion region(addr, size);
-    SharedHeap::fill_region_with_object(region);
-    _start_array->allocate_block(addr);
-  }
-
   virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
-    fill_region(addr, size);
+    CollectedHeap::fill_with_objects(addr, size);
+    HeapWord* const end = addr + size;
+    do {
+      _start_array->allocate_block(addr);
+      addr += oop(addr)->size();
+    } while (addr < end);
     return ParMarkBitMap::incomplete;
   }
 
 private:
-  const PSParallelCompact::SpaceId _space_id;
   ObjectStartArray* const _start_array;
 };

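Note: because fill_with_objects() may lay down several consecutive fillers, the rewritten do_addr() must register every object start with the start array, not just the first. A standalone model of that walk, not from the patch, with a size list standing in for oop(addr)->size():

#include <cstddef>
#include <vector>

// Given the word sizes of the consecutive filler objects in a region,
// return the offset of each object start -- the values the closure would
// pass to _start_array->allocate_block(addr).
std::vector<size_t> record_starts(const std::vector<size_t>& obj_sizes) {
  std::vector<size_t> starts;
  if (obj_sizes.empty()) return starts;
  size_t end = 0;
  for (size_t s : obj_sizes) end += s;  // region extent in words
  size_t addr = 0;
  size_t i = 0;
  do {
    starts.push_back(addr);      // _start_array->allocate_block(addr)
    addr += obj_sizes[i++];      // addr += oop(addr)->size()
  } while (addr < end);
  return starts;
}
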
@@ -499,26 +499,15 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
       // We lost, someone else "owns" this object
       guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 
-      // Unallocate the space used. NOTE! We may have directly allocated
-      // the object. If so, we cannot deallocate it, so we have to test!
+      // Try to deallocate the space. If it was directly allocated we cannot
+      // deallocate it, so we have to test. If the deallocation fails,
+      // overwrite with a filler object.
       if (new_obj_is_tenured) {
         if (!_old_lab.unallocate_object(new_obj)) {
-          // The promotion lab failed to unallocate the object.
-          // We need to overwrite the object with a filler that
-          // contains no interior pointers.
-          MemRegion mr((HeapWord*)new_obj, new_obj_size);
-          // Clean this up and move to oopFactory (see bug 4718422)
-          SharedHeap::fill_region_with_object(mr);
-        }
-      } else {
-        if (!_young_lab.unallocate_object(new_obj)) {
-          // The promotion lab failed to unallocate the object.
-          // We need to overwrite the object with a filler that
-          // contains no interior pointers.
-          MemRegion mr((HeapWord*)new_obj, new_obj_size);
-          // Clean this up and move to oopFactory (see bug 4718422)
-          SharedHeap::fill_region_with_object(mr);
-        }
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        }
+      } else if (!_young_lab.unallocate_object(new_obj)) {
+        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
       }
 
       // don't update this before the unallocation!

@@ -76,8 +76,8 @@ void MutableNUMASpace::ensure_parsability() {
     MutableSpace *s = ls->space();
     if (s->top() < top()) { // For all spaces preceeding the one containing top()
       if (s->free_in_words() > 0) {
-        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
         size_t area_touched_words = pointer_delta(s->end(), s->top());
+        CollectedHeap::fill_with_object(s->top(), area_touched_words);
 #ifndef ASSERT
         if (!ZapUnusedHeapArea) {
           area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),

@@ -686,11 +686,11 @@ void MutableNUMASpace::set_top(HeapWord* value) {
     // a minimal object; assuming that's not the last chunk in which case we don't care.
     if (i < lgrp_spaces()->length() - 1) {
       size_t remainder = pointer_delta(s->end(), value);
-      const size_t minimal_object_size = oopDesc::header_size();
-      if (remainder < minimal_object_size && remainder > 0) {
-        // Add a filler object of a minimal size, it will cross the chunk boundary.
-        SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size));
-        value += minimal_object_size;
+      const size_t min_fill_size = CollectedHeap::min_fill_size();
+      if (remainder < min_fill_size && remainder > 0) {
+        // Add a minimum size filler object; it will cross the chunk boundary.
+        CollectedHeap::fill_with_object(value, min_fill_size);
+        value += min_fill_size;
         assert(!s->contains(value), "Should be in the next chunk");
         // Restart the loop from the same chunk, since the value has moved
         // to the next one.

@@ -30,12 +30,21 @@
 int CollectedHeap::_fire_out_of_memory_count = 0;
 #endif
 
+size_t CollectedHeap::_filler_array_max_size = 0;
+
 // Memory state functions.
 
-CollectedHeap::CollectedHeap() :
-  _reserved(), _barrier_set(NULL), _is_gc_active(false),
-  _total_collections(0), _total_full_collections(0),
-  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
+CollectedHeap::CollectedHeap()
+{
+  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
+  const size_t elements_per_word = HeapWordSize / sizeof(jint);
+  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
+                                             max_len * elements_per_word);
+
+  _barrier_set = NULL;
+  _is_gc_active = false;
+  _total_collections = _total_full_collections = 0;
+  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

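Note: a back-of-envelope check of the bound this constructor establishes, not from the patch. An int array tops out around 2^31 elements of 4 bytes each, so a single filler array can cover on the order of 8 GB; that is the "~8G" cited in fill_with_objects() below:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t max_elements = uint64_t(1) << 31;  // roughly max_jint
  const uint64_t payload_bytes = max_elements * 4;  // sizeof(jint) == 4
  std::printf("one filler int array covers ~%llu GB\n",
              (unsigned long long)(payload_bytes >> 30));  // prints 8
  return 0;
}
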
@@ -128,6 +137,95 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
   return obj;
 }
 
+size_t CollectedHeap::filler_array_hdr_size() {
+  return size_t(arrayOopDesc::header_size(T_INT));
+}
+
+size_t CollectedHeap::filler_array_min_size() {
+  return align_object_size(filler_array_hdr_size());
+}
+
+size_t CollectedHeap::filler_array_max_size() {
+  return _filler_array_max_size;
+}
+
+#ifdef ASSERT
+void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
+{
+  assert(words >= min_fill_size(), "too small to fill");
+  assert(words % MinObjAlignment == 0, "unaligned size");
+  assert(Universe::heap()->is_in_reserved(start), "not in heap");
+  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
+}
+
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+{
+  if (ZapFillerObjects) {
+    Copy::fill_to_words(start + filler_array_hdr_size(),
+                        words - filler_array_hdr_size(), 0XDEAFBABE);
+  }
+}
+#endif // ASSERT
+
+void
+CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+{
+  assert(words >= filler_array_min_size(), "too small for an array");
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  const size_t payload_size = words - filler_array_hdr_size();
+  const size_t len = payload_size * HeapWordSize / sizeof(jint);
+
+  // Set the length first for concurrent GC.
+  ((arrayOop)start)->set_length((int)len);
+  post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
+                               words);
+  DEBUG_ONLY(zap_filler_array(start, words);)
+}
+
+void
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+{
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  if (words >= filler_array_min_size()) {
+    fill_with_array(start, words);
+  } else if (words > 0) {
+    assert(words == min_fill_size(), "unaligned size");
+    post_allocation_setup_common(SystemDictionary::object_klass(), start,
+                                 words);
+  }
+}
+
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+  fill_with_object_impl(start, words);
+}
+
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+
+#ifdef LP64
+  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
+  // First fill with arrays, ensuring that any remaining space is big enough to
+  // fill. The remainder is filled with a single object.
+  const size_t min = min_fill_size();
+  const size_t max = filler_array_max_size();
+  while (words > max) {
+    const size_t cur = words - max >= min ? max : max - min;
+    fill_with_array(start, cur);
+    start += cur;
+    words -= cur;
+  }
+#endif
+
+  fill_with_object_impl(start, words);
+}
+
 oop CollectedHeap::new_store_barrier(oop new_obj) {
   // %%% This needs refactoring. (It was imported from the server compiler.)
   guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");

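Note: the subtlety in the fill_with_objects() loop above is that carving off a full max-size array could leave a tail smaller than min_fill_size(), which would then be unfillable; in that case the loop deliberately takes max - min instead. A standalone model of the splitting rule, not from the patch; MIN and MAX are hypothetical stand-ins for min_fill_size() and filler_array_max_size():

#include <cassert>
#include <cstddef>
#include <vector>

const size_t MIN = 2;     // hypothetical stand-in for min_fill_size()
const size_t MAX = 1000;  // hypothetical stand-in for filler_array_max_size()

// Split 'words' into fillable chunks: each chunk is at most MAX, and no
// iteration may leave a remainder in (0, MIN).
// Precondition (fill_args_check): words >= MIN.
std::vector<size_t> split_fill(size_t words) {
  std::vector<size_t> chunks;
  while (words > MAX) {
    const size_t cur = words - MAX >= MIN ? MAX : MAX - MIN;
    chunks.push_back(cur);
    words -= cur;
  }
  if (words > 0) chunks.push_back(words);  // the fill_with_object_impl() tail
  for (size_t c : chunks) assert(c >= MIN && c <= MAX);
  return chunks;
}
// Example: words == MAX + 1 yields {998, 3} rather than {1000, 1}.
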
@@ -47,6 +47,9 @@ class CollectedHeap : public CHeapObj {
   static int _fire_out_of_memory_count;
 #endif
 
+  // Used for filler objects (static, but initialized in ctor).
+  static size_t _filler_array_max_size;
+
  protected:
   MemRegion _reserved;
   BarrierSet* _barrier_set;

@@ -119,6 +122,21 @@ class CollectedHeap : public CHeapObj {
   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);
 
+  // Filler object utilities.
+  static inline size_t filler_array_hdr_size();
+  static inline size_t filler_array_min_size();
+  static inline size_t filler_array_max_size();
+
+  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
+  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+
+  // Fill with a single array; caller must ensure filler_array_min_size() <=
+  // words <= filler_array_max_size().
+  static inline void fill_with_array(HeapWord* start, size_t words);
+
+  // Fill with a single object (either an int array or a java.lang.Object).
+  static inline void fill_with_object_impl(HeapWord* start, size_t words);
+
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
     PRODUCT_RETURN;

@@ -294,6 +312,27 @@ class CollectedHeap : public CHeapObj {
   // The boundary between a "large" and "small" array of primitives, in words.
   virtual size_t large_typearray_limit() = 0;
 
+  // Utilities for turning raw memory into filler objects.
+  //
+  // min_fill_size() is the smallest region that can be filled.
+  // fill_with_objects() can fill arbitrary-sized regions of the heap using
+  // multiple objects. fill_with_object() is for regions known to be smaller
+  // than the largest array of integers; it uses a single object to fill the
+  // region and has slightly less overhead.
+  static size_t min_fill_size() {
+    return size_t(align_object_size(oopDesc::header_size()));
+  }
+
+  static void fill_with_objects(HeapWord* start, size_t words);
+
+  static void fill_with_object(HeapWord* start, size_t words);
+  static void fill_with_object(MemRegion region) {
+    fill_with_object(region.start(), region.word_size());
+  }
+  static void fill_with_object(HeapWord* start, HeapWord* end) {
+    fill_with_object(start, pointer_delta(end, start));
+  }
+
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)

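Note: the comment block above defines the contract; callers statically pick the cheaper single-object form when a region is known to fit one filler. A toy dispatcher making that choice explicit, not from the patch (the constant is a hypothetical stand-in):

#include <cstddef>

const size_t MAX_SINGLE = 1000;  // hypothetical filler_array_max_size()

enum FillCall { NO_FILL, SINGLE_OBJECT, MULTIPLE_OBJECTS };

// Which entry point a caller would reach for: fill_with_object() covers a
// region known to fit one filler; fill_with_objects() handles any size.
FillCall choose_fill(size_t words) {
  if (words == 0) return NO_FILL;
  return words <= MAX_SINGLE ? SINGLE_OBJECT : MULTIPLE_OBJECTS;
}
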
@@ -34,7 +34,6 @@ void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size) {
-
   oop obj = (oop)objPtr;
 
   assert(obj != NULL, "NULL object pointer");

@@ -44,9 +43,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
     // May be bootstrapping
     obj->set_mark(markOopDesc::prototype());
   }
-
-  // support low memory notifications (no-op if not enabled)
-  LowMemoryDetector::detect_low_memory_for_collected_pools();
 }
 
 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,

@@ -65,6 +61,9 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
 
 // Support for jvmti and dtrace
 inline void post_allocation_notify(KlassHandle klass, oop obj) {
+  // support low memory notifications (no-op if not enabled)
+  LowMemoryDetector::detect_low_memory_for_collected_pools();
+
   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
   JvmtiExport::vm_object_alloc_event_collector(obj);
 

@@ -28,21 +28,22 @@ collectedHeap.cpp collectedHeap.hpp
 collectedHeap.cpp                       collectedHeap.inline.hpp
 collectedHeap.cpp                       init.hpp
 collectedHeap.cpp                       oop.inline.hpp
+collectedHeap.cpp                       systemDictionary.hpp
 collectedHeap.cpp                       thread_<os_family>.inline.hpp
 
 collectedHeap.hpp                       allocation.hpp
 collectedHeap.hpp                       barrierSet.hpp
 collectedHeap.hpp                       gcCause.hpp
+collectedHeap.hpp                       handles.hpp
 collectedHeap.hpp                       perfData.hpp
 collectedHeap.hpp                       safepoint.hpp
 
 collectedHeap.inline.hpp                arrayOop.hpp
 collectedHeap.inline.hpp                collectedHeap.hpp
 collectedHeap.inline.hpp                copy.hpp
+collectedHeap.inline.hpp                jvmtiExport.hpp
 collectedHeap.inline.hpp                lowMemoryDetector.hpp
 collectedHeap.inline.hpp                sharedRuntime.hpp
 collectedHeap.inline.hpp                thread.hpp
 collectedHeap.inline.hpp                threadLocalAllocBuffer.inline.hpp
 collectedHeap.inline.hpp                universe.hpp

@@ -248,46 +248,6 @@ void SharedHeap::ref_processing_init() {
   perm_gen()->ref_processor_init();
 }
 
-void SharedHeap::fill_region_with_object(MemRegion mr) {
-  // Disable the posting of JVMTI VMObjectAlloc events as we
-  // don't want the filling of tlabs with filler arrays to be
-  // reported to the profiler.
-  NoJvmtiVMObjectAllocMark njm;
-
-  // Disable low memory detector because there is no real allocation.
-  LowMemoryDetectorDisabler lmd_dis;
-
-  // It turns out that post_allocation_setup_array takes a handle, so the
-  // call below contains an implicit conversion. Best to free that handle
-  // as soon as possible.
-  HandleMark hm;
-
-  size_t word_size = mr.word_size();
-  size_t aligned_array_header_size =
-    align_object_size(typeArrayOopDesc::header_size(T_INT));
-
-  if (word_size >= aligned_array_header_size) {
-    const size_t array_length =
-      pointer_delta(mr.end(), mr.start()) -
-      typeArrayOopDesc::header_size(T_INT);
-    const size_t array_length_words =
-      array_length * (HeapWordSize/sizeof(jint));
-    post_allocation_setup_array(Universe::intArrayKlassObj(),
-                                mr.start(),
-                                mr.word_size(),
-                                (int)array_length_words);
-#ifdef ASSERT
-    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
-    Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
-#endif
-  } else {
-    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
-    post_allocation_setup_obj(SystemDictionary::object_klass(),
-                              mr.start(),
-                              mr.word_size());
-  }
-}
-
 // Some utilities.
 void SharedHeap::print_size_transition(outputStream* out,
                                        size_t bytes_before,

@@ -108,14 +108,6 @@ public:
 
   void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; }
 
-  // A helper function that fills a region of the heap with
-  // with a single object.
-  static void fill_region_with_object(MemRegion mr);
-
-  // Minimum garbage fill object size
-  static size_t min_fill_size() { return (size_t)align_object_size(oopDesc::header_size()); }
-  static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; }
-
   // This function returns the "GenRemSet" object that allows us to scan
   // generations; at least the perm gen, possibly more in a fully
   // generational heap.

@@ -409,19 +409,9 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                         HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-    if (deadlength >= min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int) deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int) deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {

@@ -387,7 +387,7 @@ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
            "should contain whole object");
     buf->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
 

@@ -100,8 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
-    MemRegion mr(top(), hard_end());
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(top(), hard_end());
 
     if (retire || ZeroTLAB) {  // "Reset" the TLAB
       set_start(NULL);

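Note: make_parsable() exists so a heap walker can step over the TLAB's dead tail by object size. A standalone model of the invariant the filler preserves, not from the patch: advancing from the bottom of a space by successive object sizes must land exactly on the end:

#include <cstddef>
#include <vector>

// A space is parsable when consecutive object word-sizes tile it exactly,
// so a walker doing p += oop(p)->size() lands precisely on 'end'.
bool is_parsable(const std::vector<size_t>& obj_sizes, size_t space_words) {
  size_t p = 0;
  for (size_t s : obj_sizes) p += s;
  return p == space_words;
}
// A retired TLAB contributes its allocated objects plus one filler spanning
// [top, hard_end), which keeps the tiling exact.
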
@@ -49,16 +49,17 @@ klassOop Universe::_constantPoolKlassObj = NULL;
 klassOop Universe::_constantPoolCacheKlassObj = NULL;
 klassOop Universe::_compiledICHolderKlassObj = NULL;
 klassOop Universe::_systemObjArrayKlassObj = NULL;
+klassOop Universe::_fillerArrayKlassObj = NULL;
 oop Universe::_int_mirror = NULL;
 oop Universe::_float_mirror = NULL;
 oop Universe::_double_mirror = NULL;
 oop Universe::_byte_mirror = NULL;
 oop Universe::_bool_mirror = NULL;
 oop Universe::_char_mirror = NULL;
 oop Universe::_long_mirror = NULL;
 oop Universe::_short_mirror = NULL;
 oop Universe::_void_mirror = NULL;
 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
 oop Universe::_main_thread_group = NULL;
 oop Universe::_system_thread_group = NULL;
 typeArrayOop Universe::_the_empty_byte_array = NULL;

@@ -126,6 +127,7 @@ void Universe::system_classes_do(void f(klassOop)) {
   f(instanceKlassKlassObj());
   f(constantPoolKlassObj());
   f(systemObjArrayKlassObj());
+  f(fillerArrayKlassObj());
 }
 
 void Universe::oops_do(OopClosure* f, bool do_all) {

@@ -180,6 +182,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
   f->do_oop((oop*)&_constantPoolCacheKlassObj);
   f->do_oop((oop*)&_compiledICHolderKlassObj);
   f->do_oop((oop*)&_systemObjArrayKlassObj);
+  f->do_oop((oop*)&_fillerArrayKlassObj);
   f->do_oop((oop*)&_the_empty_byte_array);
   f->do_oop((oop*)&_the_empty_short_array);
   f->do_oop((oop*)&_the_empty_int_array);

@@ -257,16 +260,17 @@ void Universe::genesis(TRAPS) {
     _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
     _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
 
     _methodKlassObj = methodKlass::create_klass(CHECK);
     _constMethodKlassObj = constMethodKlass::create_klass(CHECK);
     _methodDataKlassObj = methodDataKlass::create_klass(CHECK);
     _constantPoolKlassObj = constantPoolKlass::create_klass(CHECK);
     _constantPoolCacheKlassObj = constantPoolCacheKlass::create_klass(CHECK);
 
     _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
     _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
+    _fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
 
     _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
     _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
     _the_empty_int_array = oopFactory::new_permanent_intArray(0, CHECK);
     _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK);

@@ -274,7 +278,6 @@ void Universe::genesis(TRAPS) {
     _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
     _vm_exception = oopFactory::new_symbol("vm exception holder", CHECK);
   } else {
-
     FileMapInfo *mapinfo = FileMapInfo::current_info();
     char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
     void** vtbl_list = (void**)buffer;

@@ -92,6 +92,7 @@ class LatestMethodOopCache : public CommonMethodOopCache {
 
 
 class Universe: AllStatic {
+  // Ugh. Universe is much too friendly.
   friend class MarkSweep;
   friend class oopDesc;
   friend class ClassLoader;

@@ -132,6 +133,7 @@ class Universe: AllStatic {
   static klassOop _constantPoolCacheKlassObj;
   static klassOop _compiledICHolderKlassObj;
   static klassOop _systemObjArrayKlassObj;
+  static klassOop _fillerArrayKlassObj;
 
   // Known objects in the VM
 

@@ -264,6 +266,7 @@ class Universe: AllStatic {
   static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
   static klassOop compiledICHolderKlassObj() { return _compiledICHolderKlassObj; }
   static klassOop systemObjArrayKlassObj() { return _systemObjArrayKlassObj; }
+  static klassOop fillerArrayKlassObj() { return _fillerArrayKlassObj; }
 
   // Known objects in tbe VM
   static oop int_mirror() { return check_mirror(_int_mirror); }

@@ -96,19 +96,20 @@ class arrayOopDesc : public oopDesc {
             : typesize_in_bytes/HeapWordSize);
   }
 
-  // This method returns the maximum length that can passed into
-  // typeArrayOop::object_size(scale, length, header_size) without causing an
-  // overflow. We substract an extra 2*wordSize to guard against double word
-  // alignments. It gets the scale from the type2aelembytes array.
+  // Return the maximum length of an array of BasicType. The length can passed
+  // to typeArrayOop::object_size(scale, length, header_size) without causing an
+  // overflow.
   static int32_t max_array_length(BasicType type) {
     assert(type >= 0 && type < T_CONFLICT, "wrong type");
     assert(type2aelembytes(type) != 0, "wrong type");
-    // We use max_jint, since object_size is internally represented by an 'int'
-    // This gives us an upper bound of max_jint words for the size of the oop.
-    int32_t max_words = (max_jint - header_size(type) - 2);
-    int elembytes = type2aelembytes(type);
-    jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
-    return (len > max_jint) ? max_jint : (int32_t)len;
-  }
+    const int bytes_per_element = type2aelembytes(type);
+    if (bytes_per_element < HeapWordSize) {
+      return max_jint;
+    }
+
+    const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
+    const int32_t max_element_words = max_words - header_size(type);
+    const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
+    return max_element_words / words_per_element;
+  }
 };

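Note: a worked model of the rewritten bound, not from the patch, under assumed LP64 values (8-byte heap words). For sub-word elements such as jint, even max_jint of them fit within a max_jint-word object, so the length cap is simply max_jint; for word-sized-or-larger elements the cap is derived from the word budget:

#include <cstdint>

const int32_t MAX_JINT = 0x7fffffff;
const int HEAP_WORD_SIZE     = 8;  // assumed LP64
const int LOG_HEAP_WORD_SIZE = 3;

// Model of the rewritten arrayOopDesc::max_array_length(); header_words
// stands in for header_size(type), and alignment rounding is folded away.
int32_t model_max_array_length(int bytes_per_element, int32_t header_words) {
  if (bytes_per_element < HEAP_WORD_SIZE) {
    // max_jint sub-word elements occupy fewer than max_jint words, so
    // object_size() cannot overflow an int.
    return MAX_JINT;
  }
  const int32_t max_words = MAX_JINT;              // align_size_down(...)
  const int32_t max_element_words = max_words - header_words;
  const int32_t words_per_element = bytes_per_element >> LOG_HEAP_WORD_SIZE;
  return max_element_words / words_per_element;
}
// e.g. T_INT (4 bytes) -> MAX_JINT; T_LONG (8 bytes) -> MAX_JINT - header.
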
@@ -36,13 +36,14 @@ bool typeArrayKlass::compute_is_subtype_of(klassOop k) {
   return element_type() == tak->element_type();
 }
 
-klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) {
+klassOop typeArrayKlass::create_klass(BasicType type, int scale,
+                                      const char* name_str, TRAPS) {
   typeArrayKlass o;
 
   symbolHandle sym(symbolOop(NULL));
   // bootstrapping: don't create sym if symbolKlass not created yet
-  if (Universe::symbolKlassObj() != NULL) {
-    sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL);
+  if (Universe::symbolKlassObj() != NULL && name_str != NULL) {
+    sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL);
   }
   KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj());
 

@@ -39,7 +39,11 @@ class typeArrayKlass : public arrayKlass {
 
   // klass allocation
   DEFINE_ALLOCATE_PERMANENT(typeArrayKlass);
-  static klassOop create_klass(BasicType type, int scale, TRAPS);
+  static klassOop create_klass(BasicType type, int scale, const char* name_str,
+                               TRAPS);
+  static inline klassOop create_klass(BasicType type, int scale, TRAPS) {
+    return create_klass(type, scale, external_name(type), CHECK_NULL);
+  }
 
   int oop_size(oop obj) const;
   int klass_oop_size() const { return object_size(); }

@@ -625,6 +625,9 @@ class CommandLineFlags {
   develop(bool, CheckZapUnusedHeapArea, false,                              \
           "Check zapping of unused heap space")                             \
                                                                             \
+  develop(bool, ZapFillerObjects, trueInDebug,                              \
+          "Zap filler objects with 0xDEAFBABE")                             \
+                                                                            \
   develop(bool, PrintVMMessages, true,                                      \
           "Print vm messages on console")                                   \
                                                                             \