John Coomes 2011-07-06 08:43:01 -07:00
commit ce3ae66eeb
69 changed files with 2897 additions and 1624 deletions

View File

@@ -1258,7 +1258,6 @@ class BacktraceBuilder: public StackObj {
objArrayOop _methods;
typeArrayOop _bcis;
int _index;
bool _dirty;
No_Safepoint_Verifier _nsv;
public:
@@ -1272,37 +1271,13 @@ class BacktraceBuilder: public StackObj {
};
// constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) {
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
expand(CHECK);
_backtrace = _head;
_index = 0;
}
void flush() {
// The following appears to have been an optimization to save from
// doing a barrier for each individual store into the _methods array,
// but rather to do it for the entire array after the series of writes.
// That optimization seems to have been lost when compressed oops was
// implemented. However, the extra card-mark below was left in place,
// but is now redundant because the individual stores into the
// _methods array already execute the barrier code. CR 6918185 has
// been filed so the original code may be restored by deferring the
// barriers until after the entire sequence of stores, thus re-enabling
// the intent of the original optimization. In the meantime the redundant
// card mark below is now disabled.
if (_dirty && _methods != NULL) {
#if 0
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
#endif
_dirty = false;
}
}
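The comment above describes trading one barrier per reference store for a single deferred barrier over the whole array. A minimal standalone sketch of that idea, with a toy card table; the constants, names, and table size here are illustrative assumptions, not HotSpot's:

#include <cstddef>
#include <cstdint>

const int    kCardShift = 9;        // assumed 512-byte cards
const size_t kCards     = 1 << 16;  // toy card table size
static uint8_t card_table[kCards];

// Per-store barrier: dirty the card covering one updated slot. This is
// what each individual store into the _methods array pays today.
inline void post_barrier(void** slot) {
  card_table[(((uintptr_t)slot) >> kCardShift) % kCards] = 1;
}

// Deferred variant, in the spirit of the disabled write_ref_array() call
// above: after the series of stores, dirty every spanned card once.
void write_ref_array_deferred(void** base, size_t len) {
  uintptr_t from = ((uintptr_t)base) >> kCardShift;
  uintptr_t to   = ((uintptr_t)(base + len - 1)) >> kCardShift;
  for (uintptr_t c = from; c <= to; c++) {
    card_table[c % kCards] = 1;
  }
}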
void expand(TRAPS) {
flush();
objArrayHandle old_head(THREAD, _head);
Pause_No_Safepoint_Verifier pnsv(&_nsv);
@@ -1328,7 +1303,6 @@ class BacktraceBuilder: public StackObj {
}
oop backtrace() {
flush();
return _backtrace();
}
@@ -1342,7 +1316,6 @@ class BacktraceBuilder: public StackObj {
_methods->obj_at_put(_index, method);
_bcis->ushort_at_put(_index, bci);
_index++;
_dirty = true;
}
methodOop current_method() {

View File

@@ -1833,8 +1833,6 @@ CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
}
)
_indexedFreeList[size].removeChunk(fc);
debug_only(fc->clearNext());
debug_only(fc->clearPrev());
NOT_PRODUCT(
if (FLSVerifyIndexTable) {
verifyIndexedFreeList(size);

View File

@@ -114,17 +114,11 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this);
}
void linkAfterNonNull(FreeChunk* ptr) {
assert(ptr != NULL, "precondition violation");
linkNext(ptr);
ptr->linkPrev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1);
}
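linkPrev() above stores the predecessor with bit 0 set: chunk addresses are word-aligned, so the low bit is spare and doubles as a "this chunk is on a free list" flag. A self-contained model of the tagging trick; the names are illustrative, not HotSpot's:

#include <cassert>
#include <cstdint>

struct Chunk { Chunk* prev_raw; };

inline Chunk* tag_free(Chunk* p) { return (Chunk*)((uintptr_t)p | 0x1); }
inline bool   is_free(Chunk* p)  { return ((uintptr_t)p & 0x1) != 0; }
inline Chunk* untag(Chunk* p)    { return (Chunk*)((uintptr_t)p & ~(uintptr_t)0x1); }

int main() {
  alignas(8) static Chunk a, b;      // word-aligned, so bit 0 is free
  a.prev_raw = tag_free(&b);         // link a after b and flag a as free
  assert(is_free(a.prev_raw));
  assert(untag(a.prev_raw) == &b);   // recover the real predecessor
  return 0;
}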
void clearPrev() { _prev = NULL; }
void clearNext() { _next = NULL; }
void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below

View File

@@ -300,8 +300,21 @@ void FreeList::verify_stats() const {
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1
>= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+ _allocation_stats.coalBirths() + 1) // Total Production Stock + 1
>= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prevSweep(" SIZE_FORMAT ")"
" + splitBirths(" SIZE_FORMAT ")"
" + coalBirths(" SIZE_FORMAT ") + 1 >= "
" splitDeaths(" SIZE_FORMAT ")"
" coalDeaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
_allocation_stats.splitBirths(), _allocation_stats.splitDeaths(),
_allocation_stats.coalDeaths(), count()));
}
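The assert above is bookkeeping arithmetic: stock carried over from the previous sweep plus every birth (split or coalesce), with one unit of slack for a block in transit, must cover every death plus whatever is still on the list. A standalone rendering of the same inequality, with made-up numbers:

#include <cassert>

struct Stats {
  long prevSweep, splitBirths, coalBirths, splitDeaths, coalDeaths, count;
};

bool conserves(const Stats& s) {
  return s.prevSweep + s.splitBirths + s.coalBirths + 1
         >= s.splitDeaths + s.coalDeaths + s.count;
}

int main() {
  assert(conserves({10, 4, 2, 3, 1, 12}));   // 17 >= 16: consistent history
  assert(!conserves({10, 4, 2, 3, 1, 14}));  // 17 <  18: a block went missing
  return 0;
}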
void FreeList::assert_proper_lock_protection_work() const {

File diff suppressed because it is too large

View File

@@ -131,22 +131,22 @@ class CMBitMap : public CMBitMapRO {
void mark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
_bm.at_put(heapWordToOffset(addr), true);
_bm.set_bit(heapWordToOffset(addr));
}
void clear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
_bm.at_put(heapWordToOffset(addr), false);
_bm.clear_bit(heapWordToOffset(addr));
}
bool parMark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
return _bm.par_at_put(heapWordToOffset(addr), true);
return _bm.par_set_bit(heapWordToOffset(addr));
}
bool parClear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
return _bm.par_at_put(heapWordToOffset(addr), false);
return _bm.par_clear_bit(heapWordToOffset(addr));
}
void markRange(MemRegion mr);
void clearAll();
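The switch from par_at_put(..., true) to par_set_bit() above keeps the same contract: atomically set the bit and report whether this caller flipped it, so exactly one of several racing markers treats the object as newly marked. A minimal model of that contract using std::atomic; this is not HotSpot's BitMap:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct ParBitMap {
  std::atomic<uintptr_t>* _map;     // backing words, externally allocated

  bool par_set_bit(size_t bit) {
    const size_t bits = 8 * sizeof(uintptr_t);
    std::atomic<uintptr_t>& word = _map[bit / bits];
    const uintptr_t mask = uintptr_t(1) << (bit % bits);
    uintptr_t old = word.fetch_or(mask, std::memory_order_acq_rel);
    return (old & mask) == 0;       // true iff the bit was previously clear
  }
};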
@@ -605,10 +605,10 @@ public:
void mark_stack_pop(oop* arr, int max, int* n) {
_markStack.par_pop_arr(arr, max, n);
}
size_t mark_stack_size() { return _markStack.size(); }
size_t mark_stack_size() { return _markStack.size(); }
size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
bool mark_stack_overflow() { return _markStack.overflow(); }
bool mark_stack_empty() { return _markStack.isEmpty(); }
bool mark_stack_overflow() { return _markStack.overflow(); }
bool mark_stack_empty() { return _markStack.isEmpty(); }
// (Lock-free) Manipulation of the region stack
bool region_stack_push_lock_free(MemRegion mr) {
@@ -736,12 +736,14 @@ public:
// will dump the contents of its reference fields, as well as
// liveness information for the object and its referents. The dump
// will be written to a file with the following name:
// G1PrintReachableBaseFile + "." + str. use_prev_marking decides
// whether the prev (use_prev_marking == true) or next
// (use_prev_marking == false) marking information will be used to
// determine the liveness of each object / referent. If all is true,
// all objects in the heap will be dumped, otherwise only the live
// ones. In the dump the following symbols / abbreviations are used:
// G1PrintReachableBaseFile + "." + str.
// vo decides whether the prev (vo == UsePrevMarking), the next
// (vo == UseNextMarking) marking information, or the mark word
// (vo == UseMarkWord) will be used to determine the liveness of
// each object / referent.
// If all is true, all objects in the heap will be dumped, otherwise
// only the live ones. In the dump the following symbols / abbreviations
// are used:
// M : an explicitly live object (its bitmap bit is set)
// > : an implicitly live object (over tams)
// O : an object outside the G1 heap (typically: in the perm gen)
@@ -749,7 +751,7 @@ public:
// AND MARKED : indicates that an object is both explicitly and
// implicitly live (it should be one or the other, not both)
void print_reachable(const char* str,
bool use_prev_marking, bool all) PRODUCT_RETURN;
VerifyOption vo, bool all) PRODUCT_RETURN;
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
@@ -831,8 +833,9 @@ public:
// _min_finger then we need to gray objects.
// This routine is like registerCSetRegion but for an entire
// collection of regions.
if (max_finger > _min_finger)
if (max_finger > _min_finger) {
_should_gray_objects = true;
}
}
// Returns "true" if at least one mark has been completed.
@@ -878,14 +881,18 @@ public:
// The following indicate whether a given verbose level has been
// set. Notice that anything above stats is conditional on
// _MARKING_VERBOSE_ having been set to 1
bool verbose_stats()
{ return _verbose_level >= stats_verbose; }
bool verbose_low()
{ return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
bool verbose_medium()
{ return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
bool verbose_high()
{ return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
bool verbose_stats() {
return _verbose_level >= stats_verbose;
}
bool verbose_low() {
return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
}
bool verbose_medium() {
return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
}
bool verbose_high() {
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
}
};
// A class representing a marking task.
@@ -928,7 +935,7 @@ private:
double _start_time_ms;
// the oop closure used for iterations over oops
OopClosure* _oop_closure;
G1CMOopClosure* _cm_oop_closure;
// the region this task is scanning, NULL if we're not scanning any
HeapRegion* _curr_region;
@@ -1061,8 +1068,9 @@ private:
// respective limit and calls reached_limit() if they have
void check_limits() {
if (_words_scanned >= _words_scanned_limit ||
_refs_reached >= _refs_reached_limit)
_refs_reached >= _refs_reached_limit) {
reached_limit();
}
}
// this is supposed to be called regularly during a marking step as
// it checks a bunch of conditions that might cause the marking step
@@ -1122,32 +1130,17 @@ public:
// Clears any recorded partially scanned region
void clear_aborted_region() { set_aborted_region(MemRegion()); }
void set_oop_closure(OopClosure* oop_closure) {
_oop_closure = oop_closure;
}
void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
// It grays the object by marking it and, if necessary, pushing it
// on the local queue
void deal_with_reference(oop obj);
inline void deal_with_reference(oop obj);
// It scans an object and visits its children.
void scan_object(oop obj) {
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high())
gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
_task_id, (void*) obj);
size_t obj_size = obj->size();
_words_scanned += obj_size;
obj->oop_iterate(_oop_closure);
statsOnly( ++_objs_scanned );
check_limits();
}
void scan_object(oop obj);
// It pushes an object on the local queue.
void push(oop obj);
inline void push(oop obj);
// These two move entries to/from the global stack.
void move_entries_to_global_stack();

View File

@@ -0,0 +1,156 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
inline void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
}
if (!_task_queue->push(obj)) {
// The local task queue looks full. We need to push some entries
// to the global stack.
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%d] task queue overflow, "
"moving entries to the global stack",
_task_id);
}
move_entries_to_global_stack();
// this should succeed since, even if we overflow the global
// stack, we should have definitely removed some entries from the
// local queue. So, there must be space on it.
bool success = _task_queue->push(obj);
assert(success, "invariant");
}
statsOnly( int tmp_size = _task_queue->size();
if (tmp_size > _local_max_size) {
_local_max_size = tmp_size;
}
++_local_pushes );
}
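As a rough model of the overflow path in push() above: when the bounded local queue rejects an entry, a batch is moved to the shared global stack, after which the retried local push is guaranteed to succeed. A toy version; the capacity and the half-batch policy are assumptions for illustration, not HotSpot's values:

#include <cassert>
#include <cstddef>
#include <deque>
#include <vector>

struct MarkQueues {
  static const size_t kLocalCap = 4;
  std::deque<void*> local;          // stand-in for the task's work queue
  std::vector<void*> global;        // stand-in for the global mark stack

  void push(void* obj) {
    if (local.size() == kLocalCap) {
      // spill a batch so the local push below must succeed
      for (size_t i = 0; i < kLocalCap / 2; i++) {
        global.push_back(local.front());
        local.pop_front();
      }
    }
    local.push_back(obj);
    assert(local.size() <= kLocalCap);
  }
};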
// This determines whether the method below will check both the local
// and global fingers when determining whether to push on the stack a
// gray object (value 1) or whether it will only check the global one
// (value 0). The tradeoffs are that the former will be a bit more
// accurate and possibly push less on the stack, but it might also be
// a little bit slower.
#define _CHECK_BOTH_FINGERS_ 1
inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
_task_id, (void*) obj);
}
++_refs_reached;
HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
if (_g1h->is_in_g1_reserved(objAddr)) {
assert(obj != NULL, "null check is implicit");
if (!_nextMarkBitMap->isMarked(objAddr)) {
// Only get the containing region if the object is not marked on the
// bitmap (otherwise, it's a waste of time since we won't do
// anything with it).
HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
if (!hr->obj_allocated_since_next_marking(obj)) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
_task_id, (void*) obj);
}
// we need to mark it first
if (_nextMarkBitMap->parMark(objAddr)) {
// No OrderAccess::store_load() is needed. It is implicit in the
// CAS done in parMark(objAddr) above
HeapWord* global_finger = _cm->finger();
#if _CHECK_BOTH_FINGERS_
// we will check both the local and global fingers
if (_finger != NULL && objAddr < _finger) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
"pushing it", _task_id, _finger);
}
push(obj);
} else if (_curr_region != NULL && objAddr < _region_limit) {
// do nothing
} else if (objAddr < global_finger) {
// Notice that the global finger might be moving forward
// concurrently. This is not a problem. In the worst case, we
// mark the object while it is above the global finger and, by
// the time we read the global finger, it has moved forward
// past this object. In this case, the object will probably
// be visited when a task is scanning the region and will also
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the global finger "
"("PTR_FORMAT"), pushing it",
_task_id, global_finger);
}
push(obj);
} else {
// do nothing
}
#else // _CHECK_BOTH_FINGERS_
// we will only check the global finger
if (objAddr < global_finger) {
// see long comment above
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the global finger "
"("PTR_FORMAT"), pushing it",
_task_id, global_finger);
}
push(obj);
}
#endif // _CHECK_BOTH_FINGERS_
}
}
}
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
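deal_with_reference() above pushes a freshly marked object only when it lies below a finger, i.e. inside an already-claimed prefix that no scan will revisit; objects above the fingers are left for normal region scanning. A compact sketch of that decision, ignoring the verbose logging and the region-limit case; names are illustrative:

#include <cstdint>

typedef uintptr_t HeapAddr;

// local_finger == 0 models "this task holds no region" (_finger == NULL).
bool needs_push(HeapAddr obj, HeapAddr local_finger, HeapAddr global_finger) {
  if (local_finger != 0 && obj < local_finger) {
    return true;                    // below the local finger
  }
  return obj < global_finger;       // below the global finger
}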

File diff suppressed because it is too large

View File

@@ -27,8 +27,10 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@@ -42,7 +44,6 @@
// heap subsets that will yield large amounts of garbage.
class HeapRegion;
class HeapRegionSeq;
class HRRSCleanupTask;
class PermanentGenerationSpec;
class GenerationSpec;
@@ -103,6 +104,19 @@ public:
size_t length() { return _length; }
size_t survivor_length() { return _survivor_length; }
// Currently we do not keep track of the used byte sum for the
// young list and the survivors and it'd be quite a lot of work to
// do so. When we eventually replace the young list with
// instances of HeapRegionLinkedList we'll get that for free. So,
// we'll report the more accurate information then.
size_t eden_used_bytes() {
assert(length() >= survivor_length(), "invariant");
return (length() - survivor_length()) * HeapRegion::GrainBytes;
}
size_t survivor_used_bytes() {
return survivor_length() * HeapRegion::GrainBytes;
}
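For a concrete sense of the region-granularity estimate above, assume a hypothetical 1 MB region grain and a young list of 12 regions, 3 of them survivors:

eden_used_bytes()     == (12 - 3) * 1 MB == 9 MB
survivor_used_bytes() ==        3 * 1 MB == 3 MB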
void rs_length_sampling_init();
bool rs_length_sampling_more();
void rs_length_sampling_next();
@@ -183,9 +197,6 @@ private:
// The part of _g1_storage that is currently committed.
MemRegion _g1_committed;
// The maximum part of _g1_storage that has ever been committed.
MemRegion _g1_max_committed;
// The master free list. It will satisfy all new region allocations.
MasterFreeRegionList _free_list;
@@ -209,7 +220,7 @@ private:
void rebuild_region_lists();
// The sequence of all heap regions in the heap.
HeapRegionSeq* _hrs;
HeapRegionSeq _hrs;
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
@@ -288,6 +299,8 @@ private:
size_t* _surviving_young_words;
G1HRPrinter _hr_printer;
void setup_surviving_young_words();
void update_surviving_young_words(size_t* surv_young_words);
void cleanup_surviving_young_words();
@@ -408,13 +421,15 @@ protected:
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the
// index of the first region or -1 if the search was unsuccessful.
int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
// index of the first region or G1_NULL_HRS_INDEX if the search
// was unsuccessful.
size_t humongous_obj_allocate_find_first(size_t num_regions,
size_t word_size);
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(int first,
HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
size_t num_regions,
size_t word_size);
@@ -434,8 +449,7 @@ protected:
// * All allocation requests for new TLABs should go to
// allocate_new_tlab().
//
// * All non-TLAB allocation requests should go to mem_allocate()
// and mem_allocate() should never be called with is_tlab == true.
// * All non-TLAB allocation requests should go to mem_allocate().
//
// * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If
@@ -455,8 +469,6 @@ protected:
virtual HeapWord* allocate_new_tlab(size_t word_size);
virtual HeapWord* mem_allocate(size_t word_size,
bool is_noref,
bool is_tlab, /* expected to be false */
bool* gc_overhead_limit_was_exceeded);
// The following three methods take a gc_count_before_ret
@@ -574,8 +586,8 @@ public:
void register_region_with_in_cset_fast_test(HeapRegion* r) {
assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant");
int index = r->hrs_index();
assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
size_t index = r->hrs_index();
assert(index < _in_cset_fast_test_length, "invariant");
assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true;
}
@@ -626,6 +638,8 @@ public:
return _full_collections_completed;
}
G1HRPrinter* hr_printer() { return &_hr_printer; }
protected:
// Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -741,6 +755,11 @@ protected:
HumongousRegionSet* humongous_proxy_set,
bool par);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
// after _g1_storage is updated.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
@@ -803,7 +822,6 @@ protected:
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
// Ensure that the relevant gc_alloc regions are set.
void get_gc_alloc_regions();
// We're done with GC alloc regions. We are going to tear down the
@@ -954,15 +972,13 @@ public:
}
// The total number of regions in the heap.
size_t n_regions();
size_t n_regions() { return _hrs.length(); }
// The max number of regions in the heap.
size_t max_regions() { return _hrs.max_length(); }
// The number of regions that are completely free.
size_t max_regions();
// The number of regions that are completely free.
size_t free_regions() {
return _free_list.length();
}
size_t free_regions() { return _free_list.length(); }
// The number of regions that are not completely free.
size_t used_regions() { return n_regions() - free_regions(); }
@@ -970,6 +986,10 @@ public:
// The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; }
// Factory method for HeapRegion instances. It will return NULL if
// the allocation fails.
HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
@@ -1131,17 +1151,15 @@ public:
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk);
void heap_region_iterate(HeapRegionClosure* blk) const;
// Iterate over heap regions starting with r (or the first region if "r"
// is NULL), in address order, terminating early if the "doHeapRegion"
// method returns "true".
void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
// As above but starting from the region at index idx.
void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
HeapRegion* region_at(size_t idx);
// Return the region with the given index. It assumes the index is valid.
HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
@@ -1182,12 +1200,14 @@ public:
// A G1CollectedHeap will contain some number of heap regions. This
// finds the region containing a given address, or else returns NULL.
HeapRegion* heap_region_containing(const void* addr) const;
template <class T>
inline HeapRegion* heap_region_containing(const T addr) const;
// Like the above, but requires "addr" to be in the heap (to avoid a
// null-check), and unlike the above, may return a continuing humongous
// region.
HeapRegion* heap_region_containing_raw(const void* addr) const;
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly
@@ -1249,7 +1269,7 @@ public:
return true;
}
bool is_in_young(oop obj) {
bool is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
}
@@ -1286,10 +1306,6 @@ public:
return true;
}
// The boundary between a "large" and "small" array of primitives, in
// words.
virtual size_t large_typearray_limit();
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs
@@ -1329,14 +1345,20 @@ public:
// Perform verification.
// use_prev_marking == true -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// use_prev_marking == true. Currently, there is only one case where
// this is called with use_prev_marking == false, which is to verify
// the "next" marking information at the end of remark.
void verify(bool allow_dirty, bool silent, bool use_prev_marking);
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void verify(bool allow_dirty, bool silent, VerifyOption vo);
// Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty, bool silent);
@@ -1355,10 +1377,9 @@ public:
// Override
void print_tracing_info() const;
// If "addr" is a pointer into the (reserved?) heap, returns a positive
// number indicating the "arena" within the heap in which "addr" falls.
// Or else returns 0.
virtual int addr_to_arena_id(void* addr) const;
// The following two methods are helpful for debugging RSet issues.
void print_cset_rsets() PRODUCT_RETURN;
void print_all_rsets() PRODUCT_RETURN;
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
@@ -1389,24 +1410,27 @@ public:
// bitmap off to the side.
void doConcurrentMark();
// This is called from the marksweep collector which then does
// a concurrent mark and verifies that the results agree with
// the stop the world marking.
void checkConcurrentMark();
// Do a full concurrent marking, synchronously.
void do_sync_mark();
bool isMarkedPrev(oop obj) const;
bool isMarkedNext(oop obj) const;
// use_prev_marking == true -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information,
// vo == UseMarkWord -> use mark word from object header
bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const bool use_prev_marking) const {
if (use_prev_marking) {
return is_obj_dead(obj, hr);
} else {
return is_obj_ill(obj, hr);
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking:
return is_obj_dead(obj, hr);
case VerifyOption_G1UseNextMarking:
return is_obj_ill(obj, hr);
default:
assert(vo == VerifyOption_G1UseMarkWord, "must be");
return !obj->is_gc_marked();
}
}
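The three VerifyOption values dispatched on above are defined elsewhere in this change; a minimal sketch of the shape such an enum takes (the enumerator names follow the code, everything else is assumed):

enum VerifyOption {
  VerifyOption_G1UsePrevMarking,   // use "prev" marking information
  VerifyOption_G1UseNextMarking,   // use "next" marking information
  VerifyOption_G1UseMarkWord       // use the mark word in the object header
};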
@@ -1447,18 +1471,24 @@ public:
// Added if it is in permanent gen it isn't dead.
// Added if it is NULL it isn't dead.
// use_prev_marking == true -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information,
// vo == UseMarkWord -> use mark word from object header
bool is_obj_dead_cond(const oop obj,
const bool use_prev_marking) {
if (use_prev_marking) {
return is_obj_dead(obj);
} else {
return is_obj_ill(obj);
const VerifyOption vo) const {
switch (vo) {
case VerifyOption_G1UsePrevMarking:
return is_obj_dead(obj);
case VerifyOption_G1UseNextMarking:
return is_obj_ill(obj);
default:
assert(vo == VerifyOption_G1UseMarkWord, "must be");
return !obj->is_gc_marked();
}
}
bool is_obj_dead(const oop obj) {
bool is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (Universe::heap()->is_in_permanent(obj))
@@ -1469,7 +1499,7 @@ public:
else return is_obj_dead(obj, hr);
}
bool is_obj_ill(const oop obj) {
bool is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (Universe::heap()->is_in_permanent(obj))

View File

@@ -34,9 +34,10 @@
// Inline functions for G1CollectedHeap
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
HeapRegion* hr = _hrs->addr_to_region(addr);
G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
// hr can be null if addr in perm_gen
if (hr != NULL && hr->continuesHumongous()) {
hr = hr->humongous_start_region();
@@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
return hr;
}
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
assert(_g1_reserved.contains(addr), "invariant");
size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
>> HeapRegion::LogOfHRGrainBytes;
HeapRegion* res = _hrs->at(index);
assert(res == _hrs->addr_to_region(addr), "sanity");
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(_g1_reserved.contains((const void*) addr), "invariant");
HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
return res;
}
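The open-coded index computation removed here (pointer_delta shifted by LogOfHRGrainBytes) is what addr_to_region_unsafe() now encapsulates: with power-of-two regions, an address maps to its region index by subtract-and-shift. A freestanding sketch with assumed constants:

#include <cstddef>
#include <cstdint>

const uintptr_t kHeapStart = 0x100000000ULL;  // assumed reserved base
const unsigned  kLogGrain  = 20;              // assumed 1 MB regions

size_t region_index(uintptr_t addr) {
  return (addr - kHeapStart) >> kLogGrain;    // pointer_delta >> log2(grain)
}
// e.g. region_index(kHeapStart + 3 * (1 << 20) + 42) == 3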
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrs->addr_to_region(obj);
HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}

View File

@@ -239,6 +239,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
_should_revert_to_full_young_gcs(false),
_last_full_young_gc(false),
_eden_bytes_before_gc(0),
_survivor_bytes_before_gc(0),
_capacity_before_gc(0),
_prev_collection_pause_used_at_end_bytes(0),
_collection_set(NULL),
@@ -897,6 +901,11 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_bytes_in_to_space_after_gc = 0;
_bytes_in_collection_set_before_gc = 0;
YoungList* young_list = _g1->young_list();
_eden_bytes_before_gc = young_list->eden_used_bytes();
_survivor_bytes_before_gc = young_list->survivor_used_bytes();
_capacity_before_gc = _g1->capacity();
#ifdef DEBUG
// initialise these to something well known so that we can spot
// if they are not set properly
@@ -1460,14 +1469,6 @@ void G1CollectorPolicy::record_collection_pause_end() {
}
}
}
if (PrintGCDetails)
gclog_or_tty->print(" [");
if (PrintGC || PrintGCDetails)
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes,
_g1->used(), _g1->capacity());
if (PrintGCDetails)
gclog_or_tty->print_cr("]");
_all_pause_times_ms->add(elapsed_ms);
if (update_stats) {
@@ -1672,6 +1673,40 @@ void G1CollectorPolicy::record_collection_pause_end() {
// </NEW PREDICTION>
}
#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes) \
byte_size_in_proper_unit((bytes)), \
proper_unit_for_byte_size((bytes))
void G1CollectorPolicy::print_heap_transition() {
if (PrintGCDetails) {
YoungList* young_list = _g1->young_list();
size_t eden_bytes = young_list->eden_used_bytes();
size_t survivor_bytes = young_list->survivor_used_bytes();
size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
size_t used = _g1->used();
size_t capacity = _g1->capacity();
gclog_or_tty->print_cr(
" [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
"Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
"Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
EXT_SIZE_PARAMS(_eden_bytes_before_gc),
EXT_SIZE_PARAMS(eden_bytes),
EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
EXT_SIZE_PARAMS(survivor_bytes),
EXT_SIZE_PARAMS(used_before_gc),
EXT_SIZE_PARAMS(_capacity_before_gc),
EXT_SIZE_PARAMS(used),
EXT_SIZE_PARAMS(capacity));
} else if (PrintGC) {
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes,
_g1->used(), _g1->capacity());
}
}
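With PrintGCDetails enabled, the code above emits a single transition line whose shape follows the format string verbatim; the sizes in this sample are invented for illustration:

 [Eden: 12M->0M Survivors: 2M->3M Heap: 60M(128M)->52M(128M)]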
// <NEW PREDICTION>
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@@ -2435,21 +2470,6 @@ record_collection_pause_start(double start_time_sec, size_t start_used) {
G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
}
class NextNonCSElemFinder: public HeapRegionClosure {
HeapRegion* _res;
public:
NextNonCSElemFinder(): _res(NULL) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->in_collection_set()) {
_res = r;
return true;
} else {
return false;
}
}
HeapRegion* res() { return _res; }
};
class KnownGarbageClosure: public HeapRegionClosure {
CollectionSetChooser* _hrSorted;
@@ -2618,14 +2638,6 @@ add_to_collection_set(HeapRegion* hr) {
assert(_inc_cset_build_state == Active, "Precondition");
assert(!hr->is_young(), "non-incremental add of young region");
if (G1PrintHeapRegions) {
gclog_or_tty->print_cr("added region to cset "
"%d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT", %s",
hr->hrs_index(), hr->bottom(), hr->end(),
hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
}
if (_g1->mark_in_progress())
_g1->concurrent_mark()->registerCSetRegion(hr);
@@ -2791,14 +2803,6 @@ void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
_inc_cset_tail->set_next_in_collection_set(hr);
}
_inc_cset_tail = hr;
if (G1PrintHeapRegions) {
gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
"%d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT", young %s",
hr->hrs_index(), hr->bottom(), hr->end(),
hr->top(), (hr->is_young()) ? "YES" : "NO");
}
}
// Add the region to the LHS of the incremental cset
@@ -2816,14 +2820,6 @@ void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
_inc_cset_tail = hr;
}
_inc_cset_head = hr;
if (G1PrintHeapRegions) {
gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
"%d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT", young %s",
hr->hrs_index(), hr->bottom(), hr->end(),
hr->top(), (hr->is_young()) ? "YES" : "NO");
}
}
#ifndef PRODUCT

View File

@@ -891,6 +891,7 @@ public:
virtual void record_collection_pause_end_G1_strong_roots();
virtual void record_collection_pause_end();
void print_heap_transition();
// Record the fact that a full collection occurred.
virtual void record_full_collection_start();
@@ -1179,6 +1180,11 @@ protected:
// The limit on the number of regions allocated for survivors.
size_t _max_survivor_regions;
// For reporting purposes.
size_t _eden_bytes_before_gc;
size_t _survivor_bytes_before_gc;
size_t _capacity_before_gc;
// The number of survivor regions after a collection.
size_t _recorded_survivor_regions;
// List of survivor regions.

View File

@@ -0,0 +1,112 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/ostream.hpp"
const char* G1HRPrinter::action_name(ActionType action) {
switch(action) {
case Alloc: return "ALLOC";
case AllocForce: return "ALLOC-FORCE";
case Retire: return "RETIRE";
case Reuse: return "REUSE";
case CSet: return "CSET";
case EvacFailure: return "EVAC-FAILURE";
case Cleanup: return "CLEANUP";
case PostCompaction: return "POST-COMPACTION";
case Commit: return "COMMIT";
case Uncommit: return "UNCOMMIT";
default: ShouldNotReachHere();
}
// trying to keep the Windows compiler happy
return NULL;
}
const char* G1HRPrinter::region_type_name(RegionType type) {
switch (type) {
case Unset: return NULL;
case Eden: return "Eden";
case Survivor: return "Survivor";
case Old: return "Old";
case SingleHumongous: return "SingleH";
case StartsHumongous: return "StartsH";
case ContinuesHumongous: return "ContinuesH";
default: ShouldNotReachHere();
}
// trying to keep the Windows compiler happy
return NULL;
}
const char* G1HRPrinter::phase_name(PhaseType phase) {
switch (phase) {
case StartGC: return "StartGC";
case EndGC: return "EndGC";
case StartFullGC: return "StartFullGC";
case EndFullGC: return "EndFullGC";
default: ShouldNotReachHere();
}
// trying to keep the Windows compiler happy
return NULL;
}
#define G1HR_PREFIX " G1HR"
void G1HRPrinter::print(ActionType action, RegionType type,
HeapRegion* hr, HeapWord* top) {
const char* action_str = action_name(action);
const char* type_str = region_type_name(type);
HeapWord* bottom = hr->bottom();
if (type_str != NULL) {
if (top != NULL) {
gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT" "PTR_FORMAT,
action_str, type_str, bottom, top);
} else {
gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT,
action_str, type_str, bottom);
}
} else {
if (top != NULL) {
gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT" "PTR_FORMAT,
action_str, bottom, top);
} else {
gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT,
action_str, bottom);
}
}
}
void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) {
const char* action_str = action_name(action);
gclog_or_tty->print_cr(G1HR_PREFIX" %s ["PTR_FORMAT","PTR_FORMAT"]",
action_str, bottom, end);
}
void G1HRPrinter::print(PhaseType phase, size_t phase_num) {
const char* phase_str = phase_name(phase);
gclog_or_tty->print_cr(G1HR_PREFIX" #%s "SIZE_FORMAT, phase_str, phase_num);
}
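Taken together, the printers above produce a trace that interleaves phase and action lines. A sample of the shapes the format strings yield (addresses and numbers invented for illustration):

 G1HR #StartGC 5
 G1HR ALLOC(Eden) 0x00000000f2000000
 G1HR RETIRE 0x00000000f2000000 0x00000000f20e8a10
 G1HR CSET 0x00000000f1f00000
 G1HR #EndGC 5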

View File

@@ -0,0 +1,182 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
#include "memory/allocation.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#define SKIP_RETIRED_FULL_REGIONS 1
class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
public:
typedef enum {
Alloc,
AllocForce,
Retire,
Reuse,
CSet,
EvacFailure,
Cleanup,
PostCompaction,
Commit,
Uncommit
} ActionType;
typedef enum {
Unset,
Eden,
Survivor,
Old,
SingleHumongous,
StartsHumongous,
ContinuesHumongous
} RegionType;
typedef enum {
StartGC,
EndGC,
StartFullGC,
EndFullGC
} PhaseType;
private:
bool _active;
static const char* action_name(ActionType action);
static const char* region_type_name(RegionType type);
static const char* phase_name(PhaseType phase);
// Print an action event. This version is used in most scenarios and
// only prints the region's bottom. The parameters type and top are
// optional (the "not set" values are Unset and NULL).
static void print(ActionType action, RegionType type,
HeapRegion* hr, HeapWord* top);
// Print an action event. This version prints both the region's
// bottom and end. Used for Commit / Uncommit events.
static void print(ActionType action, HeapWord* bottom, HeapWord* end);
// Print a phase event.
static void print(PhaseType phase, size_t phase_num);
public:
// In some places we iterate over a list in order to generate output
// for the list's elements. By exposing this we can avoid this
// iteration if the printer is not active.
bool is_active() const { return _active; }
// Have to set this explicitly as we have to do this during the
// heap's initialize() method, not in the constructor.
void set_active(bool active) { _active = active; }
// The methods below are convenient wrappers for the print() methods.
void alloc(HeapRegion* hr, RegionType type, bool force = false) {
if (is_active()) {
print((!force) ? Alloc : AllocForce, type, hr, NULL);
}
}
void alloc(RegionType type, HeapRegion* hr, HeapWord* top) {
if (is_active()) {
print(Alloc, type, hr, top);
}
}
void retire(HeapRegion* hr) {
if (is_active()) {
if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) {
print(Retire, Unset, hr, hr->top());
}
}
}
void reuse(HeapRegion* hr) {
if (is_active()) {
print(Reuse, Unset, hr, NULL);
}
}
void cset(HeapRegion* hr) {
if (is_active()) {
print(CSet, Unset, hr, NULL);
}
}
void evac_failure(HeapRegion* hr) {
if (is_active()) {
print(EvacFailure, Unset, hr, NULL);
}
}
void cleanup(HeapRegion* hr) {
if (is_active()) {
print(Cleanup, Unset, hr, NULL);
}
}
void post_compaction(HeapRegion* hr, RegionType type) {
if (is_active()) {
print(PostCompaction, type, hr, hr->top());
}
}
void commit(HeapWord* bottom, HeapWord* end) {
if (is_active()) {
print(Commit, bottom, end);
}
}
void uncommit(HeapWord* bottom, HeapWord* end) {
if (is_active()) {
print(Uncommit, bottom, end);
}
}
void start_gc(bool full, size_t gc_num) {
if (is_active()) {
if (!full) {
print(StartGC, gc_num);
} else {
print(StartFullGC, gc_num);
}
}
}
void end_gc(bool full, size_t gc_num) {
if (is_active()) {
if (!full) {
print(EndGC, gc_num);
} else {
print(EndFullGC, gc_num);
}
}
}
G1HRPrinter() : _active(false) { }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP

View File

@@ -84,11 +84,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
if (VerifyDuringGC) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->checkConcurrentMark();
}
mark_sweep_phase2();
// Don't add any more derived pointers during phase3
@@ -179,6 +174,29 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
assert(GenMarkSweep::_marking_stack.is_empty(),
"stack should be empty by now");
if (VerifyDuringGC) {
HandleMark hm; // handle scope
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
Universe::heap()->prepare_for_verify();
// Note: we can verify only the heap here. When an object is
// marked, the previous value of the mark word (including
// identity hash values, ages, etc) is preserved, and the mark
// word is set to markOop::marked_value - effectively removing
// any hash values from the mark word. These hash values are
// used when verifying the dictionaries and so removing them
// from the mark word can make verification of the dictionaries
// fail. At the end of the GC, the original mark word values
// (including hash values) are restored to the appropriate
// objects.
Universe::heap()->verify(/* allow dirty */ true,
/* silent */ false,
/* option */ VerifyOption_G1UseMarkWord);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
gclog_or_tty->print_cr("]");
}
}
class G1PrepareCompactClosure: public HeapRegionClosure {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@ class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
@@ -40,7 +41,7 @@ class OopsInHeapRegionClosure: public OopsInGenClosure {
protected:
HeapRegion* _from;
public:
virtual void set_region(HeapRegion* from) { _from = from; }
void set_region(HeapRegion* from) { _from = from; }
};
class G1ParClosureSuper : public OopsInHeapRegionClosure {
@@ -161,44 +162,6 @@ public:
bool do_header() { return false; }
};
class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
G1CollectedHeap* _g1;
OopsInHeapRegionClosure* _oc;
public:
FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
OopsInHeapRegionClosure* oc) :
_g1(g1), _oc(oc)
{}
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
void set_region(HeapRegion* from) {
_oc->set_region(from);
}
};
class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
G1CollectedHeap* _g1;
ConcurrentMark* _cm;
OopsInHeapRegionClosure* _oc;
public:
FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
OopsInHeapRegionClosure* oc,
ConcurrentMark* cm)
: _g1(g1), _oc(oc), _cm(cm) { }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
bool do_header() { return false; }
void set_region(HeapRegion* from) {
_oc->set_region(from);
}
};
class FilterOutOfRegionClosure: public OopClosure {
HeapWord* _r_bottom;
HeapWord* _r_end;
@@ -214,4 +177,16 @@ public:
int out_of_region() { return _out_of_region; }
};
// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMTask* _task;
public:
G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
template <class T> void do_oop_nv(T* p);
virtual void do_oop( oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
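The do_oop/do_oop_nv pairing above is the usual specialization idiom: the virtual do_oop overloads forward to a non-virtual template body, so specialized iterators (registered through the _nv macro list in this change) can bind the call statically and skip virtual dispatch. A self-contained model of the idiom, with stand-in slot types:

#include <cstdio>

struct Closure {                        // models OopClosure
  virtual ~Closure() {}
  virtual void do_oop(long** p) = 0;    // stand-in for oop*
  virtual void do_oop(int** p)  = 0;    // stand-in for narrowOop*
};

struct MyClosure : Closure {            // models G1CMOopClosure
  template <class T> void do_oop_nv(T* p) {   // non-virtual worker
    std::printf("visiting slot %p\n", (void*)p);
  }
  virtual void do_oop(long** p) { do_oop_nv(p); }
  virtual void do_oop(int** p)  { do_oop_nv(p); }
};
// Specialized iterators call do_oop_nv() directly; generic code goes
// through the virtual Closure interface.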
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1OopClosures.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
@@ -66,27 +66,6 @@ template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
}
}
template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop) &&
_g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
_oc->do_oop(p);
}
template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
if (hr != NULL) {
if (hr->in_collection_set())
_oc->do_oop(p);
else if (!hr->is_young())
_cm->grayRoot(obj);
}
}
}
// This closure is applied to the fields of the objects that have just been copied.
template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
@@ -129,5 +108,18 @@ template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
}
}
template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] we're looking at location "
"*"PTR_FORMAT" = "PTR_FORMAT,
_task->task_id(), p, (void*) obj);
}
_task->deal_with_reference(obj);
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP

View File

@@ -66,41 +66,6 @@ void ct_freq_update_histo_and_reset() {
}
#endif
class IntoCSOopClosure: public OopsInHeapRegionClosure {
OopsInHeapRegionClosure* _blk;
G1CollectedHeap* _g1;
public:
IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
_g1(g1), _blk(blk) {}
void set_region(HeapRegion* from) {
_blk->set_region(from);
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
}
bool apply_to_weak_ref_discovered_field() { return true; }
bool idempotent() { return true; }
};
class VerifyRSCleanCardOopClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
HeapRegion* to = _g1->heap_region_containing(obj);
guarantee(to == NULL || !to->in_collection_set(),
"Missed a rem set member.");
}
};
G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
: _g1(g1), _conc_refine_cards(0),
_ct_bs(ct_bs), _g1p(_g1->g1_policy()),
@@ -332,31 +297,6 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
_g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
int _count;
public:
PrintRSClosure() : _count(0) {}
bool doHeapRegion(HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();
_count += (int) hrrs->occupied();
if (hrrs->occupied() == 0) {
gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
"has no remset entries\n",
r->bottom(), r->end());
} else {
gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
r->bottom(), r->end());
r->print();
hrrs->print();
gclog_or_tty->print("\nDone printing rem set\n");
}
return false;
}
int occupied() {return _count;}
};
#endif
class CountRSSizeClosure: public HeapRegionClosure {
size_t _n;
size_t _tot;
@@ -482,10 +422,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
}
void G1RemSet::prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
PrintRSClosure cl;
_g1->collection_set_iterate(&cl);
#endif
cleanupHRRS();
ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
_g1->set_refine_cte_cl_concurrency(false);
@@ -504,14 +440,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
}
class cleanUpIteratorsClosure : public HeapRegionClosure {
bool doHeapRegion(HeapRegion *r) {
HeapRegionRemSet* hrrs = r->rem_set();
hrrs->init_for_par_iteration();
return false;
}
};
// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
@@ -572,18 +500,13 @@ public:
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
guarantee( _cards_scanned != NULL, "invariant" );
_total_cards_scanned = 0;
for (uint i = 0; i < n_workers(); ++i)
for (uint i = 0; i < n_workers(); ++i) {
_total_cards_scanned += _cards_scanned[i];
}
FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
_cards_scanned = NULL;
// Cleanup after copy
#if G1_REM_SET_LOGGING
PrintRSClosure cl;
_g1->heap_region_iterate(&cl);
#endif
_g1->set_refine_cte_cl_concurrency(true);
cleanUpIteratorsClosure iterClosure;
_g1->collection_set_iterate(&iterClosure);
// Set all cards back to clean.
_g1->cleanUpCardTable();

View File

@@ -142,8 +142,6 @@ public:
virtual void prepare_for_verify();
};
#define G1_REM_SET_LOGGING 0
class CountNonCleanMemRegionClosure: public MemRegionClosure {
G1CollectedHeap* _g1;
int _n;

View File

@@ -65,12 +65,6 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
HeapRegion* to = _g1->heap_region_containing(obj);
if (to != NULL && from != to) {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
" for region [" PTR_FORMAT ", " PTR_FORMAT ")",
p, obj,
to->bottom(), to->end());
#endif
assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
to->rem_set()->add_reference(p, tid);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,7 @@ typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class FilterInHeapRegionAndIntoCSClosure;
class FilterAndMarkInHeapRegionAndIntoCSClosure;
class G1CMOopClosure;
#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -58,8 +57,7 @@ class FilterAndMarkInHeapRegionAndIntoCSClosure;
f(G1ParPushHeapRSClosure,_nv) \
f(FilterIntoCSClosure,_nv) \
f(FilterOutOfRegionClosure,_nv) \
f(FilterInHeapRegionAndIntoCSClosure,_nv) \
f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)
f(G1CMOopClosure,_nv)
#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."

View File

@@ -60,13 +60,14 @@ private:
oop _containing_obj;
bool _failures;
int _n_failures;
bool _use_prev_marking;
VerifyOption _vo;
public:
// use_prev_marking == true -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information
VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
_g1h(g1h), _bs(NULL), _containing_obj(NULL),
_failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
_failures(false), _n_failures(0), _vo(vo)
{
BarrierSet* bs = _g1h->barrier_set();
if (bs->is_a(BarrierSet::CardTableModRef))
@@ -95,14 +96,14 @@ public:
template <class T> void do_oop_work(T* p) {
assert(_containing_obj != NULL, "Precondition");
assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
"Precondition");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
bool failed = false;
if (!_g1h->is_in_closed_subset(obj) ||
_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
_g1h->is_obj_dead_cond(obj, _vo)) {
if (!_failures) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("----------");
@@ -159,20 +160,16 @@ public:
gclog_or_tty->print_cr("----------");
}
gclog_or_tty->print_cr("Missing rem set entry:");
gclog_or_tty->print_cr("Field "PTR_FORMAT
" of obj "PTR_FORMAT
", in region %d ["PTR_FORMAT
", "PTR_FORMAT"),",
p, (void*) _containing_obj,
from->hrs_index(),
from->bottom(),
from->end());
gclog_or_tty->print_cr("Field "PTR_FORMAT" "
"of obj "PTR_FORMAT", "
"in region "HR_FORMAT,
p, (void*) _containing_obj,
HR_FORMAT_PARAMS(from));
_containing_obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("points to obj "PTR_FORMAT
" in region %d ["PTR_FORMAT
", "PTR_FORMAT").",
(void*) obj, to->hrs_index(),
to->bottom(), to->end());
gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
"in region "HR_FORMAT,
(void*) obj,
HR_FORMAT_PARAMS(to));
obj->print_on(gclog_or_tty);
gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
cv_obj, cv_field);
@@ -484,11 +481,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
HeapRegion::
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed)
HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed)
: G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_next_fk(HeapRegionDCTOC::NoFilterKind),
_hrs_index(-1),
_next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false), _is_gc_alloc_region(false),
_next_in_special_set(NULL), _orig_end(NULL),
@ -740,20 +736,20 @@ void HeapRegion::print_on(outputStream* st) const {
void HeapRegion::verify(bool allow_dirty) const {
bool dummy = false;
verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
void HeapRegion::verify(bool allow_dirty,
bool use_prev_marking,
VerifyOption vo,
bool* failures) const {
G1CollectedHeap* g1 = G1CollectedHeap::heap();
*failures = false;
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
VerifyLiveClosure vl_cl(g1, use_prev_marking);
VerifyLiveClosure vl_cl(g1, vo);
bool is_humongous = isHumongous();
bool do_bot_verify = !is_young();
size_t object_num = 0;
@ -778,7 +774,7 @@ void HeapRegion::verify(bool allow_dirty,
return;
}
if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
if (!g1->is_obj_dead_cond(obj, this, vo)) {
if (obj->is_oop()) {
klassOop klass = obj->klass();
if (!klass->is_perm()) {

View File

@ -52,9 +52,11 @@ class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;
#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
(_hr_)->top(), (_hr_)->end()
#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
(_hr_)->hrs_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
(_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
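For illustration only (not part of the patch): a log statement built with the new macro pair reads along these lines, where hr is whatever HeapRegion* is in scope.

  // Prints, e.g., "17:(E)[0x7f..,0x7f..,0x7f..]", i.e. the region index,
  // a young/survivor tag (S, E, or -), then bottom, top and end.
  gclog_or_tty->print_cr("region "HR_FORMAT, HR_FORMAT_PARAMS(hr));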
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@ -237,9 +239,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
protected:
// If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1.
int _hrs_index;
// The index of this region in the heap region sequence.
size_t _hrs_index;
HumongousType _humongous_type;
// For a humongous region, region in which it starts.
@ -296,8 +297,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
enum YoungType {
NotYoung, // a region is not young
Young, // a region is young
Survivor // a region is young and it contains
// survivor
Survivor // a region is young and it contains survivors
};
volatile YoungType _young_type;
@ -351,7 +351,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
public:
// If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
HeapRegion(size_t hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed);
static int LogOfHRGrainBytes;
@ -393,8 +394,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// If this region is a member of a HeapRegionSeq, the index in that
// sequence, otherwise -1.
int hrs_index() const { return _hrs_index; }
void set_hrs_index(int index) { _hrs_index = index; }
size_t hrs_index() const { return _hrs_index; }
// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
@ -579,6 +579,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
HeapWord* orig_end() { return _orig_end; }
// Allows logical separation between objects allocated before and after.
void save_marks();
@ -853,14 +855,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
void print() const;
void print_on(outputStream* st) const;
// use_prev_marking == true -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// use_prev_marking == true. Currently, there is only one case where
// this is called with use_prev_marking == false, which is to verify
// the "next" marking information at the end of remark.
void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
// Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty) const;
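For illustration, the three modes described above would be exercised roughly as follows; the enumerator spellings extend the VerifyOption_G1UsePrevMarking form used earlier in this patch, and the harness around the calls is assumed:

  bool failures = false;
  hr->verify(allow_dirty, VerifyOption_G1UsePrevMarking, &failures); // the common case
  hr->verify(allow_dirty, VerifyOption_G1UseNextMarking, &failures); // at the end of remark
  hr->verify(allow_dirty, VerifyOption_G1UseMarkWord, &failures);    // during a full GC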

View File

@ -834,7 +834,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
#endif
// Set the corresponding coarse bit.
int max_hrs_index = max->hr()->hrs_index();
size_t max_hrs_index = max->hr()->hrs_index();
if (!_coarse_map.at(max_hrs_index)) {
_coarse_map.at_put(max_hrs_index, true);
_n_coarse_entries++;
@ -860,7 +860,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) {
// First eliminated garbage regions from the coarse map.
if (G1RSScrubVerbose)
gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
hr()->hrs_index());
assert(_coarse_map.size() == region_bm->size(), "Precondition");
if (G1RSScrubVerbose)
@ -878,7 +879,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
PosParPRT* nxt = cur->next();
// If the entire region is dead, eliminate.
if (G1RSScrubVerbose)
gclog_or_tty->print_cr(" For other region %d:", cur->hr()->hrs_index());
gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":",
cur->hr()->hrs_index());
if (!region_bm->at(cur->hr()->hrs_index())) {
*prev = nxt;
cur->set_next(NULL);
@ -994,7 +996,7 @@ void OtherRegionsTable::clear() {
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
size_t hrs_ind = (size_t)from_hr->hrs_index();
size_t hrs_ind = from_hr->hrs_index();
size_t ind = hrs_ind & _mod_max_fine_entries_mask;
if (del_single_region_table(ind, from_hr)) {
assert(!_coarse_map.at(hrs_ind), "Inv");
@ -1002,7 +1004,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
_coarse_map.par_at_put(hrs_ind, 0);
}
// Check to see if any of the fcc entries come from here.
int hr_ind = hr()->hrs_index();
size_t hr_ind = hr()->hrs_index();
for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
int fcc_ent = _from_card_cache[tid][hr_ind];
if (fcc_ent != -1) {
@ -1083,8 +1085,9 @@ int HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
: _bosa(bosa), _other_regions(hr) {
reset_for_par_iteration();
}
void HeapRegionRemSet::setup_remset_size() {
// Setup sparse and fine-grain tables sizes.
@ -1099,10 +1102,6 @@ void HeapRegionRemSet::setup_remset_size() {
guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}
void HeapRegionRemSet::init_for_par_iteration() {
_iter_state = Unclaimed;
}
bool HeapRegionRemSet::claim_iter() {
if (_iter_state != Unclaimed) return false;
jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
@ -1117,7 +1116,6 @@ bool HeapRegionRemSet::iter_is_complete() {
return _iter_state == Complete;
}
void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
iter->initialize(this);
}
@ -1130,7 +1128,7 @@ void HeapRegionRemSet::print() const {
while (iter.has_next(card_index)) {
HeapWord* card_start =
G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
}
// XXX
if (iter.n_yielded() != occupied()) {
@ -1157,6 +1155,14 @@ void HeapRegionRemSet::par_cleanup() {
void HeapRegionRemSet::clear() {
_other_regions.clear();
assert(occupied() == 0, "Should be clear.");
reset_for_par_iteration();
}
void HeapRegionRemSet::reset_for_par_iteration() {
_iter_state = Unclaimed;
_iter_claimed = 0;
// It's good to check this to make sure that the two methods are in sync.
assert(verify_ready_for_par_iteration(), "post-condition");
}
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,

View File

@ -262,8 +262,6 @@ public:
virtual void cleanup() = 0;
#endif
// Should be called from single-threaded code.
void init_for_par_iteration();
// Attempt to claim the region. Returns true iff this call caused an
// atomic transition from Unclaimed to Claimed.
bool claim_iter();
@ -273,7 +271,6 @@ public:
bool iter_is_complete();
// Support for claiming blocks of cards during iteration
void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
size_t iter_claimed() const { return (size_t)_iter_claimed; }
// Claim the next block of cards
size_t iter_claimed_next(size_t step) {
@ -284,6 +281,11 @@ public:
} while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
return current;
}
void reset_for_par_iteration();
bool verify_ready_for_par_iteration() {
return (_iter_state == Unclaimed) && (_iter_claimed == 0);
}
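A hedged sketch of the claiming protocol these members support; only claim_iter(), iter_claimed_next() and occupied() come from this class, while the worker loop and the block size are assumptions:

  HeapRegionRemSet* hrrs = hr->rem_set();
  hrrs->claim_iter();              // true only for the first claimant
  const size_t block_size = 64;    // hypothetical number of cards per claim
  while (true) {
    size_t start = hrrs->iter_claimed_next(block_size);
    if (start >= hrrs->occupied()) break;  // nothing left to scan
    // scan cards [start, start + block_size) of this remembered set
  }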
// Initialize the given iterator to iterate over this rem set.
void init_iterator(HeapRegionRemSetIterator* iter) const;

View File

@ -23,259 +23,182 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"
// Local to this file.
// Private
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
else if (*hr1p == *hr2p) return 0;
else {
assert(false, "We should never compare distinct overlapping regions.");
}
return 0;
}
size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
size_t len = length();
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len,
err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
from, len));
HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
_alloc_search_start(0),
// The line below is the worst bit of C++ hackery I've ever written
// (Detlefs, 11/23). You should think of it as equivalent to
// "_regions(100, true)": initialize the growable array and inform it
// that it should allocate its elem array(s) on the C heap.
//
// The first argument, however, is actually a comma expression
// (set_allocation_type(this, C_HEAP), 100). The purpose of the
// set_allocation_type() call is to replace the default allocation
// type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
// allow to pass the assert in GenericGrowableArray() which checks
// that a growable array object must be on C heap if elements are.
//
// Note: containing object is allocated on C heap since it is CHeapObj.
//
_regions((ResourceObj::set_allocation_type((address)&_regions,
ResourceObj::C_HEAP),
(int)max_size),
true),
_next_rr_candidate(0),
_seq_bottom(NULL)
{}
// Private methods.
void HeapRegionSeq::print_empty_runs() {
int empty_run = 0;
int n_empty = 0;
int empty_run_start;
for (int i = 0; i < _regions.length(); i++) {
HeapRegion* r = _regions.at(i);
if (r->continuesHumongous()) continue;
if (r->is_empty()) {
assert(!r->isHumongous(), "H regions should not be empty.");
if (empty_run == 0) empty_run_start = i;
empty_run++;
n_empty++;
} else {
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
empty_run = 0;
}
}
}
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
}
gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
int HeapRegionSeq::find(HeapRegion* hr) {
// FIXME: optimized for adjacent regions of fixed size.
int ind = hr->hrs_index();
if (ind != -1) {
assert(_regions.at(ind) == hr, "Mismatch");
}
return ind;
}
// Public methods.
void HeapRegionSeq::insert(HeapRegion* hr) {
assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
if (_regions.length() == 0
|| _regions.top()->end() <= hr->bottom()) {
hr->set_hrs_index(_regions.length());
_regions.append(hr);
} else {
_regions.append(hr);
_regions.sort(orderRegions);
for (int i = 0; i < _regions.length(); i++) {
_regions.at(i)->set_hrs_index(i);
}
}
char* bot = (char*)_regions.at(0)->bottom();
if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}
size_t HeapRegionSeq::length() {
return _regions.length();
}
size_t HeapRegionSeq::free_suffix() {
size_t res = 0;
int first = _regions.length() - 1;
int cur = first;
while (cur >= 0 &&
(_regions.at(cur)->is_empty()
&& (first == cur
|| (_regions.at(cur+1)->bottom() ==
_regions.at(cur)->end())))) {
res++;
cur--;
}
return res;
}
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
assert(num > 1, "pre-condition");
assert(0 <= from && from <= _regions.length(),
err_msg("from: %d should be valid and <= than %d",
from, _regions.length()));
int curr = from;
int first = -1;
size_t curr = from;
size_t first = G1_NULL_HRS_INDEX;
size_t num_so_far = 0;
while (curr < _regions.length() && num_so_far < num) {
HeapRegion* curr_hr = _regions.at(curr);
if (curr_hr->is_empty()) {
if (first == -1) {
while (curr < len && num_so_far < num) {
if (at(curr)->is_empty()) {
if (first == G1_NULL_HRS_INDEX) {
first = curr;
num_so_far = 1;
} else {
num_so_far += 1;
}
} else {
first = -1;
first = G1_NULL_HRS_INDEX;
num_so_far = 0;
}
curr += 1;
}
assert(num_so_far <= num, "post-condition");
if (num_so_far == num) {
// we found enough space for the humongous object
assert(from <= first && first < _regions.length(), "post-condition");
assert(first < curr && (curr - first) == (int) num, "post-condition");
for (int i = first; i < first + (int) num; ++i) {
assert(_regions.at(i)->is_empty(), "post-condition");
assert(from <= first && first < len, "post-condition");
assert(first < curr && (curr - first) == num, "post-condition");
for (size_t i = first; i < first + num; ++i) {
assert(at(i)->is_empty(), "post-condition");
}
return first;
} else {
// we failed to find enough space for the humongous object
return -1;
return G1_NULL_HRS_INDEX;
}
}
int HeapRegionSeq::find_contiguous(size_t num) {
assert(num > 1, "otherwise we should not be calling this");
assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
err_msg("_alloc_search_start: %d should be valid and <= than %d",
_alloc_search_start, _regions.length()));
// Public
int start = _alloc_search_start;
int res = find_contiguous_from(start, num);
if (res == -1 && start != 0) {
// Try starting from the beginning. If _alloc_search_start was 0,
// no point in doing this again.
res = find_contiguous_from(0, num);
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
size_t max_length) {
assert((size_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((size_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
_length = 0;
_heap_bottom = bottom;
_heap_end = end;
_region_shift = HeapRegion::LogOfHRGrainBytes;
_next_search_index = 0;
_allocated_length = 0;
_max_length = max_length;
_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
memset(_regions, 0, max_length * sizeof(HeapRegion*));
_regions_biased = _regions - ((size_t) bottom >> _region_shift);
assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
"bottom should be included in the region with index 0");
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
HeapWord* new_end,
FreeRegionList* list) {
assert(old_end < new_end, "don't call it otherwise");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
assert(_heap_bottom <= next_bottom, "invariant");
while (next_bottom < new_end) {
assert(next_bottom < _heap_end, "invariant");
size_t index = length();
assert(index < _max_length, "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
assert(next_bottom == _heap_bottom, "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
}
if (index == _allocated_length) {
// We have to allocate a new HeapRegion.
HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
if (new_hr == NULL) {
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
assert(_regions[index] == NULL, "invariant");
_regions[index] = new_hr;
increment_length(&_allocated_length);
}
// Have to increment the length first, otherwise we will get an
// assert failure in at(index) below.
increment_length(&_length);
HeapRegion* hr = at(index);
list->add_as_tail(hr);
next_bottom = hr->end();
}
if (res != -1) {
assert(0 <= res && res < _regions.length(),
err_msg("res: %d should be valid", res));
_alloc_search_start = res + (int) num;
assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
err_msg("_alloc_search_start: %d should be valid",
_alloc_search_start));
assert(next_bottom == new_end, "post-condition");
return MemRegion(old_end, next_bottom);
}
size_t HeapRegionSeq::free_suffix() {
size_t res = 0;
size_t index = length();
while (index > 0) {
index -= 1;
if (!at(index)->is_empty()) {
break;
}
res += 1;
}
return res;
}
void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
iterate_from((HeapRegion*)NULL, blk);
size_t HeapRegionSeq::find_contiguous(size_t num) {
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(_next_search_index <= length(),
err_msg("_next_search_indeex: "SIZE_FORMAT" "
"should be valid and <= than "SIZE_FORMAT,
_next_search_index, length()));
size_t start = _next_search_index;
size_t res = find_contiguous_from(start, num);
if (res == G1_NULL_HRS_INDEX && start > 0) {
// Try starting from the beginning. If _next_search_index was 0,
// no point in doing this again.
res = find_contiguous_from(0, num);
}
if (res != G1_NULL_HRS_INDEX) {
assert(res < length(),
err_msg("res: "SIZE_FORMAT" should be valid", res));
_next_search_index = res + num;
assert(_next_search_index <= length(),
err_msg("_next_search_indeex: "SIZE_FORMAT" "
"should be valid and <= than "SIZE_FORMAT,
_next_search_index, length()));
}
return res;
}
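A sketch of the caller side (the humongous-allocation path that uses this search is not in this hunk; _hrs and the region count are assumptions):

  size_t num_regions = 3;  // hypothetical: regions needed by one humongous object
  size_t first = _hrs.find_contiguous(num_regions);
  if (first == G1_NULL_HRS_INDEX) {
    // No run of num_regions empty regions exists; the caller can try to
    // expand the heap and then search again.
  } else {
    // Regions [first, first + num_regions) are empty and contiguous.
  }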
// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence. In all
// other cases a linear search is required to find the index of r.
void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
// :::: FIXME ::::
// Static cache value is bad, especially when we start doing parallel
// remembered set update. For now just don't cache anything (the
// code in the def'd out blocks).
#if 0
static int cached_j = 0;
#endif
int len = _regions.length();
int j = 0;
// Find the index of r.
if (r != NULL) {
#if 0
assert(cached_j >= 0, "Invariant.");
if ((cached_j < len) && (r == _regions.at(cached_j))) {
j = cached_j;
} else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
j = cached_j + 1;
} else {
j = find(r);
#endif
if (j < 0) {
j = 0;
}
#if 0
}
#endif
}
int i;
for (i = j; i < len; i += 1) {
int res = blk->doHeapRegion(_regions.at(i));
if (res) {
#if 0
cached_j = i;
#endif
blk->incomplete();
return;
}
}
for (i = 0; i < j; i += 1) {
int res = blk->doHeapRegion(_regions.at(i));
if (res) {
#if 0
cached_j = i;
#endif
blk->incomplete();
return;
}
}
void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
iterate_from((HeapRegion*) NULL, blk);
}
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
int len = _regions.length();
int i;
for (i = idx; i < len; i++) {
if (blk->doHeapRegion(_regions.at(i))) {
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
size_t hr_index = 0;
if (hr != NULL) {
hr_index = (size_t) hr->hrs_index();
}
size_t len = length();
for (size_t i = hr_index; i < len; i += 1) {
bool res = blk->doHeapRegion(at(i));
if (res) {
blk->incomplete();
return;
}
}
for (i = 0; i < idx; i++) {
if (blk->doHeapRegion(_regions.at(i))) {
for (size_t i = 0; i < hr_index; i += 1) {
bool res = blk->doHeapRegion(at(i));
if (res) {
blk->incomplete();
return;
}
@ -283,54 +206,92 @@ void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
}
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
size_t& num_regions_deleted) {
size_t* num_regions_deleted) {
// Reset this in case it's currently pointing into the regions that
// we just removed.
_alloc_search_start = 0;
_next_search_index = 0;
assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
assert(length() > 0, "the region sequence should not be empty");
assert(length() <= _allocated_length, "invariant");
assert(_allocated_length > 0, "we should have at least one region committed");
if (_regions.length() == 0) {
num_regions_deleted = 0;
return MemRegion();
}
int j = _regions.length() - 1;
HeapWord* end = _regions.at(j)->end();
// around the loop, i will be the next region to be removed
size_t i = length() - 1;
assert(i > 0, "we should never remove all regions");
// [last_start, end) is the MemRegion that covers the regions we will remove.
HeapWord* end = at(i)->end();
HeapWord* last_start = end;
while (j >= 0 && shrink_bytes > 0) {
HeapRegion* cur = _regions.at(j);
// We have to leave humongous regions where they are,
// and work around them.
if (cur->isHumongous()) {
return MemRegion(last_start, end);
}
assert(cur == _regions.top(), "Should be top");
*num_regions_deleted = 0;
while (shrink_bytes > 0) {
HeapRegion* cur = at(i);
// We should leave the humongous regions where they are.
if (cur->isHumongous()) break;
// We should stop shrinking if we come across a non-empty region.
if (!cur->is_empty()) break;
i -= 1;
*num_regions_deleted += 1;
shrink_bytes -= cur->capacity();
num_regions_deleted++;
_regions.pop();
last_start = cur->bottom();
// We need to delete these somehow, but can't currently do so here: if
// we do, the ZF thread may still access the deleted region. We'll
// leave this here as a reminder that we have to do something about
// this.
// delete cur;
j--;
decrement_length(&_length);
// We will reclaim the HeapRegion. _allocated_length should be
// covering this index. So, even though we removed the region from
// the active set by decreasing _length, we still have it
// available in the future if we need to re-use it.
assert(i > 0, "we should never remove all regions");
assert(length() > 0, "we should never remove all regions");
}
return MemRegion(last_start, end);
}
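For illustration, a caller would drive the new out-parameter form roughly like this (the caller-side names are assumptions):

  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
  // mr covers the uncommitted suffix; the caller uncommits that address
  // range and lowers its capacity by num_regions_deleted regions.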
class PrintHeapRegionClosure : public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
gclog_or_tty->print(PTR_FORMAT ":", r);
r->print();
return false;
}
};
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
guarantee(_length <= _allocated_length,
err_msg("invariant: _length: "SIZE_FORMAT" "
"_allocated_length: "SIZE_FORMAT,
_length, _allocated_length));
guarantee(_allocated_length <= _max_length,
err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
"_max_length: "SIZE_FORMAT,
_allocated_length, _max_length));
guarantee(_next_search_index <= _length,
err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
"_length: "SIZE_FORMAT,
_next_search_index, _length));
void HeapRegionSeq::print() {
PrintHeapRegionClosure cl;
iterate(&cl);
HeapWord* prev_end = _heap_bottom;
for (size_t i = 0; i < _allocated_length; i += 1) {
HeapRegion* hr = _regions[i];
guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
guarantee(hr->bottom() == prev_end,
err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
"prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
i, hr->hrs_index()));
if (i < _length) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
} else {
guarantee(hr->is_empty(), "sanity");
guarantee(!hr->isHumongous(), "sanity");
// using assert instead of guarantee here since containing_set()
// is only available in non-product builds.
assert(hr->containing_set() == NULL, "sanity");
}
if (hr->startsHumongous()) {
prev_end = hr->orig_end();
} else {
prev_end = hr->end();
}
}
for (size_t i = _allocated_length; i < _max_length; i += 1) {
guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
}
}
#endif // PRODUCT

View File

@ -25,92 +25,143 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"
class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;
#define G1_NULL_HRS_INDEX ((size_t) -1)
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
// the one after it, etc.). Two regions that are consecutive in the
// array should also be adjacent in the address space (i.e.,
// region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
// region we retain the HeapRegion to be able to re-use it in the
// future (in case we recommit it).
//
// We keep track of three lengths:
//
// * _length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
// * _max_length (returned by max_length()) is the maximum number of
// regions the heap can have.
//
// and maintain that: _length <= _allocated_length <= _max_length
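//
// For example (hypothetical numbers): after expanding the heap to 10
// regions and then shrinking it back to 6, _length == 6 while
// _allocated_length stays at 10 (the four uncommitted HeapRegions are
// retained for re-use), and _max_length is fixed by the reserved heap
// size, say at 256; 6 <= 10 <= 256 holds throughout.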
class HeapRegionSeq: public CHeapObj {
// _regions is kept sorted by start address order, and no two regions are
// overlapping.
GrowableArray<HeapRegion*> _regions;
// The array that holds the HeapRegions.
HeapRegion** _regions;
// The index in "_regions" at which to start the next allocation search.
// (For efficiency only; private to obj_allocate after initialization.)
int _alloc_search_start;
// Version of _regions biased to address 0
HeapRegion** _regions_biased;
// Finds a contiguous set of empty regions of length num, starting
// from a given index.
int find_contiguous_from(int from, size_t num);
// The number of regions committed in the heap.
size_t _length;
// Currently, we're choosing collection sets in a round-robin fashion,
// starting here.
int _next_rr_candidate;
// The address of the first reserved word in the heap.
HeapWord* _heap_bottom;
// The bottom address of the bottom-most region, or else NULL if there
// are no regions in the sequence.
char* _seq_bottom;
// The address of the last reserved word in the heap - 1.
HeapWord* _heap_end;
// The log of the region byte size.
size_t _region_shift;
// A hint for which index to start searching from for humongous
// allocations.
size_t _next_search_index;
// The number of regions for which we have allocated HeapRegions.
size_t _allocated_length;
// The maximum number of regions in the heap.
size_t _max_length;
// Find a contiguous set of empty regions of length num, starting
// from the given index.
size_t find_contiguous_from(size_t from, size_t num);
// Map a heap address to a biased region index. Assume that the
// address is valid.
inline size_t addr_to_index_biased(HeapWord* addr) const;
void increment_length(size_t* length) {
assert(*length < _max_length, "pre-condition");
*length += 1;
}
void decrement_length(size_t* length) {
assert(*length > 0, "pre-condition");
*length -= 1;
}
public:
// Initializes "this" to the empty sequence of regions.
HeapRegionSeq(const size_t max_size);
// Empty constructor; we'll initialize it with the initialize() method.
HeapRegionSeq() { }
// Adds "hr" to "this" sequence. Requires "hr" not to overlap with
// any region already in "this". (Will perform better if regions are
// inserted in ascending address order.)
void insert(HeapRegion* hr);
void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
// Given a HeapRegion*, returns its index within _regions,
// or returns -1 if not found.
int find(HeapRegion* hr);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
inline HeapRegion* at(size_t index) const;
// Requires the index to be valid, and return the region at the index.
HeapRegion* at(size_t i) { return _regions.at((int)i); }
// If addr is within the committed space return its corresponding
// HeapRegion, otherwise return NULL.
inline HeapRegion* addr_to_region(HeapWord* addr) const;
// Return the number of regions in the sequence.
size_t length();
// Return the HeapRegion that corresponds to the given
// address. Assume the address is valid.
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Returns the number of contiguous regions at the end of the sequence
// Return the number of regions that have been committed in the heap.
size_t length() const { return _length; }
// Return the maximum number of regions in the heap.
size_t max_length() const { return _max_length; }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use
// existing ones, and return them in the given list. Returns the
// memory region that covers the newly-created regions. If a
// HeapRegion allocation fails, the result memory region might be
// smaller than the desired one.
MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
FreeRegionList* list);
// Return the number of contiguous regions at the end of the sequence
// that are available for allocation.
size_t free_suffix();
// Find a contiguous set of empty regions of length num and return
// the index of the first region or -1 if the search was unsuccessful.
int find_contiguous(size_t num);
// the index of the first region or G1_NULL_HRS_INDEX if the
// search was unsuccessful.
size_t find_contiguous(size_t num);
// Apply the "doHeapRegion" method of "blk" to all regions in "this",
// in address order, terminating the iteration early
// if the "doHeapRegion" method returns "true".
void iterate(HeapRegionClosure* blk);
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void iterate(HeapRegionClosure* blk) const;
// Apply the "doHeapRegion" method of "blk" to all regions in "this",
// starting at "r" (or first region, if "r" is NULL), in a circular
// manner, terminating the iteration early if the "doHeapRegion" method
// returns "true".
void iterate_from(HeapRegion* r, HeapRegionClosure* blk);
// As above, but start the iteration from hr and loop around. If hr
// is NULL, we start from the first region in the heap.
void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
// As above, but start from a given index in the sequence
// instead of a given heap region.
void iterate_from(int idx, HeapRegionClosure* blk);
// Requires "shrink_bytes" to be a multiple of the page size and heap
// region granularity. Deletes as many "rightmost" completely free heap
// regions from the sequence as comprise shrink_bytes bytes. Returns the
// MemRegion indicating the region those regions comprised, and sets
// "num_regions_deleted" to the number of regions deleted.
MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);
// If "addr" falls within a region in the sequence, return that region,
// or else NULL.
inline HeapRegion* addr_to_region(const void* addr);
void print();
// Prints out runs of empty regions.
void print_empty_runs();
// Tag as uncommitted as many regions that are completely free as
// possible, up to shrink_bytes, from the suffix of the committed
// sequence. Return a MemRegion that corresponds to the address
// range of the uncommitted regions. Assume shrink_bytes is page and
// heap region aligned.
MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
// Do some sanity checking.
void verify_optional() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,23 +25,42 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
if ((char*) addr >= _seq_bottom) {
size_t diff = (size_t) pointer_delta((HeapWord*) addr,
(HeapWord*) _seq_bottom);
int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
assert(index >= 0, "invariant / paranoia");
if (index < _regions.length()) {
HeapRegion* hr = _regions.at(index);
assert(hr->is_in_reserved(addr),
"addr_to_region is wrong...");
return hr;
}
inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
size_t index = (size_t) addr >> _region_shift;
return index;
}
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
assert(_heap_bottom <= addr && addr < _heap_end,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
addr, _heap_bottom, _heap_end));
size_t index_biased = addr_to_index_biased(addr);
HeapRegion* hr = _regions_biased[index_biased];
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
if (addr != NULL && addr < _heap_end) {
assert(addr >= _heap_bottom,
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
return addr_to_region_unsafe(addr);
}
return NULL;
}
inline HeapRegion* HeapRegionSeq::at(size_t index) const {
assert(index < length(), "pre-condition");
HeapRegion* hr = _regions[index];
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
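To make the biasing arithmetic concrete, here is a small standalone sketch with hypothetical numbers. Note that, like the HotSpot code it mirrors, forming the biased pointer relies on out-of-range pointer arithmetic that is formally undefined in ISO C++ but behaves as expected on the flat address spaces HotSpot targets:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uintptr_t bottom = 0x40000000;  // assumed heap bottom
    const unsigned shift = 20;            // assumed 1 MB regions
    int regions[16] = {};                 // stands in for _regions
    int* regions_biased = regions - (bottom >> shift);  // _regions_biased
    uintptr_t addr = bottom + (3u << shift) + 128;      // inside region 3
    // addr_to_index_biased(addr) is just addr >> shift; no subtraction of
    // bottom is needed on the lookup fast path.
    assert(&regions_biased[addr >> shift] == &regions[(addr - bottom) >> shift]);
    return 0;
  }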

View File

@ -1,5 +1,5 @@
/*
* copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
//////////////////// FreeRegionList ////////////////////
@ -38,6 +39,16 @@ const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
//////////////////// MasterFreeRegionList ////////////////////
const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
// We should reset the RSet for parallel iteration before we add it
// to the master free list so that it is ready when the region is
// re-allocated.
if (!hr->rem_set()->verify_ready_for_par_iteration()) {
return "the region's RSet should be ready for parallel iteration";
}
return FreeRegionList::verify_region_extra(hr);
}
bool MasterFreeRegionList::check_mt_safety() {
// Master Free List MT safety protocol:
// (a) If we're at a safepoint, operations on the master free list

View File

@ -1,5 +1,5 @@
/*
* copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,6 +44,7 @@ public:
class MasterFreeRegionList : public FreeRegionList {
protected:
virtual const char* verify_region_extra(HeapRegion* hr);
virtual bool check_mt_safety();
public:

View File

@ -481,8 +481,9 @@ size_t SparsePRT::mem_size() const {
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Adding card %d from region %d to region %d sparse.",
card_index, region_id, _hr->hrs_index());
gclog_or_tty->print_cr(" Adding card %d from region %d to region "
SIZE_FORMAT" sparse.",
card_index, region_id, _hr->hrs_index());
#endif
if (_next->occupied_entries() * 2 > _next->capacity()) {
expand();
@ -533,8 +534,8 @@ void SparsePRT::expand() {
_next = new RSHashTable(last->capacity() * 2);
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr(" Expanded sparse table for %d to %d.",
_hr->hrs_index(), _next->capacity());
gclog_or_tty->print_cr(" Expanded sparse table for "SIZE_FORMAT" to %d.",
_hr->hrs_index(), _next->capacity());
#endif
for (size_t i = 0; i < last->capacity(); i++) {
SparsePRTEntry* e = last->entry((int)i);

View File

@ -99,6 +99,18 @@ void VM_G1IncCollectionPause::doit() {
// At this point we are supposed to start a concurrent cycle. We
// will do so if one is not already in progress.
bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
// The above routine returns true if we were able to force the
// next GC pause to be an initial mark; it returns false if a
// marking cycle is already in progress.
//
// If a marking cycle is already in progress just return and skip
// the pause - the requesting thread should block in doit_epilogue
// until the marking cycle is complete.
if (!res) {
assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
return;
}
}
_pause_succeeded =

View File

@ -348,15 +348,31 @@ process_chunk_boundaries(Space* sp,
// cleared before we had a chance to examine it. In that case, the value
// will have been logged in the LNC for that chunk.
// We need to examine as many chunks to the right as this object
// covers.
const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
- lowest_non_clean_base_chunk_index;
DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
- lowest_non_clean_base_chunk_index;)
assert(last_chunk_index_to_check <= last_chunk_index,
err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT
" exceeds last_chunk_index " INTPTR_FORMAT,
last_chunk_index_to_check, last_chunk_index));
// covers. However, we need to bound this checking to the largest
// entry in the LNC array: this is because the heap may expand
// after the LNC array has been created but before we reach this point,
// and the last block in our chunk may have been expanded to include
// the expansion delta (and possibly subsequently allocated from, so
// it wouldn't be sufficient to check whether that last block was
// or was not an object at this point).
uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
- lowest_non_clean_base_chunk_index;
const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
- lowest_non_clean_base_chunk_index;
if (last_chunk_index_to_check > last_chunk_index) {
assert(last_block + last_block_size > used.end(),
err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
" does not exceed used.end() = " PTR_FORMAT ","
" yet last_chunk_index_to_check " INTPTR_FORMAT
" exceeds last_chunk_index " INTPTR_FORMAT,
last_block, last_block + last_block_size, used.end(),
last_chunk_index_to_check, last_chunk_index));
assert(sp->used_region().end() > used.end(),
err_msg("Expansion did not happen: "
"[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
sp->used_region().start(), sp->used_region().end(), used.start(), used.end()));
NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
last_chunk_index_to_check = last_chunk_index;
}
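// Worked example (hypothetical indices): suppose the LNC array was sized
// when used.last() fell in chunk 100, and the heap has since expanded so
// that last_block now ends in chunk 102. Then last_chunk_index_to_check
// (102) exceeds last_chunk_index (100); chunks 101 and 102 have no LNC
// entries to consult, so the scan is clamped at chunk 100.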
for (uintptr_t lnc_index = cur_chunk_index + 1;
lnc_index <= last_chunk_index_to_check;
lnc_index++) {

View File

@ -386,8 +386,6 @@ bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
size_t size,
bool is_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
@ -398,7 +396,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// limit is being exceeded as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = young_gen()->allocate(size, is_tlab);
HeapWord* result = young_gen()->allocate(size);
uint loop_count = 0;
uint gc_count = 0;
@ -419,7 +417,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
MutexLocker ml(Heap_lock);
gc_count = Universe::heap()->total_collections();
result = young_gen()->allocate(size, is_tlab);
result = young_gen()->allocate(size);
// (1) If the requested object is too large to easily fit in the
// young_gen, or
@ -433,21 +431,13 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (result != NULL) {
return result;
}
if (!is_tlab &&
size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
result = old_gen()->allocate(size, is_tlab);
if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
result = old_gen()->allocate(size);
if (result != NULL) {
return result;
}
}
if (GC_locker::is_active_and_needs_gc()) {
// GC is locked out. If this is a TLAB allocation,
// return NULL; the requestor will retry allocation
// of an individual object at a time.
if (is_tlab) {
return NULL;
}
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
@ -472,7 +462,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if (result == NULL) {
// Generate a VM operation
VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
VM_ParallelGCFailedAllocation op(size, gc_count);
VMThread::execute(&op);
// Did the VM operation execute? If so, return the result directly.
@ -526,7 +516,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
" size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
" size=%d", loop_count, size);
}
}
@ -539,7 +529,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
// time over limit here, that is the responsibility of the heap specific
// collection methods. This method decides where to attempt allocations,
// and when to attempt collections, but no collection specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(!Universe::heap()->is_gc_active(), "not reentrant");
@ -553,7 +543,7 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
// First level allocation failure, scavenge and allocate in young gen.
GCCauseSetter gccs(this, GCCause::_allocation_failure);
PSScavenge::invoke();
HeapWord* result = young_gen()->allocate(size, is_tlab);
HeapWord* result = young_gen()->allocate(size);
// Second level allocation failure.
// Mark sweep and allocate in young generation.
@ -562,28 +552,28 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
// Don't mark sweep twice if so.
if (mark_sweep_invocation_count == total_invocations()) {
invoke_full_gc(false);
result = young_gen()->allocate(size, is_tlab);
result = young_gen()->allocate(size);
}
}
// Third level allocation failure.
// After mark sweep and young generation allocation failure,
// allocate in old generation.
if (result == NULL && !is_tlab) {
result = old_gen()->allocate(size, is_tlab);
if (result == NULL) {
result = old_gen()->allocate(size);
}
// Fourth level allocation failure. We're running out of memory.
// More complete mark sweep and allocate in young generation.
if (result == NULL) {
invoke_full_gc(true);
result = young_gen()->allocate(size, is_tlab);
result = young_gen()->allocate(size);
}
// Fifth level allocation failure.
// After more complete mark sweep, allocate in old generation.
if (result == NULL && !is_tlab) {
result = old_gen()->allocate(size, is_tlab);
if (result == NULL) {
result = old_gen()->allocate(size);
}
return result;
@ -761,7 +751,7 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
}
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
return young_gen()->allocate(size, true);
return young_gen()->allocate(size);
}
void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
@ -901,7 +891,7 @@ void ParallelScavengeHeap::print_tracing_info() const {
}
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
// Why do we need the total_collections()-filter below?
if (total_collections() > 0) {
if (!silent) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -165,12 +165,13 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
// an excessive amount of time is being spent doing collections
// and caused a NULL to be returned. If a NULL is not returned,
// "gc_time_limit_was_exceeded" has an undefined meaning.
HeapWord* mem_allocate(size_t size,
bool is_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
// Allocation attempt(s) during a safepoint. It should never be called
// to allocate a new TLAB as this allocation might be satisfied out
// of the old generation.
HeapWord* failed_mem_allocate(size_t size);
HeapWord* permanent_mem_allocate(size_t size);
HeapWord* failed_permanent_mem_allocate(size_t size);
@ -194,8 +195,6 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
inline void invoke_scavenge();
inline void invoke_full_gc(bool maximum_compaction);
size_t large_typearray_limit() { return FastAllocateSizeLimit; }
bool supports_inline_contig_alloc() const { return !UseNUMA; }
HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
@ -253,7 +252,7 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
void verify(bool allow_dirty, bool silent, bool /* option */);
void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
void print_heap_change(size_t prev_used);

View File

@ -182,12 +182,12 @@ size_t PSOldGen::contiguous_available() const {
// Allocation. We report all successful allocations to the size policy
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
HeapWord* PSOldGen::allocate(size_t word_size) {
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = allocate_noexpand(word_size, is_tlab);
HeapWord* res = allocate_noexpand(word_size);
if (res == NULL) {
res = expand_and_allocate(word_size, is_tlab);
res = expand_and_allocate(word_size);
}
// Allocations in the old generation need to be reported
@ -199,13 +199,12 @@ HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
return res;
}
HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
assert(!is_tlab, "TLAB's are not supported in PSOldGen");
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
expand(word_size*HeapWordSize);
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
}
return allocate_noexpand(word_size, is_tlab);
return allocate_noexpand(word_size);
}
HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {

View File

@ -60,9 +60,8 @@ class PSOldGen : public CHeapObj {
// Used when initializing the _name field.
static inline const char* select_name();
HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
HeapWord* allocate_noexpand(size_t word_size) {
// We assume the heap lock is held here.
assert(!is_tlab, "Does not support TLAB allocation");
assert_locked_or_safepoint(Heap_lock);
HeapWord* res = object_space()->allocate(word_size);
if (res != NULL) {
@ -89,7 +88,7 @@ class PSOldGen : public CHeapObj {
return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
}
HeapWord* expand_and_allocate(size_t word_size, bool is_tlab);
HeapWord* expand_and_allocate(size_t word_size);
HeapWord* expand_and_cas_allocate(size_t word_size);
void expand(size_t bytes);
bool expand_by(size_t bytes);
@ -164,7 +163,7 @@ class PSOldGen : public CHeapObj {
// Allocation. We report all successful allocations to the size policy
// Note that the perm gen does not use this method, and should not!
HeapWord* allocate(size_t word_size, bool is_tlab);
HeapWord* allocate(size_t word_size);
// Iteration.
void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); }

View File

@ -46,10 +46,10 @@ PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
HeapWord* PSPermGen::allocate_permanent(size_t size) {
assert_locked_or_safepoint(Heap_lock);
HeapWord* obj = allocate_noexpand(size, false);
HeapWord* obj = allocate_noexpand(size);
if (obj == NULL) {
obj = expand_and_allocate(size, false);
obj = expand_and_allocate(size);
}
return obj;

View File

@ -157,7 +157,7 @@ class PSYoungGen : public CHeapObj {
}
// Allocation
HeapWord* allocate(size_t word_size, bool is_tlab) {
HeapWord* allocate(size_t word_size) {
HeapWord* result = eden_space()->cas_allocate(word_size);
return result;
}

View File

@ -33,10 +33,9 @@
// The following methods are used by the parallel scavenge collector
VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
bool is_tlab, unsigned int gc_count) :
unsigned int gc_count) :
VM_GC_Operation(gc_count, GCCause::_allocation_failure),
_size(size),
_is_tlab(is_tlab),
_result(NULL)
{
}
@ -48,7 +47,7 @@ void VM_ParallelGCFailedAllocation::doit() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->failed_mem_allocate(_size, _is_tlab);
_result = heap->failed_mem_allocate(_size);
if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
set_gc_locked();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,12 +32,10 @@
class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
private:
size_t _size;
bool _is_tlab;
HeapWord* _result;
public:
VM_ParallelGCFailedAllocation(size_t size, bool is_tlab,
unsigned int gc_count);
VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
virtual VMOp_Type type() const {
return VMOp_ParallelGCFailedAllocation;

View File

@ -99,14 +99,16 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// vulnerable to noisy glitches. In such cases, we
// ignore the current sample and use currently available
// historical estimates.
// XXX NEEDS TO BE FIXED
// assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// "Total Stock" "Not used at this block size"
assert(prevSweep() + splitBirths() + coalBirths() // "Total Production Stock"
>= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
"Conservation Principle");
if (inter_sweep_current > _threshold) {
ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
// XXX NEEDS TO BE FIXED
// assert(demand >= 0, "Demand should be non-negative");
ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
- splitDeaths() - coalDeaths();
assert(demand >= 0,
err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
PTR_FORMAT " (size=" SIZE_FORMAT ")",
demand, this, count));
// Defensive: adjust for imprecision in event counting
if (demand < 0) {
demand = 0;

View File

@ -43,17 +43,6 @@ ConcurrentGCThread::ConcurrentGCThread() :
_sts.initialize();
};
void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) {
MutexLockerEx x(Heap_lock,
Mutex::_no_safepoint_check_flag);
// warning("CGC: about to try stopping world");
SafepointSynchronize::begin();
// warning("CGC: successfully stopped world");
op->do_void();
SafepointSynchronize::end();
// warning("CGC: successfully restarted world");
}
void ConcurrentGCThread::safepoint_synchronize() {
_sts.suspend_all();
}

View File

@ -95,8 +95,6 @@ protected:
static int set_CGC_flag(int b) { return _CGC_flag |= b; }
static int reset_CGC_flag(int b) { return _CGC_flag &= ~b; }
void stopWorldAndDo(VoidClosure* op);
// All instances share this one set.
static SuspendibleThreadSet _sts;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,7 +93,7 @@ class CollectedHeap : public CHeapObj {
// pure virtual.
void pre_initialize();
// Create a new tlab
// Create a new tlab. All TLAB allocations must go through this.
virtual HeapWord* allocate_new_tlab(size_t size);
// Accumulate statistics on all tlabs.
@ -109,11 +109,11 @@ class CollectedHeap : public CHeapObj {
// Allocate an uninitialized block of the given size, or returns NULL if
// this is impossible.
inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);
inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
// Like common_mem_allocate_noinit, but the block returned by a successful allocation
// is guaranteed initialized to zeros.
inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);
inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
// Same as common_mem version, except memory is allocated in the permanent area
// If there is no permanent area, revert to common_mem_allocate_noinit
@ -322,7 +322,6 @@ class CollectedHeap : public CHeapObj {
// General obj/array allocation facilities.
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
// Special obj/array allocation facilities.
// Some heaps may want to manage "permanent" data uniquely. These default
@ -345,16 +344,12 @@ class CollectedHeap : public CHeapObj {
// Raw memory allocation facilities
// The obj and array allocate methods are covers for these methods.
// The permanent allocation method should default to mem_allocate if
// permanent memory isn't supported.
// permanent memory isn't supported. mem_allocate() should never be
// called to allocate TLABs, only individual objects.
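// For illustration (hypothetical caller, simplified): a TLAB refill now
// goes through allocate_new_tlab(tlab_size), while an object that misses
// or does not fit a TLAB is requested directly via
//   bool gc_overhead_limit_was_exceeded = false;
//   HeapWord* p = heap->mem_allocate(size, &gc_overhead_limit_was_exceeded);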
virtual HeapWord* mem_allocate(size_t size,
bool is_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) = 0;
virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
// The boundary between a "large" and "small" array of primitives, in words.
virtual size_t large_typearray_limit() = 0;
// Utilities for turning raw memory into filler objects.
//
// min_fill_size() is the smallest region that can be filled.
@@ -606,7 +601,7 @@ class CollectedHeap : public CHeapObj {
virtual void print_tracing_info() const = 0;
// Heap verification
virtual void verify(bool allow_dirty, bool silent, bool option) = 0;
virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
// Non product verification and debugging.
#ifndef PRODUCT

View File

@@ -122,7 +122,7 @@ void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
post_allocation_notify(klass, (oop)obj);
}
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
// Clear unhandled oops for memory allocation. Memory allocation might
// not take out a lock if from tlab, so clear here.
@@ -133,7 +133,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
return NULL; // caller does a CHECK_0 too
}
// We may want to update this, is_noref objects might not be allocated in TLABs.
HeapWord* result = NULL;
if (UseTLAB) {
result = CollectedHeap::allocate_from_tlab(THREAD, size);
@@ -145,8 +144,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
}
bool gc_overhead_limit_was_exceeded = false;
result = Universe::heap()->mem_allocate(size,
is_noref,
false,
&gc_overhead_limit_was_exceeded);
if (result != NULL) {
NOT_PRODUCT(Universe::heap()->
@@ -183,8 +180,8 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref,
}
}
HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
init_obj(obj, size);
return obj;
}
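// A minimal sketch of the resulting fast path (names as above; simplified,
// error paths omitted): try the TLAB first, then fall back to the heap,
// whose mem_allocate() no longer takes is_noref/is_tlab arguments:
//   HeapWord* p = UseTLAB ? CollectedHeap::allocate_from_tlab(THREAD, size) : NULL;
//   if (p == NULL) {
//     bool gc_overhead_limit_was_exceeded = false;
//     p = Universe::heap()->mem_allocate(size, &gc_overhead_limit_was_exceeded);
//   }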
@@ -255,7 +252,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_obj(klass, obj, size);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
@@ -268,20 +265,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass,
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
post_allocation_setup_array(klass, obj, size, length);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
int size,
int length,
TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_array(klass, obj, size, length);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;

View File

@@ -750,10 +750,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
return NULL;
}
size_t GenCollectorPolicy::large_typearray_limit() {
return FastAllocateSizeLimit;
}
// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -280,9 +280,6 @@ class GenCollectorPolicy : public CollectorPolicy {
HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
// The size that defines a "large array".
virtual size_t large_typearray_limit();
// Adaptive size policy
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,

View File

@@ -434,11 +434,9 @@ HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
}
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
bool is_large_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
return collector_policy()->mem_allocate_work(size,
is_tlab,
false /* is_tlab */,
gc_overhead_limit_was_exceeded);
}
@@ -1120,11 +1118,9 @@ size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
bool gc_overhead_limit_was_exceeded;
HeapWord* result = mem_allocate(size /* size */,
false /* is_large_noref */,
true /* is_tlab */,
&gc_overhead_limit_was_exceeded);
return result;
return collector_policy()->mem_allocate_work(size /* size */,
true /* is_tlab */,
&gc_overhead_limit_was_exceeded);
}
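// Net effect (sketch): TLAB refills now reach the generations only via
//   allocate_new_tlab(size)
//     -> collector_policy()->mem_allocate_work(size, true /* is_tlab */, ...)
// so mem_allocate() itself is only ever asked for individual objects.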
// Requires "*prev_ptr" to be non-NULL. Deletes a block of minimal size
@@ -1179,10 +1175,6 @@ void GenCollectedHeap::release_scratch() {
}
}
size_t GenCollectedHeap::large_typearray_limit() {
return gen_policy()->large_typearray_limit();
}
class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
void do_generation(Generation* gen) {
gen->prepare_for_verify();
@@ -1260,7 +1252,7 @@ GCStats* GenCollectedHeap::gc_stats(int level) const {
return _gens[level]->gc_stats();
}
void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
if (!silent) {
gclog_or_tty->print("permgen ");
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,6 @@ public:
size_t max_capacity() const;
HeapWord* mem_allocate(size_t size,
bool is_large_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
// We may support a shared contiguous allocation area, if the youngest
@@ -315,8 +313,6 @@ public:
// contributed as it needs.
void release_scratch();
size_t large_typearray_limit();
// Ensure parsability: override
virtual void ensure_parsability(bool retire_tlabs);
@@ -361,7 +357,7 @@ public:
void prepare_for_verify();
// Override.
void verify(bool allow_dirty, bool silent, bool /* option */);
void verify(bool allow_dirty, bool silent, VerifyOption option);
// Override.
void print() const;

View File

@@ -1278,7 +1278,7 @@ void Universe::print_heap_after_gc(outputStream* st) {
st->print_cr("}");
}
void Universe::verify(bool allow_dirty, bool silent, bool option) {
void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
if (SharedSkipVerify) {
return;
}

View File

@@ -109,6 +109,14 @@ struct NarrowOopStruct {
bool _use_implicit_null_checks;
};
enum VerifyOption {
VerifyOption_Default = 0,
// G1
VerifyOption_G1UsePrevMarking = VerifyOption_Default,
VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
VerifyOption_G1UseMarkWord = VerifyOption_G1UseNextMarking + 1
};
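// For illustration (using the default-argument form added below), a G1
// verification pass against the "next" marking could be requested as:
//   Universe::verify(true /* allow_dirty */, false /* silent */,
//                    VerifyOption_G1UseNextMarking);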
class Universe: AllStatic {
// Ugh. Universe is much too friendly.
@@ -404,7 +412,8 @@ class Universe: AllStatic {
// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
static void verify(bool allow_dirty = true, bool silent = false,
VerifyOption option = VerifyOption_Default );
static int verify_count() { return _verify_count; }
static void print();
static void print_on(outputStream* st);

View File

@@ -49,6 +49,7 @@
#include "runtime/relocator.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "utilities/quickSort.hpp"
#include "utilities/xmlstream.hpp"
@@ -1207,41 +1208,6 @@ void methodOopDesc::print_short_name(outputStream* st) {
if (WizardMode) signature()->print_symbol_on(st);
}
extern "C" {
static int method_compare(methodOop* a, methodOop* b) {
return (*a)->name()->fast_compare((*b)->name());
}
// Prevent qsort from reordering a previous valid sort by
// considering the address of the methodOops if two methods
// would otherwise compare as equal. Required to preserve
// optimal access order in the shared archive. Slower than
// method_compare, only used for shared archive creation.
static int method_compare_idempotent(methodOop* a, methodOop* b) {
int i = method_compare(a, b);
if (i != 0) return i;
return ( a < b ? -1 : (a == b ? 0 : 1));
}
// We implement special compare versions for narrow oops to avoid
// testing for UseCompressedOops on every comparison.
static int method_compare_narrow(narrowOop* a, narrowOop* b) {
methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
return m->name()->fast_compare(n->name());
}
static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
int i = method_compare_narrow(a, b);
if (i != 0) return i;
return ( a < b ? -1 : (a == b ? 0 : 1));
}
typedef int (*compareFn)(const void*, const void*);
}
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
static void reorder_based_on_method_index(objArrayOop methods,
objArrayOop annotations,
@@ -1265,6 +1231,14 @@ static void reorder_based_on_method_index(objArrayOop methods,
}
}
// Comparer for sorting an object array containing
// methodOops.
template <class T>
static int method_comparator(T a, T b) {
methodOop m = (methodOop)oopDesc::decode_heap_oop_not_null(a);
methodOop n = (methodOop)oopDesc::decode_heap_oop_not_null(b);
return m->name()->fast_compare(n->name());
}
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
void methodOopDesc::sort_methods(objArrayOop methods,
@@ -1287,30 +1261,19 @@ void methodOopDesc::sort_methods(objArrayOop methods,
m->set_method_idnum(i);
}
}
// Use a simple bubble sort for small number of methods since
// qsort requires a functional pointer call for each comparison.
if (length < 8) {
bool sorted = true;
for (int i=length-1; i>0; i--) {
for (int j=0; j<i; j++) {
methodOop m1 = (methodOop)methods->obj_at(j);
methodOop m2 = (methodOop)methods->obj_at(j+1);
if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
methods->obj_at_put(j, m2);
methods->obj_at_put(j+1, m1);
sorted = false;
}
}
if (sorted) break;
sorted = true;
{
No_Safepoint_Verifier nsv;
if (UseCompressedOops) {
QuickSort::sort<narrowOop>((narrowOop*)(methods->base()), length, method_comparator<narrowOop>, idempotent);
} else {
QuickSort::sort<oop>((oop*)(methods->base()), length, method_comparator<oop>, idempotent);
}
if (UseConcMarkSweepGC) {
// For CMS we need to dirty the cards for the array
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
bs->write_ref_array(methods->base(), length);
}
} else {
compareFn compare =
(UseCompressedOops ?
(compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
(compareFn) (idempotent ? method_compare_idempotent : method_compare));
qsort(methods->base(), length, heapOopSize, compare);
}
// Sort annotations if necessary

View File

@@ -84,11 +84,7 @@ typeArrayOop typeArrayKlass::allocate(int length, TRAPS) {
KlassHandle h_k(THREAD, as_klassOop());
typeArrayOop t;
CollectedHeap* ch = Universe::heap();
if (size < ch->large_typearray_limit()) {
t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
} else {
t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
}
t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
assert(t->is_parsable(), "Don't publish unless parsable");
return t;
} else {

View File

@@ -3296,6 +3296,19 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
return ret;
}
#ifndef PRODUCT
#include "utilities/quickSort.hpp"
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
tty->print_cr("All tests passed");
}
}
#endif
HS_DTRACE_PROBE_DECL3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
DT_RETURN_MARK_DECL(CreateJavaVM, jint);
@@ -3386,6 +3399,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
}
NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
NOT_PRODUCT(execute_internal_vm_tests());
return result;
}

View File

@@ -1680,8 +1680,33 @@ static bool verify_serial_gc_flags() {
UseParallelGC || UseParallelOldGC));
}
// Check whether GC log rotation is possible:
// +UseGCLogFileRotation is required, and there is no rotation
// when no log file is supplied (-Xloggc), or when
// NumberOfGCLogFiles is 0, or GCLogFileSize is 0
void check_gclog_consistency() {
if (UseGCLogFileRotation) {
if ((Arguments::gc_log_filename() == NULL) ||
(NumberOfGCLogFiles == 0) ||
(GCLogFileSize == 0)) {
jio_fprintf(defaultStream::output_stream(),
"To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
"where num_of_file > 0 and num_of_size > 0\n"
"GC log rotation is turned off\n");
UseGCLogFileRotation = false;
}
}
if (UseGCLogFileRotation && GCLogFileSize < 8*K) {
FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
jio_fprintf(defaultStream::output_stream(),
"GCLogFileSize changed to minimum 8K\n");
}
}
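// For illustration, a command line that passes the checks above and rotates
// among three 16K log files (the flag values here are arbitrary):
//   java -Xloggc:gc.log -XX:+UseGCLogFileRotation \
//        -XX:NumberOfGCLogFiles=3 -XX:GCLogFileSize=16K ...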
// Check consistency of GC selection
bool Arguments::check_gc_consistency() {
check_gclog_consistency();
bool status = true;
// Ensure that the user has not selected conflicting sets
// of collectors. [Note: this check is merely a user convenience;
@@ -2672,6 +2697,7 @@ SOLARIS_ONLY(
return JNI_ERR;
}
}
// Change the default value for flags which have different default values
// when working with older JDKs.
if (JDK_Version::current().compare_major(6) <= 0 &&

View File

@@ -83,3 +83,13 @@ unsigned Atomic::cmpxchg(unsigned int exchange_value,
return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
(jint)compare_value);
}
jlong Atomic::add(jlong add_value, volatile jlong* dest) {
jlong old = load(dest);
jlong new_value = old + add_value;
while (old != cmpxchg(new_value, dest, old)) {
old = load(dest);
new_value = old + add_value;
}
return old;
}
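// Usage sketch (the counter name is illustrative): the rotating log stream
// added below relies on this to keep a byte count without a lock, e.g.
//   volatile jlong _bytes;             // shared counter
//   Atomic::add((jlong)len, &_bytes);  // note: this version returns the value
//                                      // held before the addition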

View File

@@ -51,6 +51,8 @@ class Atomic : AllStatic {
static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
static void* add_ptr(intptr_t add_value, volatile void* dest);
static jlong add (jlong add_value, volatile jlong* dest);
// Atomically increment location
static void inc (volatile jint* dest);
static void inc_ptr(volatile intptr_t* dest);

View File

@@ -1944,6 +1944,9 @@ class CommandLineFlags {
"Number of ObjArray elements to push onto the marking stack" \
"before pushing a continuation entry") \
\
notproduct(bool, ExecuteInternalVMTests, false, \
"Enable execution of internal VM tests.") \
\
product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\
product_pd(bool, ResizeTLAB, \
@@ -2332,6 +2335,20 @@ class CommandLineFlags {
"Print diagnostic message when GC is stalled" \
"by JNI critical section") \
\
/* GC log rotation setting */ \
\
product(bool, UseGCLogFileRotation, false, \
"Prevent large gclog file for long running app. " \
"Requires -Xloggc:<filename>") \
\
product(uintx, NumberOfGCLogFiles, 0, \
"Number of gclog files in rotation, " \
"Default: 0, no rotation") \
\
product(uintx, GCLogFileSize, 0, \
"GC log file size, Default: 0 bytes, no rotation " \
"Only valid with UseGCLogFileRotation") \
\
/* JVMTI heap profiling */ \
\
diagnostic(bool, TraceJVMTIObjectTagging, false, \

View File

@@ -468,12 +468,10 @@ void before_exit(JavaThread * thread) {
StatSampler::disengage();
StatSampler::destroy();
#ifndef SERIALGC
// stop CMS threads
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::stop();
}
#endif // SERIALGC
// We do not need to explicitly stop concurrent GC threads because the
// JVM will be taken down at a safepoint when such threads are inactive --
// except for some concurrent G1 threads, see (comment in)
// Threads::destroy_vm().
// Print GC/heap related information.
if (PrintGCDetails) {

View File

@@ -511,6 +511,11 @@ void SafepointSynchronize::do_cleanup_tasks() {
TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
NMethodSweeper::scan_stacks();
// rotate log files?
if (UseGCLogFileRotation) {
gclog_or_tty->rotate_log();
}
}

View File

@@ -3698,6 +3698,14 @@ bool Threads::destroy_vm() {
// heap is unparseable if they are caught. Grab the Heap_lock
// to prevent this. The GC vm_operations will not be able to
// queue until after the vm thread is dead.
// After this point, we'll never emerge out of the safepoint before
// the VM exits, so concurrent GC threads do not need to be explicitly
// stopped; they remain inactive until the process exits.
// Note: some concurrent G1 threads may still be running during a safepoint,
// but they will not be accessing the heap, only G1-specific side data
// structures that no other thread touches once this terminal safepoint
// has been reached.
MutexLocker ml(Heap_lock);
VMThread::wait_for_vm_thread_exit();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -161,11 +161,11 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
// Set or clear the specified bit.
inline void set_bit(idx_t bit);
void clear_bit(idx_t bit);
inline void clear_bit(idx_t bit);
// Atomically set or clear the specified bit.
bool par_set_bit(idx_t bit);
bool par_clear_bit(idx_t bit);
inline bool par_set_bit(idx_t bit);
inline bool par_clear_bit(idx_t bit);
// Put the given value at the given offset. The parallel version
// will CAS the value into the bitmap and is quite a bit slower.

View File

@@ -349,7 +349,7 @@ char* fileStream::readln(char *data, int count ) {
fileStream::~fileStream() {
if (_file != NULL) {
if (_need_close) fclose(_file);
_file = NULL;
_file = NULL;
}
}
@@ -377,6 +377,86 @@ void fdStream::write(const char* s, size_t len) {
update_position(s, len);
}
rotatingFileStream::~rotatingFileStream() {
if (_file != NULL) {
if (_need_close) fclose(_file);
_file = NULL;
FREE_C_HEAP_ARRAY(char, _file_name);
_file_name = NULL;
}
}
rotatingFileStream::rotatingFileStream(const char* file_name) {
_cur_file_num = 0;
_bytes_writen = 0L;
_file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
_file = fopen(_file_name, "w");
_need_close = true;
}
rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
_cur_file_num = 0;
_bytes_writen = 0L;
_file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
_file = fopen(_file_name, opentype);
_need_close = true;
}
void rotatingFileStream::write(const char* s, size_t len) {
if (_file != NULL) {
// Capture the result in an otherwise-unused local to avoid an unused-result warning from the gcc 4.x compiler.
size_t count = fwrite(s, 1, len, _file);
Atomic::add((jlong)count, &_bytes_writen);
}
update_position(s, len);
}
// rotate_log must be called from the VMThread at a safepoint. If the gc log
// rotation parameters ever need to be changed from a thread other than the
// VMThread, a subtype of VM_Operation should be created and submitted to the
// VMThread's operation queue (see the sketch after this function). DO NOT call
// this function directly. Currently it is safe to rotate the log at a safepoint
// through the VMThread: no mutator or concurrent GC thread writes to the gc log
// file in parallel with the VMThread at a safepoint. If mutator or concurrent
// GC threads are ever changed to run in parallel with the VMThread at a
// safepoint, write() and rotate_log() must be synchronized.
void rotatingFileStream::rotate_log() {
if (_bytes_writen < (jlong)GCLogFileSize) return;
#ifdef ASSERT
Thread *thread = Thread::current();
assert(thread == NULL ||
(thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
"Must be VMThread at safepoint");
#endif
if (NumberOfGCLogFiles == 1) {
// rotate in same file
rewind();
_bytes_writen = 0L;
return;
}
// rotate among files named file.0, file.1, file.2, ..., file.<NumberOfGCLogFiles-1>
// close current file, rotate to next file
if (_file != NULL) {
_cur_file_num ++;
if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
Arguments::gc_log_filename(), _cur_file_num);
fclose(_file);
_file = NULL;
}
_file = fopen(_file_name, "w");
if (_file != NULL) {
_bytes_writen = 0L;
_need_close = true;
} else {
tty->print_cr("failed to open rotation log file %s due to %s\n",
_file_name, strerror(errno));
_need_close = false;
}
}
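// A minimal sketch, not part of this change, of the VM_Operation subtype the
// comment above calls for; the class name and VMOp entry are hypothetical:
//   class VM_RotateGCLog : public VM_Operation {
//    public:
//     VMOp_Type type() const { return VMOp_RotateGCLog; } // new VMOp entry needed
//     void doit()            { gclog_or_tty->rotate_log(); }
//   };
//   VMThread::execute(new VM_RotateGCLog()); // runs on the VMThread at a safepoint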
defaultStream* defaultStream::instance = NULL;
int defaultStream::_output_fd = 1;
int defaultStream::_error_fd = 2;
@@ -749,14 +829,17 @@ void ostream_init_log() {
gclog_or_tty = tty; // default to tty
if (Arguments::gc_log_filename() != NULL) {
fileStream * gclog = new(ResourceObj::C_HEAP)
fileStream(Arguments::gc_log_filename());
fileStream * gclog = UseGCLogFileRotation ?
new(ResourceObj::C_HEAP)
rotatingFileStream(Arguments::gc_log_filename()) :
new(ResourceObj::C_HEAP)
fileStream(Arguments::gc_log_filename());
if (gclog->is_open()) {
// now we update the time stamp of the GC log to be synced up
// with tty.
gclog->time_stamp().update_to(tty->time_stamp().ticks());
gclog_or_tty = gclog;
}
gclog_or_tty = gclog;
}
// If we haven't lazily initialized the logfile yet, do it now,

View File

@@ -110,14 +110,15 @@ class outputStream : public ResourceObj {
// flushing
virtual void flush() {}
virtual void write(const char* str, size_t len) = 0;
virtual ~outputStream() {} // close properly on deletion
virtual void rotate_log() {} // GC log rotation
virtual ~outputStream() {} // close properly on deletion
void dec_cr() { dec(); cr(); }
void inc_cr() { inc(); cr(); }
};
// standard output
// ANSI C++ name collision
// ANSI C++ name collision
extern outputStream* tty; // tty output
extern outputStream* gclog_or_tty; // stream for gc log if -Xloggc:<f>, or tty
@@ -176,6 +177,7 @@ class fileStream : public outputStream {
FILE* _file;
bool _need_close;
public:
fileStream() { _file = NULL; _need_close = false; }
fileStream(const char* file_name);
fileStream(const char* file_name, const char* opentype);
fileStream(FILE* file) { _file = file; _need_close = false; }
@@ -210,6 +212,20 @@ class fdStream : public outputStream {
void flush() {};
};
class rotatingFileStream : public fileStream {
protected:
char* _file_name;
jlong _bytes_writen;
uintx _cur_file_num; // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
public:
rotatingFileStream(const char* file_name);
rotatingFileStream(const char* file_name, const char* opentype);
rotatingFileStream(FILE* file) : fileStream(file) {}
~rotatingFileStream();
virtual void write(const char* c, size_t len);
virtual void rotate_log();
};
void ostream_init();
void ostream_init_log();
void ostream_exit();

View File

@@ -0,0 +1,218 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "utilities/quickSort.hpp"
#ifndef PRODUCT
// Unit tests
#include "runtime/os.hpp"
#include <stdlib.h>
static int test_comparator(int a, int b) {
if (a == b) {
return 0;
}
if (a < b) {
return -1;
}
return 1;
}
static int test_even_odd_comparator(int a, int b) {
bool a_is_odd = (a % 2) == 1;
bool b_is_odd = (b % 2) == 1;
if (a_is_odd == b_is_odd) {
return 0;
}
if (a_is_odd) {
return -1;
}
return 1;
}
static int test_stdlib_comparator(const void* a, const void* b) {
int ai = *(int*)a;
int bi = *(int*)b;
if (ai == bi) {
return 0;
}
if (ai < bi) {
return -1;
}
return 1;
}
void QuickSort::print_array(const char* prefix, int* array, int length) {
tty->print("%s:", prefix);
for (int i = 0; i < length; i++) {
tty->print(" %d", array[i]);
}
tty->print_cr("");
}
bool QuickSort::compare_arrays(int* actual, int* expected, int length) {
for (int i = 0; i < length; i++) {
if (actual[i] != expected[i]) {
print_array("Sorted array ", actual, length);
print_array("Expected array", expected, length);
return false;
}
}
return true;
}
template <class C>
bool QuickSort::sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent) {
sort<int, C>(arrayToSort, length, comparator, idempotent);
return compare_arrays(arrayToSort, expectedResult, length);
}
bool QuickSort::test_quick_sort() {
tty->print_cr("test_quick_sort\n");
{
int* test_array = NULL;
int* expected_array = NULL;
assert(sort_and_compare(test_array, expected_array, 0, test_comparator), "Empty array not handled");
}
{
int test_array[] = {3};
int expected_array[] = {3};
assert(sort_and_compare(test_array, expected_array, 1, test_comparator), "Single value array not handled");
}
{
int test_array[] = {3,2};
int expected_array[] = {2,3};
assert(sort_and_compare(test_array, expected_array, 2, test_comparator), "Array with 2 values not correctly sorted");
}
{
int test_array[] = {3,2,1};
int expected_array[] = {1,2,3};
assert(sort_and_compare(test_array, expected_array, 3, test_comparator), "Array with 3 values not correctly sorted");
}
{
int test_array[] = {4,3,2,1};
int expected_array[] = {1,2,3,4};
assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "Array with 4 values not correctly sorted");
}
{
int test_array[] = {7,1,5,3,6,9,8,2,4,0};
int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Array with 10 values not correctly sorted");
}
{
int test_array[] = {4,4,1,4};
int expected_array[] = {1,4,4,4};
assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "3 duplicates not sorted correctly");
}
{
int test_array[] = {0,1,2,3,4,5,6,7,8,9};
int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Already sorted array not correctly sorted");
}
{
// one of the random arrays that exposed an issue in the partition method.
int test_array[] = {76,46,81,8,64,56,75,11,51,55,11,71,59,27,9,64,69,75,21,25,39,40,44,32,7,8,40,41,24,78,24,74,9,65,28,6,40,31,22,13,27,82};
int expected_array[] = {6,7,8,8,9,9,11,11,13,21,22,24,24,25,27,27,28,31,32,39,40,40,40,41,44,46,51,55,56,59,64,64,65,69,71,74,75,75,76,78,81,82};
assert(sort_and_compare(test_array, expected_array, 42, test_comparator), "Not correctly sorted");
}
{
int test_array[] = {2,8,1,4};
int expected_array[] = {1,4,2,8};
assert(sort_and_compare(test_array, expected_array, 4, test_even_odd_comparator), "Even/odd not sorted correctly");
}
{ // Some idempotent tests
{
// An array of length 3 is sorted by find_pivot alone. Make sure that this is idempotent.
int test_array[] = {1,4,8};
int expected_array[] = {1,4,8};
assert(sort_and_compare(test_array, expected_array, 3, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {1,7,9,4,8,2};
int expected_array[] = {1,7,9,4,8,2};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {1,9,7,4,2,8};
int expected_array[] = {1,9,7,4,2,8};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {7,9,1,2,8,4};
int expected_array[] = {7,9,1,2,8,4};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {7,1,9,2,4,8};
int expected_array[] = {7,1,9,2,4,8};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {9,1,7,4,8,2};
int expected_array[] = {9,1,7,4,8,2};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
{
int test_array[] = {9,7,1,4,2,8};
int expected_array[] = {9,7,1,4,2,8};
assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
}
}
// test sorting random arrays
for (int i = 0; i < 1000; i++) {
int length = os::random() % 100;
int* test_array = new int[length];
int* expected_array = new int[length];
for (int j = 0; j < length; j++) {
// Choose random values, with a good chance of duplicates
test_array[j] = os::random() % (length * 2);
expected_array[j] = test_array[j];
}
// Compare sorting to stdlib::qsort()
qsort(expected_array, length, sizeof(int), test_stdlib_comparator);
assert(sort_and_compare(test_array, expected_array, length, test_comparator), "Random array not correctly sorted");
// Make sure sorting is idempotent.
// Both test_array and expected_array are sorted by the test_comparator.
// Now sort them once with the test_even_odd_comparator. Then sort the
// test_array one more time with test_even_odd_comparator and verify that
// it is idempotent.
sort(expected_array, length, test_even_odd_comparator, true);
sort(test_array, length, test_even_odd_comparator, true);
assert(compare_arrays(test_array, expected_array, length), "Sorting identical arrays rendered different results");
sort(test_array, length, test_even_odd_comparator, true);
assert(compare_arrays(test_array, expected_array, length), "Sorting already sorted array changed order of elements - not idempotent");
delete[] test_array;
delete[] expected_array;
}
return true;
}
#endif

View File

@@ -0,0 +1,138 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_UTILITIES_QUICKSORT_HPP
#define SHARE_VM_UTILITIES_QUICKSORT_HPP
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
class QuickSort : AllStatic {
private:
template<class T>
static void swap(T* array, int x, int y) {
T tmp = array[x];
array[x] = array[y];
array[y] = tmp;
}
// As pivot we use the median of the first, last and middle elements.
// We swap in these three values at the right place in the array. This
// means that this method not only returns the index of the pivot
// element. It also alters the array so that:
// array[first] <= array[middle] <= array[last]
// A side effect of this is that arrays of length <= 3 are sorted.
template<class T, class C>
static int find_pivot(T* array, int length, C comparator) {
assert(length > 1, "length of array must be > 0");
int middle_index = length / 2;
int last_index = length - 1;
if (comparator(array[0], array[middle_index]) == 1) {
swap(array, 0, middle_index);
}
if (comparator(array[0], array[last_index]) == 1) {
swap(array, 0, last_index);
}
if (comparator(array[middle_index], array[last_index]) == 1) {
swap(array, middle_index, last_index);
}
// Now the value in the middle of the array is the median
// of the first, last and middle values. Use this as pivot.
return middle_index;
}
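// Worked example (made-up input): array = {9, 1, 5}, middle_index = 1.
// 9 > 1 swaps to {1, 9, 5}; 1 > 5 fails; 9 > 5 swaps to {1, 5, 9}.
// The median 5 now sits at middle_index, and the length-3 array is fully
// sorted, which is the side effect the comment above describes.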
template<class T, class C, bool idempotent>
static int partition(T* array, int pivot, int length, C comparator) {
int left_index = -1;
int right_index = length;
T pivot_val = array[pivot];
while (true) {
do {
left_index++;
} while (comparator(array[left_index], pivot_val) == -1);
do {
right_index--;
} while (comparator(array[right_index], pivot_val) == 1);
if (left_index < right_index) {
if (!idempotent || comparator(array[left_index], array[right_index]) != 0) {
swap(array, left_index, right_index);
}
} else {
return right_index;
}
}
ShouldNotReachHere();
return 0;
}
template<class T, class C, bool idempotent>
static void inner_sort(T* array, int length, C comparator) {
if (length < 2) {
return;
}
int pivot = find_pivot(array, length, comparator);
if (length < 4) {
// arrays up to length 3 will be sorted after finding the pivot
return;
}
int split = partition<T, C, idempotent>(array, pivot, length, comparator);
int first_part_length = split + 1;
inner_sort<T, C, idempotent>(array, first_part_length, comparator);
inner_sort<T, C, idempotent>(&array[first_part_length], length - first_part_length, comparator);
}
public:
// The idempotent parameter prevents the sort from
// reordering a previous valid sort by not swapping
// fields that compare as equal. This requires extra
// calls to the comparator, so the performance
// impact depends on the comparator.
template<class T, class C>
static void sort(T* array, int length, C comparator, bool idempotent) {
// Switch "idempotent" from function paramter to template parameter
if (idempotent) {
inner_sort<T, C, true>(array, length, comparator);
} else {
inner_sort<T, C, false>(array, length, comparator);
}
}
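// Usage sketch (the comparator is illustrative; any function returning
// -1/0/1 in the ordering sense above will do):
//   static int cmp(int a, int b) { return a < b ? -1 : (a == b ? 0 : 1); }
//   int data[] = {3, 1, 2};
//   QuickSort::sort(data, 3, cmp, false /* idempotent */); // data -> {1, 2, 3}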
// for unit testing
#ifndef PRODUCT
static void print_array(const char* prefix, int* array, int length);
static bool compare_arrays(int* actual, int* expected, int length);
template <class C> static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false);
static bool test_quick_sort();
#endif
};
#endif //SHARE_VM_UTILITIES_QUICKSORT_HPP

View File

@@ -0,0 +1,179 @@
##
## @test @(#)test6941923.sh
## @bug 6941923
## @summary test new added flags for gc log rotation
## @author yqi
## @run shell test6941923.sh
##
## skip on windows
OS=`uname -s`
case "$OS" in
SunOS | Linux )
NULL=/dev/null
PS=":"
FS="/"
;;
Windows_* )
echo "Test skipped for Windows"
exit 0
;;
* )
echo "Unrecognized system!"
exit 1;
;;
esac
if [ "${JAVA_HOME}" = "" ]
then
echo "JAVA_HOME not set"
exit 0
fi
$JAVA_HOME/bin/java -version > $NULL 2>&1
if [ $? != 0 ]; then
echo "Wrong JAVA_HOME? JAVA_HOME: $JAVA_HOME"
exit 1
fi
# create a small test case
testname="Test"
if [ -e ${testname}.java ]; then
rm -rf ${testname}.*
fi
cat >> ${testname}.java << __EOF__
import java.util.Vector;
public class Test implements Runnable
{
private boolean _should_stop = false;
public static void main(String[] args) throws Exception {
long limit = Long.parseLong(args[0]) * 60L * 1000L; // minutes
Test t = new Test();
t.set_stop(false);
Thread thr = new Thread(t);
thr.start();
long time1 = System.currentTimeMillis();
long time2 = System.currentTimeMillis();
while (time2 - time1 < limit) {
try {
Thread.sleep(2000); // 2 seconds
}
catch(Exception e) {}
time2 = System.currentTimeMillis();
System.out.print("\r... " + (time2 - time1)/1000 + " seconds");
}
System.out.println();
t.set_stop(true);
}
public void set_stop(boolean value) { _should_stop = value; }
public void run() {
int cap = 20000;
int fix_size = 2048;
int loop = 0;
Vector< byte[] > v = new Vector< byte[] >(cap);
while(!_should_stop) {
byte[] g = new byte[fix_size];
v.add(g);
loop++;
if (loop > cap) {
v = null;
cap *= 2;
if (cap > 80000) cap = 80000;
v = new Vector< byte[] >(cap);
}
}
}
}
__EOF__
msgsuccess="succeeded"
msgfail="failed"
gclogsize="16K"
filesize=$((16*1024))
$JAVA_HOME/bin/javac ${testname}.java > $NULL 2>&1
if [ $? != 0 ]; then
echo "$JAVA_HOME/bin/javac ${testname}.java $fail"
exit -1
fi
# test for 2 minutes; this is long enough to complete a full cycle of gc log rotation
tts=2
logfile="test.log"
hotspotlog="hotspot.log"
if [ -e $logfile ]; then
rm -rf $logfile
fi
#also delete $hotspotlog if it exists
if [ -f $hotspotlog ]; then
rm -rf $hotspotlog
fi
options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=$gclogsize"
echo "Test gc log rotation in same file, wait for $tts minutes ...."
$JAVA_HOME/bin/java $options $testname $tts
if [ $? != 0 ]; then
echo "$msgfail"
exit -1
fi
# rotation file will be $logfile.0
if [ -f $logfile.0 ]; then
outfilesize=`ls -l $logfile.0 | awk '{print $5 }'`
if [ $((outfilesize)) -ge $((filesize)) ]; then
echo $msgsuccess
else
echo $msgfail
fi
else
echo $msgfail
exit -1
fi
# delete log file
rm -rf $logfile.0
if [ -f $hotspotlog ]; then
rm -rf $hotspotlog
fi
#multiple log files
numoffiles=3
options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=$numoffiles -XX:GCLogFileSize=$gclogsize"
echo "Test gc log rotation in $numoffiles files, wait for $tts minutes ...."
$JAVA_HOME/bin/java $options $testname $tts
if [ $? != 0 ]; then
echo "$msgfail"
exit -1
fi
atleast=0 # count of files with size >= $gclogsize; expect at least numoffiles-1
tk=0
while [ $(($tk)) -lt $(($numoffiles)) ]
do
if [ -f $logfile.$tk ]; then
outfilesize=`ls -l $logfile.$tk | awk '{ print $5 }'`
if [ $(($outfilesize)) -ge $(($filesize)) ]; then
atleast=$((atleast+1))
fi
fi
tk=$((tk+1))
done
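# For example, with NumberOfGCLogFiles=3 the run above should leave
# test.log.0, test.log.1 and test.log.2, and at least two of them should
# have reached the 16K GCLogFileSize.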
rm -rf $logfile.*
rm -rf $testname.*
rm -rf $hotspotlog
if [ $(($atleast)) -ge $(($numoffiles-1)) ]; then
echo $msgsuccess
else
echo $msgfail
exit -1
fi