Merge
commit 5b60edebb8
@@ -249,8 +249,6 @@ void AbstractAssembler::block_comment(const char* comment) {
bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
// Exception handler checks the nmethod's implicit null checks table
// only when this method returns false.
-#ifndef SPARC
-// Sparc does not have based addressing
if (UseCompressedOops) {
// The first page after heap_base is unmapped and
// the 'offset' is equal to [heap_base + offset] for
@@ -261,7 +259,6 @@ bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
}
}
-#endif // SPARC
return offset < 0 || os::vm_page_size() <= offset;
}

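The hunk above leans on one fact: with compressed oops, dereferencing a NULL narrow oop faults at heap_base plus a small field offset rather than at the offset itself, so the faulting value is first normalized back to a plain offset and then compared against the protected page. A self-contained sketch of that check follows; it is an illustration only, with made-up heap_base and page-size values, not the HotSpot routine itself.

#include <cstdint>
#include <cstdio>

// Hedged sketch of the needs_explicit_null_check idea: an access needs an
// explicit null check when the faulting address could not have come from
// dereferencing NULL (or NULL plus a small field offset).
static bool needs_explicit_null_check(intptr_t offset,
                                      uintptr_t heap_base,   // assumed compressed-oop heap base
                                      intptr_t page_size) {  // assumed protected page size
  // With compressed oops, a narrow-oop null dereference faults at
  // heap_base + offset; normalize it back to a plain offset first.
  if ((uintptr_t)offset >= heap_base) {
    offset = (intptr_t)((uintptr_t)offset - heap_base);
  }
  // Only small non-negative offsets land in the unmapped first page and can
  // be covered by an implicit (signal-based) null check.
  return offset < 0 || offset >= page_size;
}

int main() {
  const uintptr_t heap_base = 0x40000000u; // illustrative value only
  const intptr_t  page_size = 4096;
  printf("%d\n", needs_explicit_null_check(16, heap_base, page_size));                          // 0: implicit check ok
  printf("%d\n", needs_explicit_null_check((intptr_t)(heap_base + 16), heap_base, page_size));  // 0: normalized, still ok
  printf("%d\n", needs_explicit_null_check(8192, heap_base, page_size));                        // 1: needs explicit check
  return 0;
}
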
@@ -49,7 +49,7 @@ bool ciMethodBlocks::is_block_start(int bci) {
// first half. Returns the range beginning at bci.
ciBlock *ciMethodBlocks::split_block_at(int bci) {
ciBlock *former_block = block_containing(bci);
-ciBlock *new_block = new(_arena) ciBlock(_method, _num_blocks++, this, former_block->start_bci());
+ciBlock *new_block = new(_arena) ciBlock(_method, _num_blocks++, former_block->start_bci());
_blocks->append(new_block);
assert(former_block != NULL, "must not be NULL");
new_block->set_limit_bci(bci);
@@ -83,7 +83,7 @@ ciBlock *ciMethodBlocks::make_block_at(int bci) {
if (cb == NULL ) {
// This is our first time visiting this bytecode. Create
// a fresh block and assign it this starting point.
-ciBlock *nb = new(_arena) ciBlock(_method, _num_blocks++, this, bci);
+ciBlock *nb = new(_arena) ciBlock(_method, _num_blocks++, bci);
_blocks->append(nb);
_bci_to_block[bci] = nb;
return nb;
@@ -98,6 +98,11 @@ ciBlock *ciMethodBlocks::make_block_at(int bci) {
}
}

+ciBlock *ciMethodBlocks::make_dummy_block() {
+ciBlock *dum = new(_arena) ciBlock(_method, -1, 0);
+return dum;
+}
+
void ciMethodBlocks::do_analysis() {
ciBytecodeStream s(_method);
ciBlock *cur_block = block_containing(0);
@@ -253,7 +258,7 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));

// create initial block covering the entire method
-ciBlock *b = new(arena) ciBlock(_method, _num_blocks++, this, 0);
+ciBlock *b = new(arena) ciBlock(_method, _num_blocks++, 0);
_blocks->append(b);
_bci_to_block[0] = b;

@@ -334,7 +339,7 @@ void ciMethodBlocks::dump() {
#endif

-ciBlock::ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci) :
+ciBlock::ciBlock(ciMethod *method, int index, int start_bci) :
#ifndef PRODUCT
_method(method),
#endif

@@ -48,6 +48,8 @@ public:
int num_blocks() { return _num_blocks;}
void clear_processed();

+ciBlock *make_dummy_block(); // a block not associated with a bci
+
#ifndef PRODUCT
void dump();
#endif
@@ -81,7 +83,7 @@ public:
fall_through_bci = -1
};

-ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci);
+ciBlock(ciMethod *method, int index, int start_bci);
int start_bci() const { return _start_bci; }
int limit_bci() const { return _limit_bci; }
int control_bci() const { return _control_bci; }
@@ -94,7 +96,6 @@ public:
int ex_limit_bci() const { return _ex_limit_bci; }
bool contains(int bci) const { return start_bci() <= bci && bci < limit_bci(); }

-
// flag handling
bool processed() const { return (_flags & Processed) != 0; }
bool is_handler() const { return (_flags & Handler) != 0; }

File diff suppressed because it is too large
@@ -34,11 +34,13 @@ private:
int _max_locals;
int _max_stack;
int _code_size;
+bool _has_irreducible_entry;

const char* _failure_reason;

public:
class StateVector;
+class Loop;
class Block;

// Build a type flow analyzer
@@ -55,6 +57,7 @@ public:
int max_stack() const { return _max_stack; }
int max_cells() const { return _max_locals + _max_stack; }
int code_size() const { return _code_size; }
+bool has_irreducible_entry() const { return _has_irreducible_entry; }

// Represents information about an "active" jsr call. This
// class represents a call to the routine at some entry address
@@ -125,6 +128,19 @@ public:
void print_on(outputStream* st) const PRODUCT_RETURN;
};

+class LocalSet VALUE_OBJ_CLASS_SPEC {
+private:
+enum Constants { max = 63 };
+uint64_t _bits;
+public:
+LocalSet() : _bits(0) {}
+void add(uint32_t i) { if (i < (uint32_t)max) _bits |= (1LL << i); }
+void add(LocalSet* ls) { _bits |= ls->_bits; }
+bool test(uint32_t i) const { return i < (uint32_t)max ? (_bits>>i)&1U : true; }
+void clear() { _bits = 0; }
+void print_on(outputStream* st, int limit) const PRODUCT_RETURN;
+};
+
// Used as a combined index for locals and temps
enum Cell {
Cell_0, Cell_max = INT_MAX
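The LocalSet introduced above is a 64-entry bitset over local-variable indexes that stays deliberately conservative: any index at or beyond max (63) is reported as possibly defined. A minimal stand-alone sketch of the same idea, stripped of HotSpot's VALUE_OBJ_CLASS_SPEC and outputStream machinery:

#include <cassert>
#include <cstdint>

// Hedged sketch of the LocalSet idea: a 64-bit "defined locals" set that is
// exact for locals 0..62 and deliberately conservative for anything larger.
class LocalSet {
 private:
  enum Constants { max = 63 };
  uint64_t _bits;
 public:
  LocalSet() : _bits(0) {}
  void add(uint32_t i)         { if (i < (uint32_t)max) _bits |= (UINT64_C(1) << i); }
  void add(const LocalSet* ls) { _bits |= ls->_bits; }
  // Locals >= max are always reported as (possibly) defined: never wrong,
  // just less precise, which is the safe direction for invariance checks.
  bool test(uint32_t i) const  { return i < (uint32_t)max ? ((_bits >> i) & 1U) != 0 : true; }
  void clear()                 { _bits = 0; }
};

int main() {
  LocalSet defs;
  defs.add(2);
  assert(defs.test(2));    // explicitly recorded
  assert(!defs.test(3));   // known not defined
  assert(defs.test(100));  // out of range: conservatively "defined"
  LocalSet more; more.add(7);
  defs.add(&more);         // union with another block's set
  assert(defs.test(7));
  return 0;
}
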
@@ -142,6 +158,8 @@ public:
int _trap_bci;
int _trap_index;

+LocalSet _def_locals; // For entire block
+
static ciType* type_meet_internal(ciType* t1, ciType* t2, ciTypeFlow* analyzer);

public:
@@ -181,6 +199,9 @@ public:
int monitor_count() const { return _monitor_count; }
void set_monitor_count(int mc) { _monitor_count = mc; }

+LocalSet* def_locals() { return &_def_locals; }
+const LocalSet* def_locals() const { return &_def_locals; }
+
static Cell start_cell() { return (Cell)0; }
static Cell next_cell(Cell c) { return (Cell)(((int)c) + 1); }
Cell limit_cell() const {
@@ -250,6 +271,10 @@ public:
return type->basic_type() == T_DOUBLE;
}

+void store_to_local(int lnum) {
+_def_locals.add((uint) lnum);
+}
+
void push_translate(ciType* type);

void push_int() {
@@ -358,6 +383,7 @@ public:
"must be reference type or return address");
overwrite_local_double_long(index);
set_type_at(local(index), type);
+store_to_local(index);
}

void load_local_double(int index) {
@@ -376,6 +402,8 @@ public:
overwrite_local_double_long(index);
set_type_at(local(index), type);
set_type_at(local(index+1), type2);
+store_to_local(index);
+store_to_local(index+1);
}

void load_local_float(int index) {
@@ -388,6 +416,7 @@ public:
assert(is_float(type), "must be float type");
overwrite_local_double_long(index);
set_type_at(local(index), type);
+store_to_local(index);
}

void load_local_int(int index) {
@@ -400,6 +429,7 @@ public:
assert(is_int(type), "must be int type");
overwrite_local_double_long(index);
set_type_at(local(index), type);
+store_to_local(index);
}

void load_local_long(int index) {
@@ -418,6 +448,8 @@ public:
overwrite_local_double_long(index);
set_type_at(local(index), type);
set_type_at(local(index+1), type2);
+store_to_local(index);
+store_to_local(index+1);
}

// Stop interpretation of this path with a trap.
@@ -450,13 +482,31 @@ public:
};

// Parameter for "find_block" calls:
-// Describes the difference between a public and private copy.
+// Describes the difference between a public and backedge copy.
enum CreateOption {
create_public_copy,
-create_private_copy,
+create_backedge_copy,
no_create
};

+// Successor iterator
+class SuccIter : public StackObj {
+private:
+Block* _pred;
+int _index;
+Block* _succ;
+public:
+SuccIter() : _pred(NULL), _index(-1), _succ(NULL) {}
+SuccIter(Block* pred) : _pred(pred), _index(-1), _succ(NULL) { next(); }
+int index() { return _index; }
+Block* pred() { return _pred; } // Return predecessor
+bool done() { return _index < 0; } // Finished?
+Block* succ() { return _succ; } // Return current successor
+void next(); // Advance
+void set_succ(Block* succ); // Update current successor
+bool is_normal_ctrl() { return index() < _pred->successors()->length(); }
+};
+
// A basic block
class Block : public ResourceObj {
private:
@@ -470,15 +520,24 @@ public:
int _trap_bci;
int _trap_index;

-// A reasonable approximation to pre-order, provided.to the client.
+// pre_order, assigned at first visit. Used as block ID and "visited" tag
int _pre_order;

-// Has this block been cloned for some special purpose?
-bool _private_copy;
+// A post-order, used to compute the reverse post order (RPO) provided to the client
+int _post_order; // used to compute rpo
+
+// Has this block been cloned for a loop backedge?
+bool _backedge_copy;

// A pointer used for our internal work list
Block* _next;
-bool _on_work_list;
+bool _on_work_list; // on the work list
+Block* _rpo_next; // Reverse post order list
+
+// Loop info
+Loop* _loop; // nearest loop
+bool _irreducible_entry; // entry to irreducible loop
+bool _exception_entry; // entry to exception handler

ciBlock* ciblock() const { return _ciblock; }
StateVector* state() const { return _state; }
@@ -504,10 +563,11 @@ public:
int start() const { return _ciblock->start_bci(); }
int limit() const { return _ciblock->limit_bci(); }
int control() const { return _ciblock->control_bci(); }
+JsrSet* jsrs() const { return _jsrs; }

-bool is_private_copy() const { return _private_copy; }
-void set_private_copy(bool z);
-int private_copy_count() const { return outer()->private_copy_count(ciblock()->index(), _jsrs); }
+bool is_backedge_copy() const { return _backedge_copy; }
+void set_backedge_copy(bool z);
+int backedge_copy_count() const { return outer()->backedge_copy_count(ciblock()->index(), _jsrs); }

// access to entry state
int stack_size() const { return _state->stack_size(); }
@@ -515,6 +575,20 @@ public:
ciType* local_type_at(int i) const { return _state->local_type_at(i); }
ciType* stack_type_at(int i) const { return _state->stack_type_at(i); }

+// Data flow on locals
+bool is_invariant_local(uint v) const {
+assert(is_loop_head(), "only loop heads");
+// Find outermost loop with same loop head
+Loop* lp = loop();
+while (lp->parent() != NULL) {
+if (lp->parent()->head() != lp->head()) break;
+lp = lp->parent();
+}
+return !lp->def_locals()->test(v);
+}
+LocalSet* def_locals() { return _state->def_locals(); }
+const LocalSet* def_locals() const { return _state->def_locals(); }
+
// Get the successors for this Block.
GrowableArray<Block*>* successors(ciBytecodeStream* str,
StateVector* state,
@@ -524,13 +598,6 @@ public:
return _successors;
}

-// Helper function for "successors" when making private copies of
-// loop heads for C2.
-Block * clone_loop_head(ciTypeFlow* analyzer,
-int branch_bci,
-Block* target,
-JsrSet* jsrs);
-
// Get the exceptional successors for this Block.
GrowableArray<Block*>* exceptions() {
if (_exceptions == NULL) {
@@ -584,17 +651,126 @@ public:
bool is_on_work_list() const { return _on_work_list; }

bool has_pre_order() const { return _pre_order >= 0; }
-void set_pre_order(int po) { assert(!has_pre_order() && po >= 0, ""); _pre_order = po; }
+void set_pre_order(int po) { assert(!has_pre_order(), ""); _pre_order = po; }
int pre_order() const { assert(has_pre_order(), ""); return _pre_order; }
+void set_next_pre_order() { set_pre_order(outer()->inc_next_pre_order()); }
bool is_start() const { return _pre_order == outer()->start_block_num(); }

-// A ranking used in determining order within the work list.
-bool is_simpler_than(Block* other);
+// Reverse post order
+void df_init();
+bool has_post_order() const { return _post_order >= 0; }
+void set_post_order(int po) { assert(!has_post_order() && po >= 0, ""); _post_order = po; }
+void reset_post_order(int o){ _post_order = o; }
+int post_order() const { assert(has_post_order(), ""); return _post_order; }
+
+bool has_rpo() const { return has_post_order() && outer()->have_block_count(); }
+int rpo() const { assert(has_rpo(), ""); return outer()->block_count() - post_order() - 1; }
+void set_rpo_next(Block* b) { _rpo_next = b; }
+Block* rpo_next() { return _rpo_next; }
+
+// Loops
+Loop* loop() const { return _loop; }
+void set_loop(Loop* lp) { _loop = lp; }
+bool is_loop_head() const { return _loop && _loop->head() == this; }
+void set_irreducible_entry(bool c) { _irreducible_entry = c; }
+bool is_irreducible_entry() const { return _irreducible_entry; }
+bool is_visited() const { return has_pre_order(); }
+bool is_post_visited() const { return has_post_order(); }
+bool is_clonable_exit(Loop* lp);
+Block* looping_succ(Loop* lp); // Successor inside of loop
+bool is_single_entry_loop_head() const {
+if (!is_loop_head()) return false;
+for (Loop* lp = loop(); lp != NULL && lp->head() == this; lp = lp->parent())
+if (lp->is_irreducible()) return false;
+return true;
+}

void print_value_on(outputStream* st) const PRODUCT_RETURN;
void print_on(outputStream* st) const PRODUCT_RETURN;
};

+// Loop
+class Loop : public ResourceObj {
+private:
+Loop* _parent;
+Loop* _sibling; // List of siblings, null terminated
+Loop* _child; // Head of child list threaded thru sibling pointer
+Block* _head; // Head of loop
+Block* _tail; // Tail of loop
+bool _irreducible;
+LocalSet _def_locals;
+
+public:
+Loop(Block* head, Block* tail) :
+_head(head), _tail(tail),
+_parent(NULL), _sibling(NULL), _child(NULL),
+_irreducible(false), _def_locals() {}
+
+Loop* parent() const { return _parent; }
+Loop* sibling() const { return _sibling; }
+Loop* child() const { return _child; }
+Block* head() const { return _head; }
+Block* tail() const { return _tail; }
+void set_parent(Loop* p) { _parent = p; }
+void set_sibling(Loop* s) { _sibling = s; }
+void set_child(Loop* c) { _child = c; }
+void set_head(Block* hd) { _head = hd; }
+void set_tail(Block* tl) { _tail = tl; }
+
+int depth() const; // nesting depth
+
+// Returns true if lp is a nested loop or us.
+bool contains(Loop* lp) const;
+bool contains(Block* blk) const { return contains(blk->loop()); }
+
+// Data flow on locals
+LocalSet* def_locals() { return &_def_locals; }
+const LocalSet* def_locals() const { return &_def_locals; }
+
+// Merge the branch lp into this branch, sorting on the loop head
+// pre_orders. Returns the new branch.
+Loop* sorted_merge(Loop* lp);
+
+// Mark non-single entry to loop
+void set_irreducible(Block* entry) {
+_irreducible = true;
+entry->set_irreducible_entry(true);
+}
+bool is_irreducible() const { return _irreducible; }
+
+bool is_root() const { return _tail->pre_order() == max_jint; }
+
+void print(outputStream* st = tty, int indent = 0) const PRODUCT_RETURN;
+};
+
+// Postorder iteration over the loop tree.
+class PostorderLoops : public StackObj {
+private:
+Loop* _root;
+Loop* _current;
+public:
+PostorderLoops(Loop* root) : _root(root), _current(root) {
+while (_current->child() != NULL) {
+_current = _current->child();
+}
+}
+bool done() { return _current == NULL; } // Finished iterating?
+void next(); // Advance to next loop
+Loop* current() { return _current; } // Return current loop.
+};
+
+// Preorder iteration over the loop tree.
+class PreorderLoops : public StackObj {
+private:
+Loop* _root;
+Loop* _current;
+public:
+PreorderLoops(Loop* root) : _root(root), _current(root) {}
+bool done() { return _current == NULL; } // Finished iterating?
+void next(); // Advance to next loop
+Loop* current() { return _current; } // Return current loop.
+};
+
// Standard indexes of successors, for various bytecodes.
enum {
FALL_THROUGH = 0, // normal control
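The Loop tree added above threads children through _child and _sibling pointers, PostorderLoops starts from the leftmost descendant, and PreorderLoops starts from the root; the next() bodies live in the .cpp file and are not part of this header. A hedged sketch of how such iterators typically advance over a first-child/next-sibling tree (illustrative code, not the HotSpot implementation):

#include <cstdio>

// Hedged sketch: a loop-tree node threaded the same way as ciTypeFlow::Loop
// (first child + next sibling + parent), with postorder/preorder walks.
struct Loop {
  const char* name;
  Loop* parent  = nullptr;
  Loop* child   = nullptr;  // head of child list
  Loop* sibling = nullptr;  // next sibling, null terminated
};

static Loop* leftmost_descendant(Loop* lp) {
  while (lp->child != nullptr) lp = lp->child;
  return lp;
}

// Postorder: children before parents.
static Loop* postorder_next(Loop* lp) {
  if (lp->sibling != nullptr) return leftmost_descendant(lp->sibling);
  return lp->parent;  // all siblings done, surface to the parent
}

// Preorder: parents before children.
static Loop* preorder_next(Loop* lp) {
  if (lp->child != nullptr) return lp->child;
  while (lp != nullptr && lp->sibling == nullptr) lp = lp->parent;
  return lp != nullptr ? lp->sibling : nullptr;
}

int main() {
  // root -> { A -> { A1 }, B }
  Loop root{"root"}, A{"A"}, A1{"A1"}, B{"B"};
  root.child = &A;  A.parent = &root;  A.sibling = &B;  B.parent = &root;
  A.child = &A1;    A1.parent = &A;

  for (Loop* lp = leftmost_descendant(&root); lp != nullptr; lp = postorder_next(lp))
    printf("post: %s\n", lp->name);  // A1, A, B, root
  for (Loop* lp = &root; lp != nullptr; lp = preorder_next(lp))
    printf("pre:  %s\n", lp->name);  // root, A, A1, B
  return 0;
}
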
@@ -619,6 +795,12 @@ private:
// Tells if a given instruction is able to generate an exception edge.
bool can_trap(ciBytecodeStream& str);

+// Clone the loop heads. Returns true if any cloning occurred.
+bool clone_loop_heads(Loop* lp, StateVector* temp_vector, JsrSet* temp_set);
+
+// Clone lp's head and replace tail's successors with clone.
+Block* clone_loop_head(Loop* lp, StateVector* temp_vector, JsrSet* temp_set);
+
public:
// Return the block beginning at bci which has a JsrSet compatible
// with jsrs.
@@ -627,8 +809,8 @@ public:
// block factory
Block* get_block_for(int ciBlockIndex, JsrSet* jsrs, CreateOption option = create_public_copy);

-// How many of the blocks have the private_copy bit set?
-int private_copy_count(int ciBlockIndex, JsrSet* jsrs) const;
+// How many of the blocks have the backedge_copy bit set?
+int backedge_copy_count(int ciBlockIndex, JsrSet* jsrs) const;

// Return an existing block containing bci which has a JsrSet compatible
// with jsrs, or NULL if there is none.
@@ -651,11 +833,18 @@ public:
return _block_map[po]; }
Block* start_block() const { return pre_order_at(start_block_num()); }
int start_block_num() const { return 0; }
+Block* rpo_at(int rpo) const { assert(0 <= rpo && rpo < block_count(), "out of bounds");
+return _block_map[rpo]; }
+int next_pre_order() { return _next_pre_order; }
+int inc_next_pre_order() { return _next_pre_order++; }

private:
// A work list used during flow analysis.
Block* _work_list;

+// List of blocks in reverse post order
+Block* _rpo_list;
+
// Next Block::_pre_order. After mapping, doubles as block_count.
int _next_pre_order;

@@ -668,6 +857,15 @@ private:
// Add a basic block to our work list.
void add_to_work_list(Block* block);

+// Prepend a basic block to rpo list.
+void prepend_to_rpo_list(Block* blk) {
+blk->set_rpo_next(_rpo_list);
+_rpo_list = blk;
+}
+
+// Root of the loop tree
+Loop* _loop_tree_root;
+
// State used for make_jsr_record
int _jsr_count;
GrowableArray<JsrRecord*>* _jsr_records;
@@ -677,6 +875,9 @@ public:
// does not already exist.
JsrRecord* make_jsr_record(int entry_address, int return_address);

+void set_loop_tree_root(Loop* ltr) { _loop_tree_root = ltr; }
+Loop* loop_tree_root() { return _loop_tree_root; }
+
private:
// Get the initial state for start_bci:
const StateVector* get_start_state();
@@ -703,6 +904,15 @@ private:
// necessary.
void flow_types();

+// Perform the depth first type flow analysis. Helper for flow_types.
+void df_flow_types(Block* start,
+bool do_flow,
+StateVector* temp_vector,
+JsrSet* temp_set);
+
+// Incrementally build loop tree.
+void build_loop_tree(Block* blk);
+
// Create the block map, which indexes blocks in pre_order.
void map_blocks();

@@ -711,4 +921,6 @@ public:
void do_flow();

void print_on(outputStream* st) const PRODUCT_RETURN;
+
+void rpo_print_on(outputStream* st) const PRODUCT_RETURN;
};

@@ -1350,11 +1350,7 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
return false;
}
}
-if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-// Cannot do this test if verification of the UseParallelOldGC
-// code using the PSMarkSweep code is being done.
-assert(unloading_occurred, "Inconsistency in unloading");
-}
+assert(unloading_occurred, "Inconsistency in unloading");
make_unloaded(is_alive, obj);
return true;
}

@@ -210,10 +210,6 @@ void ParallelScavengeHeap::post_initialize() {
PSScavenge::initialize();
if (UseParallelOldGC) {
PSParallelCompact::post_initialize();
-if (VerifyParallelOldWithMarkSweep) {
-// Will be used for verification of par old.
-PSMarkSweep::initialize();
-}
} else {
PSMarkSweep::initialize();
}
@@ -402,7 +398,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
return result;
}
if (!is_tlab &&
-size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
+size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
result = old_gen()->allocate(size, is_tlab);
if (result != NULL) {
return result;

@@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
-ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
+RegionTaskQueueSet* qset = ParCompactionManager::region_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
@@ -205,38 +205,38 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
}

//
-// StealChunkCompactionTask
+// StealRegionCompactionTask
//


-StealChunkCompactionTask::StealChunkCompactionTask(ParallelTaskTerminator* t) :
-_terminator(t) {};
+StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
+_terminator(t) {}

-void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
+void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
assert(Universe::heap()->is_gc_active(), "called outside gc");

-NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
+NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);

-// Has to drain stacks first because there may be chunks on
+// Has to drain stacks first because there may be regions on
// preloaded onto the stack and this thread may never have
// done a draining task. Are the draining tasks needed?

-cm->drain_chunk_stacks();
+cm->drain_region_stacks();

-size_t chunk_index = 0;
+size_t region_index = 0;
int random_seed = 17;

// If we're the termination task, try 10 rounds of stealing before
// setting the termination flag

while(true) {
-if (ParCompactionManager::steal(which, &random_seed, chunk_index)) {
-PSParallelCompact::fill_and_update_chunk(cm, chunk_index);
-cm->drain_chunk_stacks();
+if (ParCompactionManager::steal(which, &random_seed, region_index)) {
+PSParallelCompact::fill_and_update_region(cm, region_index);
+cm->drain_region_stacks();
} else {
if (terminator()->offer_termination()) {
break;
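The do_it loop above drains the worker's own region stack and then keeps stealing region indexes from other workers until the terminator agrees that everything is empty. A simplified, single-threaded sketch of that drain-then-steal shape, with ordinary containers standing in for GCTaskManager, ParCompactionManager and ParallelTaskTerminator (names and behavior are illustrative assumptions):

#include <cstdio>
#include <deque>
#include <vector>

// Hedged sketch of the StealRegionCompactionTask control flow: drain your own
// queue, then steal from peers, and only stop once every queue stays empty.
using RegionIndex = size_t;

static void fill_and_update_region(int worker, RegionIndex r) {
  printf("worker %d compacts region %zu\n", worker, r);
}

static void drain_region_stack(int worker, std::deque<RegionIndex>& mine) {
  while (!mine.empty()) {
    RegionIndex r = mine.front();
    mine.pop_front();
    fill_and_update_region(worker, r);
  }
}

// Stand-in for ParCompactionManager::steal(): take one region from any peer.
static bool steal(std::vector<std::deque<RegionIndex>>& queues, int self, RegionIndex& out) {
  for (size_t i = 0; i < queues.size(); i++) {
    if ((int)i == self || queues[i].empty()) continue;
    out = queues[i].back();
    queues[i].pop_back();
    return true;
  }
  return false;
}

int main() {
  // Two workers; worker 1 starts with preloaded regions, worker 0 with none.
  std::vector<std::deque<RegionIndex>> queues(2);
  queues[1] = {10, 11, 12, 13};

  for (int worker = 0; worker < 2; worker++) {
    drain_region_stack(worker, queues[worker]);  // drain own work first
    RegionIndex r;
    while (steal(queues, worker, r)) {           // then steal until nothing is left
      fill_and_update_region(worker, r);
      drain_region_stack(worker, queues[worker]);
    }
    // A real ParallelTaskTerminator would rendezvous here before exiting.
  }
  return 0;
}
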
@@ -249,11 +249,10 @@ void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {

UpdateDensePrefixTask::UpdateDensePrefixTask(
PSParallelCompact::SpaceId space_id,
-size_t chunk_index_start,
-size_t chunk_index_end) :
-_space_id(space_id), _chunk_index_start(chunk_index_start),
-_chunk_index_end(chunk_index_end)
-{}
+size_t region_index_start,
+size_t region_index_end) :
+_space_id(space_id), _region_index_start(region_index_start),
+_region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

@@ -265,8 +264,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
_space_id,
-_chunk_index_start,
-_chunk_index_end);
+_region_index_start,
+_region_index_end);
}

void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
@@ -278,6 +277,6 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);

-// Process any chunks already in the compaction managers stacks.
-cm->drain_chunk_stacks();
+// Process any regions already in the compaction managers stacks.
+cm->drain_region_stacks();
}

@@ -188,18 +188,18 @@ class StealMarkingTask : public GCTask {
};

//
-// StealChunkCompactionTask
+// StealRegionCompactionTask
//
// This task is used to distribute work to idle threads.
//

-class StealChunkCompactionTask : public GCTask {
+class StealRegionCompactionTask : public GCTask {
private:
ParallelTaskTerminator* const _terminator;
public:
-StealChunkCompactionTask(ParallelTaskTerminator* t);
+StealRegionCompactionTask(ParallelTaskTerminator* t);

-char* name() { return (char *)"steal-chunk-task"; }
+char* name() { return (char *)"steal-region-task"; }
ParallelTaskTerminator* terminator() { return _terminator; }

virtual void do_it(GCTaskManager* manager, uint which);
@@ -215,15 +215,15 @@ class StealChunkCompactionTask : public GCTask {
class UpdateDensePrefixTask : public GCTask {
private:
PSParallelCompact::SpaceId _space_id;
-size_t _chunk_index_start;
-size_t _chunk_index_end;
+size_t _region_index_start;
+size_t _region_index_end;

public:
char* name() { return (char *)"update-dense_prefix-task"; }

UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
-size_t chunk_index_start,
-size_t chunk_index_end);
+size_t region_index_start,
+size_t region_index_end);

virtual void do_it(GCTaskManager* manager, uint which);
};
@@ -231,17 +231,17 @@ class UpdateDensePrefixTask : public GCTask {
//
// DrainStacksCompactionTask
//
-// This task processes chunks that have been added to the stacks of each
+// This task processes regions that have been added to the stacks of each
// compaction manager.
//
// Trying to use one draining thread does not work because there are no
// guarantees about which task will be picked up by which thread. For example,
-// if thread A gets all the preloaded chunks, thread A may not get a draining
+// if thread A gets all the preloaded regions, thread A may not get a draining
// task (they may all be done by other threads).
//

class DrainStacksCompactionTask : public GCTask {
public:
-char* name() { return (char *)"drain-chunk-task"; }
+char* name() { return (char *)"drain-region-task"; }
virtual void do_it(GCTaskManager* manager, uint which);
};

@@ -30,7 +30,7 @@ ParCompactionManager** ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
-ChunkTaskQueueSet* ParCompactionManager::_chunk_array = NULL;
+RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;

ParCompactionManager::ParCompactionManager() :
_action(CopyAndUpdate) {
@@ -46,13 +46,13 @@ ParCompactionManager::ParCompactionManager() :

// We want the overflow stack to be permanent
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-#ifdef USE_ChunkTaskQueueWithOverflow
-chunk_stack()->initialize();
+#ifdef USE_RegionTaskQueueWithOverflow
+region_stack()->initialize();
#else
-chunk_stack()->initialize();
+region_stack()->initialize();

// We want the overflow stack to be permanent
-_chunk_overflow_stack =
+_region_overflow_stack =
new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

@@ -86,18 +86,18 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {

_stack_array = new OopTaskQueueSet(parallel_gc_threads);
guarantee(_stack_array != NULL, "Count not initialize promotion manager");
-_chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
-guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
+_region_array = new RegionTaskQueueSet(parallel_gc_threads);
+guarantee(_region_array != NULL, "Count not initialize promotion manager");

// Create and register the ParCompactionManager(s) for the worker threads.
for(uint i=0; i<parallel_gc_threads; i++) {
_manager_array[i] = new ParCompactionManager();
guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-#ifdef USE_ChunkTaskQueueWithOverflow
-chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
+#ifdef USE_RegionTaskQueueWithOverflow
+region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else
-chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
+region_array()->register_queue(i, _manager_array[i]->region_stack());
#endif
}

@@ -153,31 +153,31 @@ oop ParCompactionManager::retrieve_for_scanning() {
return NULL;
}

-// Save chunk on a stack
-void ParCompactionManager::save_for_processing(size_t chunk_index) {
+// Save region on a stack
+void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
const ParallelCompactData& sd = PSParallelCompact::summary_data();
-ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
-assert(chunk_ptr->claimed(), "must be claimed");
-assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
+ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
+assert(region_ptr->claimed(), "must be claimed");
+assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
-chunk_stack_push(chunk_index);
+region_stack_push(region_index);
}

-void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
+void ParCompactionManager::region_stack_push(size_t region_index) {

-#ifdef USE_ChunkTaskQueueWithOverflow
-chunk_stack()->save(chunk_index);
+#ifdef USE_RegionTaskQueueWithOverflow
+region_stack()->save(region_index);
#else
-if(!chunk_stack()->push(chunk_index)) {
-chunk_overflow_stack()->push(chunk_index);
+if(!region_stack()->push(region_index)) {
+region_overflow_stack()->push(region_index);
}
#endif
}

-bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
-#ifdef USE_ChunkTaskQueueWithOverflow
-return chunk_stack()->retrieve(chunk_index);
+bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
+#ifdef USE_RegionTaskQueueWithOverflow
+return region_stack()->retrieve(region_index);
#else
// Should not be used in the parallel case
ShouldNotReachHere();
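region_stack_push() above pushes onto a fixed-capacity task queue and, because the operation must not fail, falls back to a growable overflow stack when the queue is full; draining later empties the overflow side first. A small stand-alone sketch of that bounded-queue-plus-overflow pattern using standard containers in place of RegionTaskQueue and GrowableArray:

#include <cassert>
#include <cstdio>
#include <deque>
#include <vector>

// Hedged sketch of the region_stack / region_overflow_stack pairing: a
// bounded queue that peers could steal from, plus an unbounded private
// overflow stack that absorbs pushes when the queue is full.
class RegionStack {
 private:
  static const size_t queue_capacity = 4;      // illustrative bound
  std::deque<size_t>  _queue;                  // stands in for RegionTaskQueue
  std::vector<size_t> _overflow;               // stands in for GrowableArray<size_t>
 public:
  void push(size_t region_index) {
    if (_queue.size() < queue_capacity) {
      _queue.push_back(region_index);          // normal case: bounded queue
    } else {
      _overflow.push_back(region_index);       // full: must not fail, so overflow
    }
  }
  bool pop(size_t& region_index) {
    if (!_overflow.empty()) {                  // drain overflow first, keeping the
      region_index = _overflow.back();         // stealable queue populated for peers
      _overflow.pop_back();
      return true;
    }
    if (!_queue.empty()) {
      region_index = _queue.back();
      _queue.pop_back();
      return true;
    }
    return false;
  }
};

int main() {
  RegionStack rs;
  for (size_t i = 0; i < 6; i++) rs.push(i);   // 4 go to the queue, 2 overflow
  size_t r;
  size_t drained = 0;
  while (rs.pop(r)) drained++;
  assert(drained == 6);
  printf("drained %zu regions\n", drained);
  return 0;
}
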
@@ -230,14 +230,14 @@ void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
assert(overflow_stack()->length() == 0, "Sanity");
}

-void ParCompactionManager::drain_chunk_overflow_stack() {
-size_t chunk_index = (size_t) -1;
-while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+void ParCompactionManager::drain_region_overflow_stack() {
+size_t region_index = (size_t) -1;
+while(region_stack()->retrieve_from_overflow(region_index)) {
+PSParallelCompact::fill_and_update_region(this, region_index);
}
}

-void ParCompactionManager::drain_chunk_stacks() {
+void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -249,42 +249,42 @@ void ParCompactionManager::drain_chunk_stacks() {
#if 1 // def DO_PARALLEL - the serial code hasn't been updated
do {

-#ifdef USE_ChunkTaskQueueWithOverflow
+#ifdef USE_RegionTaskQueueWithOverflow
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
-size_t chunk_index = (size_t) -1;
-while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+size_t region_index = (size_t) -1;
+while(region_stack()->retrieve_from_overflow(region_index)) {
+PSParallelCompact::fill_and_update_region(this, region_index);
}

-while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
-PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+PSParallelCompact::fill_and_update_region(this, region_index);
}
-} while (!chunk_stack()->is_empty());
+} while (!region_stack()->is_empty());
#else
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
-while(!chunk_overflow_stack()->is_empty()) {
-size_t chunk_index = chunk_overflow_stack()->pop();
-PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+while(!region_overflow_stack()->is_empty()) {
+size_t region_index = region_overflow_stack()->pop();
+PSParallelCompact::fill_and_update_region(this, region_index);
}

-size_t chunk_index = -1;
+size_t region_index = -1;
// obj is a reference!!!
-while (chunk_stack()->pop_local(chunk_index)) {
+while (region_stack()->pop_local(region_index)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
-PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+PSParallelCompact::fill_and_update_region(this, region_index);
}
-} while((chunk_stack()->size() != 0) ||
-(chunk_overflow_stack()->length() != 0));
+} while((region_stack()->size() != 0) ||
+(region_overflow_stack()->length() != 0));
#endif

-#ifdef USE_ChunkTaskQueueWithOverflow
-assert(chunk_stack()->is_empty(), "Sanity");
+#ifdef USE_RegionTaskQueueWithOverflow
+assert(region_stack()->is_empty(), "Sanity");
#else
-assert(chunk_stack()->size() == 0, "Sanity");
-assert(chunk_overflow_stack()->length() == 0, "Sanity");
+assert(region_stack()->size() == 0, "Sanity");
+assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
oop obj;

@@ -52,7 +52,7 @@ class ParCompactionManager : public CHeapObj {
friend class ParallelTaskTerminator;
friend class ParMarkBitMap;
friend class PSParallelCompact;
-friend class StealChunkCompactionTask;
+friend class StealRegionCompactionTask;
friend class UpdateAndFillClosure;
friend class RefProcTaskExecutor;

@@ -72,27 +72,27 @@ class ParCompactionManager : public CHeapObj {
// ------------------------ End don't putback if not needed

private:
static ParCompactionManager** _manager_array;
static OopTaskQueueSet* _stack_array;
static ObjectStartArray* _start_array;
-static ChunkTaskQueueSet* _chunk_array;
+static RegionTaskQueueSet* _region_array;
static PSOldGen* _old_gen;

OopTaskQueue _marking_stack;
GrowableArray<oop>* _overflow_stack;
// Is there a way to reuse the _marking_stack for the
-// saving empty chunks? For now just create a different
+// saving empty regions? For now just create a different
// type of TaskQueue.

-#ifdef USE_ChunkTaskQueueWithOverflow
-ChunkTaskQueueWithOverflow _chunk_stack;
+#ifdef USE_RegionTaskQueueWithOverflow
+RegionTaskQueueWithOverflow _region_stack;
#else
-ChunkTaskQueue _chunk_stack;
-GrowableArray<size_t>* _chunk_overflow_stack;
+RegionTaskQueue _region_stack;
+GrowableArray<size_t>* _region_overflow_stack;
#endif

#if 1 // does this happen enough to need a per thread stack?
GrowableArray<Klass*>* _revisit_klass_stack;
#endif
static ParMarkBitMap* _mark_bitmap;

@@ -100,21 +100,22 @@ class ParCompactionManager : public CHeapObj {

static PSOldGen* old_gen() { return _old_gen; }
static ObjectStartArray* start_array() { return _start_array; }
static OopTaskQueueSet* stack_array() { return _stack_array; }

static void initialize(ParMarkBitMap* mbm);

protected:
// Array of tasks. Needed by the ParallelTaskTerminator.
-static ChunkTaskQueueSet* chunk_array() { return _chunk_array; }
+static RegionTaskQueueSet* region_array() { return _region_array; }
OopTaskQueue* marking_stack() { return &_marking_stack; }
GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
-#ifdef USE_ChunkTaskQueueWithOverflow
-ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; }
+#ifdef USE_RegionTaskQueueWithOverflow
+RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; }
#else
-ChunkTaskQueue* chunk_stack() { return &_chunk_stack; }
-GrowableArray<size_t>* chunk_overflow_stack() { return _chunk_overflow_stack; }
+RegionTaskQueue* region_stack() { return &_region_stack; }
+GrowableArray<size_t>* region_overflow_stack() {
+return _region_overflow_stack;
+}
#endif

// Pushes onto the marking stack. If the marking stack is full,
@@ -123,9 +124,9 @@ class ParCompactionManager : public CHeapObj {
// Do not implement an equivalent stack_pop. Deal with the
// marking stack and overflow stack directly.

-// Pushes onto the chunk stack. If the chunk stack is full,
-// pushes onto the chunk overflow stack.
-void chunk_stack_push(size_t chunk_index);
+// Pushes onto the region stack. If the region stack is full,
+// pushes onto the region overflow stack.
+void region_stack_push(size_t region_index);
public:

Action action() { return _action; }
@@ -160,10 +161,10 @@ class ParCompactionManager : public CHeapObj {
// Get a oop for scanning. If returns null, no oop were found.
oop retrieve_for_scanning();

-// Save chunk for later processing. Must not fail.
-void save_for_processing(size_t chunk_index);
-// Get a chunk for processing. If returns null, no chunk were found.
-bool retrieve_for_processing(size_t& chunk_index);
+// Save region for later processing. Must not fail.
+void save_for_processing(size_t region_index);
+// Get a region for processing. If returns null, no region were found.
+bool retrieve_for_processing(size_t& region_index);

// Access function for compaction managers
static ParCompactionManager* gc_thread_compaction_manager(int index);
@@ -172,18 +173,18 @@ class ParCompactionManager : public CHeapObj {
return stack_array()->steal(queue_num, seed, t);
}

-static bool steal(int queue_num, int* seed, ChunkTask& t) {
-return chunk_array()->steal(queue_num, seed, t);
+static bool steal(int queue_num, int* seed, RegionTask& t) {
+return region_array()->steal(queue_num, seed, t);
}

// Process tasks remaining on any stack
void drain_marking_stacks(OopClosure *blk);

// Process tasks remaining on any stack
-void drain_chunk_stacks();
+void drain_region_stacks();

// Process tasks remaining on any stack
-void drain_chunk_overflow_stack();
+void drain_region_overflow_stack();

// Debugging support
#ifdef ASSERT

@ -35,9 +35,7 @@ void PSMarkSweep::initialize() {
   _ref_processor = new ReferenceProcessor(mr,
                                           true,    // atomic_discovery
                                           false);  // mt_discovery
-  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-    _counters = new CollectorCounters("PSMarkSweep", 1);
-  }
+  _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
 // This method contains all heap specific policy for invoking mark sweep.
@ -518,9 +516,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   follow_stack();
 
   // Process reference objects found during marking
-
-  // Skipping the reference processing for VerifyParallelOldWithMarkSweep
-  // affects the marking (makes it different).
   {
     ReferencePolicy *soft_ref_policy;
     if (clear_all_softrefs) {
@ -152,20 +152,15 @@ void PSMarkSweepDecorator::precompact() {
         oop(q)->forward_to(oop(compact_top));
         assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
       } else {
-        // Don't clear the mark since it's confuses parallel old
-        // verification.
-        if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-          // if the object isn't moving we can just set the mark to the default
-          // mark and handle it specially later on.
-          oop(q)->init_mark();
-        }
+        // if the object isn't moving we can just set the mark to the default
+        // mark and handle it specially later on.
+        oop(q)->init_mark();
         assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
       }
 
       // Update object start array
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        if (start_array)
-          start_array->allocate_block(compact_top);
+      if (start_array) {
+        start_array->allocate_block(compact_top);
       }
 
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
@ -219,19 +214,14 @@ void PSMarkSweepDecorator::precompact() {
         assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
       } else {
         // if the object isn't moving we can just set the mark to the default
-        // Don't clear the mark since it's confuses parallel old
-        // verification.
-        if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-          // mark and handle it specially later on.
-          oop(q)->init_mark();
-        }
+        // mark and handle it specially later on.
+        oop(q)->init_mark();
         assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
       }
 
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        // Update object start array
-        if (start_array)
-          start_array->allocate_block(compact_top);
+      // Update object start array
+      if (start_array) {
+        start_array->allocate_block(compact_top);
       }
 
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
@ -152,9 +152,7 @@ void PSOldGen::precompact() {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   start_array()->reset();
-  debug_only(})
 
   object_mark_sweep()->precompact();
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -123,8 +123,6 @@ void PSPermGen::move_and_update(ParCompactionManager* cm) {
 
 void PSPermGen::precompact() {
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   _start_array.reset();
-  debug_only(})
   object_mark_sweep()->precompact();
 }
@ -50,7 +50,8 @@ class ImmutableSpace: public CHeapObj {
   size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }
 
   // Size computations.  Sizes are in heapwords.
   size_t capacity_in_words() const { return pointer_delta(end(), bottom()); }
+  virtual size_t capacity_in_words(Thread*) const { return capacity_in_words(); }
 
   // Iteration.
   virtual void oop_iterate(OopClosure* cl);
@ -23,13 +23,6 @@
  */
 
 inline void MarkSweep::mark_object(oop obj) {
-#ifndef SERIALGC
-  if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
-    assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
-           "Should be marked in the marking bitmap");
-  }
-#endif // SERIALGC
-
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark.  We'll restore it at the end of markSweep.
   markOop mark = obj->mark();
@ -181,6 +181,25 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
   return lgrp_spaces()->at(i)->space()->free_in_bytes();
 }
 
+
+size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
+  guarantee(thr != NULL, "No thread");
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_words() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+  if (i == -1) {
+    return 0;
+  }
+  return lgrp_spaces()->at(i)->space()->capacity_in_words();
+}
+
 // Check if the NUMA topology has changed. Add and remove spaces if needed.
 // The update can be forced by setting the force parameter equal to true.
 bool MutableNUMASpace::update_layout(bool force) {
@ -722,7 +741,8 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
     i = os::random() % lgrp_spaces()->length();
   }
 
-  MutableSpace *s = lgrp_spaces()->at(i)->space();
+  LGRPSpace* ls = lgrp_spaces()->at(i);
+  MutableSpace *s = ls->space();
   HeapWord *p = s->allocate(size);
 
   if (p != NULL) {
@ -743,6 +763,9 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
       *(int*)i = 0;
     }
   }
+  if (p == NULL) {
+    ls->set_allocation_failed();
+  }
   return p;
 }
 
@ -761,7 +784,8 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
   if (i == -1) {
     i = os::random() % lgrp_spaces()->length();
   }
-  MutableSpace *s = lgrp_spaces()->at(i)->space();
+  LGRPSpace *ls = lgrp_spaces()->at(i);
+  MutableSpace *s = ls->space();
   HeapWord *p = s->cas_allocate(size);
   if (p != NULL) {
     size_t remainder = pointer_delta(s->end(), p + size);
@ -790,6 +814,9 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
       *(int*)i = 0;
     }
   }
+  if (p == NULL) {
+    ls->set_allocation_failed();
+  }
   return p;
 }
 
@ -60,6 +60,7 @@ class MutableNUMASpace : public MutableSpace {
     MutableSpace* _space;
     MemRegion _invalid_region;
     AdaptiveWeightedAverage *_alloc_rate;
+    bool _allocation_failed;
 
     struct SpaceStats {
       size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
@ -81,7 +82,7 @@ class MutableNUMASpace : public MutableSpace {
     char* last_page_scanned() { return _last_page_scanned; }
     void set_last_page_scanned(char* p) { _last_page_scanned = p; }
    public:
-    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) {
+    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
       _space = new MutableSpace();
       _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
     }
@ -103,8 +104,21 @@ class MutableNUMASpace : public MutableSpace {
       return *(int*)lgrp_id_value == p->lgrp_id();
     }
 
+    // Report a failed allocation.
+    void set_allocation_failed() { _allocation_failed = true; }
+
     void sample() {
-      alloc_rate()->sample(space()->used_in_bytes());
+      // If there was a failed allocation make allocation rate equal
+      // to the size of the whole chunk. This ensures the progress of
+      // the adaptation process.
+      size_t alloc_rate_sample;
+      if (_allocation_failed) {
+        alloc_rate_sample = space()->capacity_in_bytes();
+        _allocation_failed = false;
+      } else {
+        alloc_rate_sample = space()->used_in_bytes();
+      }
+      alloc_rate()->sample(alloc_rate_sample);
     }
 
     MemRegion invalid_region() const { return _invalid_region; }
@ -190,6 +204,9 @@ class MutableNUMASpace : public MutableSpace {
   virtual void ensure_parsability();
   virtual size_t used_in_words() const;
   virtual size_t free_in_words() const;
+
+  using MutableSpace::capacity_in_words;
+  virtual size_t capacity_in_words(Thread* thr) const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
@ -586,6 +586,7 @@ locknode.hpp subnode.hpp
 loopTransform.cpp                       addnode.hpp
 loopTransform.cpp                       allocation.inline.hpp
 loopTransform.cpp                       connode.hpp
+loopTransform.cpp                       compileLog.hpp
 loopTransform.cpp                       divnode.hpp
 loopTransform.cpp                       loopnode.hpp
 loopTransform.cpp                       mulnode.hpp
@ -601,6 +602,7 @@ loopnode.cpp addnode.hpp
 loopnode.cpp                            allocation.inline.hpp
 loopnode.cpp                            callnode.hpp
 loopnode.cpp                            ciMethodData.hpp
+loopnode.cpp                            compileLog.hpp
 loopnode.cpp                            connode.hpp
 loopnode.cpp                            divnode.hpp
 loopnode.cpp                            loopnode.hpp
@ -25,19 +25,6 @@
 #include "incls/_precompiled.incl"
 #include "incls/_bytecodeInfo.cpp.incl"
 
-// These variables are declared in parse1.cpp
-extern int explicit_null_checks_inserted;
-extern int explicit_null_checks_elided;
-extern int explicit_null_checks_inserted_old;
-extern int explicit_null_checks_elided_old;
-extern int nodes_created_old;
-extern int nodes_created;
-extern int methods_parsed_old;
-extern int methods_parsed;
-extern int methods_seen;
-extern int methods_seen_old;
-
-
 //=============================================================================
 //------------------------------InlineTree-------------------------------------
 InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio )
@ -517,27 +504,3 @@ InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms,
   }
   return iltp;
 }
-
-// ----------------------------------------------------------------------------
-#ifndef PRODUCT
-
-static void per_method_stats() {
-  // Compute difference between this method's cumulative totals and old totals
-  int explicit_null_checks_cur = explicit_null_checks_inserted - explicit_null_checks_inserted_old;
-  int elided_null_checks_cur = explicit_null_checks_elided - explicit_null_checks_elided_old;
-
-  // Print differences
-  if( explicit_null_checks_cur )
-    tty->print_cr("XXX Explicit NULL checks inserted: %d", explicit_null_checks_cur);
-  if( elided_null_checks_cur )
-    tty->print_cr("XXX Explicit NULL checks removed at parse time: %d", elided_null_checks_cur);
-
-  // Store the current cumulative totals
-  nodes_created_old = nodes_created;
-  methods_parsed_old = methods_parsed;
-  methods_seen_old = methods_seen;
-  explicit_null_checks_inserted_old = explicit_null_checks_inserted;
-  explicit_null_checks_elided_old = explicit_null_checks_elided;
-}
-
-#endif
@ -1034,6 +1034,39 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
 //=============================================================================
 uint AllocateArrayNode::size_of() const { return sizeof(*this); }
 
+// Retrieve the length from the AllocateArrayNode. Narrow the type with a
+// CastII, if appropriate.  If we are not allowed to create new nodes, and
+// a CastII is appropriate, return NULL.
+Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
+  Node *length = in(AllocateNode::ALength);
+  assert(length != NULL, "length is not null");
+
+  const TypeInt* length_type = phase->find_int_type(length);
+  const TypeAryPtr* ary_type = oop_type->isa_aryptr();
+
+  if (ary_type != NULL && length_type != NULL) {
+    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
+    if (narrow_length_type != length_type) {
+      // Assert one of:
+      //   - the narrow_length is 0
+      //   - the narrow_length is not wider than length
+      assert(narrow_length_type == TypeInt::ZERO ||
+             (narrow_length_type->_hi <= length_type->_hi &&
+              narrow_length_type->_lo >= length_type->_lo),
+             "narrow type must be narrower than length type");
+
+      // Return NULL if new nodes are not allowed
+      if (!allow_new_nodes) return NULL;
+      // Create a cast which is control dependent on the initialization to
+      // propagate the fact that the array length must be positive.
+      length = new (phase->C, 2) CastIINode(length, narrow_length_type);
+      length->set_req(0, initialization()->proj_out(0));
+    }
+  }
+
+  return length;
+}
+
 //=============================================================================
 uint LockNode::size_of() const { return sizeof(*this); }
 
@ -755,6 +755,15 @@ public:
   virtual int Opcode() const;
   virtual uint size_of() const; // Size is bigger
 
+  // Dig the length operand out of a array allocation site.
+  Node* Ideal_length() {
+    return in(AllocateNode::ALength);
+  }
+
+  // Dig the length operand out of a array allocation site and narrow the
+  // type with a CastII, if necesssary
+  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
+
   // Pattern-match a possible usage of AllocateArrayNode.
   // Return null if no allocation is recognized.
   static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
@ -762,12 +771,6 @@ public:
     return (allo == NULL || !allo->is_AllocateArray())
            ? NULL : allo->as_AllocateArray();
   }
-
-  // Dig the length operand out of a (possible) array allocation site.
-  static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
-    AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
-    return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
-  }
 };
 
 //------------------------------AbstractLockNode-----------------------------------
@ -1665,7 +1665,11 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
         // compress paths and change unreachable cycles to TOP
         // If not, we can update the input infinitely along a MergeMem cycle
         // Equivalent code is in MemNode::Ideal_common
         Node *m  = phase->transform(n);
+        if (outcnt() == 0) {  // Above transform() may kill us!
+          progress = phase->C->top();
+          break;
+        }
         // If tranformed to a MergeMem, get the desired slice
         // Otherwise the returned node represents memory for every slice
         Node *new_mem = (m->is_MergeMem()) ?
@ -1765,6 +1769,51 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     }
   }
 
+#ifdef _LP64
+  // Push DecodeN down through phi.
+  // The rest of phi graph will transform by split EncodeP node though phis up.
+  if (UseCompressedOops && can_reshape && progress == NULL) {
+    bool may_push = true;
+    bool has_decodeN = false;
+    Node* in_decodeN = NULL;
+    for (uint i=1; i<req(); ++i) {// For all paths in
+      Node *ii = in(i);
+      if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
+        has_decodeN = true;
+        in_decodeN = ii->in(1);
+      } else if (!ii->is_Phi()) {
+        may_push = false;
+      }
+    }
+
+    if (has_decodeN && may_push) {
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      // Note: in_decodeN is used only to define the type of new phi here.
+      PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
+      uint orig_cnt = req();
+      for (uint i=1; i<req(); ++i) {// For all paths in
+        Node *ii = in(i);
+        Node* new_ii = NULL;
+        if (ii->is_DecodeN()) {
+          assert(ii->bottom_type() == bottom_type(), "sanity");
+          new_ii = ii->in(1);
+        } else {
+          assert(ii->is_Phi(), "sanity");
+          if (ii->as_Phi() == this) {
+            new_ii = new_phi;
+          } else {
+            new_ii = new (phase->C, 2) EncodePNode(ii, in_decodeN->bottom_type());
+            igvn->register_new_node_with_optimizer(new_ii);
+          }
+        }
+        new_phi->set_req(i, new_ii);
+      }
+      igvn->register_new_node_with_optimizer(new_phi, this);
+      progress = new (phase->C, 2) DecodeNNode(new_phi, bottom_type());
+    }
+  }
+#endif
+
   return progress;              // Return any progress
 }
 
@ -467,6 +467,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     }
   }
   set_print_assembly(print_opto_assembly);
+  set_parsed_irreducible_loop(false);
 #endif
 
   if (ProfileTraps) {
@ -550,6 +551,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     rethrow_exceptions(kit.transfer_exceptions_into_jvms());
   }
 
+  print_method("Before RemoveUseless");
+
   // Remove clutter produced by parsing.
   if (!failing()) {
     ResourceMark rm;
@ -615,8 +618,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   if (failing())  return;
   NOT_PRODUCT( verify_graph_edges(); )
 
-  print_method("Before Matching");
-
 #ifndef PRODUCT
   if (PrintIdeal) {
     ttyLocker ttyl;  // keep the following output all in one block
@ -720,6 +721,7 @@ Compile::Compile( ciEnv* ci_env,
   TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
   TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
   set_print_assembly(PrintFrameConverterAssembly);
+  set_parsed_irreducible_loop(false);
 #endif
   CompileWrapper cw(this);
   Init(/*AliasLevel=*/ 0);
@ -2073,6 +2075,44 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   }
 
 #ifdef _LP64
+  case Op_CastPP:
+    if (n->in(1)->is_DecodeN() && UseImplicitNullCheckForNarrowOop) {
+      Compile* C = Compile::current();
+      Node* in1 = n->in(1);
+      const Type* t = n->bottom_type();
+      Node* new_in1 = in1->clone();
+      new_in1->as_DecodeN()->set_type(t);
+
+      if (!Matcher::clone_shift_expressions) {
+        //
+        // x86, ARM and friends can handle 2 adds in addressing mode
+        // and Matcher can fold a DecodeN node into address by using
+        // a narrow oop directly and do implicit NULL check in address:
+        //
+        // [R12 + narrow_oop_reg<<3 + offset]
+        // NullCheck narrow_oop_reg
+        //
+        // On other platforms (Sparc) we have to keep new DecodeN node and
+        // use it to do implicit NULL check in address:
+        //
+        // decode_not_null narrow_oop_reg, base_reg
+        // [base_reg + offset]
+        // NullCheck base_reg
+        //
+        // Pin the new DecodeN node to non-null path on these patforms (Sparc)
+        // to keep the information to which NULL check the new DecodeN node
+        // corresponds to use it as value in implicit_null_check().
+        //
+        new_in1->set_req(0, n->in(0));
+      }
+
+      n->subsume_by(new_in1);
+      if (in1->outcnt() == 0) {
+        in1->disconnect_inputs(NULL);
+      }
+    }
+    break;
+
   case Op_CmpP:
     // Do this transformation here to preserve CmpPNode::sub() and
     // other TypePtr related Ideal optimizations (for example, ptr nullness).
@ -2092,24 +2132,44 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
     } else if (in2->Opcode() == Op_ConP) {
       const Type* t = in2->bottom_type();
       if (t == TypePtr::NULL_PTR && UseImplicitNullCheckForNarrowOop) {
-        if (Matcher::clone_shift_expressions) {
-          // x86, ARM and friends can handle 2 adds in addressing mode.
-          // Decode a narrow oop and do implicit NULL check in address
-          // [R12 + narrow_oop_reg<<3 + offset]
-          new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
-        } else {
-          // Don't replace CmpP(o ,null) if 'o' is used in AddP
-          // to generate implicit NULL check on Sparc where
-          // narrow oops can't be used in address.
-          uint i = 0;
-          for (; i < in1->outcnt(); i++) {
-            if (in1->raw_out(i)->is_AddP())
-              break;
-          }
-          if (i >= in1->outcnt()) {
-            new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
-          }
-        }
+        new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
+        //
+        // This transformation together with CastPP transformation above
+        // will generated code for implicit NULL checks for compressed oops.
+        //
+        // The original code after Optimize()
+        //
+        //    LoadN memory, narrow_oop_reg
+        //    decode narrow_oop_reg, base_reg
+        //    CmpP base_reg, NULL
+        //    CastPP base_reg // NotNull
+        //    Load [base_reg + offset], val_reg
+        //
+        // after these transformations will be
+        //
+        //    LoadN memory, narrow_oop_reg
+        //    CmpN narrow_oop_reg, NULL
+        //    decode_not_null narrow_oop_reg, base_reg
+        //    Load [base_reg + offset], val_reg
+        //
+        // and the uncommon path (== NULL) will use narrow_oop_reg directly
+        // since narrow oops can be used in debug info now (see the code in
+        // final_graph_reshaping_walk()).
+        //
+        // At the end the code will be matched to
+        // on x86:
+        //
+        //    Load_narrow_oop memory, narrow_oop_reg
+        //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
+        //    NullCheck narrow_oop_reg
+        //
+        // and on sparc:
+        //
+        //    Load_narrow_oop memory, narrow_oop_reg
+        //    decode_not_null narrow_oop_reg, base_reg
+        //    Load [base_reg + offset], val_reg
+        //    NullCheck base_reg
+        //
       } else if (t->isa_oopptr()) {
         new_in2 = ConNode::make(C, t->make_narrowoop());
       }
@ -2126,6 +2186,49 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
       }
     }
     break;
+
+  case Op_DecodeN:
+    assert(!n->in(1)->is_EncodeP(), "should be optimized out");
+    break;
+
+  case Op_EncodeP: {
+    Node* in1 = n->in(1);
+    if (in1->is_DecodeN()) {
+      n->subsume_by(in1->in(1));
+    } else if (in1->Opcode() == Op_ConP) {
+      Compile* C = Compile::current();
+      const Type* t = in1->bottom_type();
+      if (t == TypePtr::NULL_PTR) {
+        n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR));
+      } else if (t->isa_oopptr()) {
+        n->subsume_by(ConNode::make(C, t->make_narrowoop()));
+      }
+    }
+    if (in1->outcnt() == 0) {
+      in1->disconnect_inputs(NULL);
+    }
+    break;
+  }
+
+  case Op_Phi:
+    if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
+      // The EncodeP optimization may create Phi with the same edges
+      // for all paths. It is not handled well by Register Allocator.
+      Node* unique_in = n->in(1);
+      assert(unique_in != NULL, "");
+      uint cnt = n->req();
+      for (uint i = 2; i < cnt; i++) {
+        Node* m = n->in(i);
+        assert(m != NULL, "");
+        if (unique_in != m)
+          unique_in = NULL;
+      }
+      if (unique_in != NULL) {
+        n->subsume_by(unique_in);
+      }
+    }
+    break;
+
 #endif
 
   case Op_ModI:
@ -160,6 +160,7 @@ class Compile : public Phase {
   bool _print_assembly; // True if we should dump assembly code for this compilation
 #ifndef PRODUCT
   bool _trace_opto_output;
+  bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif
 
   // Compilation environment.
@ -319,6 +320,8 @@ class Compile : public Phase {
   }
 #ifndef PRODUCT
   bool trace_opto_output() const { return _trace_opto_output; }
+  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
+  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
 #endif
 
   void begin_method() {
@ -433,8 +433,8 @@ Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
 // If not converting int->oop, throw away cast after constant propagation
 Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
   const Type *t = ccp->type(in(1));
-  if (!t->isa_oop_ptr()) {
-    return NULL; // do not transform raw pointers
+  if (!t->isa_oop_ptr() || in(1)->is_DecodeN()) {
+    return NULL; // do not transform raw pointers or narrow oops
   }
   return ConstraintCastNode::Ideal_DU_postCCP(ccp);
 }
@ -795,7 +795,7 @@ ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* k
 
   ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
   if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
-      (ikl == actual_receiver || ikl->is_subclass_of(actual_receiver))) {
+      (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
     // ikl is a same or better type than the original actual_receiver,
     // e.g. static receiver from bytecodes.
     actual_receiver = ikl;
@ -587,7 +587,7 @@ PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
 #ifdef ASSERT
   _bci    = kit->bci();
   Parse* parser = kit->is_Parse();
-  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->pre_order();
+  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
   _block  = block;
 #endif
 }
@ -596,7 +596,7 @@ PreserveJVMState::~PreserveJVMState() {
 #ifdef ASSERT
   assert(kit->bci() == _bci, "bci must not shift");
   Parse* parser = kit->is_Parse();
-  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->pre_order();
+  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
   assert(block == _block, "block must not shift");
 #endif
   kit->set_map(_map);
@ -1049,10 +1049,19 @@ Node* GraphKit::load_object_klass(Node* obj) {
 //-------------------------load_array_length-----------------------------------
 Node* GraphKit::load_array_length(Node* array) {
   // Special-case a fresh allocation to avoid building nodes:
-  Node* alen = AllocateArrayNode::Ideal_length(array, &_gvn);
-  if (alen != NULL)  return alen;
-  Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
-  return _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
+  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
+  Node *alen;
+  if (alloc == NULL) {
+    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
+    alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
+  } else {
+    alen = alloc->Ideal_length();
+    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn);
+    if (ccast != alen) {
+      alen = _gvn.transform(ccast);
+    }
+  }
+  return alen;
 }
 
 //------------------------------do_null_check----------------------------------
@ -2847,20 +2856,18 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
   assert(just_allocated_object(control()) == javaoop, "just allocated");
 
 #ifdef ASSERT
-  { // Verify that the AllocateNode::Ideal_foo recognizers work:
-    Node* kn = alloc->in(AllocateNode::KlassNode);
-    Node* ln = alloc->in(AllocateNode::ALength);
-    assert(AllocateNode::Ideal_klass(rawoop, &_gvn) == kn,
-           "Ideal_klass works");
-    assert(AllocateNode::Ideal_klass(javaoop, &_gvn) == kn,
-           "Ideal_klass works");
+  { // Verify that the AllocateNode::Ideal_allocation recognizers work:
+    assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
+           "Ideal_allocation works");
+    assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
+           "Ideal_allocation works");
     if (alloc->is_AllocateArray()) {
-      assert(AllocateArrayNode::Ideal_length(rawoop, &_gvn) == ln,
-             "Ideal_length works");
-      assert(AllocateArrayNode::Ideal_length(javaoop, &_gvn) == ln,
-             "Ideal_length works");
+      assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
+             "Ideal_allocation works");
+      assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
+             "Ideal_allocation works");
     } else {
-      assert(ln->is_top(), "no length, please");
+      assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
     }
   }
 #endif //ASSERT
@ -3109,25 +3116,20 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
   // (This happens via a non-constant argument to inline_native_newArray.)
   // In any case, the value of klass_node provides the desired array type.
   const TypeInt* length_type = _gvn.find_int_type(length);
-  const TypeInt* narrow_length_type = NULL;
   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
   if (ary_type->isa_aryptr() && length_type != NULL) {
     // Try to get a better type than POS for the size
     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
-    narrow_length_type = ary_type->is_aryptr()->size();
-    if (narrow_length_type == length_type)
-      narrow_length_type = NULL;
   }
 
   Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
 
-  // Cast length on remaining path to be positive:
-  if (narrow_length_type != NULL) {
-    Node* ccast = new (C, 2) CastIINode(length, narrow_length_type);
-    ccast->set_req(0, control());
-    _gvn.set_type_bottom(ccast);
-    record_for_igvn(ccast);
-    if (map()->find_edge(length) >= 0) {
+  // Cast length on remaining path to be as narrow as possible
+  if (map()->find_edge(length) >= 0) {
+    Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
+    if (ccast != length) {
+      _gvn.set_type_bottom(ccast);
+      record_for_igvn(ccast);
       replace_in_map(length, ccast);
     }
   }
@ -485,8 +485,9 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // Liveout things are presumed live for the whole block.  We accumulate
     // 'area' accordingly.  If they get killed in the block, we'll subtract
     // the unused part of the block from the area.
-    double cost = b->_freq * double(last_inst-last_phi);
-    assert( cost >= 0, "negative spill cost" );
+    int inst_count = last_inst - last_phi;
+    double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
+    assert(!(cost < 0.0), "negative spill cost" );
     IndexSetIterator elements(&liveout);
     uint lidx;
     while ((lidx = elements.next()) != 0) {
@ -590,7 +591,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
       } else {                  // Else it is live
         // A DEF also ends 'area' partway through the block.
         lrgs(r)._area -= cost;
-        assert( lrgs(r)._area >= 0, "negative spill area" );
+        assert(!(lrgs(r)._area < 0.0), "negative spill area" );
 
         // Insure high score for immediate-use spill copies so they get a color
         if( n->is_SpillCopy()
@ -703,8 +704,9 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
 
       } // End of if normal register-allocated value
 
-      cost -= b->_freq;         // Area remaining in the block
-      if( cost < 0.0 ) cost = 0.0;  // Cost goes negative in the Phi area
+      // Area remaining in the block
+      inst_count--;
+      cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
 
       // Make all inputs live
       if( !n->is_Phi() ) {      // Phi function uses come from prior block
@ -751,7 +753,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
           assert( pressure[0] == count_int_pressure  (&liveout), "" );
           assert( pressure[1] == count_float_pressure(&liveout), "" );
         }
-        assert( lrg._area >= 0, "negative spill area" );
+        assert(!(lrg._area < 0.0), "negative spill area" );
       }
     }
   } // End of reverse pass over all instructions in block
@ -1012,6 +1012,8 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
     if (!has_ctrl(old))
       set_loop(nnn, loop);
   }
+
+  loop->record_for_igvn();
 }
 
 //------------------------------do_maximally_unroll----------------------------
@ -1279,7 +1279,7 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
   // Visit all children, looking for Phis
   for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
     Node *out = cl->out(i);
-    if (!out->is_Phi()) continue; // Looking for phis
+    if (!out->is_Phi() || out == phi) continue; // Looking for other phis
     PhiNode* phi2 = out->as_Phi();
     Node *incr2 = phi2->in( LoopNode::LoopBackControl );
     // Look for induction variables of the form:  X += constant
@ -1388,6 +1388,37 @@ void IdealLoopTree::dump( ) const {
 
 #endif
 
+static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) {
+  if (loop == root) {
+    if (loop->_child != NULL) {
+      log->begin_head("loop_tree");
+      log->end_head();
+      if( loop->_child ) log_loop_tree(root, loop->_child, log);
+      log->tail("loop_tree");
+      assert(loop->_next == NULL, "what?");
+    }
+  } else {
+    Node* head = loop->_head;
+    log->begin_head("loop");
+    log->print(" idx='%d' ", head->_idx);
+    if (loop->_irreducible) log->print("irreducible='1' ");
+    if (head->is_Loop()) {
+      if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' ");
+      if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' ");
+    }
+    if (head->is_CountedLoop()) {
+      CountedLoopNode* cl = head->as_CountedLoop();
+      if (cl->is_pre_loop())  log->print("pre_loop='%d' ",  cl->main_idx());
+      if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx);
+      if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx());
+    }
+    log->end_head();
+    if( loop->_child ) log_loop_tree(root, loop->_child, log);
+    log->tail("loop");
+    if( loop->_next  ) log_loop_tree(root, loop->_next, log);
+  }
+}
+
 //=============================================================================
 //------------------------------PhaseIdealLoop---------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
@ -1624,10 +1655,13 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
   // Cleanup any modified bits
   _igvn.optimize();
 
-  // Do not repeat loop optimizations if irreducible loops are present
-  // by claiming no-progress.
-  if( _has_irreducible_loops )
-    C->clear_major_progress();
+  // disable assert until issue with split_flow_path is resolved (6742111)
+  // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(),
+  //        "shouldn't introduce irreducible loops");
+
+  if (C->log() != NULL) {
+    log_loop_tree(_ltree_root, _ltree_root, C->log());
+  }
 }
 
 #ifndef PRODUCT
@ -2732,11 +2766,7 @@ void PhaseIdealLoop::dump( ) const {
 }
 
 void PhaseIdealLoop::dump( IdealLoopTree *loop, uint idx, Node_List &rpo_list ) const {
-
-  // Indent by loop nesting depth
-  for( uint x = 0; x < loop->_nest; x++ )
-    tty->print("  ");
-  tty->print_cr("---- Loop N%d-N%d ----", loop->_head->_idx,loop->_tail->_idx);
+  loop->dump_head();
 
   // Now scan for CFG nodes in the same loop
   for( uint j=idx; j > 0; j-- ) {
@ -192,6 +192,8 @@ public:
   int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; }
   void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; }
 
+  int main_idx() const { return _main_idx; }
+
 
   void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
   void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
@ -2667,6 +2667,10 @@ void PhaseIdealLoop::reorg_offsets( IdealLoopTree *loop ) {
   // Fix this by adjusting to use the post-increment trip counter.
   Node *phi = cl->phi();
   if( !phi ) return;            // Dead infinite loop
+
+  // Shape messed up, probably by iteration_split_impl
+  if (phi->in(LoopNode::LoopBackControl) != cl->incr()) return;
+
   bool progress = true;
   while (progress) {
     progress = false;
@ -273,7 +273,7 @@ void Matcher::match( ) {
   find_shared( C->root() );
   find_shared( C->top() );
 
-  C->print_method("Before Matching", 2);
+  C->print_method("Before Matching");
 
   // Swap out to old-space; emptying new-space
   Arena *old = C->node_arena()->move_contents(C->old_arena());
@ -840,7 +840,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
         _new2old_map.map(m->_idx, n);
 #endif
         if (m->in(0) != NULL) // m might be top
-          collect_null_checks(m);
+          collect_null_checks(m, n);
       } else {                  // Else just a regular 'ol guy
         m = n->clone();       // So just clone into new-space
 #ifdef ASSERT
@ -1478,12 +1478,19 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
         m = _mem_node;
         assert(m != NULL && m->is_Mem(), "expecting memory node");
       }
-      if (m->adr_type() != mach->adr_type()) {
+      const Type* mach_at = mach->adr_type();
+      // DecodeN node consumed by an address may have different type
+      // then its input. Don't compare types for such case.
+      if (m->adr_type() != mach_at && m->in(MemNode::Address)->is_AddP() &&
+          m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN()) {
+        mach_at = m->adr_type();
+      }
+      if (m->adr_type() != mach_at) {
         m->dump();
         tty->print_cr("mach:");
         mach->dump(1);
       }
-      assert(m->adr_type() == mach->adr_type(), "matcher should not change adr type");
+      assert(m->adr_type() == mach_at, "matcher should not change adr type");
     }
 #endif
   }
@ -1995,7 +2002,7 @@ void Matcher::dump_old2new_map() {
 // it.  Used by later implicit-null-check handling.  Actually collects
 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
 // value being tested.
-void Matcher::collect_null_checks( Node *proj ) {
+void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
   Node *iff = proj->in(0);
   if( iff->Opcode() == Op_If ) {
     // During matching If's have Bool & Cmp side-by-side
@ -2008,20 +2015,47 @@ void Matcher::collect_null_checks( Node *proj ) {
       if (ct == TypePtr::NULL_PTR ||
           (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
 
+        bool push_it = false;
         if( proj->Opcode() == Op_IfTrue ) {
           extern int all_null_checks_found;
           all_null_checks_found++;
           if( b->_test._test == BoolTest::ne ) {
-            _null_check_tests.push(proj);
-            _null_check_tests.push(cmp->in(1));
+            push_it = true;
           }
         } else {
           assert( proj->Opcode() == Op_IfFalse, "" );
           if( b->_test._test == BoolTest::eq ) {
-            _null_check_tests.push(proj);
-            _null_check_tests.push(cmp->in(1));
+            push_it = true;
           }
         }
+        if( push_it ) {
+          _null_check_tests.push(proj);
+          Node* val = cmp->in(1);
+#ifdef _LP64
+          if (UseCompressedOops && !Matcher::clone_shift_expressions &&
+              val->bottom_type()->isa_narrowoop()) {
+            //
+            // Look for DecodeN node which should be pinned to orig_proj.
+            // On platforms (Sparc) which can not handle 2 adds
+            // in addressing mode we have to keep a DecodeN node and
+            // use it to do implicit NULL check in address.
+            //
+            // DecodeN node was pinned to non-null path (orig_proj) during
+            // CastPP transformation in final_graph_reshaping_impl().
+            //
+            uint cnt = orig_proj->outcnt();
+            for (uint i = 0; i < orig_proj->outcnt(); i++) {
+              Node* d = orig_proj->raw_out(i);
+              if (d->is_DecodeN() && d->in(1) == val) {
+                val = d;
+                val->set_req(0, NULL); // Unpin now.
+                break;
+              }
+            }
+          }
+#endif
+          _null_check_tests.push(val);
+        }
       }
     }
   }
|
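For readers following the DecodeN handling above, here is a rough sketch of what a DecodeN node stands for, written as plain C++ rather than C2 IR. The heap base and shift below are made-up example values; the real encoding parameters are chosen by the VM at startup.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t heap_base = 0x0000000800000000ULL; // assumed example base
      const unsigned  shift     = 3;                     // 8-byte object alignment
      uint32_t narrow = 0x200;                           // example compressed reference

      // DecodeN: widen a 32-bit "narrow oop" into a full address.
      uintptr_t decoded = (narrow == 0) ? 0 : heap_base + ((uintptr_t)narrow << shift);
      std::printf("decoded oop = 0x%lx\n", (unsigned long)decoded);
      return 0;
    }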
@ -166,7 +166,7 @@ public:
   // List of IfFalse or IfTrue Nodes that indicate a taken null test.
   // List is valid in the post-matching space.
   Node_List _null_check_tests;
-  void collect_null_checks( Node *proj );
+  void collect_null_checks( Node *proj, Node *orig_proj );
   void validate_null_checks( );

   Matcher( Node_List &proj_list );
@ -1887,6 +1887,38 @@ const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
   return tap->size();
 }

+//-------------------------------Ideal---------------------------------------
+// Feed through the length in AllocateArray(...length...)._length.
+Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  Node* p = MemNode::Ideal_common(phase, can_reshape);
+  if (p)  return (p == NodeSentinel) ? NULL : p;
+
+  // Take apart the address into an oop and and offset.
+  // Return 'this' if we cannot.
+  Node*    adr    = in(MemNode::Address);
+  intptr_t offset = 0;
+  Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+  if (base == NULL)  return NULL;
+  const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
+  if (tary == NULL)  return NULL;
+
+  // We can fetch the length directly through an AllocateArrayNode.
+  // This works even if the length is not constant (clone or newArray).
+  if (offset == arrayOopDesc::length_offset_in_bytes()) {
+    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
+    if (alloc != NULL) {
+      Node* allocated_length = alloc->Ideal_length();
+      Node* len = alloc->make_ideal_length(tary, phase);
+      if (allocated_length != len) {
+        // New CastII improves on this.
+        return len;
+      }
+    }
+  }
+
+  return NULL;
+}
+
 //------------------------------Identity---------------------------------------
 // Feed through the length in AllocateArray(...length...)._length.
 Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
@ -1905,15 +1937,22 @@ Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
   // We can fetch the length directly through an AllocateArrayNode.
   // This works even if the length is not constant (clone or newArray).
   if (offset == arrayOopDesc::length_offset_in_bytes()) {
-    Node* allocated_length = AllocateArrayNode::Ideal_length(base, phase);
-    if (allocated_length != NULL) {
-      return allocated_length;
+    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
+    if (alloc != NULL) {
+      Node* allocated_length = alloc->Ideal_length();
+      // Do not allow make_ideal_length to allocate a CastII node.
+      Node* len = alloc->make_ideal_length(tary, phase, false);
+      if (allocated_length == len) {
+        // Return allocated_length only if it would not be improved by a CastII.
+        return allocated_length;
+      }
     }
   }

   return this;

 }

 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
@ -241,6 +241,7 @@ public:
   virtual int Opcode() const;
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual Node *Identity( PhaseTransform *phase );
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 };

 //------------------------------LoadLNode--------------------------------------
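A rough illustration of the "feed the length through the allocation" idea behind the LoadRangeNode changes above, in plain C++ rather than the compiler's IR. The struct and function names here are invented for the example only.

    #include <cstdio>
    #include <cstdlib>

    struct Array { int length; /* elements follow in the real layout */ };

    static Array* allocate_array(int len) {
      Array* a = static_cast<Array*>(std::malloc(sizeof(Array)));
      a->length = len;            // the allocation stores the length once
      return a;
    }

    int main() {
      int n = std::rand() % 100 + 1;   // length known only at run time
      Array* a = allocate_array(n);
      // A naive compiler reloads a->length here; the optimization lets it
      // reuse 'n' directly because 'a' provably came from allocate_array(n).
      std::printf("%d %d\n", a->length, n);
      std::free(a);
      return 0;
    }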
@ -167,9 +167,19 @@ class Parse : public GraphKit {

     int start() const                      { return flow()->start(); }
     int limit() const                      { return flow()->limit(); }
-    int pre_order() const                  { return flow()->pre_order(); }
+    int rpo() const                        { return flow()->rpo(); }
     int start_sp() const                   { return flow()->stack_size(); }

+    bool is_loop_head() const              { return flow()->is_loop_head(); }
+    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
+    bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
+    bool is_invariant_local(uint i) const  {
+      const JVMState* jvms = start_map()->jvms();
+      if (!jvms->is_loc(i)) return false;
+      return flow()->is_invariant_local(i - jvms->locoff());
+    }
+    bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }
+
     const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

     const Type* stack_type_at(int i) const;
@ -305,7 +315,7 @@ class Parse : public GraphKit {
   // entry_bci()       -- see osr_bci, etc.

   ciTypeFlow*   flow() const           { return _flow; }
-  // blocks()          -- see pre_order_at, start_block, etc.
+  // blocks()          -- see rpo_at, start_block, etc.
   int           block_count() const    { return _block_count; }

   GraphKit&     exits()                { return _exits; }
@ -330,12 +340,12 @@ class Parse : public GraphKit {
   // Must this parse be aborted?
   bool failing()                { return C->failing(); }

-  Block* pre_order_at(int po) {
-    assert(0 <= po && po < _block_count, "oob");
-    return &_blocks[po];
+  Block* rpo_at(int rpo) {
+    assert(0 <= rpo && rpo < _block_count, "oob");
+    return &_blocks[rpo];
   }
   Block* start_block() {
-    return pre_order_at(flow()->start_block()->pre_order());
+    return rpo_at(flow()->start_block()->rpo());
   }
   // Can return NULL if the flow pass did not complete a block.
   Block* successor_for_bci(int bci) {
@ -359,9 +369,6 @@ class Parse : public GraphKit {
   // Parse all the basic blocks.
   void do_all_blocks();

-  // Helper for do_all_blocks; makes one pass in pre-order.
-  void visit_blocks();
-
   // Parse the current basic block
   void do_one_block();

@ -29,17 +29,17 @@
 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
 // and eventually should be encapsulated in a proper class (gri 8/18/98).

-int nodes_created   = 0; int nodes_created_old   = 0;
-int methods_parsed  = 0; int methods_parsed_old  = 0;
-int methods_seen    = 0; int methods_seen_old    = 0;
+int nodes_created  = 0;
+int methods_parsed = 0;
+int methods_seen   = 0;
+int blocks_parsed  = 0;
+int blocks_seen    = 0;

-int explicit_null_checks_inserted = 0, explicit_null_checks_inserted_old = 0;
-int explicit_null_checks_elided = 0, explicit_null_checks_elided_old = 0;
+int explicit_null_checks_inserted = 0;
+int explicit_null_checks_elided = 0;
 int all_null_checks_found = 0, implicit_null_checks = 0;
 int implicit_null_throws = 0;

-int parse_idx = 0;
-size_t parse_arena = 0;
 int reclaim_idx  = 0;
 int reclaim_in   = 0;
 int reclaim_node = 0;
@ -61,6 +61,7 @@ void Parse::print_statistics() {
   tty->cr();
   if (methods_seen != methods_parsed)
     tty->print_cr("Reasons for parse failures (NOT cumulative):");
+  tty->print_cr("Blocks parsed: %d  Blocks seen: %d", blocks_parsed, blocks_seen);

   if( explicit_null_checks_inserted )
     tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found);
@ -373,6 +374,12 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
     C->record_method_not_compilable_all_tiers(_flow->failure_reason());
   }

+#ifndef PRODUCT
+  if (_flow->has_irreducible_entry()) {
+    C->set_parsed_irreducible_loop(true);
+  }
+#endif
+
   if (_expected_uses <= 0) {
     _prof_factor = 1;
   } else {
@ -556,118 +563,93 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
   set_map(entry_map);
   do_exits();

-  // Collect a few more statistics.
-  parse_idx += C->unique();
-  parse_arena += C->node_arena()->used();
-
   if (log)  log->done("parse nodes='%d' memory='%d'",
                       C->unique(), C->node_arena()->used());
 }

 //---------------------------do_all_blocks-------------------------------------
 void Parse::do_all_blocks() {
-  _blocks_merged = 0;
-  _blocks_parsed = 0;
+  bool has_irreducible = flow()->has_irreducible_entry();

-  int old_blocks_merged = -1;
-  int old_blocks_parsed = -1;
+  // Walk over all blocks in Reverse Post-Order.
+  while (true) {
+    bool progress = false;
+    for (int rpo = 0; rpo < block_count(); rpo++) {
+      Block* block = rpo_at(rpo);

-  for (int tries = 0; ; tries++) {
-    visit_blocks();
-    if (failing())  return; // Check for bailout
+      if (block->is_parsed()) continue;

-    // No need for a work list.  The outer loop is hardly ever repeated.
-    // The following loop traverses the blocks in a reasonable pre-order,
-    // as produced by the ciTypeFlow pass.
+      if (!block->is_merged()) {
+        // Dead block, no state reaches this block
+        continue;
+      }

-    // This loop can be taken more than once if there are two entries to
-    // a loop (irreduceable CFG), and the edge which ciTypeFlow chose
-    // as the first predecessor to the loop goes dead in the parser,
-    // due to parse-time optimization.  (Could happen with obfuscated code.)
+      // Prepare to parse this block.
+      load_state_from(block);

-    // Look for progress, or the lack of it:
-    if (_blocks_parsed == block_count()) {
-      // That's all, folks.
-      if (TraceOptoParse) {
-        tty->print_cr("All blocks parsed.");
-      }
-      break;
-    }
+      if (stopped()) {
+        // Block is dead.
+        continue;
+      }

-    // How much work was done this time around?
-    int new_blocks_merged = _blocks_merged - old_blocks_merged;
-    int new_blocks_parsed = _blocks_parsed - old_blocks_parsed;
-    if (new_blocks_merged == 0) {
-      if (TraceOptoParse) {
-        tty->print_cr("All live blocks parsed; %d dead blocks.", block_count() - _blocks_parsed);
-      }
-      // No new blocks have become parseable.  Some blocks are just dead.
-      break;
-    }
-    assert(new_blocks_parsed > 0, "must make progress");
-    assert(tries < block_count(), "the pre-order cannot be this bad!");
+      blocks_parsed++;

-    old_blocks_merged = _blocks_merged;
-    old_blocks_parsed = _blocks_parsed;
-  }
+      progress = true;
+      if (block->is_loop_head() || block->is_handler() || has_irreducible && !block->is_ready()) {
+        // Not all preds have been parsed.  We must build phis everywhere.
+        // (Note that dead locals do not get phis built, ever.)
+        ensure_phis_everywhere();
+
+        // Leave behind an undisturbed copy of the map, for future merges.
+        set_map(clone_map());
+      }
+
+      if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
+        // In the absence of irreducible loops, the Region and Phis
+        // associated with a merge that doesn't involve a backedge can
+        // be simplfied now since the RPO parsing order guarantees
+        // that any path which was supposed to reach here has already
+        // been parsed or must be dead.
+        Node* c = control();
+        Node* result = _gvn.transform_no_reclaim(control());
+        if (c != result && TraceOptoParse) {
+          tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
+        }
+        if (result != top()) {
+          record_for_igvn(result);
+        }
+      }
+
+      // Parse the block.
+      do_one_block();
+
+      // Check for bailouts.
+      if (failing())  return;
+    }
+
+    // with irreducible loops multiple passes might be necessary to parse everything
+    if (!has_irreducible || !progress) {
+      break;
+    }
+  }
+
+  blocks_seen += block_count();

 #ifndef PRODUCT
   // Make sure there are no half-processed blocks remaining.
   // Every remaining unprocessed block is dead and may be ignored now.
-  for (int po = 0; po < block_count(); po++) {
-    Block* block = pre_order_at(po);
+  for (int rpo = 0; rpo < block_count(); rpo++) {
+    Block* block = rpo_at(rpo);
     if (!block->is_parsed()) {
       if (TraceOptoParse) {
-        tty->print("Skipped dead block %d at bci:%d", po, block->start());
-        assert(!block->is_merged(), "no half-processed blocks");
+        tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
       }
+      assert(!block->is_merged(), "no half-processed blocks");
     }
   }
 #endif
 }

-//---------------------------visit_blocks--------------------------------------
-void Parse::visit_blocks() {
-  // Walk over all blocks, parsing every one that has been reached (merged).
-  for (int po = 0; po < block_count(); po++) {
-    Block* block = pre_order_at(po);
-
-    if (block->is_parsed()) {
-      // Do not parse twice.
-      continue;
-    }
-
-    if (!block->is_merged()) {
-      // No state on this block.  It had not yet been reached.
-      // Delay reaching it until later.
-      continue;
-    }
-
-    // Prepare to parse this block.
-    load_state_from(block);
-
-    if (stopped()) {
-      // Block is dead.
-      continue;
-    }
-
-    if (!block->is_ready() || block->is_handler()) {
-      // Not all preds have been parsed.  We must build phis everywhere.
-      // (Note that dead locals do not get phis built, ever.)
-      ensure_phis_everywhere();
-
-      // Leave behind an undisturbed copy of the map, for future merges.
-      set_map(clone_map());
-    }
-
-    // Ready or not, parse the block.
-    do_one_block();
-
-    // Check for bailouts.
-    if (failing())  return;
-  }
-}
-
 //-------------------------------build_exits----------------------------------
 // Build normal and exceptional exit merge points.
 void Parse::build_exits() {
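For context on the reverse post-order (RPO) walk introduced above, here is a generic, self-contained C++ sketch of the traversal idea, not HotSpot code: in RPO every predecessor of a block is visited before the block itself except along back edges, so one sweep is enough for reducible graphs and only irreducible loops may force another pass. The tiny graph below is an invented example.

    #include <cstdio>
    #include <vector>

    static void dfs(int n, const std::vector<std::vector<int>>& succ,
                    std::vector<bool>& seen, std::vector<int>& post) {
      seen[n] = true;
      for (int s : succ[n])
        if (!seen[s]) dfs(s, succ, seen, post);
      post.push_back(n);  // record post-order position
    }

    int main() {
      // Blocks 0 -> 1 -> 2 -> 3 with a back edge 3 -> 1 (a simple reducible loop).
      std::vector<std::vector<int>> succ = {{1}, {2}, {3}, {1}};
      std::vector<bool> seen(succ.size(), false);
      std::vector<int> post;
      dfs(0, succ, seen, post);

      std::printf("reverse post-order:");
      for (auto it = post.rbegin(); it != post.rend(); ++it)
        std::printf(" %d", *it);
      std::printf("\n");   // prints: 0 1 2 3
      return 0;
    }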
@ -1134,24 +1116,24 @@ void Parse::init_blocks() {
   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
   Copy::zero_to_bytes(_blocks, sizeof(Block)*_block_count);

-  int po;
+  int rpo;

   // Initialize the structs.
-  for (po = 0; po < block_count(); po++) {
-    Block* block = pre_order_at(po);
-    block->init_node(this, po);
+  for (rpo = 0; rpo < block_count(); rpo++) {
+    Block* block = rpo_at(rpo);
+    block->init_node(this, rpo);
   }

   // Collect predecessor and successor information.
-  for (po = 0; po < block_count(); po++) {
-    Block* block = pre_order_at(po);
+  for (rpo = 0; rpo < block_count(); rpo++) {
+    Block* block = rpo_at(rpo);
     block->init_graph(this);
   }
 }

 //-------------------------------init_node-------------------------------------
-void Parse::Block::init_node(Parse* outer, int po) {
-  _flow = outer->flow()->pre_order_at(po);
+void Parse::Block::init_node(Parse* outer, int rpo) {
+  _flow = outer->flow()->rpo_at(rpo);
   _pred_count = 0;
   _preds_parsed = 0;
   _count = 0;
@ -1177,7 +1159,7 @@ void Parse::Block::init_graph(Parse* outer) {
   int p = 0;
   for (int i = 0; i < ns+ne; i++) {
     ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
-    Block* block2 = outer->pre_order_at(tf2->pre_order());
+    Block* block2 = outer->rpo_at(tf2->rpo());
     _successors[i] = block2;

     // Accumulate pred info for the other block, too.
@ -1368,10 +1350,11 @@ void Parse::do_one_block() {
     int nt = b->all_successors();

     tty->print("Parsing block #%d at bci [%d,%d), successors: ",
-                  block()->pre_order(), block()->start(), block()->limit());
+                  block()->rpo(), block()->start(), block()->limit());
     for (int i = 0; i < nt; i++) {
-      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->pre_order());
+      tty->print((( i < ns) ? " %d" : " %d(e)"), b->successor_at(i)->rpo());
     }
+    if (b->is_loop_head()) tty->print("  lphd");
     tty->print_cr("");
   }

@ -1501,7 +1484,7 @@ void Parse::handle_missing_successor(int target_bci) {
 #ifndef PRODUCT
   Block* b = block();
   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
-  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->pre_order(), trap_bci);
+  tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
 #endif
   ShouldNotReachHere();
 }
@ -1509,7 +1492,7 @@ void Parse::handle_missing_successor(int target_bci) {
 //--------------------------merge_common---------------------------------------
 void Parse::merge_common(Parse::Block* target, int pnum) {
   if (TraceOptoParse) {
-    tty->print("Merging state at block #%d bci:%d", target->pre_order(), target->start());
+    tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
   }

   // Zap extra stack slots to top
@ -1534,6 +1517,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
     // which must not be allowed into this block's map.)
     if (pnum > PhiNode::Input         // Known multiple inputs.
         || target->is_handler()      // These have unpredictable inputs.
+        || target->is_loop_head()    // Known multiple inputs
         || control()->is_Region()) { // We must hide this guy.
       // Add a Region to start the new basic block.  Phis will be added
       // later lazily.
@ -1575,15 +1559,21 @@ void Parse::merge_common(Parse::Block* target, int pnum) {

     // Compute where to merge into
     // Merge incoming control path
-    r->set_req(pnum, newin->control());
+    r->init_req(pnum, newin->control());

     if (pnum == 1) {            // Last merge for this Region?
-      _gvn.transform_no_reclaim(r);
+      if (!block()->flow()->is_irreducible_entry()) {
+        Node* result = _gvn.transform_no_reclaim(r);
+        if (r != result && TraceOptoParse) {
+          tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
+        }
+      }
       record_for_igvn(r);
     }

     // Update all the non-control inputs to map:
     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
+    bool check_elide_phi = target->is_SEL_backedge(save_block);
     for (uint j = 1; j < newin->req(); j++) {
       Node* m = map()->in(j);   // Current state of target.
       Node* n = newin->in(j);   // Incoming change to target state.
@ -1603,7 +1593,11 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
         merge_memory_edges(n->as_MergeMem(), pnum, nophi);
         continue;
       default:                  // All normal stuff
-        if (phi == NULL)  phi = ensure_phi(j, nophi);
+        if (phi == NULL) {
+          if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
+            phi = ensure_phi(j, nophi);
+          }
+        }
         break;
       }
     }
@ -1736,9 +1730,13 @@ void Parse::ensure_phis_everywhere() {
   uint nof_monitors = map()->jvms()->nof_monitors();

   assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
+  bool check_elide_phi = block()->is_SEL_head();
   for (uint i = TypeFunc::Parms; i < monoff; i++) {
-    ensure_phi(i);
+    if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
+      ensure_phi(i);
+    }
   }

   // Even monitors need Phis, though they are well-structured.
   // This is true for OSR methods, and also for the rare cases where
   // a monitor object is the subject of a replace_in_map operation.
@ -100,16 +100,17 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {

   // Do the range check
   if (GenerateRangeChecks && need_range_check) {
-    // Range is constant in array-oop, so we can use the original state of mem
-    Node* len = load_array_length(ary);
     Node* tst;
     if (sizetype->_hi <= 0) {
-      // If the greatest array bound is negative, we can conclude that we're
+      // The greatest array bound is negative, so we can conclude that we're
       // compiling unreachable code, but the unsigned compare trick used below
       // only works with non-negative lengths.  Instead, hack "tst" to be zero so
       // the uncommon_trap path will always be taken.
       tst = _gvn.intcon(0);
     } else {
+      // Range is constant in array-oop, so we can use the original state of mem
+      Node* len = load_array_length(ary);
+
       // Test length vs index (standard trick using unsigned compare)
       Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
       BoolTest::mask btest = BoolTest::lt;
@ -137,9 +138,12 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
   // Check for always knowing you are throwing a range-check exception
   if (stopped())  return top();

-  Node* ptr = array_element_address( ary, idx, type, sizetype);
+  Node* ptr = array_element_address(ary, idx, type, sizetype);

   if (result2 != NULL)  *result2 = elemtype;

+  assert(ptr != top(), "top should go hand-in-hand with stopped");
+
   return ptr;
 }

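A small self-contained demo of the "standard trick using unsigned compare" mentioned in the hunk above: a single unsigned comparison (unsigned)idx < (unsigned)len covers both idx < 0 and idx >= len, because a negative index reinterprets as a very large unsigned value. It only works when the length is known to be non-negative, which is why the code special-cases sizetype->_hi <= 0. Plain C++ for illustration only.

    #include <cstdint>
    #include <cstdio>

    static bool in_bounds(int32_t idx, int32_t len) {
      return static_cast<uint32_t>(idx) < static_cast<uint32_t>(len);
    }

    int main() {
      std::printf("%d %d %d\n",
                  in_bounds(3, 10),    // 1: normal in-range index
                  in_bounds(-1, 10),   // 0: negative index fails the unsigned test
                  in_bounds(10, 10));  // 0: index == length is out of range
      return 0;
    }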
@ -3157,17 +3157,18 @@ static jint max_array_length(BasicType etype) {

 // Narrow the given size type to the index range for the given array base type.
 // Return NULL if the resulting int type becomes empty.
-const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size, BasicType elem) {
+const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
   jint hi = size->_hi;
   jint lo = size->_lo;
   jint min_lo = 0;
-  jint max_hi = max_array_length(elem);
+  jint max_hi = max_array_length(elem()->basic_type());
   //if (index_not_size)  --max_hi;     // type of a valid array index, FTR
   bool chg = false;
   if (lo < min_lo) { lo = min_lo; chg = true; }
   if (hi > max_hi) { hi = max_hi; chg = true; }
+  // Negative length arrays will produce weird intermediate dead fast-path code
   if (lo > hi)
-    return NULL;
+    return TypeInt::ZERO;
   if (!chg)
     return size;
   return TypeInt::make(lo, hi, Type::WidenMin);
@ -3176,9 +3177,7 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size, BasicType elem)
 //-------------------------------cast_to_size----------------------------------
 const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const {
   assert(new_size != NULL, "");
-  new_size = narrow_size_type(new_size, elem()->basic_type());
-  if (new_size == NULL)       // Negative length arrays will produce weird
-    new_size = TypeInt::ZERO; // intermediate dead fast-path goo
+  new_size = narrow_size_type(new_size);
   if (new_size == size())  return this;
   const TypeAry* new_ary = TypeAry::make(elem(), new_size);
   return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
@ -840,6 +840,7 @@ public:
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;

   virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const;
+  virtual const TypeInt* narrow_size_type(const TypeInt* size) const;

   virtual bool empty(void) const;        // TRUE if type is vacuous
   virtual const TypePtr *add_offset( intptr_t offset ) const;
@ -865,7 +866,6 @@ public:
   }
   static const TypeAryPtr *_array_body_type[T_CONFLICT+1];
   // sharpen the type of an int which is used as an array size
-  static const TypeInt* narrow_size_type(const TypeInt* size, BasicType elem);
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
 #endif
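A toy version of the clamping that narrow_size_type performs above, written in plain C++ rather than the VM's type lattice: an array-size range is intersected with [0, max_len], and an empty intersection collapses to the constant 0 instead of an invalid empty range. The max_len value below is arbitrary for the example.

    #include <algorithm>
    #include <cstdio>
    #include <utility>

    static std::pair<int, int> narrow_size(int lo, int hi, int max_len) {
      lo = std::max(lo, 0);
      hi = std::min(hi, max_len);
      if (lo > hi) return {0, 0};  // empty range: treat as the constant zero
      return {lo, hi};
    }

    int main() {
      auto r1 = narrow_size(-5, 12, 100);   // clamps to [0, 12]
      auto r2 = narrow_size(-8, -1, 100);   // empty -> [0, 0]
      std::printf("[%d,%d] [%d,%d]\n", r1.first, r1.second, r2.first, r2.second);
      return 0;
    }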
@ -121,7 +121,7 @@ JvmtiEnvBase::JvmtiEnvBase() : _env_event_enable() {
   JvmtiEventController::env_initialize((JvmtiEnv*)this);

 #ifdef JVMTI_TRACE
-  _jvmti_external.functions = strlen(TraceJVMTI)? &jvmtiTrace_Interface : &jvmti_Interface;
+  _jvmti_external.functions = TraceJVMTI != NULL ? &jvmtiTrace_Interface : &jvmti_Interface;
 #else
   _jvmti_external.functions = &jvmti_Interface;
 #endif
@ -73,7 +73,7 @@ void JvmtiTrace::initialize() {

   const char *very_end;
   const char *curr;
-  if (strlen(TraceJVMTI)) {
+  if (TraceJVMTI != NULL) {
     curr = TraceJVMTI;
   } else {
     curr = "";  // hack in fixed tracing here
@ -365,8 +365,11 @@ bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, FlagValu
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
   ccstr old_value = result->get_ccstr();
-  char* new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1);
-  strcpy(new_value, *value);
+  char* new_value = NULL;
+  if (*value != NULL) {
+    new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1);
+    strcpy(new_value, *value);
+  }
   result->set_ccstr(new_value);
   if (result->origin == DEFAULT && old_value != NULL) {
     // Prior value is NOT heap allocated, but was a literal constant.
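A small, generic illustration (plain C++, not the VM's flag machinery) of the defensive pattern introduced above: when a string-valued option may legitimately be NULL, the copy must be guarded so strlen/strcpy are never applied to a NULL pointer.

    #include <cstring>
    #include <cstdlib>

    // Null-safe duplicate: returns NULL for NULL input instead of crashing.
    static char* dup_or_null(const char* value) {
      if (value == NULL) return NULL;
      char* copy = static_cast<char*>(std::malloc(std::strlen(value) + 1));
      if (copy != NULL) std::strcpy(copy, value);
      return copy;
    }

    int main() {
      char* a = dup_or_null("tunable=1");  // copied
      char* b = dup_or_null(NULL);         // stays NULL, no crash
      std::free(a);
      std::free(b);                        // free(NULL) is a no-op
      return 0;
    }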
@ -707,7 +707,7 @@ class CommandLineFlags {
   diagnostic(bool, PrintAssembly, false,                                    \
           "Print assembly code (using external disassembler.so)")          \
                                                                             \
-  diagnostic(ccstr, PrintAssemblyOptions, false,                            \
+  diagnostic(ccstr, PrintAssemblyOptions, NULL,                             \
           "Options string passed to disassembler.so")                       \
                                                                             \
   diagnostic(bool, PrintNMethods, false,                                    \
@ -848,7 +848,7 @@ class CommandLineFlags {
           "Use LWP-based instead of libthread-based synchronization "      \
           "(SPARC only)")                                                   \
                                                                             \
-  product(ccstr, SyncKnobs, "",                                             \
+  product(ccstr, SyncKnobs, NULL,                                           \
           "(Unstable) Various monitor synchronization tunables")            \
                                                                             \
   product(intx, EmitSync, 0,                                                \
@ -1032,7 +1032,7 @@ class CommandLineFlags {
   notproduct(bool, TraceJVMCalls, false,                                    \
           "Trace JVM calls")                                                \
                                                                             \
-  product(ccstr, TraceJVMTI, "",                                            \
+  product(ccstr, TraceJVMTI, NULL,                                          \
           "Trace flags for JVMTI functions and events")                     \
                                                                             \
   /* This option can change an EMCP method into an obsolete method. */     \
@ -1157,10 +1157,6 @@ class CommandLineFlags {
           "In the Parallel Old garbage collector use parallel dense"       \
           " prefix update")                                                 \
                                                                             \
-  develop(bool, UseParallelOldGCChunkPointerCalc, true,                     \
-          "In the Parallel Old garbage collector use chucks to calculate"  \
-          " new object locations")                                          \
-                                                                            \
   product(uintx, HeapMaximumCompactionInterval, 20,                         \
           "How often should we maximally compact the heap (not allowing "  \
           "any dead space)")                                                \
@ -1189,21 +1185,14 @@ class CommandLineFlags {
   product(uintx, ParallelCMSThreads, 0,                                     \
           "Max number of threads CMS will use for concurrent work")        \
                                                                             \
-  develop(bool, VerifyParallelOldWithMarkSweep, false,                      \
-          "Use the MarkSweep code to verify phases of Parallel Old")        \
-                                                                            \
-  develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1,                 \
-          "Interval at which the MarkSweep code is used to verify "         \
-          "phases of Parallel Old")                                         \
-                                                                            \
   develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                       \
           "Use the Parallel Old MT unsafe in marking the bitmap")           \
                                                                             \
   develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                   \
           "Use the Parallel Old MT unsafe in update of live size")          \
                                                                             \
-  develop(bool, TraceChunkTasksQueuing, false,                              \
-          "Trace the queuing of the chunk tasks")                           \
+  develop(bool, TraceRegionTasksQueuing, false,                             \
+          "Trace the queuing of the region tasks")                          \
                                                                             \
   product(uintx, ParallelMarkingThreads, 0,                                 \
           "Number of marking threads concurrent gc will use")              \
@ -109,72 +109,72 @@ void ParallelTaskTerminator::reset_for_reuse() {
   }
 }

-bool ChunkTaskQueueWithOverflow::is_empty() {
-  return (_chunk_queue.size() == 0) &&
+bool RegionTaskQueueWithOverflow::is_empty() {
+  return (_region_queue.size() == 0) &&
          (_overflow_stack->length() == 0);
 }

-bool ChunkTaskQueueWithOverflow::stealable_is_empty() {
-  return _chunk_queue.size() == 0;
+bool RegionTaskQueueWithOverflow::stealable_is_empty() {
+  return _region_queue.size() == 0;
 }

-bool ChunkTaskQueueWithOverflow::overflow_is_empty() {
+bool RegionTaskQueueWithOverflow::overflow_is_empty() {
   return _overflow_stack->length() == 0;
 }

-void ChunkTaskQueueWithOverflow::initialize() {
-  _chunk_queue.initialize();
+void RegionTaskQueueWithOverflow::initialize() {
+  _region_queue.initialize();
   assert(_overflow_stack == 0, "Creating memory leak");
   _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<ChunkTask>(10, true);
+    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
 }

-void ChunkTaskQueueWithOverflow::save(ChunkTask t) {
-  if (TraceChunkTasksQueuing && Verbose) {
+void RegionTaskQueueWithOverflow::save(RegionTask t) {
+  if (TraceRegionTasksQueuing && Verbose) {
     gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
   }
-  if(!_chunk_queue.push(t)) {
+  if(!_region_queue.push(t)) {
     _overflow_stack->push(t);
   }
 }

-// Note that using this method will retrieve all chunks
+// Note that using this method will retrieve all regions
 // that have been saved but that it will always check
 // the overflow stack.  It may be more efficient to
 // check the stealable queue and the overflow stack
 // separately.
-bool ChunkTaskQueueWithOverflow::retrieve(ChunkTask& chunk_task) {
-  bool result = retrieve_from_overflow(chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
+  bool result = retrieve_from_overflow(region_task);
   if (!result) {
-    result = retrieve_from_stealable_queue(chunk_task);
+    result = retrieve_from_stealable_queue(region_task);
   }
-  if (TraceChunkTasksQueuing && Verbose && result) {
+  if (TraceRegionTasksQueuing && Verbose && result) {
     gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
   }
   return result;
 }

-bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue(
-                                   ChunkTask& chunk_task) {
-  bool result = _chunk_queue.pop_local(chunk_task);
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
+                                   RegionTask& region_task) {
+  bool result = _region_queue.pop_local(region_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }

-bool ChunkTaskQueueWithOverflow::retrieve_from_overflow(
-                                   ChunkTask& chunk_task) {
+bool
+RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
   bool result;
   if (!_overflow_stack->is_empty()) {
-    chunk_task = _overflow_stack->pop();
+    region_task = _overflow_stack->pop();
     result = true;
   } else {
-    chunk_task = (ChunkTask) NULL;
+    region_task = (RegionTask) NULL;
     result = false;
   }
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }
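The rename above preserves the underlying structure: a fixed-capacity stealable queue backed by a private, unbounded overflow stack, with retrieval draining the overflow first. Below is a generic C++ sketch of that pattern for illustration only; it is not the VM's task-queue implementation and the class and capacity are invented for the example.

    #include <cstddef>
    #include <deque>
    #include <vector>

    template <typename Task, std::size_t Capacity = 4>
    class QueueWithOverflow {
      std::deque<Task> _queue;      // stands in for the fixed-capacity stealable queue
      std::vector<Task> _overflow;  // unbounded private overflow stack
     public:
      void save(Task t) {
        if (_queue.size() < Capacity) _queue.push_back(t);
        else                          _overflow.push_back(t);
      }
      bool retrieve(Task& out) {
        if (!_overflow.empty()) { out = _overflow.back(); _overflow.pop_back(); return true; }
        if (!_queue.empty())    { out = _queue.front();   _queue.pop_front();   return true; }
        return false;
      }
      bool is_empty() const { return _queue.empty() && _overflow.empty(); }
    };

    int main() {
      QueueWithOverflow<std::size_t> q;
      for (std::size_t i = 0; i < 6; i++) q.save(i);  // last two spill to overflow
      std::size_t t;
      while (q.retrieve(t)) { /* process region index t */ }
      return 0;
    }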
@ -557,32 +557,32 @@ class StarTask {
 typedef GenericTaskQueue<StarTask>    OopStarTaskQueue;
 typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;

-typedef size_t ChunkTask;  // index for chunk
-typedef GenericTaskQueue<ChunkTask>    ChunkTaskQueue;
-typedef GenericTaskQueueSet<ChunkTask> ChunkTaskQueueSet;
+typedef size_t RegionTask;  // index for region
+typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;

-class ChunkTaskQueueWithOverflow: public CHeapObj {
+class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
-  ChunkTaskQueue              _chunk_queue;
-  GrowableArray<ChunkTask>*   _overflow_stack;
+  RegionTaskQueue              _region_queue;
+  GrowableArray<RegionTask>*   _overflow_stack;

  public:
-  ChunkTaskQueueWithOverflow() : _overflow_stack(NULL) {}
+  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
   // Initialize both stealable queue and overflow
   void initialize();
   // Save first to stealable queue and then to overflow
-  void save(ChunkTask t);
+  void save(RegionTask t);
   // Retrieve first from overflow and then from stealable queue
-  bool retrieve(ChunkTask& chunk_index);
+  bool retrieve(RegionTask& region_index);
   // Retrieve from stealable queue
-  bool retrieve_from_stealable_queue(ChunkTask& chunk_index);
+  bool retrieve_from_stealable_queue(RegionTask& region_index);
   // Retrieve from overflow
-  bool retrieve_from_overflow(ChunkTask& chunk_index);
+  bool retrieve_from_overflow(RegionTask& region_index);
   bool is_empty();
   bool stealable_is_empty();
   bool overflow_is_empty();
-  juint stealable_size() { return _chunk_queue.size(); }
-  ChunkTaskQueue* task_queue() { return &_chunk_queue; }
+  juint stealable_size() { return _region_queue.size(); }
+  RegionTaskQueue* task_queue() { return &_region_queue; }
 };

-#define USE_ChunkTaskQueueWithOverflow
+#define USE_RegionTaskQueueWithOverflow

hotspot/test/compiler/6711100/Test.java (new file, 53 lines)
@ -0,0 +1,53 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/*
+ * @test
+ * @bug 6711100
+ * @summary 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.<init> Test
+ */
+
+public class Test {
+
+    static byte b;
+
+    // The server compiler chokes on compiling
+    // this method when f() is not inlined
+    public Test() {
+        b = (new byte[1])[(new byte[f()])[-1]];
+    }
+
+    protected static int f() {
+        return 1;
+    }
+
+    public static void main(String[] args) {
+        try {
+            Test t = new Test();
+        } catch (ArrayIndexOutOfBoundsException e) {
+        }
+    }
+}