8059606: Enable per-method usage of CompileThresholdScaling (per-method compilation thresholds)

Changed interpreter and compilation policies to allow using CompileThresholdScaling on a per-method level

Reviewed-by: jrose, kvn
Author: Zoltan Majo
Date:   2015-01-21 10:51:35 +01:00
Commit: e559c17954 (parent: 3d814126c2)

32 changed files with 293 additions and 157 deletions
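With this change a CompileThresholdScaling value can be attached to a single method through the
CompilerOracle, instead of (or in addition to) the global -XX:CompileThresholdScaling flag. A
minimal usage sketch (MyClass::myMethod and MyApp are placeholders, not taken from the commit):

  # compile myMethod after a tenth of the usual number of invocations
  java -XX:CompileCommand=option,MyClass::myMethod,double,CompileThresholdScaling,0.1 MyApp

  # a per-method scale of 0 excludes the method from compilation entirely
  # (see the compileBroker.cpp hunk below)
  java -XX:CompileCommand=option,MyClass::myMethod,double,CompileThresholdScaling,0.0 MyApp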


@@ -1374,6 +1374,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
 }

 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
+                                                                Register method_counters,
                                                                 Register Rtmp,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1386,9 +1387,8 @@ void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocat
   br_notnull_short(ImethodDataPtr, Assembler::pn, done);

   // Test to see if we should create a method data oop
-  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
-  sethi(profile_limit, Rtmp);
-  ld(Rtmp, profile_limit.low10(), Rtmp);
+  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
+  ld(profile_limit, Rtmp);
   cmp(invocation_count, Rtmp);
   // Use long branches because call_VM() code and following code generated by
   // test_backedge_count_for_osr() is large in debug VM.
@@ -2375,6 +2375,7 @@ void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters,

 #ifndef CC_INTERP
 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
+                                                             Register method_counters,
                                                              Register branch_bcp,
                                                              Register Rtmp ) {
   Label did_not_overflow;
@@ -2382,8 +2383,8 @@ void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_c
   assert_different_registers(backedge_count, Rtmp, branch_bcp);
   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");

-  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
-  load_contents(limit, Rtmp);
+  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
+  ld(limit, Rtmp);
   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

   // When ProfileInterpreter is on, the backedge_count comes from the
@@ -2500,17 +2501,13 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {

 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask_addr,
                                                         Register scratch1, Register scratch2,
                                                         Condition cond, Label *where) {
   ld(counter_addr, scratch1);
   add(scratch1, increment, scratch1);
-  if (is_simm13(mask)) {
-    andcc(scratch1, mask, G0);
-  } else {
-    set(mask, scratch2);
-    andcc(scratch1, scratch2, G0);
-  }
+  ld(mask_addr, scratch2);
+  andcc(scratch1, scratch2, G0);
   br(cond, false, Assembler::pn, *where);
   delayed()->st(scratch1, counter_addr);
 }
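The increment_mask_and_jump() change above swaps the compile-time constant mask for one loaded
from memory, so each method (or its MDO) can carry its own notification mask. A sketch of the
logic the emitted SPARC sequence implements for the Assembler::zero callers (plain C++, not VM
code):

  // counter and mask use the InvocationCounter encoding: the count sits in the upper bits,
  // increment is count_increment == 1 << count_shift, and mask is the per-method value
  // right_n_bits(freq_log) << count_shift loaded from MethodCounters or MethodData.
  int counter = *counter_addr + increment;  // bump the event count
  *counter_addr = counter;                  // store back (delay slot of the branch above)
  if ((counter & mask) == 0) {              // true once every 2^freq_log events
    goto overflow;                          // notify the runtime
  }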


@@ -267,7 +267,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
   void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
 #ifndef CC_INTERP
-  void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp );
+  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register branch_bcp, Register Rtmp );
 #endif /* CC_INTERP */

   // Object locking
@@ -280,7 +280,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rtmp, Label &profile_continue);

   void set_mdp_data_at(int constant, Register value);
   void increment_mdp_data_at(Address counter, Register bumped_count,
@@ -291,7 +291,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
                              Register bumped_count, Register scratch2,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask_addr,
                                Register scratch1, Register scratch2,
                                Condition cond, Label *where);
   void set_mdp_flag_at(int flag_constant, Register scratch);


@@ -282,12 +282,11 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
   // Note: In tiered we increment either counters in MethodCounters* or in
   // MDO depending if we're profiling or not.
-  const Register Rcounters = G3_scratch;
+  const Register G3_method_counters = G3_scratch;
   Label done;

   if (TieredCompilation) {
     const int increment = InvocationCounter::count_increment;
-    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // If no method data exists, go to profile_continue.
@@ -297,6 +296,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
       Address mdo_invocation_counter(G4_scratch,
                                      in_bytes(MethodData::invocation_counter_offset()) +
                                      in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                  G3_scratch, Lscratch,
                                  Assembler::zero, overflow);
@@ -305,20 +305,21 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
     // Increment counter in MethodCounters*
     __ bind(no_mdo);
-    Address invocation_counter(Rcounters,
+    Address invocation_counter(G3_method_counters,
                                in_bytes(MethodCounters::invocation_counter_offset()) +
                                in_bytes(InvocationCounter::counter_offset()));
-    __ get_method_counters(Lmethod, Rcounters, done);
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask,
                                G4_scratch, Lscratch,
                                Assembler::zero, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     // Update standard invocation counters
-    __ get_method_counters(Lmethod, Rcounters, done);
-    __ increment_invocation_counter(Rcounters, O0, G4_scratch);
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
     if (ProfileInterpreter) {
-      Address interpreter_invocation_counter(Rcounters,
+      Address interpreter_invocation_counter(G3_method_counters,
                                              in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
       __ ld(interpreter_invocation_counter, G4_scratch);
       __ inc(G4_scratch);
@@ -327,16 +328,16 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile

     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
-      __ load_contents(profile_limit, G3_scratch);
-      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
+      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
+      __ ld(profile_limit, G1_scratch);
+      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(*profile_method);
     }

-    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
-    __ load_contents(invocation_limit, G3_scratch);
+    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
+    __ ld(invocation_limit, G3_scratch);
     __ cmp(O0, G3_scratch);
     __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
     __ delayed()->nop();
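Note what changed in the non-tiered path above: the overflow tests no longer compare against the
global InvocationCounter::InterpreterInvocationLimit and InterpreterProfileLimit statics but
against limits stored in the method's MethodCounters, pre-scaled when that object was created
(see methodCounters.hpp below). Schematically (a sketch; the accessor spelling is hypothetical):

  // before: one global, VM-wide limit
  if (invocation_count >= InvocationCounter::InterpreterInvocationLimit) overflow();
  // after: the limit travels with the method
  if (invocation_count >= method_counters->interpreter_invocation_limit()) overflow();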


@@ -1599,13 +1599,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

-    const Register Rcounters = G3_scratch;
-    __ get_method_counters(Lmethod, Rcounters, Lforward);
+    const Register G3_method_counters = G3_scratch;
+    __ get_method_counters(Lmethod, G3_method_counters, Lforward);

     if (TieredCompilation) {
       Label Lno_mdo, Loverflow;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // If no method data exists, go to profile_continue.
         __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
@@ -1614,6 +1613,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
         // Increment backedge counter in the MDO
         Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                  in_bytes(InvocationCounter::counter_offset()));
+        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                    Assembler::notZero, &Lforward);
         __ ba_short(Loverflow);
@@ -1621,9 +1621,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

       // If there's no MDO, increment counter in MethodCounters*
       __ bind(Lno_mdo);
-      Address backedge_counter(Rcounters,
+      Address backedge_counter(G3_method_counters,
                                in_bytes(MethodCounters::backedge_counter_offset()) +
                                in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                  Assembler::notZero, &Lforward);
       __ bind(Loverflow);
@@ -1663,18 +1664,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       __ jmp(O2, G0);
       __ delayed()->nop();

-    } else {
+    } else { // not TieredCompilation
       // Update Backedge branch separately from invocations
       const Register G4_invoke_ctr = G4;
-      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
+      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
-        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
         }
       } else {
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
         }
       }
     }


@@ -1360,7 +1360,7 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {

 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask,
                                                         Register scratch, bool preloaded,
                                                         Condition cond, Label* where) {
   if (!preloaded) {


@@ -182,7 +182,7 @@
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask,
                                Register scratch, bool preloaded,
                                Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);


@@ -1426,7 +1426,7 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {

 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask,
                                                         Register scratch, bool preloaded,
                                                         Condition cond, Label* where) {
   if (!preloaded) {


@@ -191,7 +191,7 @@
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask,
                                Register scratch, bool preloaded,
                                Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);


@@ -346,7 +346,6 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
   // depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -356,6 +355,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
       // Increment counter in the MDO
       const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
       __ jmp(done);
     }
@@ -366,10 +366,11 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
                           InvocationCounter::counter_offset());

     __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask,
                                rcx, false, Assembler::zero, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     const Address backedge_counter(rax,
                                    MethodCounters::backedge_counter_offset() +
                                    InvocationCounter::counter_offset());
@@ -400,16 +401,16 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile

     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      __ cmp32(rcx,
-               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
       __ jcc(Assembler::less, *profile_method_continue);

       // if no method data exists, go to profile_method
       __ test_method_data_pointer(rax, *profile_method);
     }

-    __ cmp32(rcx,
-             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
     __ jcc(Assembler::aboveEqual, *overflow);
     __ bind(done);
   }


@@ -299,7 +299,6 @@ void InterpreterGenerator::generate_counter_incr(
   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -309,6 +308,7 @@ void InterpreterGenerator::generate_counter_incr(
       // Increment counter in the MDO
       const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
       __ jmp(done);
     }
@@ -318,10 +318,11 @@ void InterpreterGenerator::generate_counter_incr(
                           MethodCounters::invocation_counter_offset() +
                           InvocationCounter::counter_offset());
     __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                                false, Assembler::zero, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     const Address backedge_counter(rax,
                                    MethodCounters::backedge_counter_offset() +
                                    InvocationCounter::counter_offset());
@@ -350,14 +351,16 @@ void InterpreterGenerator::generate_counter_incr(

     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
       __ jcc(Assembler::less, *profile_method_continue);

       // if no method data exists, go to profile_method
       __ test_method_data_pointer(rax, *profile_method);
     }

-    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
     __ jcc(Assembler::aboveEqual, *overflow);
     __ bind(done);
   }


@@ -1621,7 +1621,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1630,6 +1629,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                    rax, false, Assembler::zero, &backedge_counter_overflow);
         __ jmp(dispatch);
@@ -1637,9 +1637,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                  rax, false, Assembler::zero, &backedge_counter_overflow);
-    } else {
+    } else { // not TieredCompilation
       // increment counter
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
@@ -1653,8 +1654,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ jcc(Assembler::less, dispatch);

         // if no method data exists, go to profile method
@@ -1662,8 +1662,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

         if (UseOnStackReplacement) {
           // check for overflow against rbx, which is the MDO taken count
-          __ cmp32(rbx,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::below, dispatch);

           // When ProfileInterpreter is on, the backedge_count comes from the
@@ -1678,8 +1677,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       } else {
         if (UseOnStackReplacement) {
           // check for overflow against rax, which is the sum of the counters
-          __ cmp32(rax,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
         }


@@ -1642,7 +1642,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1651,6 +1650,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                    rax, false, Assembler::zero, &backedge_counter_overflow);
         __ jmp(dispatch);
@@ -1658,9 +1658,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                  rax, false, Assembler::zero, &backedge_counter_overflow);
-    } else {
+    } else { // not TieredCompilation
       // increment counter
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
       __ movl(rax, Address(rcx, be_offset)); // load backedge counter
@@ -1674,8 +1675,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ jcc(Assembler::less, dispatch);

         // if no method data exists, go to profile method
@@ -1683,8 +1683,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

         if (UseOnStackReplacement) {
           // check for overflow against ebx which is the MDO taken count
-          __ cmp32(rbx,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::below, dispatch);

           // When ProfileInterpreter is on, the backedge_count comes
@@ -1702,8 +1701,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       if (UseOnStackReplacement) {
         // check for overflow against eax, which is the sum of the
         // counters
-        __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
         __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
       }


@@ -32,6 +32,7 @@
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
 #include "ci/ciObjArray.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/vm_version.hpp"
@@ -3351,7 +3352,12 @@ void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
   if (!x->inlinee()->is_accessor()) {
     CodeEmitInfo* info = state_for(x, x->state(), true);
     // Notify the runtime very infrequently only to take care of counter overflows
-    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
+    int freq_log = Tier23InlineeNotifyFreqLog;
+    double scale;
+    if (_method->has_option_value("CompileThresholdScaling", scale)) {
+      freq_log = Arguments::scaled_freq_log(freq_log, scale);
+    }
+    increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
   }
 }

@@ -3366,7 +3372,11 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool bac
     ShouldNotReachHere();
   }
   // Increment the appropriate invocation/backedge counter and notify the runtime.
-  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
+  double scale;
+  if (_method->has_option_value("CompileThresholdScaling", scale)) {
+    freq_log = Arguments::scaled_freq_log(freq_log, scale);
+  }
+  increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
 }

 void LIRGenerator::decrement_age(CodeEmitInfo* info) {
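right_n_bits(n) expands to (1 << n) - 1, so when a method carries no CompileThresholdScaling
option the emitted mask is exactly the old (1 << freq_log) - 1 constant. When the option is
present, the frequency log is rescaled first. A worked example, assuming the default
Tier23InlineeNotifyFreqLog value of 20:

  // no option:    mask = right_n_bits(20) == (1 << 20) - 1          (behavior unchanged)
  // scale = 0.25: freq_log = Arguments::scaled_freq_log(20, 0.25)   == 18
  //               mask = right_n_bits(18), i.e. the runtime is notified 4x as often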


@@ -1470,7 +1470,9 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
   // The method may be explicitly excluded by the user.
   bool quietly;
-  if (CompilerOracle::should_exclude(method, quietly)) {
+  double scale;
+  if (CompilerOracle::should_exclude(method, quietly)
+      || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
     if (!quietly) {
       // This does not happen quietly...
       ResourceMark rm;


@@ -53,7 +53,7 @@
 /*
  * USELABELS - If using GCC, then use labels for the opcode dispatching
  * rather -then a switch statement. This improves performance because it
- * gives us the oportunity to have the instructions that calculate the
+ * gives us the opportunity to have the instructions that calculate the
  * next opcode to jump to be intermixed with the rest of the instructions
  * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
  */


@@ -36,7 +36,7 @@
 // Implementation notes: For space reasons, state & counter are both encoded in one word,
 // The state is encoded using some of the least significant bits, the counter is using the
 // more significant bits. The counter is incremented before a method is activated and an
-// action is triggered when when count() > limit().
+// action is triggered when count() > limit().

 class InvocationCounter VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
@@ -48,7 +48,6 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
     number_of_state_bits = 2,
    number_of_carry_bits = 1,
    number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
-    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
    state_limit = nth_bit(number_of_state_bits),
    count_grain = nth_bit(number_of_state_bits + number_of_carry_bits),
    carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits,
@@ -68,6 +67,7 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
     count_increment = count_grain,          // use this value to increment the 32bit _counter word
     count_mask_value = count_mask,          // use this value to mask the backedge counter
     count_shift = number_of_noncount_bits,
+    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
     count_limit = nth_bit(number_of_count_bits - 1)
   };
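For orientation, the constants above pack state, carry and count into one 32-bit word; moving
number_of_count_bits into the public enum lets Arguments::scaled_freq_log() (see arguments.cpp
below) clamp scaled notification frequencies to what this layout can hold. A sketch of the
layout implied by the constants (not drawn from the source):

   31                                 3   2    1   0
  +-----------------------------------+-----+-------+
  |              count                |carry| state |
  +-----------------------------------+-----+-------+

  number_of_count_bits = BitsPerInt - 3 = 29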


@@ -412,15 +412,14 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
   }

   methodHandle mh(m);
-  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
-  MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
+  MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CompileBroker::log_metaspace_failure();
     ClassLoaderDataGraph::set_metaspace_oom(true);
     return NULL;   // return the exception (which is cleared)
   }
   if (!mh->init_method_counters(counters)) {
-    MetadataFactory::free_metadata(loader_data, counters);
+    MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters);
   }
   return mh->method_counters();
 }


@@ -23,10 +23,11 @@
  */
 #include "precompiled.hpp"
 #include "oops/methodCounters.hpp"
-#include "runtime/thread.inline.hpp"
+#include "runtime/handles.inline.hpp"

-MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
+MethodCounters* MethodCounters::allocate(methodHandle mh, TRAPS) {
+  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
+  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
 }

 void MethodCounters::clear_counters() {


@@ -26,7 +26,9 @@
 #define SHARE_VM_OOPS_METHODCOUNTERS_HPP

 #include "oops/metadata.hpp"
+#include "compiler/compilerOracle.hpp"
 #include "interpreter/invocationCounter.hpp"
+#include "runtime/arguments.hpp"

 class MethodCounters: public MetaspaceObj {
  friend class VMStructs;
@@ -45,7 +47,11 @@ class MethodCounters: public MetaspaceObj {
                                    //   3. (INT_MIN..0]  - method is hot and will deopt and get
                                    //                      recompiled without the counters
   int               _nmethod_age;
-
+  int               _interpreter_invocation_limit;        // per-method InterpreterInvocationLimit
+  int               _interpreter_backward_branch_limit;   // per-method InterpreterBackwardBranchLimit
+  int               _interpreter_profile_limit;           // per-method InterpreterProfileLimit
+  int               _invoke_mask;                         // per-method Tier0InvokeNotifyFreqLog
+  int               _backedge_mask;                       // per-method Tier0BackedgeNotifyFreqLog
 #ifdef TIERED
   float             _rate;            // Events (invocation and backedge counter increments) per millisecond
   jlong             _prev_time;       // Previous time the rate was acquired
@@ -53,7 +59,7 @@ class MethodCounters: public MetaspaceObj {
   u1                _highest_osr_comp_level;      // Same for OSR level
 #endif

-  MethodCounters() : _interpreter_invocation_count(0),
+  MethodCounters(methodHandle mh) : _interpreter_invocation_count(0),
                      _interpreter_throwout_count(0),
                      _number_of_breakpoints(0),
                      _nmethod_age(INT_MAX)
@@ -70,10 +76,28 @@ class MethodCounters: public MetaspaceObj {
     if (StressCodeAging) {
       set_nmethod_age(HotMethodDetectionLimit);
     }
+
+    // Set per-method thresholds.
+    double scale = 1.0;
+    CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
+
+    int compile_threshold = Arguments::scaled_compile_threshold(CompileThreshold, scale);
+    _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
+    if (ProfileInterpreter) {
+      // If interpreter profiling is enabled, the backward branch limit
+      // is compared against the method data counter rather than an invocation
+      // counter, therefore no shifting of bits is required.
+      _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
+    } else {
+      _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
+    }
+    _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
+    _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+    _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
   }

  public:
-  static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS);
+  static MethodCounters* allocate(methodHandle mh, TRAPS);

   void deallocate_contents(ClassLoaderData* loader_data) {}
   DEBUG_ONLY(bool on_stack() { return false; })  // for template
@@ -161,5 +185,24 @@ class MethodCounters: public MetaspaceObj {
     return offset_of(MethodCounters, _interpreter_invocation_count);
   }

+  static ByteSize interpreter_invocation_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_invocation_limit);
+  }
+
+  static ByteSize interpreter_backward_branch_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_backward_branch_limit);
+  }
+
+  static ByteSize interpreter_profile_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_profile_limit);
+  }
+
+  static ByteSize invoke_mask_offset() {
+    return byte_offset_of(MethodCounters, _invoke_mask);
+  }
+
+  static ByteSize backedge_mask_offset() {
+    return byte_offset_of(MethodCounters, _backedge_mask);
+  }
+
 };

 #endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP
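A standalone sketch of the per-method limits the constructor above computes, assuming the
default server values CompileThreshold=10000, OnStackReplacePercentage=140 and
InterpreterProfilePercentage=33, a per-method CompileThresholdScaling of 0.5, and
count_shift == 3 (these defaults are assumptions, not taken from the diff):

  int compile_threshold = (int)(10000 * 0.5);        // 5000
  int invocation_limit  = compile_threshold << 3;    // 5000, in counter encoding
  int profile_limit     = (5000 * 33 / 100) << 3;    // 1650, in counter encoding
  int backward_limit    = 5000 * (140 - 33) / 100;   // 5350 (ProfileInterpreter case,
                                                     //  compared against MDO counts, unshifted)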


@@ -31,6 +31,7 @@
 #include "memory/heapInspection.hpp"
 #include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1131,6 +1132,13 @@ void MethodData::init() {
   _backedge_counter.init();
   _invocation_counter_start = 0;
   _backedge_counter_start = 0;
+
+  // Set per-method invoke- and backedge mask.
+  double scale = 1.0;
+  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
+  _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+  _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+
   _tenure_traps = 0;
   _num_loops = 0;
   _num_blocks = 0;


@@ -2088,6 +2088,8 @@ private:
   int               _invocation_counter_start;
   int               _backedge_counter_start;
   uint              _tenure_traps;
+  int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
+  int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog

 #if INCLUDE_RTM_OPT
   // State of RTM code generation during compilation of the method
@@ -2447,10 +2449,19 @@ public:
   static ByteSize invocation_counter_offset() {
     return byte_offset_of(MethodData, _invocation_counter);
   }
+
   static ByteSize backedge_counter_offset() {
     return byte_offset_of(MethodData, _backedge_counter);
   }

+  static ByteSize invoke_mask_offset() {
+    return byte_offset_of(MethodData, _invoke_mask);
+  }
+
+  static ByteSize backedge_mask_offset() {
+    return byte_offset_of(MethodData, _backedge_mask);
+  }
+
   static ByteSize parameters_type_data_di_offset() {
     return byte_offset_of(MethodData, _parameters_type_data_di);
   }


@@ -155,7 +155,7 @@ bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
   if (mdo != NULL) {
     int i = mdo->invocation_count_delta();
     int b = mdo->backedge_count_delta();
-    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
   }
   return false;
 }
@@ -229,32 +229,32 @@ double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k)
 // Tier?LoadFeedback is basically a coefficient that determines of
 // how many methods per compiler thread can be in the queue before
 // the threshold values double.
-bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
     double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
-    return loop_predicate_helper<CompLevel_none>(i, b, k);
+    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   case CompLevel_full_profile: {
     double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
-    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
   }
   default:
     return true;
   }
 }

-bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
     double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
-    return call_predicate_helper<CompLevel_none>(i, b, k);
+    return call_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   case CompLevel_full_profile: {
     double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
-    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
   }
   default:
     return true;
@@ -271,7 +271,7 @@ bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_le
     int i = method->invocation_count();
     int b = method->backedge_count();
     double k = Tier0ProfilingStartPercentage / 100.0;
-    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
+    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   return false;
 }
@@ -348,7 +348,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
       // If we were at full profile level, would we switch to full opt?
       if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
         next_level = CompLevel_full_optimization;
-      } else if ((this->*p)(i, b, cur_level)) {
+      } else if ((this->*p)(i, b, cur_level, method)) {
         // C1-generated fully profiled code is about 30% slower than the limited profile
         // code that has only invocation and backedge counters. The observation is that
         // if C2 queue is large enough we can spend too much time in the fully profiled code
@@ -374,7 +374,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
           if (mdo->would_profile()) {
             if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                      Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
-                                     (this->*p)(i, b, cur_level))) {
+                                     (this->*p)(i, b, cur_level, method))) {
               next_level = CompLevel_full_profile;
             }
           } else {
@@ -390,7 +390,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
         if (mdo->would_profile()) {
           int mdo_i = mdo->invocation_count_delta();
           int mdo_b = mdo->backedge_count_delta();
-          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+          if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
             next_level = CompLevel_full_optimization;
           }
         } else {
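The predicates gained a Method* so the call_predicate_helper/loop_predicate_helper templates can
fold the per-method option into the threshold comparison. Those helpers live in
simpleThresholdPolicy.inline.hpp, one of the 32 changed files but not shown in this excerpt;
roughly, and assuming that change has the following shape (a sketch, not the verbatim source):

  template<CompLevel level>
  bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
      scale *= threshold_scaling;  // fold the per-method option into the load-feedback scale
    }
    switch(level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return (i >= Tier3InvocationThreshold * scale) ||
             (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
    case CompLevel_full_profile:
      return (i >= Tier4InvocationThreshold * scale) ||
             (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
    }
    return true;
  }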


@@ -84,7 +84,7 @@ class CompileQueue;
  *   invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
  *   makes a call into the runtime.
  *
- * - Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
+ * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
  *   compilation thresholds.
  *   Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
  *   Other thresholds work as follows:
@@ -100,7 +100,9 @@ class CompileQueue;
  *   The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
  *   noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
  *   from Method* and for 3->4 transition they come from MDO (since profiled invocations are
- *   counted separately).
+ *   counted separately). Finally, if a method does not contain anything worth profiling, a transition
+ *   from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
+ *   what is specified by Tier4InvocationThreshold).
  *
  * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
  *
@@ -164,9 +166,9 @@ class AdvancedThresholdPolicy : public SimpleThresholdPolicy {
   // Call and loop predicates determine whether a transition to a higher compilation
   // level should be performed (pointers to predicate functions are passed to common().
   // Predicates also take compiler load into account.
-  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
-  bool call_predicate(int i, int b, CompLevel cur_level);
-  bool loop_predicate(int i, int b, CompLevel cur_level);
+  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
   // Common transition function. Given a predicate determines if a method should transition to another level.
   CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
   // Transition functions.

View File

@ -1126,16 +1126,35 @@ static void no_shared_spaces(const char* message) {
} }
#endif #endif
// Returns threshold scaled with CompileThresholdScaling intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
intx Arguments::get_scaled_compile_threshold(intx threshold) { if (scale == 1.0 || scale < 0.0) {
return (intx)(threshold * CompileThresholdScaling); return threshold;
} else {
return (intx)(threshold * scale);
}
} }
// Returns freq_log scaled with CompileThresholdScaling // Returns freq_log scaled with CompileThresholdScaling
intx Arguments::get_scaled_freq_log(intx freq_log) { intx Arguments::scaled_freq_log(intx freq_log, double scale) {
intx scaled_freq = get_scaled_compile_threshold((intx)1 << freq_log); // Check if scaling is necessary or a negative value was specified.
if (scaled_freq == 0) { if (scale == 1.0 || scale < 0.0) {
return 0; return freq_log;
}
// Check value to avoid calculating log2 of 0.
if (scale == 0.0) {
return 1;
}
intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
// Determine the maximum notification frequency value currently supported.
// The largest mask value that the interpreter/C1 can handle is
// of length InvocationCounter::number_of_count_bits. Mask values are always
// one bit shorter than the value of the notification frequency. Set
// max_freq_bits accordingly.
intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
if (scaled_freq > nth_bit(max_freq_bits)) {
return max_freq_bits;
} else { } else {
return log2_intptr(scaled_freq); return log2_intptr(scaled_freq);
} }
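
A quick standalone rendering of the arithmetic above may help. The count_bits parameter stands in for InvocationCounter::number_of_count_bits, whose real value is defined elsewhere in HotSpot; 29 below is an assumed value for illustration:

    #include <cstdio>

    // Standalone sketch of scaled_freq_log() above (not the HotSpot code).
    static long scaled_freq_log_sketch(long freq_log, double scale, int count_bits) {
      if (scale == 1.0 || scale < 0.0) return freq_log; // no scaling requested
      if (scale == 0.0) return 1;                       // avoid log2 of 0
      long scaled = (long)((1L << freq_log) * scale);   // scale the frequency
      long max_freq_bits = count_bits + 1;              // largest supported log
      if (scaled > (1L << max_freq_bits)) return max_freq_bits;
      long log = -1;                                    // floor(log2(scaled))
      for (long p = 1; p > 0 && p <= scaled; p <<= 1) log++;
      return log;
    }

    int main() {
      // Scaling by 0.5 turns an every-1024th-event notification (log 10)
      // into an every-512th-event notification (log 9).
      printf("%ld\n", scaled_freq_log_sketch(10, 0.5, 29)); // prints 9
      return 0;
    }
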
@ -1180,31 +1199,36 @@ void Arguments::set_tiered_flags() {
Tier3InvokeNotifyFreqLog = 0; Tier3InvokeNotifyFreqLog = 0;
Tier4InvocationThreshold = 0; Tier4InvocationThreshold = 0;
} }
if (CompileThresholdScaling < 0) {
vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
}
// Scale tiered compilation thresholds // Scale tiered compilation thresholds
if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) { if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, get_scaled_freq_log(Tier0InvokeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, get_scaled_freq_log(Tier0BackedgeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier3InvocationThreshold, get_scaled_compile_threshold(Tier3InvocationThreshold)); FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, get_scaled_compile_threshold(Tier3MinInvocationThreshold)); FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
FLAG_SET_ERGO(intx, Tier3CompileThreshold, get_scaled_compile_threshold(Tier3CompileThreshold)); FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, get_scaled_compile_threshold(Tier3BackEdgeThreshold)); FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
// Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
// once these thresholds become supported. // once these thresholds become supported.
FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, get_scaled_freq_log(Tier2InvokeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, get_scaled_freq_log(Tier2BackedgeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, get_scaled_freq_log(Tier3InvokeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, get_scaled_freq_log(Tier3BackedgeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, get_scaled_freq_log(Tier23InlineeNotifyFreqLog)); FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
FLAG_SET_ERGO(intx, Tier4InvocationThreshold, get_scaled_compile_threshold(Tier4InvocationThreshold)); FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, get_scaled_compile_threshold(Tier4MinInvocationThreshold)); FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
FLAG_SET_ERGO(intx, Tier4CompileThreshold, get_scaled_compile_threshold(Tier4CompileThreshold)); FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, get_scaled_compile_threshold(Tier4BackEdgeThreshold)); FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
} }
} }
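
To illustrate the effect of the block above: with -XX:CompileThresholdScaling=0.5, each threshold is simply halved and each notification frequency log drops by one. Tier0InvokeNotifyFreqLog = 7 appears later in this change; the other default values used below (200, 2000, 5000) are assumptions for illustration:

    #include <cstdio>

    // Illustration only: what the FLAG_SET_ERGO block above computes
    // for scale = 0.5 (threshold defaults here are assumed).
    int main() {
      const double scale = 0.5;
      printf("Tier3InvocationThreshold: 200  -> %d\n", (int)(200  * scale)); // 100
      printf("Tier3CompileThreshold:    2000 -> %d\n", (int)(2000 * scale)); // 1000
      printf("Tier4InvocationThreshold: 5000 -> %d\n", (int)(5000 * scale)); // 2500
      printf("Tier0InvokeNotifyFreqLog: 7    -> 6 (every 128th -> every 64th event)\n");
      return 0;
    }
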
@ -3456,7 +3480,7 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
} }
if ((TieredCompilation && CompileThresholdScaling == 0) if ((TieredCompilation && CompileThresholdScaling == 0)
|| (!TieredCompilation && get_scaled_compile_threshold(CompileThreshold) == 0)) { || (!TieredCompilation && scaled_compile_threshold(CompileThreshold) == 0)) {
set_mode_flags(_int); set_mode_flags(_int);
} }
@ -3896,7 +3920,7 @@ jint Arguments::apply_ergo() {
} }
// Scale CompileThreshold // Scale CompileThreshold
if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) { if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
FLAG_SET_ERGO(intx, CompileThreshold, get_scaled_compile_threshold(CompileThreshold)); FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
} }
} }

View File

@ -328,9 +328,6 @@ class Arguments : AllStatic {
static bool _ClipInlining; static bool _ClipInlining;
static bool _CIDynamicCompilePriority; static bool _CIDynamicCompilePriority;
// Scale compile thresholds
static intx get_scaled_compile_threshold(intx threshold);
static intx get_scaled_freq_log(intx freq_log);
// Tiered // Tiered
static void set_tiered_flags(); static void set_tiered_flags();
static int get_min_number_of_compiler_threads(); static int get_min_number_of_compiler_threads();
@ -452,6 +449,18 @@ class Arguments : AllStatic {
static char* SharedArchivePath; static char* SharedArchivePath;
public: public:
// Scale compile thresholds
// Returns threshold scaled with CompileThresholdScaling
static intx scaled_compile_threshold(intx threshold, double scale);
static intx scaled_compile_threshold(intx threshold) {
return scaled_compile_threshold(threshold, CompileThresholdScaling);
}
// Returns freq_log scaled with CompileThresholdScaling
static intx scaled_freq_log(intx freq_log, double scale);
static intx scaled_freq_log(intx freq_log) {
return scaled_freq_log(freq_log, CompileThresholdScaling);
}
// Parses the arguments, first phase // Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args); static jint parse(const JavaVMInitArgs* args);
// Apply ergonomics // Apply ergonomics

View File

@ -2477,7 +2477,7 @@ class CommandLineFlags {
"Number of compiler threads to run") \ "Number of compiler threads to run") \
\ \
product(intx, CompilationPolicyChoice, 0, \ product(intx, CompilationPolicyChoice, 0, \
"which compilation policy (0/1)") \ "which compilation policy (0-3)") \
\ \
develop(bool, UseStackBanging, true, \ develop(bool, UseStackBanging, true, \
"use stack banging for stack overflow checks (required for " \ "use stack banging for stack overflow checks (required for " \
@ -3528,7 +3528,16 @@ class CommandLineFlags {
\ \
product(double, CompileThresholdScaling, 1.0, \ product(double, CompileThresholdScaling, 1.0, \
"Factor to control when first compilation happens " \ "Factor to control when first compilation happens " \
"(both with and without tiered compilation)") \ "(both with and without tiered compilation): " \
"values greater than 1.0 delay counter overflow, " \
"values between 0 and 1.0 rush counter overflow, " \
"value of 1.0 leave compilation thresholds unchanged " \
"value of 0.0 is equivalent to -Xint. " \
"" \
"Flag can be set as per-method option. " \
"If a value is specified for a method, compilation thresholds " \
"for that method are scaled by both the value of the global flag "\
"and the value of the per-method flag.") \
\ \
product(intx, Tier0InvokeNotifyFreqLog, 7, \ product(intx, Tier0InvokeNotifyFreqLog, 7, \
"Interpreter (tier 0) invocation notification frequency") \ "Interpreter (tier 0) invocation notification frequency") \

View File

@ -257,28 +257,28 @@ void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel l
// Call and loop predicates determine whether a transition to a higher // Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions // compilation level should be performed (pointers to predicate functions
// are passed to common() transition function). // are passed to common() transition function).
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) { bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) { switch(cur_level) {
case CompLevel_none: case CompLevel_none:
case CompLevel_limited_profile: { case CompLevel_limited_profile: {
return loop_predicate_helper<CompLevel_none>(i, b, 1.0); return loop_predicate_helper<CompLevel_none>(i, b, 1.0, method);
} }
case CompLevel_full_profile: { case CompLevel_full_profile: {
return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0); return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
} }
default: default:
return true; return true;
} }
} }
bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) { bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
switch(cur_level) { switch(cur_level) {
case CompLevel_none: case CompLevel_none:
case CompLevel_limited_profile: { case CompLevel_limited_profile: {
return call_predicate_helper<CompLevel_none>(i, b, 1.0); return call_predicate_helper<CompLevel_none>(i, b, 1.0, method);
} }
case CompLevel_full_profile: { case CompLevel_full_profile: {
return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0); return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
} }
default: default:
return true; return true;
@ -293,8 +293,8 @@ bool SimpleThresholdPolicy::is_mature(Method* method) {
int i = mdo->invocation_count(); int i = mdo->invocation_count();
int b = mdo->backedge_count(); int b = mdo->backedge_count();
double k = ProfileMaturityPercentage / 100.0; double k = ProfileMaturityPercentage / 100.0;
return call_predicate_helper<CompLevel_full_profile>(i, b, k) || return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
loop_predicate_helper<CompLevel_full_profile>(i, b, k); loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
} }
return false; return false;
} }
@ -313,7 +313,7 @@ CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel c
// If we were at full profile level, would we switch to full opt? // If we were at full profile level, would we switch to full opt?
if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) { if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization; next_level = CompLevel_full_optimization;
} else if ((this->*p)(i, b, cur_level)) { } else if ((this->*p)(i, b, cur_level, method)) {
next_level = CompLevel_full_profile; next_level = CompLevel_full_profile;
} }
break; break;
@ -325,7 +325,7 @@ CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel c
if (mdo->would_profile()) { if (mdo->would_profile()) {
int mdo_i = mdo->invocation_count_delta(); int mdo_i = mdo->invocation_count_delta();
int mdo_b = mdo->backedge_count_delta(); int mdo_b = mdo->backedge_count_delta();
if ((this->*p)(mdo_i, mdo_b, cur_level)) { if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
next_level = CompLevel_full_optimization; next_level = CompLevel_full_optimization;
} }
} else { } else {
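
The (this->*p)(...) calls above dispatch through a pointer-to-member-function so that one common() routine can drive either the call or the loop predicate. A minimal sketch of the pattern, with toy thresholds:

    #include <cstdio>

    // Sketch of the pointer-to-member-function dispatch used by common().
    struct PolicySketch {
      typedef bool (PolicySketch::*Predicate)(int i, int b);
      bool call_predicate(int i, int b) { return i > 100; }   // toy thresholds
      bool loop_predicate(int i, int b) { return b > 1000; }
      bool common(Predicate p, int i, int b) {
        return (this->*p)(i, b);  // dispatch through the member pointer
      }
    };

    int main() {
      PolicySketch policy;
      printf("%d\n", policy.common(&PolicySketch::call_predicate, 150, 0)); // 1
      printf("%d\n", policy.common(&PolicySketch::loop_predicate, 0, 500)); // 0
      return 0;
    }
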

View File

@ -43,9 +43,9 @@ class SimpleThresholdPolicy : public CompilationPolicy {
// Call and loop predicates determine whether a transition to a higher compilation // Call and loop predicates determine whether a transition to a higher compilation
// level should be performed (pointers to predicate functions are passed to common_TF()). // level should be performed (pointers to predicate functions are passed to common_TF()).
// Predicates also take compiler load into account. // Predicates also take compiler load into account.
typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level); typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
bool call_predicate(int i, int b, CompLevel cur_level); bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
bool loop_predicate(int i, int b, CompLevel cur_level); bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
// Common transition function. Given a predicate determines if a method should transition to another level. // Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel common(Predicate p, Method* method, CompLevel cur_level); CompLevel common(Predicate p, Method* method, CompLevel cur_level);
// Transition functions. // Transition functions.
@ -76,8 +76,8 @@ protected:
// Predicate helpers are used by .*_predicate() methods as well as others. // Predicate helpers are used by .*_predicate() methods as well as others.
// They check the given counter values, multiplied by the scale, against the thresholds. // They check the given counter values, multiplied by the scale, against the thresholds.
template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale); template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale); template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
// Get a compilation level for a given method. // Get a compilation level for a given method.
static CompLevel comp_level(Method* method) { static CompLevel comp_level(Method* method) {

View File

@ -25,8 +25,14 @@
#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP #ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP #define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#include "compiler/compilerOracle.hpp"
template<CompLevel level> template<CompLevel level>
bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) { bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
double threshold_scaling;
if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
scale *= threshold_scaling;
}
switch(level) { switch(level) {
case CompLevel_none: case CompLevel_none:
case CompLevel_limited_profile: case CompLevel_limited_profile:
@ -40,7 +46,11 @@ bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
} }
template<CompLevel level> template<CompLevel level>
bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) { bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
double threshold_scaling;
if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
scale *= threshold_scaling;
}
switch(level) { switch(level) {
case CompLevel_none: case CompLevel_none:
case CompLevel_limited_profile: case CompLevel_limited_profile:
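
The net scale applied by the helpers above is the product of the global flag and the per-method option. A standalone sketch of that composition, where the stand-in function below plays the role of CompilerOracle::has_option_value() and the 0.1 it reports is a made-up per-method value:

    #include <cstdio>

    // Stand-in for CompilerOracle::has_option_value(method,
    // "CompileThresholdScaling", ...); always "finds" 0.1 here.
    static bool per_method_scale(double* out) {
      *out = 0.1;
      return true;
    }

    int main() {
      double scale = 0.9;  // global CompileThresholdScaling
      double threshold_scaling;
      if (per_method_scale(&threshold_scaling)) {
        scale *= threshold_scaling;  // scales multiply, as in the helpers above
      }
      printf("effective scale: %.2f\n", scale);  // 0.09
      return 0;
    }
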

View File

@ -351,11 +351,18 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(MethodData, _arg_stack, intx) \ nonstatic_field(MethodData, _arg_stack, intx) \
nonstatic_field(MethodData, _arg_returned, intx) \ nonstatic_field(MethodData, _arg_returned, intx) \
nonstatic_field(MethodData, _tenure_traps, uint) \ nonstatic_field(MethodData, _tenure_traps, uint) \
nonstatic_field(MethodData, _invoke_mask, int) \
nonstatic_field(MethodData, _backedge_mask, int) \
nonstatic_field(DataLayout, _header._struct._tag, u1) \ nonstatic_field(DataLayout, _header._struct._tag, u1) \
nonstatic_field(DataLayout, _header._struct._flags, u1) \ nonstatic_field(DataLayout, _header._struct._flags, u1) \
nonstatic_field(DataLayout, _header._struct._bci, u2) \ nonstatic_field(DataLayout, _header._struct._bci, u2) \
nonstatic_field(DataLayout, _cells[0], intptr_t) \ nonstatic_field(DataLayout, _cells[0], intptr_t) \
nonstatic_field(MethodCounters, _nmethod_age, int) \ nonstatic_field(MethodCounters, _nmethod_age, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
nonstatic_field(MethodCounters, _invoke_mask, int) \
nonstatic_field(MethodCounters, _backedge_mask, int) \
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \ nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \

View File

@ -1142,8 +1142,9 @@ inline bool is_power_of_2_long(jlong x) {
return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits)); return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits));
} }
//* largest i such that 2^i <= x // Returns largest i such that 2^i <= x.
// A negative value of 'x' will return '31' // If x < 0, the function returns 31 on a 32-bit machine and 63 on a 64-bit machine.
// If x == 0, the function returns -1.
inline int log2_intptr(intptr_t x) { inline int log2_intptr(intptr_t x) {
int i = -1; int i = -1;
uintptr_t p = 1; uintptr_t p = 1;
@ -1152,7 +1153,7 @@ inline int log2_intptr(intptr_t x) {
i++; p *= 2; i++; p *= 2;
} }
// p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1)) // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
// (if p = 0 then overflow occurred and i = 31) // If p = 0, overflow has occurred and i = 31 or i = 63 (depending on the machine word size).
return i; return i;
} }
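
For the record, a few values of the function as now documented. This is a quick standalone check that reproduces the same loop, not the HotSpot code itself:

    #include <cstdio>
    #include <cstdint>

    // Same loop as log2_intptr above: floor(log2(x)), -1 for x == 0,
    // and a word-size-dependent answer for negative x.
    static int log2_sketch(intptr_t x) {
      int i = -1;
      for (uintptr_t p = 1; p != 0 && p <= (uintptr_t)x; p *= 2) i++;
      return i;
    }

    int main() {
      printf("%d\n", log2_sketch(1));    // 0
      printf("%d\n", log2_sketch(1025)); // 10 (floor of log2)
      printf("%d\n", log2_sketch(0));    // -1
      printf("%d\n", log2_sketch(-1));   // 31 or 63, depending on word size
      return 0;
    }
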

View File

@ -54,7 +54,7 @@ public class CheckCompileThresholdScaling {
// //
// Tier0InvokeNotifyFreqLog, Tier0BackedgeNotifyFreqLog, // Tier0InvokeNotifyFreqLog, Tier0BackedgeNotifyFreqLog,
// Tier3InvocationThreshold, Tier3MinInvocationThreshold, // Tier3InvocationThreshold, Tier3MinInvocationThreshold,
// Tier3CompileThreshold, and Tier3BackEdgeThreshold, // Tier3CompileThreshold, Tier3BackEdgeThreshold,
// Tier2InvokeNotifyFreqLog, Tier2BackedgeNotifyFreqLog, // Tier2InvokeNotifyFreqLog, Tier2BackedgeNotifyFreqLog,
// Tier3InvokeNotifyFreqLog, Tier3BackedgeNotifyFreqLog, // Tier3InvokeNotifyFreqLog, Tier3BackedgeNotifyFreqLog,
// Tier23InlineeNotifyFreqLog, Tier4InvocationThreshold, // Tier23InlineeNotifyFreqLog, Tier4InvocationThreshold,