6953144: Tiered compilation

Infrastructure for tiered compilation support (interpreter + C1 + C2) on 32- and 64-bit platforms, with a simple tiered policy implementation.

Reviewed-by: kvn, never, phh, twisti
Igor Veresov 2010-09-03 17:51:07 -07:00
parent 6e78f6cb4b
commit 2c66a6c3fd
104 changed files with 7720 additions and 1701 deletions
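For orientation, the tiered scheme wired up by this change runs methods through the interpreter and both compilers, promoting a method up the ladder as its invocation and backedge counters cross tier thresholds. A minimal sketch of that ladder, using names that mirror HotSpot's CompLevel enumeration (the annotations and the little program around it are an editorial sketch, not part of the patch):

    #include <cstdio>

    // Compilation levels in tiered mode; the names mirror HotSpot's
    // CompLevel enumeration, the comments are illustrative.
    enum CompLevel {
      CompLevel_none              = 0,  // interpreter only
      CompLevel_simple            = 1,  // C1, no profiling
      CompLevel_limited_profile   = 2,  // C1 + invocation/backedge counters
      CompLevel_full_profile      = 3,  // C1 + counters + full MDO profiling
      CompLevel_full_optimization = 4   // C2
    };

    int main() {
      printf("hot methods climb from level %d toward level %d\n",
             CompLevel_none, CompLevel_full_optimization);
      return 0;
    }

The SPARC template-interpreter hunks below (TemplateTable::branch) implement the counting side of that handshake for backward branches.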


@@ -1580,6 +1580,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
   const Register O0_cur_bcp = O0;
   __ mov( Lbcp, O0_cur_bcp );
 
+
   bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
   if ( increment_invocation_counter_for_backward_branches ) {
     Label Lforward;
@@ -1588,17 +1589,84 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr
 
-    // Update Backedge branch separately from invocations
-    const Register G4_invoke_ctr = G4;
-    __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
-    if (ProfileInterpreter) {
-      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
-      if (UseOnStackReplacement) {
-        __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+    if (TieredCompilation) {
+      Label Lno_mdo, Loverflow;
+      int increment = InvocationCounter::count_increment;
+      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+      if (ProfileInterpreter) {
+        // If no method data exists, go to profile_continue.
+        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+        __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
+        __ delayed()->nop();
+
+        // Increment backedge counter in the MDO
+        Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                                 in_bytes(InvocationCounter::counter_offset()));
+        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                   Assembler::notZero, &Lforward);
+        __ ba(false, Loverflow);
+        __ delayed()->nop();
       }
+
+      // If there's no MDO, increment counter in methodOop
+      __ bind(Lno_mdo);
+      Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                 Assembler::notZero, &Lforward);
+      __ bind(Loverflow);
+
+      // notify point for loop, pass branch bytecode
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
+
+      // Was an OSR adapter generated?
+      // O0 = osr nmethod
+      __ br_null(O0, false, Assembler::pn, Lforward);
+      __ delayed()->nop();
+
+      // Has the nmethod been invalidated already?
+      __ ld(O0, nmethod::entry_bci_offset(), O2);
+      __ cmp(O2, InvalidOSREntryBci);
+      __ br(Assembler::equal, false, Assembler::pn, Lforward);
+      __ delayed()->nop();
+
+      // migrate the interpreter frame off of the stack
+
+      __ mov(G2_thread, L7);
+      // save nmethod
+      __ mov(O0, L6);
+      __ set_last_Java_frame(SP, noreg);
+      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
+      __ reset_last_Java_frame();
+      __ mov(L7, G2_thread);
+
+      // move OSR nmethod to I1
+      __ mov(L6, I1);
+
+      // OSR buffer to I0
+      __ mov(O0, I0);
+
+      // remove the interpreter frame
+      __ restore(I5_savedSP, 0, SP);
+
+      // Jump to the osr code.
+      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
+      __ jmp(O2, G0);
+      __ delayed()->nop();
+
     } else {
-      if (UseOnStackReplacement) {
-        __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+      // Update Backedge branch separately from invocations
+      const Register G4_invoke_ctr = G4;
+      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
+      if (ProfileInterpreter) {
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+        if (UseOnStackReplacement) {
+          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+        }
+      } else {
+        if (UseOnStackReplacement) {
+          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+        }
       }
     }
 
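The key new primitive in this hunk is increment_mask_and_jump: instead of comparing the backedge counter against an OSR limit on every backward branch, it bumps the counter and tests it against a mask, so InterpreterRuntime::frequency_counter_overflow is reached at a fixed power-of-two frequency controlled by Tier0BackedgeNotifyFreqLog. A standalone sketch of that arithmetic (the shift and frequency-log values below are illustrative assumptions, not the product defaults):

    #include <cstdio>

    int main() {
      // Stand-ins for InvocationCounter::count_shift (low bits of the counter
      // word are reserved for interpreter state) and Tier0BackedgeNotifyFreqLog.
      const int count_shift = 3;                   // assumed non-count bits
      const int notify_log  = 4;                   // assumed notify-frequency log
      const int increment   = 1 << count_shift;    // plays InvocationCounter::count_increment
      const int mask        = ((1 << notify_log) - 1) << count_shift;

      int counter = 0;
      for (int backedge = 1; backedge <= 48; backedge++) {
        counter += increment;
        // increment_mask_and_jump takes the Lforward branch while
        // (counter & mask) != 0; the overflow path runs only when the masked
        // bits wrap to zero, i.e. once every 2^notify_log backedges.
        if ((counter & mask) == 0) {
          printf("notify runtime at backedge %d (counter=0x%x)\n",
                 backedge, (unsigned)counter);
        }
      }
      return 0;
    }

When the notification fires, the generated code asks the runtime for an OSR nmethod, rechecks that it has not been invalidated (entry_bci equal to InvalidOSREntryBci means dead), migrates the interpreter frame off the stack via SharedRuntime::OSR_migration_begin, and jumps to the nmethod's OSR entry point.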