7071653: JSR 292: call site change notification should be pushed not pulled

Reviewed-by: kvn, never, bdelsart
Christian Thalinger 2011-08-16 04:14:05 -07:00
parent ac99f413d7
commit 134c40b4db
27 changed files with 906 additions and 526 deletions
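The point of the change, in one sentence: compiled code used to "pull" a CallSite's current target each time it was needed, while after this commit the VM records a call_site_target_value dependency for each compiled call site and "pushes" an invalidation to the dependent nmethods the moment a new target is stored. Below is a minimal, self-contained sketch of that push model; every name in it is hypothetical, and it deliberately simplifies away HotSpot's real nmethod and dependency machinery:

    #include <functional>
    #include <utility>
    #include <vector>

    // Stand-in for a compiled method that bound itself to a call site's target.
    struct CompiledMethod {
      bool valid = true;
      void invalidate() { valid = false; }  // models nmethod deoptimization
    };

    class CallSite {
     public:
      explicit CallSite(std::function<int(int)> target) : target_(std::move(target)) {}

      // Compiled code may bind directly to the current target, because
      // set_target() below invalidates every dependent the moment it changes.
      const std::function<int(int)>& target() const { return target_; }

      void add_dependent(CompiledMethod* m) { dependents_.push_back(m); }

      void set_target(std::function<int(int)> new_target) {
        target_ = std::move(new_target);
        for (CompiledMethod* m : dependents_) m->invalidate();  // the "push"
        dependents_.clear();
      }

     private:
      std::function<int(int)> target_;
      std::vector<CompiledMethod*> dependents_;
    };

Under the old pull model each call would instead re-read target() and re-check it against what was compiled in; the push model moves that cost from every call to the rare set_target().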

View file

@@ -758,6 +758,20 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register temp,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
+  ld_ptr(cache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  srl( bytecode, shift_count, bytecode);
+  and3(bytecode, 0xFF, bytecode);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                                int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
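The new SPARC helper above, and its x86 twins later in this commit, centralize one recurring pattern: a constant pool cache entry packs the resolved bytecodes for its two operand bytes into a single indices word, one byte per operand, and the resolved bytecode for operand byte_no sits at bit offset (1 + byte_no) * BitsPerByte. A plain C++ illustration of the same arithmetic (the field layout here is a simplified, hypothetical model, not HotSpot's exact one):

    #include <cassert>
    #include <cstdint>

    const int BitsPerByte = 8;

    // Extract the resolved bytecode for operand byte_no (1 or 2) from a
    // packed indices word: shift the wanted byte down, then mask to 8 bits.
    uint8_t bytecode_at(uintptr_t indices, int byte_no) {
      const int shift_count = (1 + byte_no) * BitsPerByte;
      return (uint8_t)((indices >> shift_count) & 0xFF);
    }

    int main() {
      uintptr_t indices = 0;
      indices |= (uintptr_t)0xB5 << (2 * BitsPerByte);  // pretend resolved bytecode
      assert(bytecode_at(indices, 1) == 0xB5);          // byte_no == 1 reads it back
      return 0;
    }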

View file

@@ -189,6 +189,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
                        setCCOrNot should_set_CC = dont_set_CC );
 
   void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register temp, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));

View file

@@ -149,36 +149,68 @@ Address TemplateTable::at_bcp(int offset) {
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
-                                   Register Rscratch,
-                                   bool load_bc_into_scratch /*=true*/) {
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
   // With sharing on, may need to test methodOop flag.
   if (!RewriteBytecodes) return;
-  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
-  Label patch_done;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ set(bc, bc_reg);
+      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
+    if (load_bc_into_bc_reg) {
+      __ set(bc, bc_reg);
+    }
+  }
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
-    __ ldub(at_bcp(0), Rscratch);
-    __ cmp_and_br_short(Rscratch, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, fast_patch);
+    Label L_fast_patch;
+    __ ldub(at_bcp(0), temp_reg);
+    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
     // perform the quickening, slowly, in the bowels of the breakpoint table
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
-    __ ba_short(patch_done);
-    __ bind(fast_patch);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
+    __ ba_short(L_patch_done);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
   Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
-  Label okay;
-  __ ldub(at_bcp(0), Rscratch);
-  __ cmp(Rscratch, orig_bytecode);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
-  __ delayed() ->cmp(Rscratch, Rbyte_code);
-  __ br(Assembler::equal, false, Assembler::pt, okay);
+  Label L_okay;
+  __ ldub(at_bcp(0), temp_reg);
+  __ cmp(temp_reg, orig_bytecode);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
+  __ delayed()->cmp(temp_reg, bc_reg);
+  __ br(Assembler::equal, false, Assembler::pt, L_okay);
   __ delayed()->nop();
-  __ stop("Rewriting wrong bytecode location");
-  __ bind(okay);
+  __ stop("patching the wrong bytecode");
+  __ bind(L_okay);
 #endif
-  __ stb(Rbyte_code, at_bcp(0));
-  __ bind(patch_done);
+
+  // patch bytecode
+  __ stb(bc_reg, at_bcp(0));
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
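The switch added above is where the push model meets the interpreter: a putfield that stores to CallSite.target must never be quickened, because the slow path through InterpreterRuntime::resolve_get_put is what lets the VM observe the store and push invalidations to dependent compiled code. The runtime arranges this by leaving the cached put_code at zero for such fields, and patch_bytecode now re-reads that byte and refuses to patch while it is still zero. A tiny hypothetical model of the guard (not HotSpot code):

    #include <cstdint>

    // put_code == 0 means "never quicken: every execution must go back
    // through the slow resolution path", which is exactly what a field
    // like CallSite.target needs so target changes are always observed.
    struct CpCacheEntry {
      uint8_t put_code;
    };

    bool should_patch_putfield(const CpCacheEntry& e) {
      return e.put_code != 0;
    }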
@@ -2061,12 +2093,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   // Depends on cpCacheOop layout!
   Label resolved;
 
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert_different_registers(result, Rcache);
-    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
               ConstantPoolCacheEntry::f1_offset(), result);
     __ tst(result);
@@ -2075,15 +2107,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
-    __ srl(  Lbyte_code, shift_count, Lbyte_code );
-    __ and3( Lbyte_code,        0xFF, Lbyte_code );
-    __ cmp(  Lbyte_code, (int)bytecode());
-    __ br(   Assembler::equal, false, Assembler::pt, resolved);
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
+    __ cmp(Lbyte_code, (int) bytecode());  // have we resolved this bytecode?
+    __ br(Assembler::equal, false, Assembler::pt, resolved);
     __ delayed()->set((int)bytecode(), O1);
   }
@@ -2618,150 +2644,162 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   if (is_static) {
     // putstatic with object type most likely, check that first
-    __ cmp(Rflags, atos );
+    __ cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, itos );
+    __ delayed()->cmp(Rflags, itos);
 
     // atos
+    {
       __ pop_ptr();
       __ verify_oop(Otos_i);
       do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
       __ ba(checkVolatile);
       __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
-    // cmp(Rflags, itos );
+    // cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // itos
+    {
       __ pop_i();
       __ st(Otos_i, Rclass, Roffset);
       __ ba(checkVolatile);
       __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
   } else {
     // putfield with int type most likely, check that first
-    __ cmp(Rflags, itos );
+    __ cmp(Rflags, itos);
     __ br(Assembler::notEqual, false, Assembler::pt, notInt);
-    __ delayed() ->cmp(Rflags, atos );
+    __ delayed()->cmp(Rflags, atos);
 
     // itos
+    {
       __ pop_i();
       pop_and_check_object(Rclass);
       __ st(Otos_i, Rclass, Roffset);
-      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
       __ ba(checkVolatile);
       __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notInt);
-    // cmp(Rflags, atos );
+    // cmp(Rflags, atos);
     __ br(Assembler::notEqual, false, Assembler::pt, notObj);
-    __ delayed() ->cmp(Rflags, btos );
+    __ delayed()->cmp(Rflags, btos);
 
     // atos
+    {
       __ pop_ptr();
       pop_and_check_object(Rclass);
       __ verify_oop(Otos_i);
       do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
-      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
       __ ba(checkVolatile);
       __ delayed()->tst(Lscratch);
+    }
 
     __ bind(notObj);
   }
 
-  // cmp(Rflags, btos );
+  // cmp(Rflags, btos);
   __ br(Assembler::notEqual, false, Assembler::pt, notByte);
-  __ delayed() ->cmp(Rflags, ltos );
+  __ delayed()->cmp(Rflags, ltos);
 
   // btos
+  {
     __ pop_i();
    if (!is_static) pop_and_check_object(Rclass);
     __ stb(Otos_i, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
     }
     __ ba(checkVolatile);
     __ delayed()->tst(Lscratch);
+  }
 
   __ bind(notByte);
-  // cmp(Rflags, ltos );
+  // cmp(Rflags, ltos);
   __ br(Assembler::notEqual, false, Assembler::pt, notLong);
-  __ delayed() ->cmp(Rflags, ctos );
+  __ delayed()->cmp(Rflags, ctos);
 
   // ltos
+  {
     __ pop_l();
     if (!is_static) pop_and_check_object(Rclass);
     __ st_long(Otos_l, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
     }
     __ ba(checkVolatile);
     __ delayed()->tst(Lscratch);
+  }
 
   __ bind(notLong);
-  // cmp(Rflags, ctos );
+  // cmp(Rflags, ctos);
   __ br(Assembler::notEqual, false, Assembler::pt, notChar);
-  __ delayed() ->cmp(Rflags, stos );
+  __ delayed()->cmp(Rflags, stos);
 
   // ctos (char)
+  {
     __ pop_i();
     if (!is_static) pop_and_check_object(Rclass);
     __ sth(Otos_i, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
     }
     __ ba(checkVolatile);
     __ delayed()->tst(Lscratch);
+  }
 
   __ bind(notChar);
-  // cmp(Rflags, stos );
+  // cmp(Rflags, stos);
   __ br(Assembler::notEqual, false, Assembler::pt, notShort);
-  __ delayed() ->cmp(Rflags, ftos );
+  __ delayed()->cmp(Rflags, ftos);
 
-  // stos (char)
+  // stos (short)
+  {
     __ pop_i();
     if (!is_static) pop_and_check_object(Rclass);
     __ sth(Otos_i, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
     }
     __ ba(checkVolatile);
     __ delayed()->tst(Lscratch);
+  }
 
   __ bind(notShort);
-  // cmp(Rflags, ftos );
+  // cmp(Rflags, ftos);
   __ br(Assembler::notZero, false, Assembler::pt, notFloat);
   __ delayed()->nop();
 
   // ftos
+  {
     __ pop_f();
     if (!is_static) pop_and_check_object(Rclass);
     __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
     }
     __ ba(checkVolatile);
     __ delayed()->tst(Lscratch);
+  }
 
   __ bind(notFloat);
 
   // dtos
+  {
     __ pop_d();
     if (!is_static) pop_and_check_object(Rclass);
     __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
+      patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
     }
+  }
 
   __ bind(checkVolatile);
View file

@@ -233,7 +233,7 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
                                                            int bcp_offset, size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
@@ -241,6 +241,20 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  movptr(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrptr(bytecode, shift_count);
+  andptr(bytecode, 0xFF);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                                int bcp_offset, size_t index_size) {
   assert(cache != tmp, "must use different register");

View file

@@ -83,6 +83,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));

View file

@@ -233,7 +233,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
                                                            int bcp_offset,
                                                            size_t index_size) {
-  assert(cache != index, "must use different registers");
+  assert_different_registers(cache, index);
   get_cache_index_at_bcp(index, bcp_offset, index_size);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
@@ -242,6 +242,22 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
 }
 
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+                                                                        Register index,
+                                                                        Register bytecode,
+                                                                        int byte_no,
+                                                                        int bcp_offset,
+                                                                        size_t index_size) {
+  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
+  // We use a 32-bit load here since the layout of 64-bit words on
+  // little-endian machines allows us that.
+  movl(bytecode, Address(cache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+  const int shift_count = (1 + byte_no) * BitsPerByte;
+  shrl(bytecode, shift_count);
+  andl(bytecode, 0xFF);
+}
+
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
                                                                int bcp_offset,
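The 32-bit movl in this 64-bit helper is safe only because x86_64 is little-endian: the first four bytes of the 64-bit indices word are its low 32 bits, and the bytes selected by the (1 + byte_no) * BitsPerByte shifts (byte_no is 1 or 2) lie inside that range. A small demonstration of the layout assumption (plain C++, with values invented for illustration; the asserts hold on little-endian machines only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t indices = 0x0000000000B5B401ULL;  // bytecodes packed in the low bytes
      uint32_t low32;
      std::memcpy(&low32, &indices, sizeof(low32));  // models the 32-bit movl
      // The 32-bit load sees the same low-order bytes a 64-bit load would:
      assert(((low32 >> 16) & 0xFF) == 0xB5);  // byte_no == 1
      assert(((low32 >>  8) & 0xFF) == 0xB4);  // the adjacent operand byte
      return 0;
    }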

View file

@@ -100,13 +100,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
-  void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset, size_t index_size = sizeof(u2));
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
   void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
 
   void pop_ptr(Register r = rax);
   void pop_i(Register r = rax);
   void pop_l(Register r = rax);

View file

@@ -202,45 +202,74 @@ Address TemplateTable::at_bcp(int offset) {
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
   if (!RewriteBytecodes) return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
     // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
-  __ jccb(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
+  __ jccb(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
 
 //----------------------------------------------------------------------------------------------------
@@ -2060,24 +2089,20 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
 
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
-    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no)*BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int)bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
@@ -2453,79 +2478,86 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   __ shrl(flags, ConstantPoolCacheEntry::tosBits);
   assert(btos == 0, "change code, btos != 0");
-  // btos
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
 
+  // btos
+  {
     __ pop(btos);
     if (!is_static) pop_and_check_object(obj);
-    __ movb(lo, rax );
+    __ movb(lo, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notByte);
-  // itos
-  __ cmpl(flags, itos );
+  __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
 
+  // itos
+  {
     __ pop(itos);
     if (!is_static) pop_and_check_object(obj);
-    __ movl(lo, rax );
+    __ movl(lo, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notInt);
-  // atos
-  __ cmpl(flags, atos );
+  __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
 
+  // atos
+  {
     __ pop(atos);
     if (!is_static) pop_and_check_object(obj);
     do_oop_store(_masm, lo, rax, _bs->kind(), false);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
    }
     __ jmp(Done);
+  }
 
   __ bind(notObj);
-  // ctos
-  __ cmpl(flags, ctos );
+  __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
 
+  // ctos
+  {
     __ pop(ctos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(lo, rax );
+    __ movw(lo, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notChar);
-  // stos
-  __ cmpl(flags, stos );
+  __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
 
+  // stos
+  {
     __ pop(stos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(lo, rax );
+    __ movw(lo, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notShort);
-  // ltos
-  __ cmpl(flags, ltos );
+  __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
 
+  // ltos
+  {
     Label notVolatileLong;
     __ testl(rdx, rdx);
     __ jcc(Assembler::zero, notVolatileLong);
@@ -2552,39 +2584,47 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
     NOT_LP64(__ movptr(hi, rdx));
     __ movptr(lo, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(notVolatile);
+  }
 
   __ bind(notLong);
-  // ftos
-  __ cmpl(flags, ftos );
+  __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
 
+  // ftos
+  {
     __ pop(ftos);
     if (!is_static) pop_and_check_object(obj);
     __ fstp_s(lo);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notFloat);
-  // dtos
-  __ cmpl(flags, dtos );
+#ifdef ASSERT
+  __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
+#endif
 
+  // dtos
+  {
     __ pop(dtos);
     if (!is_static) pop_and_check_object(obj);
     __ fstp_d(lo);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
+      patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
+#ifdef ASSERT
   __ bind(notDouble);
   __ stop("Bad state");
+#endif
 
   __ bind(Done);

View file

@@ -203,46 +203,74 @@ Address TemplateTable::at_bcp(int offset) {
   return Address(r13, offset);
 }
 
-void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
-                                   Register scratch,
-                                   bool load_bc_into_scratch/*=true*/) {
-  if (!RewriteBytecodes) {
-    return;
-  }
+void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
+                                   int byte_no) {
+  if (!RewriteBytecodes) return;
+  Label L_patch_done;
+
+  switch (bc) {
+  case Bytecodes::_fast_aputfield:
+  case Bytecodes::_fast_bputfield:
+  case Bytecodes::_fast_cputfield:
+  case Bytecodes::_fast_dputfield:
+  case Bytecodes::_fast_fputfield:
+  case Bytecodes::_fast_iputfield:
+  case Bytecodes::_fast_lputfield:
+  case Bytecodes::_fast_sputfield:
+    {
+      // We skip bytecode quickening for putfield instructions when
+      // the put_code written to the constant pool cache is zero.
+      // This is required so that every execution of this instruction
+      // calls out to InterpreterRuntime::resolve_get_put to do
+      // additional, required work.
+      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
+      __ movl(bc_reg, bc);
+      __ cmpl(temp_reg, (int) 0);
+      __ jcc(Assembler::zero, L_patch_done);  // don't patch
+    }
+    break;
+  default:
+    assert(byte_no == -1, "sanity");
     // the pair bytecodes have already done the load.
-  if (load_bc_into_scratch) {
-    __ movl(bc, bytecode);
+    if (load_bc_into_bc_reg) {
+      __ movl(bc_reg, bc);
+    }
   }
-  Label patch_done;
+
   if (JvmtiExport::can_post_breakpoint()) {
-    Label fast_patch;
+    Label L_fast_patch;
     // if a breakpoint is present we can't rewrite the stream directly
-    __ movzbl(scratch, at_bcp(0));
-    __ cmpl(scratch, Bytecodes::_breakpoint);
-    __ jcc(Assembler::notEqual, fast_patch);
-    __ get_method(scratch);
+    __ movzbl(temp_reg, at_bcp(0));
+    __ cmpl(temp_reg, Bytecodes::_breakpoint);
+    __ jcc(Assembler::notEqual, L_fast_patch);
+    __ get_method(temp_reg);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
 #ifndef ASSERT
-    __ jmpb(patch_done);
+    __ jmpb(L_patch_done);
 #else
-    __ jmp(patch_done);
+    __ jmp(L_patch_done);
 #endif
-    __ bind(fast_patch);
+    __ bind(L_fast_patch);
   }
+
 #ifdef ASSERT
-  Label okay;
-  __ load_unsigned_byte(scratch, at_bcp(0));
-  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
-  __ jcc(Assembler::equal, okay);
-  __ cmpl(scratch, bc);
-  __ jcc(Assembler::equal, okay);
+  Label L_okay;
+  __ load_unsigned_byte(temp_reg, at_bcp(0));
+  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
+  __ jcc(Assembler::equal, L_okay);
+  __ cmpl(temp_reg, bc_reg);
+  __ jcc(Assembler::equal, L_okay);
   __ stop("patching the wrong bytecode");
-  __ bind(okay);
+  __ bind(L_okay);
 #endif
+
   // patch bytecode
-  __ movb(at_bcp(0), bc);
-  __ bind(patch_done);
+  __ movb(at_bcp(0), bc_reg);
+  __ bind(L_patch_done);
 }
@@ -2098,24 +2126,20 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
   assert_different_registers(result, Rcache, index, temp);
 
   Label resolved;
 
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
   if (byte_no == f1_oop) {
     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
     // This kind of CP cache entry does not need to match the flags byte, because
     // there is a 1-1 relation between bytecode type and CP entry type.
     assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
-    __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
     __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
     __ testptr(result, result);
     __ jcc(Assembler::notEqual, resolved);
   } else {
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(result == noreg, "");  //else change code for setting result
-    const int shift_count = (1 + byte_no) * BitsPerByte;
-    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
-    __ shrl(temp, shift_count);
-    // have we resolved this bytecode?
-    __ andl(temp, 0xFF);
-    __ cmpl(temp, (int) bytecode());
+    __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
+    __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
     __ jcc(Assembler::equal, resolved);
   }
@@ -2507,101 +2531,123 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
   assert(btos == 0, "change code, btos != 0");
   __ andl(flags, 0x0f);
   __ jcc(Assembler::notZero, notByte);
 
   // btos
+  {
     __ pop(btos);
     if (!is_static) pop_and_check_object(obj);
     __ movb(field, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notByte);
   __ cmpl(flags, atos);
   __ jcc(Assembler::notEqual, notObj);
 
   // atos
+  {
     __ pop(atos);
     if (!is_static) pop_and_check_object(obj);
     // Store into the field
     do_oop_store(_masm, field, rax, _bs->kind(), false);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notObj);
   __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
 
   // itos
+  {
     __ pop(itos);
     if (!is_static) pop_and_check_object(obj);
     __ movl(field, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notInt);
   __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
 
   // ctos
+  {
     __ pop(ctos);
     if (!is_static) pop_and_check_object(obj);
     __ movw(field, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notChar);
   __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
 
   // stos
+  {
     __ pop(stos);
     if (!is_static) pop_and_check_object(obj);
     __ movw(field, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
    }
     __ jmp(Done);
+  }
 
   __ bind(notShort);
   __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
 
   // ltos
+  {
     __ pop(ltos);
     if (!is_static) pop_and_check_object(obj);
     __ movq(field, rax);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
    }
     __ jmp(Done);
+  }
 
   __ bind(notLong);
   __ cmpl(flags, ftos);
   __ jcc(Assembler::notEqual, notFloat);
 
   // ftos
+  {
     __ pop(ftos);
     if (!is_static) pop_and_check_object(obj);
     __ movflt(field, xmm0);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
     }
     __ jmp(Done);
+  }
 
   __ bind(notFloat);
 #ifdef ASSERT
   __ cmpl(flags, dtos);
   __ jcc(Assembler::notEqual, notDouble);
 #endif
 
   // dtos
+  {
     __ pop(dtos);
     if (!is_static) pop_and_check_object(obj);
     __ movdbl(field, xmm0);
     if (!is_static) {
-      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx);
+      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
     }
+  }
 
 #ifdef ASSERT
@@ -2612,12 +2658,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
 #endif
 
   __ bind(Done);
 
+  // Check for volatile store
   __ testl(rdx, rdx);
   __ jcc(Assembler::zero, notVolatile);
   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                                Assembler::StoreStore));
   __ bind(notVolatile);
 }

View file

@@ -28,6 +28,16 @@
 
 // ciCallSite
 
+bool ciCallSite::is_constant_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->ConstantCallSite_klass());
+}
+bool ciCallSite::is_mutable_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->MutableCallSite_klass());
+}
+bool ciCallSite::is_volatile_call_site() {
+  return klass()->is_subclass_of(CURRENT_ENV->VolatileCallSite_klass());
+}
+
 // ------------------------------------------------------------------
 // ciCallSite::get_target
 //

View file

@@ -37,6 +37,10 @@ public:
   // What kind of ciObject is this?
   bool is_call_site() const { return true; }
 
+  bool is_constant_call_site();
+  bool is_mutable_call_site();
+  bool is_volatile_call_site();
+
   // Return the target MethodHandle of this CallSite.
   ciMethodHandle* get_target() const;

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -178,6 +178,8 @@ public:
   bool is_volatile () { return flags().is_volatile(); }
   bool is_transient () { return flags().is_transient(); }
 
+  bool is_call_site_target() { return ((holder() == CURRENT_ENV->CallSite_klass()) && (name() == ciSymbol::target_name())); }
+
   // Debugging output
   void print();
   void print_name_on(outputStream* st);

View file

@@ -1978,7 +1978,7 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
   // JSR 292 classes
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
-  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(CallSite_klass);
+  WKID jsr292_group_end   = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);
   initialize_wk_klasses_until(jsr292_group_start, scan, CHECK);
   if (EnableInvokeDynamic) {
     initialize_wk_klasses_through(jsr292_group_end, scan, CHECK);

View file

@@ -155,7 +155,10 @@ class SymbolPropertyTable;
   template(BootstrapMethodError_klass,     java_lang_BootstrapMethodError,   Pre_JSR292) \
   template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
   template(CallSite_klass,                 java_lang_invoke_CallSite,        Pre_JSR292) \
-  /* Note: MethodHandle must be first, and CallSite last in group */ \
+  template(ConstantCallSite_klass,         java_lang_invoke_ConstantCallSite, Pre_JSR292) \
+  template(MutableCallSite_klass,          java_lang_invoke_MutableCallSite, Pre_JSR292) \
+  template(VolatileCallSite_klass,         java_lang_invoke_VolatileCallSite, Pre_JSR292) \
+  /* Note: MethodHandle must be first, and VolatileCallSite last in group */ \
   \
   template(StringBuffer_klass,             java_lang_StringBuffer,           Pre) \
   template(StringBuilder_klass,            java_lang_StringBuilder,          Pre) \

View file

@@ -233,6 +233,9 @@
   template(java_lang_invoke_InvokeDynamic,        "java/lang/invoke/InvokeDynamic")           \
   template(java_lang_invoke_Linkage,              "java/lang/invoke/Linkage")                 \
   template(java_lang_invoke_CallSite,             "java/lang/invoke/CallSite")                \
+  template(java_lang_invoke_ConstantCallSite,     "java/lang/invoke/ConstantCallSite")        \
+  template(java_lang_invoke_MutableCallSite,      "java/lang/invoke/MutableCallSite")         \
+  template(java_lang_invoke_VolatileCallSite,     "java/lang/invoke/VolatileCallSite")        \
   template(java_lang_invoke_MethodHandle,         "java/lang/invoke/MethodHandle")            \
   template(java_lang_invoke_MethodType,           "java/lang/invoke/MethodType")              \
   template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \

View file

@@ -113,6 +113,11 @@ void Dependencies::assert_has_no_finalizable_subclasses(ciKlass* ctxk) {
   assert_common_1(no_finalizable_subclasses, ctxk);
 }
 
+void Dependencies::assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle) {
+  check_ctxk(ctxk);
+  assert_common_3(call_site_target_value, ctxk, call_site, method_handle);
+}
+
 // Helper function.  If we are adding a new dep. under ctxk2,
 // try to find an old dep. under a broader* ctxk1.  If there is
 //
@@ -341,7 +346,8 @@ const char* Dependencies::_dep_name[TYPE_LIMIT] = {
   "unique_concrete_method",
   "abstract_with_exclusive_concrete_subtypes_2",
   "exclusive_concrete_methods_2",
-  "no_finalizable_subclasses"
+  "no_finalizable_subclasses",
+  "call_site_target_value"
 };
 
 int Dependencies::_dep_args[TYPE_LIMIT] = {
@@ -354,7 +360,8 @@ int Dependencies::_dep_args[TYPE_LIMIT] = {
   2, // unique_concrete_method ctxk, m
   3, // unique_concrete_subtypes_2 ctxk, k1, k2
   3, // unique_concrete_methods_2 ctxk, m1, m2
-  1  // no_finalizable_subclasses ctxk
+  1, // no_finalizable_subclasses ctxk
+  3  // call_site_target_value ctxk, call_site, method_handle
 };
 
 const char* Dependencies::dep_name(Dependencies::DepType dept) {
@@ -367,6 +374,13 @@ int Dependencies::dep_args(Dependencies::DepType dept) {
   return _dep_args[dept];
 }
 
+void Dependencies::check_valid_dependency_type(DepType dept) {
+  for (int deptv = (int) FIRST_TYPE; deptv < (int) TYPE_LIMIT; deptv++) {
+    if (dept == ((DepType) deptv)) return;
+  }
+  ShouldNotReachHere();
+}
+
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL) return;
@@ -800,11 +814,11 @@ class ClassHierarchyWalker {
                                  bool participants_hide_witnesses,
                                  bool top_level_call = true);
   // the spot-checking version:
-  klassOop find_witness_in(DepChange& changes,
+  klassOop find_witness_in(KlassDepChange& changes,
                            klassOop context_type,
                            bool participants_hide_witnesses);
  public:
-  klassOop find_witness_subtype(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_subtype(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(doing_subtype_search(), "must set up a subtype search");
     // When looking for unexpected concrete types,
     // do not look beneath expected ones.
@@ -817,7 +831,7 @@ class ClassHierarchyWalker {
       return find_witness_anywhere(context_type, participants_hide_witnesses);
     }
   }
-  klassOop find_witness_definer(klassOop context_type, DepChange* changes = NULL) {
+  klassOop find_witness_definer(klassOop context_type, KlassDepChange* changes = NULL) {
     assert(!doing_subtype_search(), "must set up a method definer search");
     // When looking for unexpected concrete methods,
     // look beneath expected ones, to see if there are overrides.
@@ -878,7 +892,7 @@ static bool count_find_witness_calls() {
 #endif //PRODUCT
 
-klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
+klassOop ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
                                                klassOop context_type,
                                                bool participants_hide_witnesses) {
   assert(changes.involves_context(context_type), "irrelevant dependency");
@@ -1137,7 +1151,7 @@ klassOop Dependencies::check_leaf_type(klassOop ctxk) {
 // when dealing with the types of actual instances.
 klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk,
                                                                    klassOop conck,
-                                                                   DepChange* changes) {
+                                                                   KlassDepChange* changes) {
   ClassHierarchyWalker wf(conck);
   return wf.find_witness_subtype(ctxk, changes);
 }
@@ -1146,7 +1160,7 @@ klassOop Dependencies::check_abstract_with_unique_concrete_subtype(klassOop ctxk
 // instantiatable.  This can allow the compiler to make some paths go
 // dead, if they are gated by a test of the type.
 klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with no participants:
   ClassHierarchyWalker wf;
   return wf.find_witness_subtype(ctxk, changes);
@@ -1156,7 +1170,7 @@ klassOop Dependencies::check_abstract_with_no_concrete_subtype(klassOop ctxk,
 // If a concrete class has no concrete subtypes, it can always be
 // exactly typed.  This allows the use of a cheaper type test.
 klassOop Dependencies::check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                               DepChange* changes) {
+                                                               KlassDepChange* changes) {
   // Find any concrete subtype, with only the ctxk as participant:
   ClassHierarchyWalker wf(ctxk);
   return wf.find_witness_subtype(ctxk, changes);
@@ -1217,7 +1231,7 @@ klassOop Dependencies::check_abstract_with_exclusive_concrete_subtypes(
     klassOop ctxk,
     klassOop k1,
     klassOop k2,
-    DepChange* changes) {
+    KlassDepChange* changes) {
   ClassHierarchyWalker wf;
   wf.add_participant(k1);
   wf.add_participant(k2);
@@ -1278,7 +1292,7 @@ int Dependencies::find_exclusive_concrete_subtypes(klassOop ctxk,
 // If a class (or interface) has a unique concrete method uniqm, return NULL.
 // Otherwise, return a class that contains an interfering method.
 klassOop Dependencies::check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                                    DepChange* changes) {
+                                                    KlassDepChange* changes) {
   // Here is a missing optimization:  If uniqm->is_final(),
   // we don't really need to search beneath it for overrides.
   // This is probably not important, since we don't use dependencies
@@ -1321,7 +1335,7 @@ methodOop Dependencies::find_unique_concrete_method(klassOop ctxk, methodOop m)
 klassOop Dependencies::check_exclusive_concrete_methods(klassOop ctxk,
                                                         methodOop m1,
                                                         methodOop m2,
-                                                        DepChange* changes) {
+                                                        KlassDepChange* changes) {
   ClassHierarchyWalker wf(m1);
   wf.add_participant(m1->method_holder());
   wf.add_participant(m2->method_holder());
@@ -1383,7 +1397,7 @@ int Dependencies::find_exclusive_concrete_methods(klassOop ctxk,
 }
 
-klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepChange* changes) {
+klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes) {
   Klass* search_at = ctxk->klass_part();
   if (changes != NULL)
     search_at = changes->new_type()->klass_part(); // just look at the new bit
@@ -1395,8 +1409,39 @@ klassOop Dependencies::check_has_no_finalizable_subclasses(klassOop ctxk, DepCha
 }
 
-klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
+klassOop Dependencies::check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes) {
+  assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
+  assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
+  if (changes == NULL) {
+    // Validate all CallSites
+    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+      return ctxk;  // assertion failed
+  } else {
+    // Validate the given CallSite
+    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
+      assert(method_handle != changes->method_handle(), "must be");
+      return ctxk;  // assertion failed
+    }
+  }
+  assert(java_lang_invoke_CallSite::target(call_site) == method_handle, "should still be valid");
+  return NULL;  // assertion still valid
+}
+
+void Dependencies::DepStream::trace_and_log_witness(klassOop witness) {
+  if (witness != NULL) {
+    if (TraceDependencies) {
+      print_dependency(witness, /*verbose=*/ true);
+    }
+    // The following is a no-op unless logging is enabled:
+    log_dependency(witness);
+  }
+}
+
+klassOop Dependencies::DepStream::check_klass_dependency(KlassDepChange* changes) {
   assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
 
   klassOop witness = NULL;
   switch (type()) {
@@ -1407,95 +1452,103 @@ klassOop Dependencies::DepStream::check_dependency_impl(DepChange* changes) {
     witness = check_leaf_type(context_type());
     break;
   case abstract_with_unique_concrete_subtype:
-    witness = check_abstract_with_unique_concrete_subtype(context_type(),
-                                                          type_argument(1),
-                                                          changes);
+    witness = check_abstract_with_unique_concrete_subtype(context_type(), type_argument(1), changes);
     break;
   case abstract_with_no_concrete_subtype:
-    witness = check_abstract_with_no_concrete_subtype(context_type(),
-                                                      changes);
+    witness = check_abstract_with_no_concrete_subtype(context_type(), changes);
     break;
   case concrete_with_no_concrete_subtype:
-    witness = check_concrete_with_no_concrete_subtype(context_type(),
-                                                      changes);
+    witness = check_concrete_with_no_concrete_subtype(context_type(), changes);
     break;
   case unique_concrete_method:
-    witness = check_unique_concrete_method(context_type(),
-                                           method_argument(1),
-                                           changes);
+    witness = check_unique_concrete_method(context_type(), method_argument(1), changes);
     break;
   case abstract_with_exclusive_concrete_subtypes_2:
-    witness = check_abstract_with_exclusive_concrete_subtypes(context_type(),
-                                                              type_argument(1),
-                                                              type_argument(2),
-                                                              changes);
+    witness = check_abstract_with_exclusive_concrete_subtypes(context_type(), type_argument(1), type_argument(2), changes);
     break;
   case exclusive_concrete_methods_2:
-    witness = check_exclusive_concrete_methods(context_type(),
-                                               method_argument(1),
-                                               method_argument(2),
-                                               changes);
+    witness = check_exclusive_concrete_methods(context_type(), method_argument(1), method_argument(2), changes);
     break;
   case no_finalizable_subclasses:
-    witness = check_has_no_finalizable_subclasses(context_type(),
-                                                  changes);
+    witness = check_has_no_finalizable_subclasses(context_type(), changes);
     break;
   default:
     witness = NULL;
-    ShouldNotReachHere();
     break;
   }
-  if (witness != NULL) {
-    if (TraceDependencies) {
-      print_dependency(witness, /*verbose=*/ true);
-    }
-    // The following is a no-op unless logging is enabled:
-    log_dependency(witness);
-  }
+  trace_and_log_witness(witness);
+  return witness;
+}
+
+
+klassOop Dependencies::DepStream::check_call_site_dependency(CallSiteDepChange* changes) {
+  assert_locked_or_safepoint(Compile_lock);
+  Dependencies::check_valid_dependency_type(type());
+
+  klassOop witness = NULL;
+  switch (type()) {
+  case call_site_target_value:
+    witness = check_call_site_target_value(context_type(), argument(1), argument(2), changes);
+    break;
+  default:
+    witness = NULL;
+    break;
+  }
+  trace_and_log_witness(witness);
   return witness;
 }
 
 
 klassOop Dependencies::DepStream::spot_check_dependency_at(DepChange& changes) {
-  if (!changes.involves_context(context_type()))
-    // irrelevant dependency; skip it
-    return NULL;
+  // Handle klass dependency
+  if (changes.is_klass_change() && changes.as_klass_change()->involves_context(context_type()))
+    return check_klass_dependency(changes.as_klass_change());
 
-  return check_dependency_impl(&changes);
+  // Handle CallSite dependency
+  if (changes.is_call_site_change())
+    return check_call_site_dependency(changes.as_call_site_change());
+
+  // irrelevant dependency; skip it
+  return NULL;
 }
-void DepChange::initialize() {
-  // entire transaction must be under this lock:
-  assert_lock_strong(Compile_lock);
-  // Mark all dependee and all its superclasses
-  // Mark transitive interfaces
+void DepChange::print() {
+  int nsup = 0, nint = 0;
   for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
-    instanceKlass::cast(d)->set_is_marked_dependent(true);
+    klassOop k = str.klass();
+    switch (str.change_type()) {
+    case Change_new_type:
+      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
+      break;
+    case Change_new_sub:
+      if (!WizardMode) {
+        ++nsup;
+      } else {
+        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    case Change_new_impl:
+      if (!WizardMode) {
+        ++nint;
+      } else {
+        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
+      }
+      break;
+    }
+  }
+  if (nsup + nint != 0) {
+    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
   }
 }
 
-DepChange::~DepChange() {
-  // Unmark all dependee and all its superclasses
-  // Unmark transitive interfaces
-  for (ContextStream str(*this); str.next(); ) {
-    klassOop d = str.klass();
-    instanceKlass::cast(d)->set_is_marked_dependent(false);
-  }
-}
-
-bool DepChange::involves_context(klassOop k) {
-  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
-    return false;
-  }
-  instanceKlass* ik = instanceKlass::cast(k);
-  bool is_contained = ik->is_marked_dependent();
-  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
-         "correct marking of potential context types");
-  return is_contained;
+void DepChange::ContextStream::start() {
+  klassOop new_type = _changes.is_klass_change() ? _changes.as_klass_change()->new_type() : (klassOop) NULL;
+  _change_type = (new_type == NULL ? NO_CHANGE : Start_Klass);
+  _klass = new_type;
+  _ti_base = NULL;
+  _ti_index = 0;
+  _ti_limit = 0;
 }
 bool DepChange::ContextStream::next() {
@@ -1534,33 +1587,37 @@ bool DepChange::ContextStream::next() {
   return false;
 }
-void DepChange::print() {
-  int nsup = 0, nint = 0;
+void KlassDepChange::initialize() {
+  // entire transaction must be under this lock:
+  assert_lock_strong(Compile_lock);
+
+  // Mark all dependee and all its superclasses
+  // Mark transitive interfaces
   for (ContextStream str(*this); str.next(); ) {
-    klassOop k = str.klass();
-    switch (str.change_type()) {
-    case Change_new_type:
-      tty->print_cr("  dependee = %s", instanceKlass::cast(k)->external_name());
-      break;
-    case Change_new_sub:
-      if (!WizardMode) {
-        ++nsup;
-      } else {
-        tty->print_cr("  context super = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    case Change_new_impl:
-      if (!WizardMode) {
-        ++nint;
-      } else {
-        tty->print_cr("  context interface = %s", instanceKlass::cast(k)->external_name());
-      }
-      break;
-    }
-  }
-  if (nsup + nint != 0) {
-    tty->print_cr("  context supers = %d, interfaces = %d", nsup, nint);
+    klassOop d = str.klass();
+    assert(!instanceKlass::cast(d)->is_marked_dependent(), "checking");
+    instanceKlass::cast(d)->set_is_marked_dependent(true);
+  }
+}
+
+KlassDepChange::~KlassDepChange() {
+  // Unmark all dependee and all its superclasses
+  // Unmark transitive interfaces
+  for (ContextStream str(*this); str.next(); ) {
+    klassOop d = str.klass();
+    instanceKlass::cast(d)->set_is_marked_dependent(false);
+  }
+}
+
+bool KlassDepChange::involves_context(klassOop k) {
+  if (k == NULL || !Klass::cast(k)->oop_is_instance()) {
+    return false;
   }
+  instanceKlass* ik = instanceKlass::cast(k);
+  bool is_contained = ik->is_marked_dependent();
+  assert(is_contained == Klass::cast(new_type())->is_subtype_of(k),
+         "correct marking of potential context types");
+  return is_contained;
 }
 #ifndef PRODUCT
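As a rough illustration of the semantics of check_call_site_target_value above (not part of this changeset): with no pending change, every recorded (call site, expected target) pair is validated; with a specific change object, only the changed call site is checked. The following self-contained C++ sketch models that distinction with invented stand-in types (Site, SiteChange), not HotSpot's.

#include <iostream>

struct Site       { int id; int target; };      // target stands in for the MethodHandle
struct SiteChange { int site_id; int new_target; };

// Returns true if the dependency "site.target == expected" still holds.
bool check_target_value(const Site& site, int expected,
                        const SiteChange* change /* may be null */) {
  if (change == nullptr)             // full validation pass over all sites
    return site.target == expected;
  if (change->site_id == site.id)    // spot check: only the changed site
    return site.target == expected;
  return true;                       // unrelated change: dependency untouched
}

int main() {
  Site s{42, /*target=*/1};
  std::cout << check_target_value(s, 1, nullptr) << "\n";  // 1: still valid
  s.target = 2;                                            // target changed
  SiteChange ch{42, 2};
  std::cout << check_target_value(s, 1, &ch) << "\n";      // 0: witness found
}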
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -25,18 +25,21 @@
 #ifndef SHARE_VM_CODE_DEPENDENCIES_HPP
 #define SHARE_VM_CODE_DEPENDENCIES_HPP
 
+#include "ci/ciCallSite.hpp"
 #include "ci/ciKlass.hpp"
+#include "ci/ciMethodHandle.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "utilities/growableArray.hpp"
 
 //** Dependencies represent assertions (approximate invariants) within
-// the class hierarchy.  An example is an assertion that a given
-// method is not overridden; another example is that a type has only
-// one concrete subtype.  Compiled code which relies on such
-// assertions must be discarded if they are overturned by changes in
-// the class hierarchy.  We can think of these assertions as
-// approximate invariants, because we expect them to be overturned
+// the runtime system, e.g. class hierarchy changes.  An example is an
+// assertion that a given method is not overridden; another example is
+// that a type has only one concrete subtype.  Compiled code which
+// relies on such assertions must be discarded if they are overturned
+// by changes in the runtime system.  We can think of these assertions
+// as approximate invariants, because we expect them to be overturned
 // very infrequently.  We are willing to perform expensive recovery
 // operations when they are overturned.  The benefit, of course, is
 // performing optimistic optimizations (!) on the object code.
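To make the "approximate invariant" idea above concrete, here is a minimal, self-contained C++ sketch (not part of the changeset): compiled artifacts register the assertions they rely on, and a change that overturns an assertion invalidates every dependent. All names (CompiledCode, DependencyTable) are illustrative stand-ins, not HotSpot types.

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct CompiledCode {
  std::string name;
  bool valid = true;
};

class DependencyTable {
  // assertion key -> code blobs that assumed the assertion holds
  std::unordered_map<std::string, std::vector<CompiledCode*>> deps_;
 public:
  void assert_invariant(const std::string& key, CompiledCode* code) {
    deps_[key].push_back(code);   // cheap to record at compile time
  }
  // Called when the runtime observes a change that overturns `key`.
  int flush_dependents_on(const std::string& key) {
    int marked = 0;
    for (CompiledCode* c : deps_[key]) {
      if (c->valid) { c->valid = false; ++marked; }  // "deoptimize"
    }
    deps_.erase(key);
    return marked;
  }
};

int main() {
  DependencyTable table;
  CompiledCode fast_path{"indy_site_42"};
  table.assert_invariant("CallSite#42.target == mh1", &fast_path);
  // Overturned rarely, so expensive recovery is acceptable:
  int n = table.flush_dependents_on("CallSite#42.target == mh1");
  std::cout << n << " dependent(s) invalidated, fast path valid: "
            << fast_path.valid << "\n";  // 1 dependent, valid: 0
}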
@@ -52,6 +55,8 @@ class OopRecorder;
 class xmlStream;
 class CompileLog;
 class DepChange;
+class KlassDepChange;
+class CallSiteDepChange;
 class No_Safepoint_Verifier;
 class Dependencies: public ResourceObj {
@@ -152,6 +157,9 @@ class Dependencies: public ResourceObj {
   // subclasses require finalization registration.
   no_finalizable_subclasses,
 
+  // This dependency asserts that the CallSite.target value has not changed.
+  call_site_target_value,
+
   TYPE_LIMIT
 };
 
 enum {
@@ -179,6 +187,7 @@ class Dependencies: public ResourceObj {
   static int dep_context_arg(DepType dept) {
     return dept_in_mask(dept, ctxk_types)? 0: -1;
   }
+  static void check_valid_dependency_type(DepType dept);
 
  private:
   // State for writing a new set of dependencies:
@@ -255,6 +264,7 @@ class Dependencies: public ResourceObj {
   void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
   void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
   void assert_has_no_finalizable_subclasses(ciKlass* ctxk);
+  void assert_call_site_target_value(ciKlass* ctxk, ciCallSite* call_site, ciMethodHandle* method_handle);
 
   // Define whether a given method or type is concrete.
   // These methods define the term "concrete" as used in this module.
@@ -296,19 +306,19 @@ class Dependencies: public ResourceObj {
   static klassOop check_evol_method(methodOop m);
   static klassOop check_leaf_type(klassOop ctxk);
   static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
-                                                              DepChange* changes = NULL);
+                                                              KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
-                                                          DepChange* changes = NULL);
+                                                          KlassDepChange* changes = NULL);
   static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
-                                               DepChange* changes = NULL);
+                                               KlassDepChange* changes = NULL);
   static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
-                                                                  DepChange* changes = NULL);
+                                                                  KlassDepChange* changes = NULL);
   static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
-                                                   DepChange* changes = NULL);
-  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk,
-                                                      DepChange* changes = NULL);
+                                                   KlassDepChange* changes = NULL);
+  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
+  static klassOop check_call_site_target_value(klassOop ctxk, oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
   // A returned klassOop is NULL if the dependency assertion is still
   // valid.  A non-NULL klassOop is a 'witness' to the assertion
   // failure, a point in the class hierarchy where the assertion has
@@ -415,7 +425,10 @@ class Dependencies: public ResourceObj {
     inline oop recorded_oop_at(int i);
         // => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)
 
-    klassOop check_dependency_impl(DepChange* changes);
+    klassOop check_klass_dependency(KlassDepChange* changes);
+    klassOop check_call_site_dependency(CallSiteDepChange* changes);
+
+    void trace_and_log_witness(klassOop witness);
 
   public:
     DepStream(Dependencies* deps)
@@ -453,10 +466,13 @@ class Dependencies: public ResourceObj {
       return (klassOop) x;
     }
 
-    // The point of the whole exercise:  Is this dep is still OK?
+    // The point of the whole exercise:  Is this dep still OK?
     klassOop check_dependency() {
-      return check_dependency_impl(NULL);
+      klassOop result = check_klass_dependency(NULL);
+      if (result != NULL)  return result;
+      return check_call_site_dependency(NULL);
     }
+
     // A lighter version:  Checks only around recent changes in a class
     // hierarchy.  (See Universe::flush_dependents_on.)
     klassOop spot_check_dependency_at(DepChange& changes);
@@ -472,12 +488,26 @@ class Dependencies: public ResourceObj {
   static void print_statistics() PRODUCT_RETURN;
 };
 
-// A class hierarchy change coming through the VM (under the Compile_lock).
-// The change is structured as a single new type with any number of supers
-// and implemented interface types.  Other than the new type, any of the
-// super types can be context types for a relevant dependency, which the
-// new type could invalidate.
+
+// Every particular DepChange is a sub-class of this class.
 class DepChange : public StackObj {
+ public:
+  // What kind of DepChange is this?
+  virtual bool is_klass_change()     const { return false; }
+  virtual bool is_call_site_change() const { return false; }
+
+  // Subclass casting with assertions.
+  KlassDepChange*    as_klass_change() {
+    assert(is_klass_change(), "bad cast");
+    return (KlassDepChange*) this;
+  }
+  CallSiteDepChange* as_call_site_change() {
+    assert(is_call_site_change(), "bad cast");
+    return (CallSiteDepChange*) this;
+  }
+
+  void print();
+
  public:
   enum ChangeType {
     NO_CHANGE = 0,              // an uninvolved klass
@@ -488,28 +518,6 @@ class DepChange : public StackObj {
     Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
   };
 
- private:
-  // each change set is rooted in exactly one new type (at present):
-  KlassHandle _new_type;
-
-  void initialize();
-
- public:
-  // notes the new type, marks it and all its super-types
-  DepChange(KlassHandle new_type)
-    : _new_type(new_type)
-  {
-    initialize();
-  }
-
-  // cleans up the marks
-  ~DepChange();
-
-  klassOop new_type() { return _new_type(); }
-
-  // involves_context(k) is true if k is new_type or any of the super types
-  bool involves_context(klassOop k);
-
   // Usage:
   // for (DepChange::ContextStream str(changes); str.next(); ) {
   //   klassOop k = str.klass();
@@ -530,14 +538,7 @@ class DepChange : public StackObj {
     int  _ti_limit;
 
     // start at the beginning:
-    void start() {
-      klassOop new_type = _changes.new_type();
-      _change_type = (new_type == NULL ? NO_CHANGE: Start_Klass);
-      _klass = new_type;
-      _ti_base = NULL;
-      _ti_index = 0;
-      _ti_limit = 0;
-    }
+    void start();
 
    public:
     ContextStream(DepChange& changes)
@@ -555,8 +556,62 @@ class DepChange : public StackObj {
     klassOop klass()              { return _klass; }
   };
   friend class DepChange::ContextStream;
+};
 
-  void print();
 
+// A class hierarchy change coming through the VM (under the Compile_lock).
+// The change is structured as a single new type with any number of supers
+// and implemented interface types.  Other than the new type, any of the
+// super types can be context types for a relevant dependency, which the
+// new type could invalidate.
+class KlassDepChange : public DepChange {
+ private:
+  // each change set is rooted in exactly one new type (at present):
+  KlassHandle _new_type;
+
+  void initialize();
+
+ public:
+  // notes the new type, marks it and all its super-types
+  KlassDepChange(KlassHandle new_type)
+    : _new_type(new_type)
+  {
+    initialize();
+  }
+
+  // cleans up the marks
+  ~KlassDepChange();
+
+  // What kind of DepChange is this?
+  virtual bool is_klass_change() const { return true; }
+
+  klassOop new_type() { return _new_type(); }
+
+  // involves_context(k) is true if k is new_type or any of the super types
+  bool involves_context(klassOop k);
+};
+
+
+// A CallSite has changed its target.
+class CallSiteDepChange : public DepChange {
+ private:
+  Handle _call_site;
+  Handle _method_handle;
+
+ public:
+  CallSiteDepChange(Handle call_site, Handle method_handle)
+    : _call_site(call_site),
+      _method_handle(method_handle)
+  {
+    assert(_call_site()    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(_method_handle()->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+  }
+
+  // What kind of DepChange is this?
+  virtual bool is_call_site_change() const { return true; }
+
+  oop call_site()     const { return _call_site(); }
+  oop method_handle() const { return _method_handle(); }
 };
 
 #endif // SHARE_VM_CODE_DEPENDENCIES_HPP
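A hedged sketch of the dispatch pattern the DepChange hierarchy above uses (not from the changeset): a base change object with cheap kind tests plus asserted downcasts, so a dependency checker can route klass changes and CallSite changes differently, as spot_check_dependency_at does. The C++ below is self-contained; Change, KlassChange, and CallSiteChange are invented stand-ins for the HotSpot classes.

#include <cassert>
#include <iostream>

class KlassChange; class CallSiteChange;

class Change {
 public:
  virtual ~Change() = default;
  virtual bool is_klass_change()     const { return false; }
  virtual bool is_call_site_change() const { return false; }
  KlassChange*    as_klass_change();       // asserted downcasts, defined below
  CallSiteChange* as_call_site_change();
};

class KlassChange : public Change {
 public:
  bool is_klass_change() const override { return true; }
};

class CallSiteChange : public Change {
 public:
  bool is_call_site_change() const override { return true; }
};

KlassChange* Change::as_klass_change() {
  assert(is_klass_change() && "bad cast");
  return static_cast<KlassChange*>(this);
}
CallSiteChange* Change::as_call_site_change() {
  assert(is_call_site_change() && "bad cast");
  return static_cast<CallSiteChange*>(this);
}

// Mirrors spot_check_dependency_at: route by kind, skip irrelevant changes.
void spot_check(Change& c) {
  if (c.is_klass_change())          std::cout << "check klass dependency\n";
  else if (c.is_call_site_change()) std::cout << "check call site dependency\n";
  else                              std::cout << "irrelevant; skip\n";
}

int main() {
  KlassChange k;
  CallSiteChange s;
  spot_check(k);  // check klass dependency
  spot_check(s);  // check call site dependency
}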
View file
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
+#include "code/dependencies.hpp"
 #include "code/nmethod.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/abstractCompiler.hpp"
View file
@@ -509,6 +509,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
   // resolve field
   FieldAccessInfo info;
   constantPoolHandle pool(thread, method(thread)->constants());
+  bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
@@ -528,8 +529,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
   // exceptions at the correct place. If we do not resolve completely
   // in the current pass, leaving the put_code set to zero will
   // cause the next put instruction to reresolve.
-  bool is_put = (bytecode == Bytecodes::_putfield ||
-                 bytecode == Bytecodes::_putstatic);
   Bytecodes::Code put_code = (Bytecodes::Code)0;
 
   // We also need to delay resolving getstatic instructions until the
@@ -541,7 +540,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
                              !klass->is_initialized());
   Bytecodes::Code get_code = (Bytecodes::Code)0;
 
-
   if (!uninitialized_static) {
     get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
     if (is_put || !info.access_flags().is_final()) {
@@ -549,6 +547,23 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
     }
   }
 
+  if (is_put && !is_static && klass->is_subclass_of(SystemDictionary::CallSite_klass()) && (info.name() == vmSymbols::target_name())) {
+    const jint direction = frame::interpreter_frame_expression_stack_direction();
+    oop call_site     = *((oop*) thread->last_frame().interpreter_frame_tos_at(-1 * direction));
+    oop method_handle = *((oop*) thread->last_frame().interpreter_frame_tos_at( 0 * direction));
+    assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "must be");
+    assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
+
+    {
+      // Walk all nmethods depending on CallSite
+      MutexLocker mu(Compile_lock, thread);
+      Universe::flush_dependents_on(call_site, method_handle);
+    }
+
+    // Don't allow fast path for setting CallSite.target and sub-classes.
+    put_code = (Bytecodes::Code) 0;
+  }
+
   cache_entry(thread)->set_field(
     get_code,
     put_code,
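The hunk above is the heart of the "pushed not pulled" change: the interpreter intercepts the write to CallSite.target and flushes dependent compiled code at that moment, instead of compiled code re-checking the target on every call. A toy self-contained C++ model of that control flow follows (not HotSpot code; Runtime, Nmethod, and CallSite are illustrative stand-ins).

#include <iostream>
#include <vector>

struct Nmethod { bool valid = true; };

struct Runtime {
  std::vector<Nmethod*> dependents;   // nmethods compiled against one target
  void flush_dependents() {
    for (Nmethod* n : dependents) n->valid = false;  // mark for "deopt"
    dependents.clear();
  }
};

struct CallSite {
  int target;    // stands in for the MethodHandle
  Runtime* rt;
  void set_target(int new_target) {
    rt->flush_dependents();  // push: notify before the new target is visible
    target = new_target;
  }
};

int main() {
  Runtime rt;
  Nmethod compiled;                  // inlined through the old target
  rt.dependents.push_back(&compiled);
  CallSite cs{1, &rt};
  cs.set_target(2);                  // the write itself invalidates dependents
  std::cout << "compiled still valid: " << compiled.valid << "\n";  // 0
}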
View file
@@ -120,8 +120,8 @@ class TemplateTable: AllStatic {
   // helpers
   static void unimplemented_bc();
-  static void patch_bytecode(Bytecodes::Code bc, Register scratch1,
-                             Register scratch2, bool load_bc_in_scratch = true);
+  static void patch_bytecode(Bytecodes::Code bc, Register bc_reg,
+                             Register temp_reg, bool load_bc_into_bc_reg = true, int byte_no = -1);
 
   // C calls
   static void call_VM(Register oop_result, address entry_point);
View file
@@ -1177,7 +1177,7 @@ void Universe::flush_dependents_on(instanceKlassHandle dependee) {
   // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
-  DepChange changes(dependee);
+  KlassDepChange changes(dependee);
 
   // Compute the dependent nmethods
   if (CodeCache::mark_for_deoptimization(changes) > 0) {
@@ -1187,6 +1187,37 @@ void Universe::flush_dependents_on(instanceKlassHandle dependee) {
   }
 }
 
+// Flushes compiled methods dependent on a particular CallSite
+// instance when its target is different from the given MethodHandle.
+void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
+  assert_lock_strong(Compile_lock);
+
+  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
+
+  // CodeCache can only be updated by a thread_in_VM and they will all be
+  // stopped during the safepoint so CodeCache will be safe to update without
+  // holding the CodeCache_lock.
+
+  CallSiteDepChange changes(call_site(), method_handle());
+
+  // Compute the dependent nmethods that have a reference to a
+  // CallSite object.  We use instanceKlass::mark_dependent_nmethods
+  // directly instead of CodeCache::mark_for_deoptimization because we
+  // want dependents on the class CallSite only, not all classes in the
+  // ContextStream.
+  int marked = 0;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    instanceKlass* call_site_klass = instanceKlass::cast(SystemDictionary::CallSite_klass());
+    marked = call_site_klass->mark_dependent_nmethods(changes);
+  }
+
+  if (marked > 0) {
+    // At least one nmethod has been marked for deoptimization
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+}
+
 #ifdef HOTSWAP
 // Flushes compiled methods dependent on dependee in the evolutionary sense
 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
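The new flush above is two-phase: mark dependents while holding a code-cache lock (without safepoint checks), then run one batched deoptimization only if anything was marked. A self-contained C++ sketch of that shape, with invented stand-in names (Nmethod, call_site_dependents), not HotSpot code:

#include <iostream>
#include <mutex>
#include <vector>

struct Nmethod { bool marked = false; };

std::mutex code_cache_lock;
std::vector<Nmethod*> call_site_dependents;  // recorded on the CallSite class only

// Phase 1: mark while holding the lock; return how many were newly marked.
int mark_dependents() {
  std::lock_guard<std::mutex> guard(code_cache_lock);
  int marked = 0;
  for (Nmethod* n : call_site_dependents) {
    if (!n->marked) { n->marked = true; ++marked; }
  }
  return marked;
}

// Phase 2: one batched "VM operation" if anything was marked.
void flush_dependents() {
  if (int marked = mark_dependents(); marked > 0) {
    std::cout << "deoptimizing " << marked << " nmethod(s)\n";
  }
}

int main() {
  Nmethod a, b;
  call_site_dependents = {&a, &b};
  flush_dependents();  // deoptimizing 2 nmethod(s)
}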
View file
@@ -439,6 +439,7 @@ class Universe: AllStatic {
   // Flushing and deoptimization
   static void flush_dependents_on(instanceKlassHandle dependee);
+  static void flush_dependents_on(Handle call_site, Handle method_handle);
 #ifdef HOTSWAP
   // Flushing and deoptimization in case of evolution
   static void flush_evol_dependents_on(instanceKlassHandle dependee);
View file
@@ -1406,7 +1406,7 @@ class nmethodBucket {
 //
 // Walk the list of dependent nmethods searching for nmethods which
-// are dependent on the klassOop that was passed in and mark them for
+// are dependent on the changes that were passed in and mark them for
 // deoptimization.  Returns the number of nmethods found.
 //
 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
View file
@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "ci/bcEscapeAnalyzer.hpp"
+#include "ci/ciCallSite.hpp"
 #include "ci/ciCPCache.hpp"
 #include "ci/ciMethodHandle.hpp"
 #include "classfile/javaClasses.hpp"
@@ -738,6 +739,34 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
 }
 
+CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
+                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
+  ciMethodHandle* method_handle = call_site->get_target();
+
+  // Set the callee to have access to the class and signature in the
+  // MethodHandleCompiler.
+  method_handle->set_callee(callee);
+  method_handle->set_caller(caller);
+  method_handle->set_call_profile(profile);
+
+  // Get an adapter for the MethodHandle.
+  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+  if (target_method != NULL) {
+    Compile *C = Compile::current();
+    CallGenerator* hit_cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
+    if (hit_cg != NULL && hit_cg->is_inline()) {
+      // Add a dependence for invalidation of the optimization.
+      if (call_site->is_mutable_call_site()) {
+        C->dependencies()->assert_call_site_target_value(C->env()->CallSite_klass(), call_site, method_handle);
+      }
+      return hit_cg;
+    }
+  }
+
+  return NULL;
+}
+
 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,7 @@ class CallGenerator : public ResourceObj {
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
   static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
 
   // How to generate a replacement for a direct call with an inline version
   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
View file
@@ -114,7 +114,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
       if (cg != NULL)  return cg;
     }
 
-    // Do MethodHandle calls.
+    // Do method handle calls.
     // NOTE: This must happen before normal inlining logic below since
     // MethodHandle.invoke* are native methods which obviously don't
     // have bytecodes and so normal inlining fails.
@@ -127,33 +127,25 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
       if (cg != NULL) {
        return cg;
      }
       return CallGenerator::for_direct_call(call_method);
     }
     else {
-      // Get the MethodHandle from the CallSite.
+      // Get the CallSite object.
       ciMethod* caller_method = jvms->method();
       ciBytecodeStream str(caller_method);
       str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-      ciCallSite*     call_site     = str.get_call_site();
-      ciMethodHandle* method_handle = call_site->get_target();
-
-      // Set the callee to have access to the class and signature in
-      // the MethodHandleCompiler.
-      method_handle->set_callee(call_method);
-      method_handle->set_caller(caller);
-      method_handle->set_call_profile(profile);
-
-      // Get an adapter for the MethodHandle.
-      ciMethod* target_method = method_handle->get_invokedynamic_adapter();
-      if (target_method != NULL) {
-        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-        if (hit_cg != NULL && hit_cg->is_inline()) {
-          CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
-          return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+      ciCallSite* call_site = str.get_call_site();
+
+      // Inline constant and mutable call sites.  We don't inline
+      // volatile call sites optimistically since they are specified
+      // to change their value often and that would result in a lot of
+      // deoptimizations and recompiles.
+      if (call_site->is_constant_call_site() || call_site->is_mutable_call_site()) {
+        CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
+        if (cg != NULL) {
+          return cg;
         }
       }
 
       // If something failed, generate a normal dynamic call.
       return CallGenerator::for_dynamic_call(call_method);
     }
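The comment in the hunk above encodes a three-way policy. As a rough, hedged sketch (invented enum and names, not the HotSpot API): constant call sites can be inlined with no guard, mutable ones are inlined under a target-value dependency, and volatile ones are compiled as plain dynamic calls since optimistic inlining would mostly buy deoptimization churn.

#include <iostream>

enum class CallSiteKind { Constant, Mutable, Volatile };

struct InlinePlan {
  bool inline_target;     // inline through the current target?
  bool needs_dependency;  // invalidate compiled code if target changes?
};

InlinePlan plan_for(CallSiteKind kind) {
  switch (kind) {
    case CallSiteKind::Constant: return {true,  false};  // target is final
    case CallSiteKind::Mutable:  return {true,  true};   // guard with dependency
    case CallSiteKind::Volatile: return {false, false};  // plain dynamic call
  }
  return {false, false};
}

int main() {
  for (CallSiteKind k : {CallSiteKind::Constant, CallSiteKind::Mutable,
                         CallSiteKind::Volatile}) {
    InlinePlan p = plan_for(k);
    std::cout << "inline=" << p.inline_target
              << " dep=" << p.needs_dependency << "\n";
  }
}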
View file
@@ -100,6 +100,14 @@ void Parse::do_field_access(bool is_get, bool is_field) {
     }
   }
 
+  // Deoptimize on putfield writes to CallSite.target
+  if (!is_get && field->is_call_site_target()) {
+    uncommon_trap(Deoptimization::Reason_unhandled,
+                  Deoptimization::Action_reinterpret,
+                  NULL, "put to CallSite.target field");
+    return;
+  }
+
   assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");
 
   // Note:  We do not check for an unloaded field type here any more.
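The hunk above closes the remaining gap in the push model: a store to CallSite.target must never be compiled as a fast putfield, or the runtime hook that flushes dependents would be bypassed; instead the compiler bails out and the write re-enters the interpreter. A toy self-contained C++ illustration of that bail-out shape (Field and compile_put are invented stand-ins, not the parser API):

#include <iostream>
#include <stdexcept>

struct Field { bool is_call_site_target; };

// Stand-in for emitting a compiled field write.
void compile_put(const Field& f) {
  if (f.is_call_site_target) {
    // mirrors uncommon_trap(...): don't compile; fall back to the slow path
    throw std::runtime_error("deopt: put to CallSite.target field");
  }
  std::cout << "emit fast putfield\n";
}

int main() {
  try {
    compile_put({false});  // ordinary field: fast path
    compile_put({true});   // CallSite.target: must take the runtime hook
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";
  }
}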