6953144: Tiered compilation

Infrastructure for tiered compilation support (interpreter + C1 + C2) for 32- and 64-bit platforms. Simple tiered policy implementation.

Reviewed-by: kvn, never, phh, twisti
Igor Veresov 2010-09-03 17:51:07 -07:00
parent 6e78f6cb4b
commit 2c66a6c3fd
104 changed files with 7720 additions and 1701 deletions
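The "simple tiered policy" in the summary decides when a method moves between the interpreter, C1 and C2. As orientation for reading the diffs below, here is a minimal sketch of such a threshold policy; the level numbers, thresholds and names are illustrative assumptions, not this changeset's actual policy code.

// A minimal sketch of a simple tiered policy: promote a method once its
// invocation/backedge counters cross a per-level threshold. All names and
// numbers here are illustrative, not the changeset's code.
enum CompLevel { kInterpreter = 0, kC1Profiled = 3, kC2 = 4 };

struct Counters { int invocations; int backedges; };

CompLevel next_level(CompLevel cur, const Counters& c) {
  switch (cur) {
    case kInterpreter:
      // hot enough for a quick, profiling C1 compile?
      if (c.invocations > 200 || c.backedges > 7000) return kC1Profiled;
      break;
    case kC1Profiled:
      // profile has matured; hand off to the optimizing compiler
      if (c.invocations > 5000 || c.backedges > 40000) return kC2;
      break;
    default:
      break;
  }
  return cur;  // stay at the current level
}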


@@ -61,11 +61,9 @@ include $(GAMMADIR)/make/defs.make
 endif
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif
 ifdef LP64
 ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")


@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,9 @@ include $(GAMMADIR)/make/defs.make
 endif
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif
 ifdef LP64
 ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -72,13 +72,11 @@ BUILDARCH=ia64
 !endif
 !endif
-!if "$(BUILDARCH)" != "amd64"
 !if "$(BUILDARCH)" != "ia64"
 !ifndef CC_INTERP
 FORCE_TIERED=1
 !endif
 !endif
-!endif
 !if "$(BUILDARCH)" == "amd64"
 Platform_arch=x86


@@ -57,13 +57,12 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
 #endif
 }
-#ifdef TIERED
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ set(_bci, G4);
   __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
-  __ delayed()->nop();
+  __ delayed()->mov_or_nop(_method->as_register(), G5);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -71,7 +70,6 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ delayed()->nop();
 }
-#endif // TIERED
 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
   if (_offset != -1) {


@@ -73,6 +73,7 @@ FloatRegister FrameMap::_fpu_regs [FrameMap::nof_fpu_regs];
 // some useful constant RInfo's:
 LIR_Opr FrameMap::in_long_opr;
 LIR_Opr FrameMap::out_long_opr;
+LIR_Opr FrameMap::g1_long_single_opr;
 LIR_Opr FrameMap::F0_opr;
 LIR_Opr FrameMap::F0_double_opr;
@@ -238,6 +239,7 @@ void FrameMap::initialize() {
   in_long_opr  = as_long_opr(I0);
   out_long_opr = as_long_opr(O0);
+  g1_long_single_opr = as_long_single_opr(G1);
   G0_opr = as_opr(G0);
   G1_opr = as_opr(G1);


@@ -103,6 +103,7 @@
   static LIR_Opr in_long_opr;
   static LIR_Opr out_long_opr;
+  static LIR_Opr g1_long_single_opr;
   static LIR_Opr F0_opr;
   static LIR_Opr F0_double_opr;
@@ -113,18 +114,25 @@
  private:
   static FloatRegister _fpu_regs [nof_fpu_regs];
+  static LIR_Opr as_long_single_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+  static LIR_Opr as_long_pair_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+  }
  public:
 #ifdef _LP64
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
 #else
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+    return as_long_pair_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
     return as_opr(r);


@@ -1625,13 +1625,18 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
 void LIR_Assembler::return_op(LIR_Opr result) {
   // the poll may need a register so just pick one that isn't the return register
-#ifdef TIERED
+#if defined(TIERED) && !defined(_LP64)
   if (result->type_field() == LIR_OprDesc::long_type) {
     // Must move the result to G1
     // Must leave proper result in O0,O1 and G1 (TIERED only)
     __ sllx(I0, 32, G1);          // Shift bits into high G1
     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
     __ or3 (I1, G1, G1);          // OR 64 bits into G1
+#ifdef ASSERT
+    // mangle it so any problems will show up
+    __ set(0xdeadbeef, I0);
+    __ set(0xdeadbeef, I1);
+#endif
   }
 #endif // TIERED
   __ set((intptr_t)os::get_polling_page(), L0);
@@ -2424,6 +2429,195 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
 }
+
+void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
+                                        ciMethodData *md, ciProfileData *data,
+                                        Register recv, Register tmp1, Label* update_done) {
+  uint i;
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    // See if the receiver is receiver[n].
+    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                          mdo_offset_bias);
+    __ ld_ptr(receiver_addr, tmp1);
+    __ verify_oop(tmp1);
+    __ cmp(recv, tmp1);
+    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+                      mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ add(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+  // Didn't find receiver; find next empty slot and fill it in
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                      mdo_offset_bias);
+    load(recv_addr, tmp1, T_OBJECT);
+    __ br_notnull(tmp1, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    __ st_ptr(recv, recv_addr);
+    __ set(DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+              mdo_offset_bias);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+}
+
+void LIR_Assembler::emit_checkcast(LIR_OpTypeCheck *op) {
+  assert(op->code() == lir_checkcast, "Invalid operation");
+  // we always need a stub for the failure case.
+  CodeStub* stub = op->stub();
+  Register obj = op->object()->as_register();
+  Register k_RInfo = op->tmp1()->as_register();
+  Register klass_RInfo = op->tmp2()->as_register();
+  Register dst = op->result_opr()->as_register();
+  Register Rtmp1 = op->tmp3()->as_register();
+  ciKlass* k = op->klass();
+
+  if (obj == k_RInfo) {
+    k_RInfo = klass_RInfo;
+    klass_RInfo = obj;
+  }
+
+  ciMethodData* md;
+  ciProfileData* data;
+  int mdo_offset_bias = 0;
+  if (op->should_profile()) {
+    ciMethod* method = op->profiled_method();
+    assert(method != NULL, "Should have method");
+    int bci = op->profiled_bci();
+    md = method->method_data();
+    if (md == NULL) {
+      bailout("out of memory building methodDataOop");
+      return;
+    }
+    data = md->bci_to_data(bci);
+    assert(data != NULL, "need data for checkcast");
+    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for checkcast");
+    if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
+      // The offset is large so bias the mdo by the base of the slot so
+      // that the ld can use simm13s to reference the slots of the data
+      mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
+    }
+    // We need two temporaries to perform this operation on SPARC,
+    // so to keep things simple we perform a redundant test here
+    Label profile_done;
+    __ br_notnull(obj, false, Assembler::pn, profile_done);
+    __ delayed()->nop();
+    Register mdo = k_RInfo;
+    Register data_val = Rtmp1;
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, data_val);
+      __ add(mdo, data_val, mdo);
+    }
+    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
+    __ ldub(flags_addr, data_val);
+    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
+    __ stb(data_val, flags_addr);
+    __ bind(profile_done);
+  }
+
+  Label profile_cast_failure;
+  Label done, done_null;
+  // Where to go in case of cast failure
+  Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
+
+  // patching may screw with our temporaries on sparc,
+  // so let's do it before loading the class
+  if (k->is_loaded()) {
+    jobject2reg(k->constant_encoding(), k_RInfo);
+  } else {
+    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+  }
+  assert(obj != k_RInfo, "must be different");
+  __ br_null(obj, false, Assembler::pn, done_null);
+  __ delayed()->nop();
+
+  // get object class
+  // not a safepoint as obj null check happens earlier
+  load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
+  if (op->fast_check()) {
+    assert_different_registers(klass_RInfo, k_RInfo);
+    __ cmp(k_RInfo, klass_RInfo);
+    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
+    __ delayed()->nop();
+  } else {
+    bool need_slow_path = true;
+    if (k->is_loaded()) {
+      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
+        need_slow_path = false;
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
+                                       (need_slow_path ? &done : NULL),
+                                       failure_target, NULL,
+                                       RegisterOrConstant(k->super_check_offset()));
+    } else {
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done,
+                                       failure_target, NULL);
+    }
+    if (need_slow_path) {
+      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
+      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
+      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ delayed()->nop();
+      __ cmp(G3, 0);
+      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
+      __ delayed()->nop();
+    }
+  }
+  __ bind(done);
+
+  if (op->should_profile()) {
+    Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
+    assert_different_registers(obj, mdo, recv, tmp1);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    Label update_done;
+    load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
+    // Jump over the failure case
+    __ ba(false, update_done);
+    __ delayed()->nop();
+    // Cast failure case
+    __ bind(profile_cast_failure);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ sub(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *stub->entry());
+    __ delayed()->nop();
+    __ bind(update_done);
+  }
+  __ bind(done_null);
+  __ mov(obj, dst);
+}
+
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
   LIR_Code code = op->code();
   if (code == lir_store_check) {
@@ -2437,8 +2631,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     CodeStub* stub = op->stub();
     Label done;
-    __ cmp(value, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
+    __ br_null(value, false, Assembler::pn, done);
     __ delayed()->nop();
     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
@@ -2456,109 +2649,6 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
     __ delayed()->nop();
     __ bind(done);
-  } else if (op->code() == lir_checkcast) {
-    // we always need a stub for the failure case.
-    CodeStub* stub = op->stub();
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    Register Rtmp1 = op->tmp3()->as_register();
-    ciKlass* k = op->klass();
-
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    if (op->profiled_method() != NULL) {
-      ciMethod* method = op->profiled_method();
-      int bci = op->profiled_bci();
-      // We need two temporaries to perform this operation on SPARC,
-      // so to keep things simple we perform a redundant test here
-      Label profile_done;
-      __ cmp(obj, 0);
-      __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
-      __ delayed()->nop();
-      // Object is null; update methodDataOop
-      ciMethodData* md = method->method_data();
-      if (md == NULL) {
-        bailout("out of memory building methodDataOop");
-        return;
-      }
-      ciProfileData* data = md->bci_to_data(bci);
-      assert(data != NULL, "need data for checkcast");
-      assert(data->is_BitData(), "need BitData for checkcast");
-      Register mdo = k_RInfo;
-      Register data_val = Rtmp1;
-      jobject2reg(md->constant_encoding(), mdo);
-      int mdo_offset_bias = 0;
-      if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
-        // The offset is large so bias the mdo by the base of the slot so
-        // that the ld can use simm13s to reference the slots of the data
-        mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
-        __ set(mdo_offset_bias, data_val);
-        __ add(mdo, data_val, mdo);
-      }
-      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
-      __ ldub(flags_addr, data_val);
-      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
-      __ stb(data_val, flags_addr);
-      __ bind(profile_done);
-    }
-    Label done;
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (k->is_loaded()) {
-      jobject2reg(k->constant_encoding(), k_RInfo);
-    } else {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    }
-    assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
-    // get object class
-    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
-    if (op->fast_check()) {
-      assert_different_registers(klass_RInfo, k_RInfo);
-      __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
-      __ delayed()->nop();
-      __ bind(done);
-    } else {
-      bool need_slow_path = true;
-      if (k->is_loaded()) {
-        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
-          need_slow_path = false;
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
-                                         (need_slow_path ? &done : NULL),
-                                         stub->entry(), NULL,
-                                         RegisterOrConstant(k->super_check_offset()));
-      } else {
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
-                                         &done, stub->entry(), NULL);
-      }
-      if (need_slow_path) {
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
-        __ delayed()->nop();
-        __ cmp(G3, 0);
-        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
-        __ delayed()->nop();
-      }
-      __ bind(done);
-    }
-    __ mov(obj, dst);
   } else if (code == lir_instanceof) {
     Register obj = op->object()->as_register();
     Register k_RInfo = op->tmp1()->as_register();
@@ -2580,8 +2670,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     }
     assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, true, Assembler::pn, done);
+    __ br_null(obj, true, Assembler::pn, done);
     __ delayed()->set(0, dst);
     // get object class
@@ -2589,7 +2678,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
     if (op->fast_check()) {
       __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::equal, true, Assembler::pt, done);
+      __ brx(Assembler::equal, true, Assembler::pt, done);
       __ delayed()->set(1, dst);
       __ set(0, dst);
       __ bind(done);
@@ -2776,9 +2865,14 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   ciProfileData* data = md->bci_to_data(bci);
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
-  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   Register mdo = op->mdo()->as_register();
+#ifdef _LP64
+  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
+  Register tmp1 = op->tmp1()->as_register_lo();
+#else
+  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   Register tmp1 = op->tmp1()->as_register();
+#endif
   jobject2reg(md->constant_encoding(), mdo);
   int mdo_offset_bias = 0;
   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -2795,13 +2889,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      Tier1ProfileVirtualCalls) {
+      C1ProfileVirtualCalls) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
       // We know the type that will be seen at this call site; we can
       // statically update the methodDataOop rather than needing to do
       // dynamic tests on the receiver type
@@ -2816,9 +2910,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
         Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                        VirtualCallData::receiver_count_offset(i)) -
                           mdo_offset_bias);
-        __ lduw(data_addr, tmp1);
+        __ ld_ptr(data_addr, tmp1);
         __ add(tmp1, DataLayout::counter_increment, tmp1);
-        __ stw(tmp1, data_addr);
+        __ st_ptr(tmp1, data_addr);
         return;
       }
     }
@@ -2837,70 +2931,32 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
           __ st_ptr(tmp1, recv_addr);
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                             mdo_offset_bias);
-          __ lduw(data_addr, tmp1);
+          __ ld_ptr(data_addr, tmp1);
           __ add(tmp1, DataLayout::counter_increment, tmp1);
-          __ stw(tmp1, data_addr);
+          __ st_ptr(tmp1, data_addr);
           return;
         }
       }
     } else {
       load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
       Label update_done;
-      uint i;
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        // See if the receiver is receiver[n].
-        Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                              mdo_offset_bias);
-        __ ld_ptr(receiver_addr, tmp1);
-        __ verify_oop(tmp1);
-        __ cmp(recv, tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                          mdo_offset_bias);
-        __ lduw(data_addr, tmp1);
-        __ add(tmp1, DataLayout::counter_increment, tmp1);
-        __ stw(tmp1, data_addr);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
-      // Didn't find receiver; find next empty slot and fill it in
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                          mdo_offset_bias);
-        load(recv_addr, tmp1, T_OBJECT);
-        __ tst(tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        __ st_ptr(recv, recv_addr);
-        __ set(DataLayout::counter_increment, tmp1);
-        __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                  mdo_offset_bias);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
+      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
       // Receiver did not match any saved receiver and there is no empty row for it.
       // Increment total counter to indicate polymorphic case.
-      __ lduw(counter_addr, tmp1);
+      __ ld_ptr(counter_addr, tmp1);
       __ add(tmp1, DataLayout::counter_increment, tmp1);
-      __ stw(tmp1, counter_addr);
+      __ st_ptr(tmp1, counter_addr);
       __ bind(update_done);
     }
   } else {
     // Static call
-    __ lduw(counter_addr, tmp1);
+    __ ld_ptr(counter_addr, tmp1);
     __ add(tmp1, DataLayout::counter_increment, tmp1);
-    __ stw(tmp1, counter_addr);
+    __ st_ptr(tmp1, counter_addr);
   }
 }

 void LIR_Assembler::align_backward_branch_target() {
   __ align(OptoLoopAlignment);
 }
@@ -3093,31 +3149,36 @@ void LIR_Assembler::membar_release() {
   // no-op on TSO
 }

-// Macro to Pack two sequential registers containing 32 bit values
+// Pack two sequential registers containing 32 bit values
 // into a single 64 bit register.
-// rs and rs->successor() are packed into rd
-// rd and rs may be the same register.
-// Note: rs and rs->successor() are destroyed.
-void LIR_Assembler::pack64( Register rs, Register rd ) {
+// src and src->successor() are packed into dst
+// src and dst may be the same register.
+// Note: src is destroyed
+void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register();
+  Register rd = dst->as_register_lo();
   __ sllx(rs, 32, rs);
   __ srl(rs->successor(), 0, rs->successor());
   __ or3(rs, rs->successor(), rd);
 }

-// Macro to unpack a 64 bit value in a register into
+// Unpack a 64 bit value in a register into
 // two sequential registers.
-// rd is unpacked into rd and rd->successor()
-void LIR_Assembler::unpack64( Register rd ) {
-  __ mov(rd, rd->successor());
-  __ srax(rd, 32, rd);
-  __ sra(rd->successor(), 0, rd->successor());
+// src is unpacked into dst and dst->successor()
+void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register_lo();
+  Register rd = dst->as_register_hi();
+  assert_different_registers(rs, rd, rd->successor());
+  __ srlx(rs, 32, rd);
+  __ srl (rs,  0, rd->successor());
 }

 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
   LIR_Address* addr = addr_opr->as_address_ptr();
   assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
-  __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
+  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
 }
@@ -3188,11 +3249,36 @@ void LIR_Assembler::peephole(LIR_List* lir) {
           tty->cr();
         }
 #endif
-          continue;
+        } else {
+          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
+          inst->insert_before(i + 1, delay_op);
+          i++;
         }
-        LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
-        inst->insert_before(i + 1, delay_op);
+#if defined(TIERED) && !defined(_LP64)
+        // fixup the return value from G1 to O0/O1 for long returns.
+        // It's done here instead of in LIRGenerator because there's
+        // such a mismatch between the single reg and double reg
+        // calling convention.
+        LIR_OpJavaCall* callop = op->as_OpJavaCall();
+        if (callop->result_opr() == FrameMap::out_long_opr) {
+          LIR_OpJavaCall* call;
+          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
+          for (int a = 0; a < arguments->length(); a++) {
+            arguments->append(callop->arguments()->at(a));
+          }
+          if (op->code() == lir_virtual_call) {
+            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                      callop->vtable_offset(), arguments, callop->info());
+          } else {
+            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                      callop->addr(), arguments, callop->info());
+          }
+          inst->at_put(i - 1, call);
+          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
+                                                 T_LONG, lir_patch_none, NULL));
+        }
+#endif
         break;
       }
     }
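The new type_profile_helper above emits assembly for a small fixed-size receiver-type table. As a C++ sketch of the algorithm the emitted code implements (hypothetical struct; the real rows live in the method's methodDataOop):

#include <cstddef>
#include <cstdint>

// Pass 1 bumps the count of a row whose receiver already matches; pass 2
// claims the first empty row; if both passes fail, control falls through
// and the caller bumps the total counter (the polymorphic case).
struct ReceiverRow { const void* receiver; intptr_t count; };

void update_receiver_profile(ReceiverRow* rows, size_t row_limit,
                             const void* recv, intptr_t increment) {
  for (size_t i = 0; i < row_limit; i++) {        // pass 1: existing row
    if (rows[i].receiver == recv) { rows[i].count += increment; return; }
  }
  for (size_t i = 0; i < row_limit; i++) {        // pass 2: first empty slot
    if (rows[i].receiver == nullptr) {
      rows[i].receiver = recv;
      rows[i].count = increment;
      return;
    }
  }
  // no match and no free row: caller handles the polymorphic case
}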

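The pack64/unpack64 changes and the peephole fixup above shuttle a 32-bit VM's two-register long between an O0/O1 register pair and the single 64-bit G1 register. In plain C++ terms, what the emitted instruction sequences compute is roughly this (illustrative only, not VM code):

#include <cstdint>

// What the sllx/srl/or3 sequence computes: combine a (hi, lo) pair of
// 32-bit halves into one 64-bit value.
uint64_t pack64(uint32_t hi, uint32_t lo) {
  return ((uint64_t)hi << 32) | (uint64_t)lo;
}

// What the srlx/srl pair computes: split a 64-bit value back into its
// high and low 32-bit halves.
void unpack64(uint64_t v, uint32_t* hi, uint32_t* lo) {
  *hi = (uint32_t)(v >> 32);  // srlx rs, 32, rd
  *lo = (uint32_t)v;          // srl  rs,  0, rd->successor()
}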

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,9 +71,13 @@
   static bool is_single_instruction(LIR_Op* op);
+  // Record the type of the receiver in ReceiverTypeData
+  void type_profile_helper(Register mdo, int mdo_offset_bias,
+                           ciMethodData *md, ciProfileData *data,
+                           Register recv, Register tmp1, Label* update_done);
  public:
-  void pack64( Register rs, Register rd );
-  void unpack64( Register rd );
+  void pack64(LIR_Opr src, LIR_Opr dst);
+  void unpack64(LIR_Opr src, LIR_Opr dst);
   enum {
 #ifdef _LP64


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,29 +227,37 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
   }
 }

+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+  LIR_Opr r;
+  if (type == T_LONG) {
+    r = LIR_OprFact::longConst(x);
+  } else if (type == T_INT) {
+    r = LIR_OprFact::intConst(x);
+  } else {
+    ShouldNotReachHere();
+  }
+  if (!Assembler::is_simm13(x)) {
+    LIR_Opr tmp = new_register(type);
+    __ move(r, tmp);
+    return tmp;
+  }
+  return r;
+}
+
-void LIRGenerator::increment_counter(address counter, int step) {
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, type);
   increment_counter(addr, step);
 }

 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
-  LIR_Opr temp = new_register(T_INT);
+  LIR_Opr temp = new_register(addr->type());
   __ move(addr, temp);
-  LIR_Opr c = LIR_OprFact::intConst(step);
-  if (Assembler::is_simm13(step)) {
-    __ add(temp, c, temp);
-  } else {
-    LIR_Opr temp2 = new_register(T_INT);
-    __ move(c, temp2);
-    __ add(temp, temp2, temp);
-  }
+  __ add(temp, load_immediate(step, addr->type()), temp);
   __ move(temp, addr);
 }

 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   LIR_Opr o7opr = FrameMap::O7_opr;
   __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
@@ -611,7 +619,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
   left.load_item();
   right.load_item();
   LIR_Opr reg = rlock_result(x);
-
   if (x->x()->type()->is_float_kind()) {
     Bytecodes::Code code = x->op();
     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
@@ -1089,12 +1096,12 @@ void LIRGenerator::do_If(If* x) {
   // add safepoint before generating condition code so it can be recomputed
   if (x->is_safepoint()) {
     // increment backedge counter if needed
-    increment_backedge_counter(state_for(x, x->state_before()));
+    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
     __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
   }
   __ cmp(lir_cond(cond), left, right);
+  // Generate branch profiling. Profiling code doesn't kill flags.
   profile_branch(x, cond);
   move_to_phi(x->state());
   if (x->x()->type()->is_float_kind()) {
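The new load_immediate helper exists because SPARC arithmetic instructions accept only a 13-bit signed immediate; larger constants must be materialized into a register first. The test it relies on is equivalent to this sketch (illustrative, not VM code):

#include <cstdint>

// Equivalent of Assembler::is_simm13: does x fit a signed 13-bit
// immediate (-4096..4095)? Larger step values force load_immediate()
// to stage the constant in a temporary register.
bool is_simm13(int64_t x) {
  return x >= -4096 && x <= 4095;
}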


@@ -465,12 +465,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       break;
-#ifdef TIERED
     case counter_overflow_id:
-      // G4 contains bci
-      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
+      // G4 contains bci, G5 contains method
+      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
       break;
-#endif // TIERED
     case new_type_array_id:
     case new_object_array_id:


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,14 +34,7 @@ define_pd_global(bool, ProfileTraps, false);
 define_pd_global(bool, UseOnStackReplacement, true );
 define_pd_global(bool, TieredCompilation, false);
 define_pd_global(intx, CompileThreshold, 1000 ); // Design center runs on 1.3.1
-define_pd_global(intx, Tier2CompileThreshold, 1500 );
-define_pd_global(intx, Tier3CompileThreshold, 2000 );
-define_pd_global(intx, Tier4CompileThreshold, 2500 );
 define_pd_global(intx, BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
 define_pd_global(intx, OnStackReplacePercentage, 1400 );
 define_pd_global(bool, UseTLAB, true );


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,21 +37,8 @@ define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(bool, ProfileInterpreter, true);
 #endif // CC_INTERP
 define_pd_global(bool, TieredCompilation, false);
-#ifdef TIERED
-define_pd_global(intx, CompileThreshold, 1000);
-define_pd_global(intx, BackEdgeThreshold, 14000);
-#else
 define_pd_global(intx, CompileThreshold, 10000);
 define_pd_global(intx, BackEdgeThreshold, 140000);
-#endif // TIERED
-define_pd_global(intx, Tier2CompileThreshold, 10000); // unused level
-define_pd_global(intx, Tier3CompileThreshold, 10000);
-define_pd_global(intx, Tier4CompileThreshold, 40000);
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
 define_pd_global(intx, OnStackReplacePercentage, 140);
 define_pd_global(intx, ConditionalMoveLimit, 4);


@@ -2431,3 +2431,20 @@ void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_na
   }
 #endif // CC_INTERP
 }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+                                                        int increment, int mask,
+                                                        Register scratch1, Register scratch2,
+                                                        Condition cond, Label *where) {
+  ld(counter_addr, scratch1);
+  add(scratch1, increment, scratch1);
+  if (is_simm13(mask)) {
+    andcc(scratch1, mask, G0);
+  } else {
+    set(mask, scratch2);
+    andcc(scratch1, scratch2, G0);
+  }
+  br(cond, false, Assembler::pn, *where);
+  delayed()->st(scratch1, counter_addr);
+}
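increment_mask_and_jump is the workhorse of the tiered counters: it bumps a counter and branches only when the masked result satisfies a condition, so the runtime gets notified every 2^n-th increment rather than on every one. Its effect in C++ terms, as a sketch rather than VM code:

#include <cstdint>

// Semantics of the emitted sequence: *counter += increment, then branch
// iff (*counter & mask) matches the condition. With
// mask == ((1 << n) - 1) << count_shift and cond == Assembler::zero,
// this fires on every 2^n-th increment.
bool increment_mask_and_check_zero(int32_t* counter, int increment, int mask) {
  *counter += increment;          // add + delayed store in the real code
  return (*counter & mask) == 0;  // andcc + branch on Assembler::zero
}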


@@ -278,6 +278,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void increment_mdp_data_at(Register reg, int constant,
                              Register bumped_count, Register scratch2,
                              bool decrement = false);
+  void increment_mask_and_jump(Address counter_addr,
+                               int increment, int mask,
+                               Register scratch1, Register scratch2,
+                               Condition cond, Label *where);
   void set_mdp_flag_at(int flag_constant, Register scratch);
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
@@ -321,4 +325,5 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void save_return_value(TosState state, bool is_native_call);
   void restore_return_value(TosState state, bool is_native_call);
 };


@@ -3331,10 +3331,8 @@ void SharedRuntime::generate_deopt_blob() {
   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
 #if !defined(_LP64)
 #if defined(COMPILER2)
-  if (!TieredCompilation) {
-    // 32-bit 1-register longs return longs in G1
-    __ stx(Greturn1, saved_Greturn1_addr);
-  }
+  // 32-bit 1-register longs return longs in G1
+  __ stx(Greturn1, saved_Greturn1_addr);
 #endif
   __ set_last_Java_frame(SP, noreg);
   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
@@ -3347,24 +3345,15 @@ void SharedRuntime::generate_deopt_blob() {
   __ reset_last_Java_frame();
   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
-  // In tiered we never use C2 to compile methods returning longs so
-  // the result is where we expect it already.
 #if !defined(_LP64) && defined(COMPILER2)
   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
-  // I0/I1 if the return value is long. In the tiered world there is
-  // a mismatch between how C1 and C2 return longs compiles and so
-  // currently compilation of methods which return longs is disabled
-  // for C2 and so is this code. Eventually C1 and C2 will do the
-  // same thing for longs in the tiered world.
-  if (!TieredCompilation) {
-    Label not_long;
-    __ cmp(O0,T_LONG);
-    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
-    __ delayed()->nop();
-    __ ldd(saved_Greturn1_addr,I0);
-    __ bind(not_long);
-  }
+  // I0/I1 if the return value is long.
+  Label not_long;
+  __ cmp(O0,T_LONG);
+  __ br(Assembler::notEqual, false, Assembler::pt, not_long);
+  __ delayed()->nop();
+  __ ldd(saved_Greturn1_addr,I0);
+  __ bind(not_long);
 #endif
   __ ret();
   __ delayed()->restore();


@@ -294,35 +294,65 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
 // ??: invocation counter
 //
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-  // Update standard invocation counters
-  __ increment_invocation_counter(O0, G3_scratch);
-  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
-    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
-    __ ld(interpreter_invocation_counter, G3_scratch);
-    __ inc(G3_scratch);
-    __ st(G3_scratch, interpreter_invocation_counter);
-  }
-  if (ProfileInterpreter && profile_method != NULL) {
-    // Test to see if we should create a method data oop
-    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
-    __ sethi(profile_limit, G3_scratch);
-    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
-    __ cmp(O0, G3_scratch);
-    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
-    __ delayed()->nop();
-    // if no method data exists, go to profile_method
-    __ test_method_data_pointer(*profile_method);
-  }
-  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
-  __ sethi(invocation_limit, G3_scratch);
-  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
-  __ cmp(O0, G3_scratch);
-  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
-  __ delayed()->nop();
+  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+    Label no_mdo, done;
+    if (ProfileInterpreter) {
+      // If no method data exists, go to profile_continue.
+      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
+      __ delayed()->nop();
+      // Increment counter
+      Address mdo_invocation_counter(G4_scratch,
+                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                     in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
+                                 G3_scratch, Lscratch,
+                                 Assembler::zero, overflow);
+      __ ba(false, done);
+      __ delayed()->nop();
+    }
+    // Increment counter in methodOop
+    __ bind(no_mdo);
+    Address invocation_counter(Lmethod,
+                               in_bytes(methodOopDesc::invocation_counter_offset()) +
+                               in_bytes(InvocationCounter::counter_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask,
+                               G3_scratch, Lscratch,
+                               Assembler::zero, overflow);
+    __ bind(done);
+  } else {
+    // Update standard invocation counters
+    __ increment_invocation_counter(O0, G3_scratch);
+    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
+      Address interpreter_invocation_counter(Lmethod,in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
+      __ ld(interpreter_invocation_counter, G3_scratch);
+      __ inc(G3_scratch);
+      __ st(G3_scratch, interpreter_invocation_counter);
+    }
+    if (ProfileInterpreter && profile_method != NULL) {
+      // Test to see if we should create a method data oop
+      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
+      __ load_contents(profile_limit, G3_scratch);
+      __ cmp(O0, G3_scratch);
+      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
+      __ delayed()->nop();
+      // if no method data exists, go to profile_method
+      __ test_method_data_pointer(*profile_method);
+    }
+    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
+    __ load_contents(invocation_limit, G3_scratch);
+    __ cmp(O0, G3_scratch);
+    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
+    __ delayed()->nop();
+  }
 }

 // Allocate monitor and lock method (asm interpreter)


@@ -1580,6 +1580,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     const Register O0_cur_bcp = O0;
     __ mov( Lbcp, O0_cur_bcp );
+
     bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
     if ( increment_invocation_counter_for_backward_branches ) {
       Label Lforward;
@@ -1588,17 +1589,84 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       // Bump bytecode pointer by displacement (take the branch)
       __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr
-      // Update Backedge branch separately from invocations
-      const Register G4_invoke_ctr = G4;
-      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
-      if (ProfileInterpreter) {
-        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
-        if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+      if (TieredCompilation) {
+        Label Lno_mdo, Loverflow;
+        int increment = InvocationCounter::count_increment;
+        int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+        if (ProfileInterpreter) {
+          // If no method data exists, go to profile_continue.
+          __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+          __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
+          __ delayed()->nop();
+          // Increment backedge counter in the MDO
+          Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                                   in_bytes(InvocationCounter::counter_offset()));
+          __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                     Assembler::notZero, &Lforward);
+          __ ba(false, Loverflow);
+          __ delayed()->nop();
         }
+        // If there's no MDO, increment counter in methodOop
+        __ bind(Lno_mdo);
+        Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
+                                          in_bytes(InvocationCounter::counter_offset()));
+        __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
+                                   Assembler::notZero, &Lforward);
+        __ bind(Loverflow);
+        // notify point for loop, pass branch bytecode
+        __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
+        // Was an OSR adapter generated?
+        // O0 = osr nmethod
+        __ br_null(O0, false, Assembler::pn, Lforward);
+        __ delayed()->nop();
+        // Has the nmethod been invalidated already?
+        __ ld(O0, nmethod::entry_bci_offset(), O2);
+        __ cmp(O2, InvalidOSREntryBci);
+        __ br(Assembler::equal, false, Assembler::pn, Lforward);
+        __ delayed()->nop();
+        // migrate the interpreter frame off of the stack
+        __ mov(G2_thread, L7);
+        // save nmethod
+        __ mov(O0, L6);
+        __ set_last_Java_frame(SP, noreg);
+        __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
+        __ reset_last_Java_frame();
+        __ mov(L7, G2_thread);
+        // move OSR nmethod to I1
+        __ mov(L6, I1);
+        // OSR buffer to I0
+        __ mov(O0, I0);
+        // remove the interpreter frame
+        __ restore(I5_savedSP, 0, SP);
+        // Jump to the osr code.
+        __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
+        __ jmp(O2, G0);
+        __ delayed()->nop();
       } else {
-        if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+        // Update Backedge branch separately from invocations
+        const Register G4_invoke_ctr = G4;
+        __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
+        if (ProfileInterpreter) {
+          __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
+          if (UseOnStackReplacement) {
+            __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+          }
+        } else {
+          if (UseOnStackReplacement) {
+            __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+          }
         }
       }


@@ -68,19 +68,15 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
   __ jmp(_continuation);
 }
-#ifdef TIERED
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
+  ce->store_parameter(_method->as_register(), 1);
   ce->store_parameter(_bci, 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ jmp(_continuation);
 }
-#endif // TIERED
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                                bool throw_index_out_of_bounds_exception)

@ -1613,7 +1613,194 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation()); __ bind(*op->stub()->continuation());
} }
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
uint i;
for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
__ jccb(Assembler::notEqual, next_test);
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
__ jmpb(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
__ cmpptr(recv_addr, (intptr_t)NULL_WORD);
__ jccb(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
__ jmpb(*update_done);
__ bind(next_test);
}
}
void LIR_Assembler::emit_checkcast(LIR_OpTypeCheck *op) {
assert(op->code() == lir_checkcast, "Invalid operation");
// we always need a stub for the failure case.
CodeStub* stub = op->stub();
Register obj = op->object()->as_register();
Register k_RInfo = op->tmp1()->as_register();
Register klass_RInfo = op->tmp2()->as_register();
Register dst = op->result_opr()->as_register();
ciKlass* k = op->klass();
Register Rtmp1 = noreg;
// check if it needs to be profiled
ciMethodData* md;
ciProfileData* data;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
int bci = op->profiled_bci();
md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
data = md->bci_to_data(bci);
assert(data != NULL, "need data for checkcast");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for checkcast");
}
Label profile_cast_failure;
Label done, done_null;
// Where to go in case of cast failure
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
if (obj == k_RInfo) {
k_RInfo = dst;
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded()) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
__ movoop(k_RInfo, k->constant_encoding());
#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->should_profile()) {
Label profile_done;
__ jccb(Assembler::notEqual, profile_done);
// Object is null; update methodDataOop
Register mdo = klass_RInfo;
__ movoop(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
__ jmp(done_null);
__ bind(profile_done);
} else {
__ jcc(Assembler::equal, done_null);
}
__ verify_oop(obj);
if (op->fast_check()) {
// get object classo
// not a safepoint as obj null check happens earlier
if (k->is_loaded()) {
#ifdef _LP64
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
#else
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
#endif // _LP64
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::notEqual, *failure_target);
} else {
// get object class
// not a safepoint as obj null check happens earlier
__ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
if (k->is_loaded()) {
// See if we get an immediate positive hit
#ifdef _LP64
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target);
} else {
// See if we get an immediate positive hit
__ jcc(Assembler::equal, done);
// check for self
#ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo);
#else
__ cmpoop(klass_RInfo, k->constant_encoding());
#endif // _LP64
__ jcc(Assembler::equal, done);
__ push(klass_RInfo);
#ifdef _LP64
__ push(k_RInfo);
#else
__ pushoop(k->constant_encoding());
#endif // _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(klass_RInfo);
// result is a boolean
__ cmpl(klass_RInfo, 0);
__ jcc(Assembler::equal, *failure_target);
}
} else {
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(k_RInfo);
// result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *failure_target);
}
}
__ bind(done);
if (op->should_profile()) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ movoop(mdo, md->constant_encoding());
__ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
__ jmpb(update_done);
__ bind(profile_cast_failure);
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ subptr(counter_addr, DataLayout::counter_increment);
__ jmp(*stub->entry());
__ bind(update_done);
}
__ bind(done_null);
if (dst != obj) {
__ mov(dst, obj);
}
}
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
LIR_Code code = op->code();
@ -1646,140 +1833,6 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ bind(done);
} else if (op->code() == lir_checkcast) {
// we always need a stub for the failure case.
CodeStub* stub = op->stub();
Register obj = op->object()->as_register();
Register k_RInfo = op->tmp1()->as_register();
Register klass_RInfo = op->tmp2()->as_register();
Register dst = op->result_opr()->as_register();
ciKlass* k = op->klass();
Register Rtmp1 = noreg;
Label done;
if (obj == k_RInfo) {
k_RInfo = dst;
} else if (obj == klass_RInfo) {
klass_RInfo = dst;
}
if (k->is_loaded()) {
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
} else {
Rtmp1 = op->tmp3()->as_register();
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
__ movoop(k_RInfo, k->constant_encoding());
#else
k_RInfo = noreg;
#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->profiled_method() != NULL) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
Label profile_done;
__ jcc(Assembler::notEqual, profile_done);
// Object is null; update methodDataOop
ciMethodData* md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL, "need data for checkcast");
assert(data->is_BitData(), "need BitData for checkcast");
Register mdo = klass_RInfo;
__ movoop(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
__ jmp(done);
__ bind(profile_done);
} else {
__ jcc(Assembler::equal, done);
}
__ verify_oop(obj);
if (op->fast_check()) {
// get object class
// not a safepoint as obj null check happens earlier
if (k->is_loaded()) {
#ifdef _LP64
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
#else
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
#endif // _LP64
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::notEqual, *stub->entry());
__ bind(done);
} else {
// get object class
// not a safepoint as obj null check happens earlier
__ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
if (k->is_loaded()) {
// See if we get an immediate positive hit
#ifdef _LP64
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *stub->entry());
} else {
// See if we get an immediate positive hit
__ jcc(Assembler::equal, done);
// check for self
#ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo);
#else
__ cmpoop(klass_RInfo, k->constant_encoding());
#endif // _LP64
__ jcc(Assembler::equal, done);
__ push(klass_RInfo);
#ifdef _LP64
__ push(k_RInfo);
#else
__ pushoop(k->constant_encoding());
#endif // _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(klass_RInfo);
// result is a boolean
__ cmpl(klass_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
}
__ bind(done);
} else {
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(k_RInfo);
// result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ bind(done);
}
}
if (dst != obj) {
__ mov(dst, obj);
}
} else if (code == lir_instanceof) {
Register obj = op->object()->as_register();
Register k_RInfo = op->tmp1()->as_register();
@ -1922,7 +1975,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
}
}

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
Assembler::Condition acond, ncond;
switch (condition) {
@ -3253,13 +3305,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-Tier1ProfileVirtualCalls) {
+C1ProfileVirtualCalls) {
assert(op->recv()->is_single_cpu(), "recv must be allocated");
Register recv = op->recv()->as_register();
assert_different_registers(mdo, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
-if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
+if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
// We know the type that will be seen at this call site; we can
// statically update the methodDataOop rather than needing to do
// dynamic tests on the receiver type
@ -3272,7 +3324,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-__ addl(data_addr, DataLayout::counter_increment);
+__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
@ -3288,49 +3340,26 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ movoop(recv_addr, known_klass->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-__ addl(data_addr, DataLayout::counter_increment);
+__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
} else {
__ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
Label update_done;
-uint i;
-for (i = 0; i < VirtualCallData::row_limit(); i++) {
-Label next_test;
-// See if the receiver is receiver[n].
-__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
-__ jcc(Assembler::notEqual, next_test);
-Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
-__ addl(data_addr, DataLayout::counter_increment);
-__ jmp(update_done);
-__ bind(next_test);
-}
-// Didn't find receiver; find next empty slot and fill it in
-for (i = 0; i < VirtualCallData::row_limit(); i++) {
-Label next_test;
-Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
-__ cmpptr(recv_addr, (int32_t)NULL_WORD);
-__ jcc(Assembler::notEqual, next_test);
-__ movptr(recv_addr, recv);
-__ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-__ jmp(update_done);
-__ bind(next_test);
-}
+type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
-__ addl(counter_addr, DataLayout::counter_increment);
+__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
} else {
// Static call
-__ addl(counter_addr, DataLayout::counter_increment);
+__ addptr(counter_addr, DataLayout::counter_increment);
}
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
Unimplemented();
}
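The addl -> addptr changes above are not cosmetic: MDO counter cells are one machine word wide, so on LP64 a 32-bit add would update only the low half of the cell. A sketch of the intended full-width update, with ProfileCell as a stand-in name:

#include <cstdint>

typedef intptr_t ProfileCell;            // one word per counter cell, as in the MDO

void bump(ProfileCell* cell, int increment) {
  *cell += increment;                    // addptr-style, full cell width
}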

View file

@ -42,7 +42,10 @@
// method.
Address as_Address(LIR_Address* addr, Register tmp);

+// Record the type of the receiver in ReceiverTypeData
+void type_profile_helper(Register mdo,
+                         ciMethodData *md, ciProfileData *data,
+                         Register recv, Label* update_done);

public:
void store_parameter(Register r, int offset_from_esp_in_words);
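The newly declared type_profile_helper centralizes the receiver-row update that the removed inline loops used to emit. A standalone sketch of that policy, assuming a row limit of 2 and simplified storage (the names here are stand-ins, not HotSpot types):

#include <cstddef>
#include <cstdint>

const int row_limit = 2;                 // stands in for VirtualCallData::row_limit()
const int counter_increment = 1;         // stands in for DataLayout::counter_increment

struct ReceiverRows {
  const void* receiver[row_limit];       // receiver klass per row, NULL when empty
  uint64_t    count[row_limit];          // per-receiver hit count
  uint64_t    polymorphic;               // total counter, bumped when no row fits
};

void update(ReceiverRows& rows, const void* recv_klass) {
  for (int i = 0; i < row_limit; i++) {  // pass 1: known receiver?
    if (rows.receiver[i] == recv_klass) { rows.count[i] += counter_increment; return; }
  }
  for (int i = 0; i < row_limit; i++) {  // pass 2: claim an empty row
    if (rows.receiver[i] == NULL) {
      rows.receiver[i] = recv_klass;
      rows.count[i]    = counter_increment;
      return;
    }
  }
  rows.polymorphic += counter_increment; // megamorphic: only the total is counted
}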

View file

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -182,10 +182,22 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
}
-void LIRGenerator::increment_counter(address counter, int step) {
+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+LIR_Opr r;
+if (type == T_LONG) {
+r = LIR_OprFact::longConst(x);
+} else if (type == T_INT) {
+r = LIR_OprFact::intConst(x);
+} else {
+ShouldNotReachHere();
+}
+return r;
+}
+
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
-LIR_Address* addr = new LIR_Address(pointer, T_INT);
+LIR_Address* addr = new LIR_Address(pointer, type);
increment_counter(addr, step);
}
@ -194,7 +206,6 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
__ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
__ cmp_mem_int(condition, base, disp, c, info);
}
@ -1188,8 +1199,7 @@ void LIRGenerator::do_If(If* x) {
// add safepoint before generating condition code so it can be recomputed
if (x->is_safepoint()) {
// increment backedge counter if needed
-increment_backedge_counter(state_for(x, x->state_before()));
+increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
set_no_result(x);
@ -1197,6 +1207,7 @@ void LIRGenerator::do_If(If* x) {
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
__ cmp(lir_cond(cond), left, right);
+// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {

View file

@ -1068,15 +1068,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;

-#ifdef TIERED
case counter_overflow_id:
{
-Register bci = rax;
+Register bci = rax, method = rbx;
__ enter();
-OopMap* map = save_live_registers(sasm, 2);
+OopMap* map = save_live_registers(sasm, 3);
// Retrieve bci
__ movl(bci, Address(rbp, 2*BytesPerWord));
-int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
+// And a pointer to the methodOop
+__ movptr(method, Address(rbp, 3*BytesPerWord));
+int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
@ -1084,7 +1085,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ret(0);
}
break;
-#endif // TIERED

case new_type_array_id:
case new_object_array_id:
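The counter_overflow stub now expects two pushed arguments instead of one. A small runnable sketch of the frame layout it reads after enter(), assuming the usual saved-rbp/return-address arrangement implied by the offsets in the diff:

#include <cstdio>

int main() {
  const int BytesPerWord = sizeof(void*);   // 4 or 8, matching the 2*/3* offsets above
  printf("saved rbp      at rbp + %d\n", 0 * BytesPerWord);
  printf("return address at rbp + %d\n", 1 * BytesPerWord);
  printf("bci            at rbp + %d\n", 2 * BytesPerWord);  // read into rax
  printf("methodOop      at rbp + %d\n", 3 * BytesPerWord);  // read into rbx
  return 0;
}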

View file

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,14 +35,7 @@ define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true );
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 1500 );
-define_pd_global(intx, Tier2CompileThreshold, 1500 );
-define_pd_global(intx, Tier3CompileThreshold, 2500 );
-define_pd_global(intx, Tier4CompileThreshold, 4500 );
define_pd_global(intx, BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );

View file

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,19 +39,8 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, false);
-#ifdef TIERED
-define_pd_global(intx, CompileThreshold, 1000);
-#else
define_pd_global(intx, CompileThreshold, 10000);
-#endif // TIERED
-define_pd_global(intx, Tier2CompileThreshold, 10000);
-define_pd_global(intx, Tier3CompileThreshold, 20000);
-define_pd_global(intx, Tier4CompileThreshold, 40000);
define_pd_global(intx, BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);

View file

@ -1397,3 +1397,17 @@ void InterpreterMacroAssembler::notify_method_exit(
NOT_CC_INTERP(pop(state));
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}
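The comment above states the contract: jump when ((*counter_addr += increment) & mask) satisfies the condition. With cond == Assembler::zero this fires once every 2^k increments, where the mask selects k counter bits sitting above the status bits. A runnable model of the arithmetic, with assumed constant values (not HotSpot's actual InvocationCounter layout):

#include <cstdio>

int main() {
  const int count_shift = 3;                 // assumed stand-in for InvocationCounter::count_shift
  const int increment   = 1 << count_shift;  // models InvocationCounter::count_increment
  const int notify_log  = 2;                 // assumed stand-in for Tier0InvokeNotifyFreqLog
  const int mask        = ((1 << notify_log) - 1) << count_shift;

  int counter = 0;
  for (int i = 1; i <= 8; i++) {
    counter += increment;
    if ((counter & mask) == 0)
      printf("notify at invocation %d\n", i); // fires at 4 and 8, i.e. every 2^notify_log
  }
  return 0;
}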

View file

@ -185,6 +185,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
+void increment_mask_and_jump(Address counter_addr,
+                             int increment, int mask,
+                             Register scratch, bool preloaded,
+                             Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,

View file

@ -1480,3 +1480,17 @@ void InterpreterMacroAssembler::notify_method_exit(
NOT_CC_INTERP(pop(state));
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}

View file

@ -194,6 +194,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
+void increment_mask_and_jump(Address counter_addr,
+                             int increment, int mask,
+                             Register scratch, bool preloaded,
+                             Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,

View file

@ -359,40 +359,62 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
-const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
-const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
+const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+                                      in_bytes(InvocationCounter::counter_offset()));
+// Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+if (TieredCompilation) {
+int increment = InvocationCounter::count_increment;
+int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+Label no_mdo, done;
+if (ProfileInterpreter) {
+// Are we profiling?
+__ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+__ testptr(rax, rax);
+__ jccb(Assembler::zero, no_mdo);
+// Increment counter in the MDO
+const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                          in_bytes(InvocationCounter::counter_offset()));
+__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+__ jmpb(done);
+}
+__ bind(no_mdo);
+// Increment counter in methodOop (we don't need to load it, it's in rcx).
+__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
+__ bind(done);
+} else {
+const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() +
+                                     InvocationCounter::counter_offset());

if (ProfileInterpreter) { // %%% Merge this into methodDataOop
__ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter);               // load backedge counter
__ incrementl(rcx, InvocationCounter::count_increment);
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
__ movl(invocation_counter, rcx);             // save invocation count
__ addl(rcx, rax);                            // add both counters

// profile_method is non-null only for interpreted method so
// profile_method != NULL == !native_call
// BytecodeInterpreter only calls for native so code is elided.

if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
__ cmp32(rcx,
ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
__ jcc(Assembler::less, *profile_method_continue);
// if no method data exists, go to profile_method
__ test_method_data_pointer(rax, *profile_method);
}

__ cmp32(rcx,
ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
__ jcc(Assembler::aboveEqual, *overflow);
+}
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
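In the tiered branch above, the counter to bump is chosen dynamically: the MDO's invocation counter once an MDO exists, otherwise the one embedded in the methodOop. A compact model of that selection policy (names simplified, not HotSpot's types):

#include <cstddef>

struct MDO    { int invocation_counter; };
struct Method { MDO* mdo; int invocation_counter; };

void count_invocation(Method* m, int increment, int mask, void (*overflow)()) {
  int* counter = (m->mdo != NULL) ? &m->mdo->invocation_counter
                                  : &m->invocation_counter;
  *counter += increment;
  if ((*counter & mask) == 0) overflow();  // notify the compilation policy
}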

View file

@ -310,42 +310,61 @@ void InterpreterGenerator::generate_counter_incr(
Label* overflow,
Label* profile_method,
Label* profile_method_continue) {
-const Address invocation_counter(rbx,
-              methodOopDesc::invocation_counter_offset() +
-              InvocationCounter::counter_offset());
-const Address backedge_counter(rbx,
-              methodOopDesc::backedge_counter_offset() +
-              InvocationCounter::counter_offset());
+const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
+                                      in_bytes(InvocationCounter::counter_offset()));
+// Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+if (TieredCompilation) {
+int increment = InvocationCounter::count_increment;
+int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+Label no_mdo, done;
+if (ProfileInterpreter) {
+// Are we profiling?
+__ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
+__ testptr(rax, rax);
+__ jccb(Assembler::zero, no_mdo);
+// Increment counter in the MDO
+const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                          in_bytes(InvocationCounter::counter_offset()));
+__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
+__ jmpb(done);
+}
+__ bind(no_mdo);
+// Increment counter in methodOop (we don't need to load it, it's in ecx).
+__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
+__ bind(done);
+} else {
+const Address backedge_counter(rbx,
+              methodOopDesc::backedge_counter_offset() +
+              InvocationCounter::counter_offset());

if (ProfileInterpreter) { // %%% Merge this into methodDataOop
__ incrementl(Address(rbx,
methodOopDesc::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter);   // load backedge counter
__ incrementl(rcx, InvocationCounter::count_increment);
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
__ movl(invocation_counter, rcx); // save invocation count
__ addl(rcx, rax);                // add both counters

// profile_method is non-null only for interpreted method so
// profile_method != NULL == !native_call
if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop
__ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
__ jcc(Assembler::less, *profile_method_continue);
// if no method data exists, go to profile_method
__ test_method_data_pointer(rax, *profile_method);
}

__ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
__ jcc(Assembler::aboveEqual, *overflow);
+}
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

View file

@ -1558,47 +1558,68 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ testl(rdx, rdx);                    // check if forward or backward branch
__ jcc(Assembler::positive, dispatch); // count only if backward branch

+if (TieredCompilation) {
+Label no_mdo;
+int increment = InvocationCounter::count_increment;
+int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+if (ProfileInterpreter) {
+// Are we profiling?
+__ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+__ testptr(rbx, rbx);
+__ jccb(Assembler::zero, no_mdo);
+// Increment the MDO backedge counter
+const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
+                           rax, false, Assembler::zero, &backedge_counter_overflow);
+__ jmp(dispatch);
+}
+__ bind(no_mdo);
+// Increment backedge counter in methodOop
+__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
+                           rax, false, Assembler::zero, &backedge_counter_overflow);
+} else {
// increment counter
__ movl(rax, Address(rcx, be_offset));                   // load backedge counter
__ incrementl(rax, InvocationCounter::count_increment);  // increment counter
__ movl(Address(rcx, be_offset), rax);                   // store counter

__ movl(rax, Address(rcx, inv_offset));                  // load invocation counter
__ andl(rax, InvocationCounter::count_mask_value);       // and the status bits
__ addl(rax, Address(rcx, be_offset));                   // add both counters

if (ProfileInterpreter) {
// Test to see if we should create a method data oop
__ cmp32(rax,
ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
__ jcc(Assembler::less, dispatch);

// if no method data exists, go to profile method
__ test_method_data_pointer(rax, profile_method);

if (UseOnStackReplacement) {
// check for overflow against rbx, which is the MDO taken count
__ cmp32(rbx,
ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
__ jcc(Assembler::below, dispatch);

// When ProfileInterpreter is on, the backedge_count comes from the
// methodDataOop, which value does not get reset on the call to
// frequency_counter_overflow(). To avoid excessive calls to the overflow
// routine while the method is being compiled, add a second test to make
// sure the overflow function is called only once every overflow_frequency.
const int overflow_frequency = 1024;
__ andptr(rbx, overflow_frequency-1);
__ jcc(Assembler::zero, backedge_counter_overflow);
}
} else {
if (UseOnStackReplacement) {
// check for overflow against rax, which is the sum of the counters
__ cmp32(rax,
ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
__ jcc(Assembler::aboveEqual, backedge_counter_overflow);
}
}
+}
__ bind(dispatch);
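In the non-tiered ProfileInterpreter path, the comment explains the throttle: the MDO backedge count is never reset, so the OSR overflow routine would otherwise be re-entered on every backward branch while compilation is pending. A model of the combined test, assuming overflow_frequency stays a power of two as in the diff:

bool should_call_overflow(unsigned mdo_taken_count, unsigned limit) {
  const unsigned overflow_frequency = 1024;  // power of two, as above
  return mdo_taken_count >= limit &&
         (mdo_taken_count & (overflow_frequency - 1)) == 0;  // only when low bits wrap
}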

View file

@ -1583,51 +1583,71 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// r14: locals pointer
__ testl(rdx, rdx);                    // check if forward or backward branch
__ jcc(Assembler::positive, dispatch); // count only if backward branch

+if (TieredCompilation) {
+Label no_mdo;
+int increment = InvocationCounter::count_increment;
+int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+if (ProfileInterpreter) {
+// Are we profiling?
+__ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+__ testptr(rbx, rbx);
+__ jccb(Assembler::zero, no_mdo);
+// Increment the MDO backedge counter
+const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
+                                        in_bytes(InvocationCounter::counter_offset()));
+__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
+                           rax, false, Assembler::zero, &backedge_counter_overflow);
+__ jmp(dispatch);
+}
+__ bind(no_mdo);
+// Increment backedge counter in methodOop
+__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
+                           rax, false, Assembler::zero, &backedge_counter_overflow);
+} else {
// increment counter
__ movl(rax, Address(rcx, be_offset));                   // load backedge counter
__ incrementl(rax, InvocationCounter::count_increment);  // increment counter
__ movl(Address(rcx, be_offset), rax);                   // store counter

__ movl(rax, Address(rcx, inv_offset));                  // load invocation counter
__ andl(rax, InvocationCounter::count_mask_value);       // and the status bits
__ addl(rax, Address(rcx, be_offset));                   // add both counters

if (ProfileInterpreter) {
// Test to see if we should create a method data oop
__ cmp32(rax,
ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
__ jcc(Assembler::less, dispatch);

// if no method data exists, go to profile method
__ test_method_data_pointer(rax, profile_method);

if (UseOnStackReplacement) {
// check for overflow against ebx which is the MDO taken count
__ cmp32(rbx,
ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
__ jcc(Assembler::below, dispatch);

// When ProfileInterpreter is on, the backedge_count comes
// from the methodDataOop, which value does not get reset on
// the call to frequency_counter_overflow(). To avoid
// excessive calls to the overflow routine while the method is
// being compiled, add a second test to make sure the overflow
// function is called only once every overflow_frequency.
const int overflow_frequency = 1024;
__ andl(rbx, overflow_frequency - 1);
__ jcc(Assembler::zero, backedge_counter_overflow);
}
} else {
if (UseOnStackReplacement) {
// check for overflow against eax, which is the sum of the
// counters
__ cmp32(rax,
ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
__ jcc(Assembler::aboveEqual, backedge_counter_overflow);
}
}
+}
__ bind(dispatch);
@ -2912,7 +2932,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
void TemplateTable::invokevirtual_helper(Register index,
Register recv,
Register flags) {
-// Uses temporary registers rax, rdx  assert_different_registers(index, recv, rax, rdx);
+// Uses temporary registers rax, rdx
+assert_different_registers(index, recv, rax, rdx);

// Test for an invoke of a final method
Label notFinal;
View file

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -209,7 +209,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
(UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
} else {
// Itable stub size
-return (DebugVtables ? 512 : 72) + (CountCompiledCalls ? 13 : 0) +
+return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
(UseCompressedOops ? 32 : 0);  // 2 leaqs
}
// In order to tune these parameters, run the JVM with VM options

View file

@ -652,10 +652,20 @@ void Canonicalizer::do_If(If* x) {
else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
else                         { ShouldNotReachHere();                           }
If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
if (cmp->x() == cmp->y()) {
do_If(canon);
} else {
+if (compilation()->profile_branches()) {
+// TODO: If profiling, leave floating point comparisons unoptimized.
+// We currently do not support profiling of the unordered case.
+switch(cmp->op()) {
+case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
+case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
+set_canonical(x);
+return;
+}
+}
set_canonical(canon);
set_bci(cmp->bci());
}
@ -881,4 +891,5 @@ void Canonicalizer::do_UnsafePutObject(UnsafePutObject* x) {}
void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
-void Canonicalizer::do_ProfileCounter(ProfileCounter* x) {}
+void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
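The new float/double early-out exists because the unordered (NaN) outcome has no counter of its own, and branch profiling also has to survive successor rotation: when the canonicalizer swaps tsux and fsux, the GraphBuilder marks the If as swapped (see set_swapped in the GraphBuilder changes further down). A sketch of why that flag matters when counting outcomes:

// If the taken/not-taken successors of an If were rotated during
// canonicalization, the profile must record the complementary outcome.
void profile_branch(bool taken, bool swapped, long& taken_cnt, long& not_taken_cnt) {
  if (swapped) taken = !taken;   // undo the rotation for profiling purposes
  (taken ? taken_cnt : not_taken_cnt)++;
}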

View file

@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,11 @@
class Canonicalizer: InstructionVisitor {
private:
+Compilation *_compilation;
Instruction* _canonical;
int _bci;

+Compilation *compilation() { return _compilation; }
void set_canonical(Value x);
void set_bci(int bci) { _bci = bci; }
void set_constant(jint x) { set_canonical(new Constant(new IntConstant(x))); }
@ -43,7 +45,9 @@ class Canonicalizer: InstructionVisitor {
int* scale);
public:
-Canonicalizer(Value x, int bci) { _canonical = x; _bci = bci; if (CanonicalizeNodes) x->visit(this); }
+Canonicalizer(Compilation* c, Value x, int bci) : _compilation(c), _canonical(x), _bci(bci) {
+if (CanonicalizeNodes) x->visit(this);
+}
Value canonical() const { return _canonical; }
int bci() const { return _bci; }
@ -92,5 +96,5 @@ class Canonicalizer: InstructionVisitor {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x);
-virtual void do_ProfileCounter (ProfileCounter* x);
+virtual void do_ProfileInvoke (ProfileInvoke* x);
};

View file

@ -80,20 +80,21 @@ class CodeStubList: public _CodeStubList {
}
};

-#ifdef TIERED
class CounterOverflowStub: public CodeStub {
private:
CodeEmitInfo* _info;
int           _bci;
+LIR_Opr       _method;

public:
-CounterOverflowStub(CodeEmitInfo* info, int bci) : _info(info), _bci(bci) {
+CounterOverflowStub(CodeEmitInfo* info, int bci, LIR_Opr method) : _info(info), _bci(bci), _method(method) {
}

virtual void emit_code(LIR_Assembler* e);

virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case(_info);
+visitor->do_input(_method);
}

#ifndef PRODUCT
@ -101,7 +102,6 @@ public:
#endif // PRODUCT
};
-#endif // TIERED

class ConversionStub: public CodeStub {
private:
View file

@ -290,9 +290,13 @@ int Compilation::compile_java_method() {
CHECK_BAILOUT_(no_frame_size);

+if (is_profiling()) {
+method()->build_method_data();
+}

{
PhaseTraceTime timeit(_t_buildIR);
build_hir();
}
if (BailoutAfterHIR) {
BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size);
@ -447,6 +451,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _masm(NULL)
, _has_exception_handlers(false)
, _has_fpu_code(true)   // pessimistic assumption
+, _would_profile(false)
, _has_unsafe_access(false)
, _has_method_handle_invokes(false)
, _bailout_msg(NULL)
@ -461,12 +466,16 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
#endif // PRODUCT
{
PhaseTraceTime timeit(_t_compile);

_arena = Thread::current()->resource_area();
_env->set_compiler_data(this);
_exception_info_list = new ExceptionInfoList();
_implicit_exception_table.set_size(0);
compile_method();
+if (is_profiling() && _would_profile) {
+ciMethodData *md = method->method_data();
+assert (md != NULL, "Should have MDO");
+md->set_would_profile(_would_profile);
+}
}

Compilation::~Compilation() {

View file

@ -69,6 +69,7 @@ class Compilation: public StackObj {
bool          _has_exception_handlers;
bool          _has_fpu_code;
bool          _has_unsafe_access;
+bool          _would_profile;
bool          _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
const char*   _bailout_msg;
ExceptionInfoList* _exception_info_list;
@ -143,6 +144,7 @@ class Compilation: public StackObj {
void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
+void set_would_profile(bool f)                 { _would_profile = f; }
// Add a set of exception handlers covering the given PC offset
void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
// Statistics gathering
@ -202,6 +204,30 @@ class Compilation: public StackObj {
void compile_only_this_scope(outputStream* st, IRScope* scope);
void exclude_this_method();
#endif // PRODUCT

+bool is_profiling() {
+return env()->comp_level() == CompLevel_full_profile ||
+       env()->comp_level() == CompLevel_limited_profile;
+}
+bool count_invocations() { return is_profiling(); }
+bool count_backedges()   { return is_profiling(); }
+// Helpers for generation of profile information
+bool profile_branches() {
+return env()->comp_level() == CompLevel_full_profile &&
+       C1UpdateMethodData && C1ProfileBranches;
+}
+bool profile_calls() {
+return env()->comp_level() == CompLevel_full_profile &&
+       C1UpdateMethodData && C1ProfileCalls;
+}
+bool profile_inlined_calls() {
+return profile_calls() && C1ProfileInlinedCalls;
+}
+bool profile_checkcasts() {
+return env()->comp_level() == CompLevel_full_profile &&
+       C1UpdateMethodData && C1ProfileCheckcasts;
+}
};
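These predicates tie C1 profiling to the compilation level requested by the policy: full per-site profiling only at CompLevel_full_profile, with bare invocation/backedge counting also at CompLevel_limited_profile. A sketch of the gating, with the enum modeled after the CompLevel_* constants and the C1* flags reduced to plain booleans:

enum CompLevel { CompLevel_none, CompLevel_simple, CompLevel_limited_profile,
                 CompLevel_full_profile, CompLevel_full_optimization };

bool is_profiling(CompLevel level) {
  return level == CompLevel_full_profile || level == CompLevel_limited_profile;
}
bool profile_branches(CompLevel level, bool c1_update_md, bool c1_profile_branches) {
  // per-branch counters only at the full-profile level
  return level == CompLevel_full_profile && c1_update_md && c1_profile_branches;
}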

View file

@ -39,9 +39,7 @@ class Compiler: public AbstractCompiler {
// Name of this compiler
virtual const char* name()                     { return "C1"; }

-#ifdef TIERED
virtual bool is_c1()                           { return true; };
-#endif // TIERED

BufferBlob* build_buffer_blob();

View file

@ -1144,8 +1144,16 @@ void GraphBuilder::increment() {
void GraphBuilder::_goto(int from_bci, int to_bci) { void GraphBuilder::_goto(int from_bci, int to_bci) {
profile_bci(from_bci); Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
append(new Goto(block_at(to_bci), to_bci <= from_bci)); if (is_profiling()) {
compilation()->set_would_profile(true);
}
if (profile_branches()) {
x->set_profiled_method(method());
x->set_profiled_bci(bci());
x->set_should_profile(true);
}
append(x);
} }
@ -1153,11 +1161,45 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta
BlockBegin* tsux = block_at(stream()->get_dest()); BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci()); BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci(); bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If(); Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
if (profile_branches() && (if_node != NULL)) {
if_node->set_profiled_method(method()); if (is_profiling()) {
if_node->set_profiled_bci(bci()); If* if_node = i->as_If();
if_node->set_should_profile(true); if (if_node != NULL) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
// At level 2 we need the proper bci to count backedges
if_node->set_profiled_bci(bci());
if (profile_branches()) {
// Successors can be rotated by the canonicalizer, check for this case.
if_node->set_profiled_method(method());
if_node->set_should_profile(true);
if (if_node->tsux() == fsux) {
if_node->set_swapped(true);
}
}
return;
}
// Check if this If was reduced to Goto.
Goto *goto_node = i->as_Goto();
if (goto_node != NULL) {
compilation()->set_would_profile(true);
if (profile_branches()) {
goto_node->set_profiled_method(method());
goto_node->set_profiled_bci(bci());
goto_node->set_should_profile(true);
// Find out which successor is used.
if (goto_node->default_sux() == tsux) {
goto_node->set_direction(Goto::taken);
} else if (goto_node->default_sux() == fsux) {
goto_node->set_direction(Goto::not_taken);
} else {
ShouldNotReachHere();
}
}
return;
}
} }
} }
@ -1698,8 +1740,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (recv != NULL && if (recv != NULL &&
(code == Bytecodes::_invokespecial || (code == Bytecodes::_invokespecial ||
!is_loaded || target->is_final() || !is_loaded || target->is_final())) {
profile_calls())) {
// invokespecial always needs a NULL check. invokevirtual where // invokespecial always needs a NULL check. invokevirtual where
// the target is final or where it's not known that whether the // the target is final or where it's not known that whether the
// target is final requires a NULL check. Otherwise normal // target is final requires a NULL check. Otherwise normal
@ -1709,15 +1750,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
null_check(recv); null_check(recv);
} }
if (profile_calls()) { if (is_profiling()) {
assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set"); if (recv != NULL && profile_calls()) {
ciKlass* target_klass = NULL; null_check(recv);
if (cha_monomorphic_target != NULL) { }
target_klass = cha_monomorphic_target->holder(); // Note that we'd collect profile data in this method if we wanted it.
} else if (exact_target != NULL) { compilation()->set_would_profile(true);
target_klass = exact_target->holder();
if (profile_calls()) {
assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
ciKlass* target_klass = NULL;
if (cha_monomorphic_target != NULL) {
target_klass = cha_monomorphic_target->holder();
} else if (exact_target != NULL) {
target_klass = exact_target->holder();
}
profile_call(recv, target_klass);
} }
profile_call(recv, target_klass);
} }
Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before); Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
@ -1782,10 +1831,16 @@ void GraphBuilder::check_cast(int klass_index) {
CheckCast* c = new CheckCast(klass, apop(), state_before); CheckCast* c = new CheckCast(klass, apop(), state_before);
apush(append_split(c)); apush(append_split(c));
c->set_direct_compare(direct_compare(klass)); c->set_direct_compare(direct_compare(klass));
if (profile_checkcasts()) {
c->set_profiled_method(method()); if (is_profiling()) {
c->set_profiled_bci(bci()); // Note that we'd collect profile data in this method if we wanted it.
c->set_should_profile(true); compilation()->set_would_profile(true);
if (profile_checkcasts()) {
c->set_profiled_method(method());
c->set_profiled_bci(bci());
c->set_should_profile(true);
}
} }
} }
@ -1868,7 +1923,7 @@ Value GraphBuilder::round_fp(Value fp_value) {
Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
Canonicalizer canon(instr, bci); Canonicalizer canon(compilation(), instr, bci);
Instruction* i1 = canon.canonical(); Instruction* i1 = canon.canonical();
if (i1->bci() != -99) { if (i1->bci() != -99) {
// Canonicalizer returned an instruction which was already // Canonicalizer returned an instruction which was already
@ -2651,18 +2706,6 @@ BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, Va
h->set_depth_first_number(0); h->set_depth_first_number(0);
Value l = h; Value l = h;
if (profile_branches()) {
// Increment the invocation count on entry to the method. We
// can't use profile_invocation here because append isn't setup to
// work properly at this point. The instruction have to be
// appended to the instruction stream by hand.
Value m = new Constant(new ObjectConstant(compilation()->method()));
h->set_next(m, 0);
Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
m->set_next(p, 0);
l = p;
}
BlockEnd* g = new Goto(entry, false); BlockEnd* g = new Goto(entry, false);
l->set_next(g, entry->bci()); l->set_next(g, entry->bci());
h->set_end(g); h->set_end(g);
@ -2688,10 +2731,10 @@ BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry,
// also necessary when profiling so that there's a single block that // also necessary when profiling so that there's a single block that
// can increment the interpreter_invocation_count. // can increment the interpreter_invocation_count.
BlockBegin* new_header_block; BlockBegin* new_header_block;
if (std_entry->number_of_preds() == 0 && !profile_branches()) { if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
new_header_block = std_entry;
} else {
new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state); new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
} else {
new_header_block = std_entry;
} }
// setup start block (root for the IR graph) // setup start block (root for the IR graph)
@ -3115,16 +3158,21 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
Values* args = state()->pop_arguments(callee->arg_size()); Values* args = state()->pop_arguments(callee->arg_size());
ValueStack* locks = lock_stack(); ValueStack* locks = lock_stack();
if (profile_calls()) {
if (is_profiling()) {
// Don't profile in the special case where the root method // Don't profile in the special case where the root method
// is the intrinsic // is the intrinsic
if (callee != method()) { if (callee != method()) {
Value recv = NULL; // Note that we'd collect profile data in this method if we wanted it.
if (has_receiver) { compilation()->set_would_profile(true);
recv = args->at(0); if (profile_calls()) {
null_check(recv); Value recv = NULL;
if (has_receiver) {
recv = args->at(0);
null_check(recv);
}
profile_call(recv, NULL);
} }
profile_call(recv, NULL);
} }
} }
@ -3296,7 +3344,9 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
assert(!callee->is_native(), "callee must not be native"); assert(!callee->is_native(), "callee must not be native");
if (count_backedges() && callee->has_loops()) {
INLINE_BAILOUT("too complex for tiered");
}
// first perform tests of things it's not possible to inline // first perform tests of things it's not possible to inline
if (callee->has_exception_handlers() && if (callee->has_exception_handlers() &&
!InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers"); !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
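
The new guard at the top of try_inline_full() keeps loop-bearing callees out of line whenever backedge counting is on, presumably so the loop stays a separate compilation unit whose backedge counter the tiered policy can observe. A minimal standalone sketch of the predicate — illustrative names, not HotSpot code:

#include <cstdio>

// When the compiled code counts backedges for the tiered policy, callees
// that contain loops are not inlined ("too complex for tiered").
bool may_inline(bool count_backedges, bool callee_has_loops) {
  if (count_backedges && callee_has_loops) return false;  // INLINE_BAILOUT
  return true;
}

int main() {
  printf("%d %d\n", may_inline(true, true), may_inline(false, true));  // 0 1
  return 0;
}
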
@ -3365,11 +3415,18 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
null_check(recv); null_check(recv);
} }
if (profile_inlined_calls()) { if (is_profiling()) {
profile_call(recv, holder_known ? callee->holder() : NULL); // Note that we'd collect profile data in this method if we wanted it.
} // this may be redundant here...
compilation()->set_would_profile(true);
profile_invocation(callee); if (profile_calls()) {
profile_call(recv, holder_known ? callee->holder() : NULL);
}
if (profile_inlined_calls()) {
profile_invocation(callee, state(), 0);
}
}
// Introduce a new callee continuation point - if the callee has // Introduce a new callee continuation point - if the callee has
// more than one return instruction or the return does not allow // more than one return instruction or the return does not allow
@ -3755,30 +3812,10 @@ void GraphBuilder::print_stats() {
} }
#endif // PRODUCT #endif // PRODUCT
void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) { void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
append(new ProfileCall(method(), bci(), recv, known_holder)); append(new ProfileCall(method(), bci(), recv, known_holder));
} }
void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state, int bci) {
void GraphBuilder::profile_invocation(ciMethod* callee) { append(new ProfileInvoke(callee, state, bci));
if (profile_calls()) {
// increment the interpreter_invocation_count for the inlinee
Value m = append(new Constant(new ObjectConstant(callee)));
append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
}
}
void GraphBuilder::profile_bci(int bci) {
if (profile_branches()) {
ciMethodData* md = method()->method_data();
if (md == NULL) {
BAILOUT("out of memory building methodDataOop");
}
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
Value mdo = append(new Constant(new ObjectConstant(md)));
append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
}
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -342,27 +342,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);) NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)
// methodDataOop profiling helpers
void profile_call(Value recv, ciKlass* predicted_holder); void profile_call(Value recv, ciKlass* predicted_holder);
void profile_invocation(ciMethod* method); void profile_invocation(ciMethod* inlinee, ValueStack* state, int bci);
void profile_bci(int bci);
// Helpers for generation of profile information // Shortcuts to profiling control.
bool profile_branches() { bool is_profiling() { return _compilation->is_profiling(); }
return _compilation->env()->comp_level() == CompLevel_fast_compile && bool count_invocations() { return _compilation->count_invocations(); }
Tier1UpdateMethodData && Tier1ProfileBranches; bool count_backedges() { return _compilation->count_backedges(); }
} bool profile_branches() { return _compilation->profile_branches(); }
bool profile_calls() { bool profile_calls() { return _compilation->profile_calls(); }
return _compilation->env()->comp_level() == CompLevel_fast_compile && bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
Tier1UpdateMethodData && Tier1ProfileCalls; bool profile_checkcasts() { return _compilation->profile_checkcasts(); }
}
bool profile_inlined_calls() {
return profile_calls() && Tier1ProfileInlinedCalls;
}
bool profile_checkcasts() {
return _compilation->env()->comp_level() == CompLevel_fast_compile &&
Tier1UpdateMethodData && Tier1ProfileCheckcasts;
}
public: public:
NOT_PRODUCT(void print_stats();) NOT_PRODUCT(void print_stats();)

View file

@ -296,19 +296,21 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
void IR::optimize() { void IR::optimize() {
Optimizer opt(this); Optimizer opt(this);
if (DoCEE) { if (!compilation()->profile_branches()) {
opt.eliminate_conditional_expressions(); if (DoCEE) {
opt.eliminate_conditional_expressions();
#ifndef PRODUCT #ifndef PRODUCT
if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); } if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
if (PrintIR || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); } if (PrintIR || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
#endif #endif
} }
if (EliminateBlocks) { if (EliminateBlocks) {
opt.eliminate_blocks(); opt.eliminate_blocks();
#ifndef PRODUCT #ifndef PRODUCT
if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); } if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); } if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
#endif #endif
}
} }
if (EliminateNullChecks) { if (EliminateNullChecks) {
opt.eliminate_null_checks(); opt.eliminate_null_checks();
@ -484,6 +486,8 @@ class ComputeLinearScanOrder : public StackObj {
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order) BlockList _work_list; // temporary list (used in mark_loops and compute_order)
Compilation* _compilation;
// accessors for _visited_blocks and _active_blocks // accessors for _visited_blocks and _active_blocks
void init_visited() { _active_blocks.clear(); _visited_blocks.clear(); } void init_visited() { _active_blocks.clear(); _visited_blocks.clear(); }
bool is_visited(BlockBegin* b) const { return _visited_blocks.at(b->block_id()); } bool is_visited(BlockBegin* b) const { return _visited_blocks.at(b->block_id()); }
@ -526,8 +530,9 @@ class ComputeLinearScanOrder : public StackObj {
NOT_PRODUCT(void print_blocks();) NOT_PRODUCT(void print_blocks();)
DEBUG_ONLY(void verify();) DEBUG_ONLY(void verify();)
Compilation* compilation() const { return _compilation; }
public: public:
ComputeLinearScanOrder(BlockBegin* start_block); ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);
// accessors for final result // accessors for final result
BlockList* linear_scan_order() const { return _linear_scan_order; } BlockList* linear_scan_order() const { return _linear_scan_order; }
@ -535,7 +540,7 @@ class ComputeLinearScanOrder : public StackObj {
}; };
ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) : ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
_max_block_id(BlockBegin::number_of_blocks()), _max_block_id(BlockBegin::number_of_blocks()),
_num_blocks(0), _num_blocks(0),
_num_loops(0), _num_loops(0),
@ -547,13 +552,18 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) :
_loop_end_blocks(8), _loop_end_blocks(8),
_work_list(8), _work_list(8),
_linear_scan_order(NULL), // initialized later with correct size _linear_scan_order(NULL), // initialized later with correct size
_loop_map(0, 0) // initialized later with correct size _loop_map(0, 0), // initialized later with correct size
_compilation(c)
{ {
TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order"); TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
init_visited(); init_visited();
count_edges(start_block, NULL); count_edges(start_block, NULL);
if (compilation()->is_profiling()) {
compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
}
if (_num_loops > 0) { if (_num_loops > 0) {
mark_loops(); mark_loops();
clear_non_natural_loops(start_block); clear_non_natural_loops(start_block);
@ -1130,7 +1140,7 @@ void ComputeLinearScanOrder::verify() {
void IR::compute_code() { void IR::compute_code() {
assert(is_valid(), "IR must be valid"); assert(is_valid(), "IR must be valid");
ComputeLinearScanOrder compute_order(start()); ComputeLinearScanOrder compute_order(compilation(), start());
_num_loops = compute_order.num_loops(); _num_loops = compute_order.num_loops();
_code = compute_order.linear_scan_order(); _code = compute_order.linear_scan_order();
} }

View file

@ -740,9 +740,9 @@ void BlockBegin::block_values_do(ValueVisitor* f) {
#ifndef PRODUCT #ifndef PRODUCT
#define TRACE_PHI(code) if (PrintPhiFunctions) { code; } #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
#else #else
#define TRACE_PHI(code) #define TRACE_PHI(code)
#endif #endif
@ -1011,3 +1011,7 @@ int Phi::operand_count() const {
void Throw::state_values_do(ValueVisitor* f) { void Throw::state_values_do(ValueVisitor* f) {
BlockEnd::state_values_do(f); BlockEnd::state_values_do(f);
} }
void ProfileInvoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}

View file

@ -98,7 +98,7 @@ class UnsafePrefetch;
class UnsafePrefetchRead; class UnsafePrefetchRead;
class UnsafePrefetchWrite; class UnsafePrefetchWrite;
class ProfileCall; class ProfileCall;
class ProfileCounter; class ProfileInvoke;
// A Value is a reference to the instruction creating the value // A Value is a reference to the instruction creating the value
typedef Instruction* Value; typedef Instruction* Value;
@ -195,7 +195,7 @@ class InstructionVisitor: public StackObj {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0; virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0;
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0; virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
virtual void do_ProfileCall (ProfileCall* x) = 0; virtual void do_ProfileCall (ProfileCall* x) = 0;
virtual void do_ProfileCounter (ProfileCounter* x) = 0; virtual void do_ProfileInvoke (ProfileInvoke* x) = 0;
}; };
@ -1733,20 +1733,45 @@ BASE(BlockEnd, StateSplit)
LEAF(Goto, BlockEnd) LEAF(Goto, BlockEnd)
public:
enum Direction {
none, // Just a regular goto
taken, not_taken // Goto produced from If
};
private:
ciMethod* _profiled_method;
int _profiled_bci;
Direction _direction;
public: public:
// creation // creation
Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false) : BlockEnd(illegalType, state_before, is_safepoint) { Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false)
: BlockEnd(illegalType, state_before, is_safepoint)
, _direction(none)
, _profiled_method(NULL)
, _profiled_bci(0) {
BlockList* s = new BlockList(1); BlockList* s = new BlockList(1);
s->append(sux); s->append(sux);
set_sux(s); set_sux(s);
} }
Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint) { Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint)
, _direction(none)
, _profiled_method(NULL)
, _profiled_bci(0) {
BlockList* s = new BlockList(1); BlockList* s = new BlockList(1);
s->append(sux); s->append(sux);
set_sux(s); set_sux(s);
} }
bool should_profile() const { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches
int profiled_bci() const { return _profiled_bci; }
Direction direction() const { return _direction; }
void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); }
void set_profiled_method(ciMethod* method) { _profiled_method = method; }
void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_direction(Direction d) { _direction = d; }
}; };
@ -1757,6 +1782,8 @@ LEAF(If, BlockEnd)
Value _y; Value _y;
ciMethod* _profiled_method; ciMethod* _profiled_method;
int _profiled_bci; // Canonicalizer may alter bci of If node int _profiled_bci; // Canonicalizer may alter bci of If node
bool _swapped; // Is the order reversed with respect to the original If in the
// bytecode stream?
public: public:
// creation // creation
// unordered_is_true is valid for float/double compares only // unordered_is_true is valid for float/double compares only
@ -1767,6 +1794,7 @@ LEAF(If, BlockEnd)
, _y(y) , _y(y)
, _profiled_method(NULL) , _profiled_method(NULL)
, _profiled_bci(0) , _profiled_bci(0)
, _swapped(false)
{ {
ASSERT_VALUES ASSERT_VALUES
set_flag(UnorderedIsTrueFlag, unordered_is_true); set_flag(UnorderedIsTrueFlag, unordered_is_true);
@ -1788,7 +1816,8 @@ LEAF(If, BlockEnd)
BlockBegin* usux() const { return sux_for(unordered_is_true()); } BlockBegin* usux() const { return sux_for(unordered_is_true()); }
bool should_profile() const { return check_flag(ProfileMDOFlag); } bool should_profile() const { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches
int profiled_bci() const { return _profiled_bci; } // set only for profiled branches int profiled_bci() const { return _profiled_bci; } // set for profiled branches and tiered
bool is_swapped() const { return _swapped; }
// manipulation // manipulation
void swap_operands() { void swap_operands() {
@ -1807,7 +1836,7 @@ LEAF(If, BlockEnd)
void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); } void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); }
void set_profiled_method(ciMethod* method) { _profiled_method = method; } void set_profiled_method(ciMethod* method) { _profiled_method = method; }
void set_profiled_bci(int bci) { _profiled_bci = bci; } void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_swapped(bool value) { _swapped = value; }
// generic // generic
virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); } virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
}; };
@ -2235,7 +2264,6 @@ LEAF(UnsafePrefetchWrite, UnsafePrefetch)
} }
}; };
LEAF(ProfileCall, Instruction) LEAF(ProfileCall, Instruction)
private: private:
ciMethod* _method; ciMethod* _method;
@ -2263,35 +2291,32 @@ LEAF(ProfileCall, Instruction)
virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); } virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); }
}; };
// Use to trip invocation counter of an inlined method
// LEAF(ProfileInvoke, Instruction)
// Simple node representing a counter update generally used for updating MDOs
//
LEAF(ProfileCounter, Instruction)
private: private:
Value _mdo; ciMethod* _inlinee;
int _offset; ValueStack* _state;
int _increment; int _bci_of_invoke;
public: public:
ProfileCounter(Value mdo, int offset, int increment = 1) ProfileInvoke(ciMethod* inlinee, ValueStack* state, int bci)
: Instruction(voidType) : Instruction(voidType)
, _mdo(mdo) , _inlinee(inlinee)
, _offset(offset) , _bci_of_invoke(bci)
, _increment(increment) , _state(state)
{ {
// The ProfileCounter has side-effects and must occur precisely where located // The ProfileInvoke has side-effects and must occur precisely where located
pin(); pin();
} }
Value mdo() { return _mdo; } ciMethod* inlinee() { return _inlinee; }
int offset() { return _offset; } ValueStack* state() { return _state; }
int increment() { return _increment; } int bci_of_invoke() { return _bci_of_invoke; }
virtual void input_values_do(ValueVisitor*) {}
virtual void input_values_do(ValueVisitor* f) { f->visit(&_mdo); } virtual void state_values_do(ValueVisitor*);
}; };
class BlockPair: public CompilationResourceObj { class BlockPair: public CompilationResourceObj {
private: private:
BlockBegin* _from; BlockBegin* _from;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -819,7 +819,6 @@ void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
output()->put(')'); output()->put(')');
} }
void InstructionPrinter::do_ProfileCall(ProfileCall* x) { void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
output()->print("profile "); output()->print("profile ");
print_value(x->recv()); print_value(x->recv());
@ -831,20 +830,11 @@ void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
output()->put(')'); output()->put(')');
} }
void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
output()->print("profile_invoke ");
output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
output()->put(')');
void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) {
ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant();
if (oc != NULL && oc->value()->is_method() &&
x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) {
print_value(x->mdo());
output()->print(".interpreter_invocation_count += %d", x->increment());
} else {
output()->print("counter [");
print_value(x->mdo());
output()->print(" + %d] += %d", x->offset(), x->increment());
}
} }
#endif // PRODUCT #endif // PRODUCT

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -123,6 +123,6 @@ class InstructionPrinter: public InstructionVisitor {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x); virtual void do_ProfileCall (ProfileCall* x);
virtual void do_ProfileCounter (ProfileCounter* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
}; };
#endif // PRODUCT #endif // PRODUCT

View file

@ -345,9 +345,8 @@ void LIR_OpBranch::negate_cond() {
LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3,
bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch,
CodeStub* stub, CodeStub* stub)
ciMethod* profiled_method,
int profiled_bci)
: LIR_Op(code, result, NULL) : LIR_Op(code, result, NULL)
, _object(object) , _object(object)
, _array(LIR_OprFact::illegalOpr) , _array(LIR_OprFact::illegalOpr)
@ -359,8 +358,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object,
, _stub(stub) , _stub(stub)
, _info_for_patch(info_for_patch) , _info_for_patch(info_for_patch)
, _info_for_exception(info_for_exception) , _info_for_exception(info_for_exception)
, _profiled_method(profiled_method) , _profiled_method(NULL)
, _profiled_bci(profiled_bci) { , _profiled_bci(-1)
, _should_profile(false)
{
if (code == lir_checkcast) { if (code == lir_checkcast) {
assert(info_for_exception != NULL, "checkcast throws exceptions"); assert(info_for_exception != NULL, "checkcast throws exceptions");
} else if (code == lir_instanceof) { } else if (code == lir_instanceof) {
@ -372,7 +373,7 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object,
LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci) LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception)
: LIR_Op(code, LIR_OprFact::illegalOpr, NULL) : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)
, _object(object) , _object(object)
, _array(array) , _array(array)
@ -384,8 +385,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, L
, _stub(NULL) , _stub(NULL)
, _info_for_patch(NULL) , _info_for_patch(NULL)
, _info_for_exception(info_for_exception) , _info_for_exception(info_for_exception)
, _profiled_method(profiled_method) , _profiled_method(NULL)
, _profiled_bci(profiled_bci) { , _profiled_bci(-1)
, _should_profile(false)
{
if (code == lir_store_check) { if (code == lir_store_check) {
_stub = new ArrayStoreExceptionStub(info_for_exception); _stub = new ArrayStoreExceptionStub(info_for_exception);
assert(info_for_exception != NULL, "store_check throws exceptions"); assert(info_for_exception != NULL, "store_check throws exceptions");
@ -495,6 +498,8 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_monaddr: // input and result always valid, info always invalid case lir_monaddr: // input and result always valid, info always invalid
case lir_null_check: // input and info always valid, result always invalid case lir_null_check: // input and info always valid, result always invalid
case lir_move: // input and result always valid, may have info case lir_move: // input and result always valid, may have info
case lir_pack64: // input and result always valid
case lir_unpack64: // input and result always valid
case lir_prefetchr: // input always valid, result and info always invalid case lir_prefetchr: // input always valid, result and info always invalid
case lir_prefetchw: // input always valid, result and info always invalid case lir_prefetchw: // input always valid, result and info always invalid
{ {
@ -903,7 +908,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
assert(opProfileCall->_tmp1->is_valid(), "used"); do_temp(opProfileCall->_tmp1); assert(opProfileCall->_tmp1->is_valid(), "used"); do_temp(opProfileCall->_tmp1);
break; break;
} }
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
} }
@ -1015,7 +1019,11 @@ void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) {
} }
void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) { void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) {
masm->emit_opTypeCheck(this); if (code() == lir_checkcast) {
masm->emit_checkcast(this);
} else {
masm->emit_opTypeCheck(this);
}
if (stub()) { if (stub()) {
masm->emit_code_stub(stub()); masm->emit_code_stub(stub());
} }
@ -1041,12 +1049,10 @@ void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
masm->emit_delay(this); masm->emit_delay(this);
} }
void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) { void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) {
masm->emit_profile_call(this); masm->emit_profile_call(this);
} }
// LIR_List // LIR_List
LIR_List::LIR_List(Compilation* compilation, BlockBegin* block) LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
: _operations(8) : _operations(8)
@ -1364,19 +1370,23 @@ void LIR_List::checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
ciMethod* profiled_method, int profiled_bci) { ciMethod* profiled_method, int profiled_bci) {
append(new LIR_OpTypeCheck(lir_checkcast, result, object, klass, LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub, tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub);
profiled_method, profiled_bci)); if (profiled_method != NULL) {
c->set_profiled_method(profiled_method);
c->set_profiled_bci(profiled_bci);
c->set_should_profile(true);
}
append(c);
} }
void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch) { void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch) {
append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL, NULL, 0)); append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL));
} }
void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) { void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) {
append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception, NULL, 0)); append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception));
} }
@ -1611,6 +1621,8 @@ const char * LIR_Op::name() const {
case lir_convert: s = "convert"; break; case lir_convert: s = "convert"; break;
case lir_alloc_object: s = "alloc_obj"; break; case lir_alloc_object: s = "alloc_obj"; break;
case lir_monaddr: s = "mon_addr"; break; case lir_monaddr: s = "mon_addr"; break;
case lir_pack64: s = "pack64"; break;
case lir_unpack64: s = "unpack64"; break;
// LIR_Op2 // LIR_Op2
case lir_cmp: s = "cmp"; break; case lir_cmp: s = "cmp"; break;
case lir_cmp_l2i: s = "cmp_l2i"; break; case lir_cmp_l2i: s = "cmp_l2i"; break;
@ -1664,7 +1676,6 @@ const char * LIR_Op::name() const {
case lir_cas_int: s = "cas_int"; break; case lir_cas_int: s = "cas_int"; break;
// LIR_OpProfileCall // LIR_OpProfileCall
case lir_profile_call: s = "profile_call"; break; case lir_profile_call: s = "profile_call"; break;
case lir_none: ShouldNotReachHere();break; case lir_none: ShouldNotReachHere();break;
default: s = "illegal_op"; break; default: s = "illegal_op"; break;
} }
@ -1922,7 +1933,6 @@ void LIR_OpProfileCall::print_instr(outputStream* out) const {
tmp1()->print(out); out->print(" "); tmp1()->print(out); out->print(" ");
} }
#endif // PRODUCT #endif // PRODUCT
// Implementation of LIR_InsertionBuffer // Implementation of LIR_InsertionBuffer

View file

@ -849,6 +849,8 @@ enum LIR_Code {
, lir_monaddr , lir_monaddr
, lir_roundfp , lir_roundfp
, lir_safepoint , lir_safepoint
, lir_pack64
, lir_unpack64
, lir_unwind , lir_unwind
, end_op1 , end_op1
, begin_op2 , begin_op2
@ -1464,18 +1466,16 @@ class LIR_OpTypeCheck: public LIR_Op {
CodeEmitInfo* _info_for_patch; CodeEmitInfo* _info_for_patch;
CodeEmitInfo* _info_for_exception; CodeEmitInfo* _info_for_exception;
CodeStub* _stub; CodeStub* _stub;
// Helpers for Tier1UpdateMethodData
ciMethod* _profiled_method; ciMethod* _profiled_method;
int _profiled_bci; int _profiled_bci;
bool _should_profile;
public: public:
LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
ciMethod* profiled_method, int profiled_bci);
LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
ciMethod* profiled_method, int profiled_bci);
LIR_Opr object() const { return _object; } LIR_Opr object() const { return _object; }
LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; } LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
@ -1489,8 +1489,12 @@ public:
CodeStub* stub() const { return _stub; } CodeStub* stub() const { return _stub; }
// methodDataOop profiling // methodDataOop profiling
ciMethod* profiled_method() { return _profiled_method; } void set_profiled_method(ciMethod *method) { _profiled_method = method; }
int profiled_bci() { return _profiled_bci; } void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_should_profile(bool b) { _should_profile = b; }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
virtual void emit_code(LIR_Assembler* masm); virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
@ -1771,7 +1775,6 @@ class LIR_OpProfileCall : public LIR_Op {
virtual void print_instr(outputStream* out) const PRODUCT_RETURN; virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
}; };
class LIR_InsertionBuffer; class LIR_InsertionBuffer;
//--------------------------------LIR_List--------------------------------------------------- //--------------------------------LIR_List---------------------------------------------------
@ -1835,6 +1838,7 @@ class LIR_List: public CompilationResourceObj {
//---------- mutators --------------- //---------- mutators ---------------
void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); } void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); }
void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); } void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); }
void remove_at(int i) { _operations.remove_at(i); }
//---------- printing ------------- //---------- printing -------------
void print_instructions() PRODUCT_RETURN; void print_instructions() PRODUCT_RETURN;
@ -1908,6 +1912,9 @@ class LIR_List: public CompilationResourceObj {
void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); } void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); }
void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); } void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); }
void pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); } void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); }
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
@ -2034,15 +2041,17 @@ class LIR_List: public CompilationResourceObj {
void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
ciMethod* profiled_method, int profiled_bci); ciMethod* profiled_method, int profiled_bci);
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
// methodDataOop profiling // methodDataOop profiling
void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); } void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
}
}; };
void print_LIR(BlockList* blocks); void print_LIR(BlockList* blocks);

View file

@ -548,6 +548,16 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
break; break;
#ifdef SPARC
case lir_pack64:
pack64(op->in_opr(), op->result_opr());
break;
case lir_unpack64:
unpack64(op->in_opr(), op->result_opr());
break;
#endif
case lir_unwind: case lir_unwind:
unwind_op(op->in_opr()); unwind_op(op->in_opr());
break; break;

View file

@ -187,6 +187,7 @@ class LIR_Assembler: public CompilationResourceObj {
void emit_alloc_obj(LIR_OpAllocObj* op); void emit_alloc_obj(LIR_OpAllocObj* op);
void emit_alloc_array(LIR_OpAllocArray* op); void emit_alloc_array(LIR_OpAllocArray* op);
void emit_opTypeCheck(LIR_OpTypeCheck* op); void emit_opTypeCheck(LIR_OpTypeCheck* op);
void emit_checkcast(LIR_OpTypeCheck* op);
void emit_compare_and_swap(LIR_OpCompareAndSwap* op); void emit_compare_and_swap(LIR_OpCompareAndSwap* op);
void emit_lock(LIR_OpLock* op); void emit_lock(LIR_OpLock* op);
void emit_call(LIR_OpJavaCall* op); void emit_call(LIR_OpJavaCall* op);

View file

@ -480,16 +480,6 @@ void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result
} }
// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
LIR_Address* counter = new LIR_Address(base, offset, T_INT);
LIR_Opr result = new_register(T_INT);
__ load(counter, result);
__ add(result, LIR_OprFact::intConst(increment), result);
__ store(result, counter);
return result;
}
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) { void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
LIR_Opr result_op = result; LIR_Opr result_op = result;
@ -821,7 +811,6 @@ LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
return tmp; return tmp;
} }
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
if (if_instr->should_profile()) { if (if_instr->should_profile()) {
ciMethod* method = if_instr->profiled_method(); ciMethod* method = if_instr->profiled_method();
@ -836,24 +825,32 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
assert(data->is_BranchData(), "need BranchData for two-way branches"); assert(data->is_BranchData(), "need BranchData for two-way branches");
int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
if (if_instr->is_swapped()) {
int t = taken_count_offset;
taken_count_offset = not_taken_count_offset;
not_taken_count_offset = t;
}
LIR_Opr md_reg = new_register(T_OBJECT); LIR_Opr md_reg = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg); __ oop2reg(md->constant_encoding(), md_reg);
LIR_Opr data_offset_reg = new_register(T_INT);
LIR_Opr data_offset_reg = new_pointer_register();
__ cmove(lir_cond(cond), __ cmove(lir_cond(cond),
LIR_OprFact::intConst(taken_count_offset), LIR_OprFact::intptrConst(taken_count_offset),
LIR_OprFact::intConst(not_taken_count_offset), LIR_OprFact::intptrConst(not_taken_count_offset),
data_offset_reg); data_offset_reg);
LIR_Opr data_reg = new_register(T_INT);
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT); // MDO cells are intptr_t, so the data_reg width is arch-dependent.
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
__ move(LIR_OprFact::address(data_addr), data_reg); __ move(LIR_OprFact::address(data_addr), data_reg);
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
// Use leal instead of add to avoid destroying condition codes on x86 // Use leal instead of add to avoid destroying condition codes on x86
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
__ leal(LIR_OprFact::address(fake_incr_value), data_reg); __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
__ move(data_reg, LIR_OprFact::address(data_addr)); __ move(data_reg, LIR_OprFact::address(data_addr));
} }
} }
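
The sequence profile_branch() emits is easier to follow as plain arithmetic. A standalone sketch of what it computes — not HotSpot code; the two-cell layout and a counter increment of 1 are assumptions for illustration:

#include <cstdio>

// Two counter cells standing in for BranchData's taken/not_taken slots.
struct BranchCells { long taken; long not_taken; };

void profile_branch_model(BranchCells* data, bool cond_true, bool swapped) {
  // The cmove selects an offset by condition; when the canonicalizer reversed
  // the If, the two offsets were swapped up front, so the roles trade places.
  long* cell = (cond_true != swapped) ? &data->taken : &data->not_taken;
  // leal [cell + increment]; store — an add that leaves the condition codes
  // intact on x86 (they still hold the compare result the branch needs).
  *cell += 1;  // DataLayout::counter_increment assumed to be 1 here
}

int main() {
  BranchCells d = {0, 0};
  profile_branch_model(&d, /*cond_true=*/true, /*swapped=*/false);
  profile_branch_model(&d, /*cond_true=*/true, /*swapped=*/true);
  printf("taken=%ld not_taken=%ld\n", d.taken, d.not_taken);  // taken=1 not_taken=1
  return 0;
}

The leal/store pair matters because the cmove has just consumed the compare's condition codes, and a plain add would clobber them before the real branch is emitted.
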
// Phi technique: // Phi technique:
// This is about passing live values from one basic block to the other. // This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one // In code generated with Java it is rather rare that more than one
@ -1305,8 +1302,6 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
LIR_Opr flag_val = new_register(T_INT); LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val); __ load(mark_active_flag_addr, flag_val);
LabelObj* start_store = new LabelObj();
LIR_PatchCode pre_val_patch_code = LIR_PatchCode pre_val_patch_code =
patch ? lir_patch_normal : lir_patch_none; patch ? lir_patch_normal : lir_patch_none;
@ -1757,7 +1752,7 @@ void LIRGenerator::do_Throw(Throw* x) {
#ifndef PRODUCT #ifndef PRODUCT
if (PrintC1Statistics) { if (PrintC1Statistics) {
increment_counter(Runtime1::throw_count_address()); increment_counter(Runtime1::throw_count_address(), T_INT);
} }
#endif #endif
@ -2191,12 +2186,41 @@ void LIRGenerator::do_Goto(Goto* x) {
ValueStack* state = x->state_before() ? x->state_before() : x->state(); ValueStack* state = x->state_before() ? x->state_before() : x->state();
// increment backedge counter if needed // increment backedge counter if needed
increment_backedge_counter(state_for(x, state)); CodeEmitInfo* info = state_for(x, state);
increment_backedge_counter(info, info->bci());
CodeEmitInfo* safepoint_info = state_for(x, state); CodeEmitInfo* safepoint_info = state_for(x, state);
__ safepoint(safepoint_poll_register(), safepoint_info); __ safepoint(safepoint_poll_register(), safepoint_info);
} }
// Gotos can be folded Ifs; handle this case.
if (x->should_profile()) {
ciMethod* method = x->profiled_method();
assert(method != NULL, "method should be set if branch is profiled");
ciMethodData* md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
ciProfileData* data = md->bci_to_data(x->profiled_bci());
assert(data != NULL, "must have profiling data");
int offset;
if (x->direction() == Goto::taken) {
assert(data->is_BranchData(), "need BranchData for two-way branches");
offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
} else if (x->direction() == Goto::not_taken) {
assert(data->is_BranchData(), "need BranchData for two-way branches");
offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
} else {
assert(data->is_JumpData(), "need JumpData for branches");
offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
}
LIR_Opr md_reg = new_register(T_OBJECT);
__ oop2reg(md->constant_encoding(), md_reg);
increment_counter(new LIR_Address(md_reg, offset,
NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
}
// emit phi-instruction move after safepoint since this simplifies // emit phi-instruction move after safepoint since this simplifies
// describing the state as the safepoint. // describing the state as the safepoint.
move_to_phi(x->state()); move_to_phi(x->state());
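
The Direction recorded on the Goto is what selects the methodDataOop cell above: a Goto folded from an If keeps updating the original BranchData cells, while a plain goto bumps the single JumpData cell. A standalone sketch of the selection — not HotSpot code, with cells modeled as plain longs:

#include <cstdio>

enum Direction { none, taken, not_taken };  // mirrors Goto::Direction

// Cells standing in for JumpData::taken and BranchData::taken/not_taken.
struct Cells { long jump_taken, branch_taken, branch_not_taken; };

long* cell_for(Cells* c, Direction d) {
  switch (d) {
    case taken:     return &c->branch_taken;      // folded If, taken arm
    case not_taken: return &c->branch_not_taken;  // folded If, not-taken arm
    default:        return &c->jump_taken;        // regular goto: JumpData
  }
}

int main() {
  Cells c = {0, 0, 0};
  *cell_for(&c, not_taken) += 1;  // Goto produced from the not-taken arm of an If
  *cell_for(&c, none)      += 1;  // ordinary goto
  printf("jump=%ld taken=%ld not_taken=%ld\n",
         c.jump_taken, c.branch_taken, c.branch_not_taken);  // 1 0 1
  return 0;
}
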
@ -2279,7 +2303,10 @@ void LIRGenerator::do_Base(Base* x) {
} }
// increment invocation counters if needed // increment invocation counters if needed
increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL)); if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL);
increment_invocation_counter(info);
}
// all blocks with a successor must end with an unconditional jump // all blocks with a successor must end with an unconditional jump
// to the successor even if they are consecutive // to the successor even if they are consecutive
@ -2613,12 +2640,12 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
} }
} }
void LIRGenerator::do_ProfileCall(ProfileCall* x) { void LIRGenerator::do_ProfileCall(ProfileCall* x) {
// Need recv in a temporary register so it interferes with the other temporaries // Need recv in a temporary register so it interferes with the other temporaries
LIR_Opr recv = LIR_OprFact::illegalOpr; LIR_Opr recv = LIR_OprFact::illegalOpr;
LIR_Opr mdo = new_register(T_OBJECT); LIR_Opr mdo = new_register(T_OBJECT);
LIR_Opr tmp = new_register(T_INT); // tmp is used to hold the counters on SPARC
LIR_Opr tmp = new_pointer_register();
if (x->recv() != NULL) { if (x->recv() != NULL) {
LIRItem value(x->recv(), this); LIRItem value(x->recv(), this);
value.load_item(); value.load_item();
@ -2628,14 +2655,69 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) {
__ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
} }
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
void LIRGenerator::do_ProfileCounter(ProfileCounter* x) { // We can safely ignore accessors here, since c2 will inline them anyway,
LIRItem mdo(x->mdo(), this); // accessors are also always mature.
mdo.load_item(); if (!x->inlinee()->is_accessor()) {
CodeEmitInfo* info = state_for(x, x->state(), true);
increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment()); // Increment invocation counter, don't notify the runtime, because we don't inline loops,
increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
}
} }
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
int freq_log;
int level = compilation()->env()->comp_level();
if (level == CompLevel_limited_profile) {
freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
} else if (level == CompLevel_full_profile) {
freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
} else {
ShouldNotReachHere();
}
// Increment the appropriate invocation/backedge counter and notify the runtime.
increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
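
The freq_log value turns into simple bit arithmetic in increment_event_counter_impl() below: the counter advances by count_increment == 1 << count_shift, and masking it with ((2^freq_log - 1) << count_shift) yields zero exactly once every 2^freq_log events. A runnable sketch — the count_shift and freq_log values here are illustrative, not the product defaults:

#include <cstdio>

int main() {
  const int count_shift     = 3;                 // stands in for InvocationCounter::count_shift
  const int count_increment = 1 << count_shift;  // InvocationCounter::count_increment
  const int freq_log        = 4;                 // a *NotifyFreqLog of 4 -> notify every 16 events
  const int mask            = ((1 << freq_log) - 1) << count_shift;  // the "frequency" operand

  int counter = 0, notifications = 0;
  for (int event = 1; event <= 64; event++) {
    counter += count_increment;
    if ((counter & mask) == 0) notifications++;  // the branch into CounterOverflowStub
  }
  printf("notified %d times in 64 events\n", notifications);  // prints 4
  return 0;
}
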
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, int frequency,
int bci, bool backedge, bool notify) {
assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
int level = _compilation->env()->comp_level();
assert(level > CompLevel_simple, "Shouldn't be here");
int offset = -1;
LIR_Opr counter_holder = new_register(T_OBJECT);
LIR_Opr meth;
if (level == CompLevel_limited_profile) {
offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
methodOopDesc::invocation_counter_offset());
__ oop2reg(method->constant_encoding(), counter_holder);
meth = counter_holder;
} else if (level == CompLevel_full_profile) {
offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
methodDataOopDesc::invocation_counter_offset());
__ oop2reg(method->method_data()->constant_encoding(), counter_holder);
meth = new_register(T_OBJECT);
__ oop2reg(method->constant_encoding(), meth);
} else {
ShouldNotReachHere();
}
LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
LIR_Opr result = new_register(T_INT);
__ load(counter, result);
__ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
__ store(result, counter);
if (notify) {
LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
__ logical_and(result, mask, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
// The bci for info can point to the cmp of an if; we want the if's bci
CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
__ branch(lir_cond_equal, T_INT, overflow);
__ branch_destination(overflow->continuation());
}
}
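
So the counter's home depends on the tier: limited-profile code bumps the counters embedded in the methodOop itself, full-profile code bumps the ones in the methodDataOop, and meth stays live in a register either way so CounterOverflowStub can hand the method to the runtime. A sketch of the choice — the level numbering is assumed from the tiered scheme, not quoted from this patch:

#include <cstdio>

enum CompLevel { CompLevel_simple = 1, CompLevel_limited_profile = 2, CompLevel_full_profile = 3 };

const char* counter_holder(CompLevel level) {
  switch (level) {
    case CompLevel_limited_profile: return "methodOop";      // methodOopDesc::*_counter_offset()
    case CompLevel_full_profile:    return "methodDataOop";  // methodDataOopDesc::*_counter_offset()
    default:                        return "none";           // ShouldNotReachHere()
  }
}

int main() {
  printf("tier 2 counters live in the %s\n", counter_holder(CompLevel_limited_profile));
  printf("tier 3 counters live in the %s\n", counter_holder(CompLevel_full_profile));
  return 0;
}
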
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1); LIRItemList args(1);
@ -2748,28 +2830,3 @@ LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
return result; return result;
} }
void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
(method()->code_size() >= Tier1BytecodeLimit || backedge)) {
int limit = InvocationCounter::Tier1InvocationLimit;
int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
if (backedge) {
limit = InvocationCounter::Tier1BackEdgeLimit;
offset = in_bytes(methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
}
LIR_Opr meth = new_register(T_OBJECT);
__ oop2reg(method()->constant_encoding(), meth);
LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
__ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
CodeStub* overflow = new CounterOverflowStub(info, info->bci());
__ branch(lir_cond_aboveEqual, T_INT, overflow);
__ branch_destination(overflow->continuation());
}
#endif
}

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -196,6 +196,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr load_constant(Constant* x); LIR_Opr load_constant(Constant* x);
LIR_Opr load_constant(LIR_Const* constant); LIR_Opr load_constant(LIR_Const* constant);
// Given an immediate value, return an operand usable in logical ops.
LIR_Opr load_immediate(int x, BasicType type);
void set_result(Value x, LIR_Opr opr) { void set_result(Value x, LIR_Opr opr) {
assert(opr->is_valid(), "must set to valid value"); assert(opr->is_valid(), "must set to valid value");
assert(x->operand()->is_illegal(), "operand should never change"); assert(x->operand()->is_illegal(), "operand should never change");
@ -213,8 +216,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr round_item(LIR_Opr opr); LIR_Opr round_item(LIR_Opr opr);
LIR_Opr force_to_spill(LIR_Opr value, BasicType t); LIR_Opr force_to_spill(LIR_Opr value, BasicType t);
void profile_branch(If* if_instr, If::Condition cond);
PhiResolverState& resolver_state() { return _resolver_state; } PhiResolverState& resolver_state() { return _resolver_state; }
void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val); void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
@ -285,12 +286,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args); void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
void increment_counter(address counter, int step = 1); void increment_counter(address counter, BasicType type, int step = 1);
void increment_counter(LIR_Address* addr, int step = 1); void increment_counter(LIR_Address* addr, int step = 1);
// increment a counter returning the incremented value
LIR_Opr increment_and_return_counter(LIR_Opr base, int offset, int increment);
// is_strictfp is only needed for mul and div (and only generates different code on i486) // is_strictfp is only needed for mul and div (and only generates different code on i486)
void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL); void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
// machine dependent. returns true if it emitted code for the multiply // machine dependent. returns true if it emitted code for the multiply
@ -347,9 +345,21 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
bool can_store_as_constant(Value i, BasicType type) const; bool can_store_as_constant(Value i, BasicType type) const;
LIR_Opr safepoint_poll_register(); LIR_Opr safepoint_poll_register();
void increment_invocation_counter(CodeEmitInfo* info, bool backedge = false);
void increment_backedge_counter(CodeEmitInfo* info) { void profile_branch(If* if_instr, If::Condition cond);
increment_invocation_counter(info, true); void increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, int frequency,
int bci, bool backedge, bool notify);
void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
void increment_invocation_counter(CodeEmitInfo *info) {
if (compilation()->count_invocations()) {
increment_event_counter(info, InvocationEntryBci, false);
}
}
void increment_backedge_counter(CodeEmitInfo* info, int bci) {
if (compilation()->count_backedges()) {
increment_event_counter(info, bci, true);
}
} }
CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
@ -503,7 +513,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x); virtual void do_ProfileCall (ProfileCall* x);
virtual void do_ProfileCounter (ProfileCounter* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -430,7 +430,7 @@ public:
void do_UnsafePrefetchRead (UnsafePrefetchRead* x); void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
void do_ProfileCall (ProfileCall* x); void do_ProfileCall (ProfileCall* x);
void do_ProfileCounter (ProfileCounter* x); void do_ProfileInvoke (ProfileInvoke* x);
}; };
@ -598,7 +598,7 @@ void NullCheckVisitor::do_UnsafePutObject(UnsafePutObject* x) {}
void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {} void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {} void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); } void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_ProfileCounter (ProfileCounter* x) {} void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckEliminator::visit(Value* p) { void NullCheckEliminator::visit(Value* p) {

View file

@ -140,9 +140,7 @@ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
case slow_subtype_check_id: case slow_subtype_check_id:
case fpu2long_stub_id: case fpu2long_stub_id:
case unwind_exception_id: case unwind_exception_id:
#ifndef TIERED case counter_overflow_id:
case counter_overflow_id: // Not generated outside the tiered world
#endif
#if defined(SPARC) || defined(PPC) #if defined(SPARC) || defined(PPC)
case handle_exception_nofpu_id: // Unused on sparc case handle_exception_nofpu_id: // Unused on sparc
#endif #endif
@ -322,31 +320,60 @@ JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
} }
JRT_END JRT_END
#ifdef TIERED // This is a helper to allow us to safepoint but allow the outer entry
JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci)) // to be safepoint free if we need to do an osr
RegisterMap map(thread, false); static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
frame fr = thread->last_frame().sender(&map); nmethod* osr_nm = NULL;
methodHandle method(THREAD, m);
RegisterMap map(THREAD, false);
frame fr = THREAD->last_frame().sender(&map);
nmethod* nm = (nmethod*) fr.cb(); nmethod* nm = (nmethod*) fr.cb();
assert(nm!= NULL && nm->is_nmethod(), "what?"); assert(nm!= NULL && nm->is_nmethod(), "Sanity check");
methodHandle method(thread, nm->method()); methodHandle enclosing_method(THREAD, nm->method());
if (bci == 0) {
// invocation counter overflow CompLevel level = (CompLevel)nm->comp_level();
if (!Tier1CountOnly) { int bci = InvocationEntryBci;
CompilationPolicy::policy()->method_invocation_event(method, CHECK); if (branch_bci != InvocationEntryBci) {
} else { // Compute destination bci
method()->invocation_counter()->reset(); address pc = method()->code_base() + branch_bci;
} Bytecodes::Code branch = Bytecodes::code_at(pc, method());
} else { int offset = 0;
if (!Tier1CountOnly) { switch (branch) {
// We have a bci but not the destination bci and besides a backedge case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
// event is more for OSR which we don't want here. case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
CompilationPolicy::policy()->method_invocation_event(method, CHECK); case Bytecodes::_if_icmple: case Bytecodes::_ifle:
} else { case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
method()->backedge_counter()->reset(); case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
offset = (int16_t)Bytes::get_Java_u2(pc + 1);
break;
case Bytecodes::_goto_w:
offset = Bytes::get_Java_u4(pc + 1);
break;
default: ;
} }
bci = branch_bci + offset;
} }
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
return osr_nm;
}
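The helper above recovers the OSR destination from the branch bytecode itself: Java bytecode operands are big-endian, and branch offsets are signed and relative to the bci of the branch opcode (16-bit for the conditional branches and goto, 32-bit for goto_w). A minimal standalone sketch of that decoding, using hypothetical helper names rather than HotSpot's Bytes:: accessors:

// Illustrative only: decode a branch destination the way counter_overflow_helper does.
#include <cstdint>

static int16_t read_s2(const uint8_t* p) {            // big-endian signed 16-bit
  return (int16_t)((p[0] << 8) | p[1]);
}
static int32_t read_s4(const uint8_t* p) {            // big-endian signed 32-bit
  return (int32_t)(((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
}

int branch_destination(const uint8_t* code_base, int branch_bci, bool is_goto_w) {
  const uint8_t* pc = code_base + branch_bci;
  int offset = is_goto_w ? read_s4(pc + 1) : read_s2(pc + 1);
  return branch_bci + offset;   // same arithmetic as bci = branch_bci + offset above
}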
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
nmethod* osr_nm;
JRT_BLOCK
osr_nm = counter_overflow_helper(thread, bci, method);
if (osr_nm != NULL) {
RegisterMap map(thread, false);
frame fr = thread->last_frame().sender(&map);
VM_DeoptimizeFrame deopt(thread, fr.id());
VMThread::execute(&deopt);
}
JRT_BLOCK_END
return NULL;
JRT_END JRT_END
#endif // TIERED
extern void vm_exit(int code); extern void vm_exit(int code);
@ -898,7 +925,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
assert(n_copy->data() == 0 || assert(n_copy->data() == 0 ||
n_copy->data() == (int)Universe::non_oop_word(), n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value"); "illegal init value");
assert(load_klass() != NULL, "klass not set"); assert(load_klass() != NULL, "klass not set");
n_copy->set_data((intx) (load_klass())); n_copy->set_data((intx) (load_klass()));

View file

@ -123,9 +123,7 @@ class Runtime1: public AllStatic {
static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length); static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length);
static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims); static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims);
#ifdef TIERED static address counter_overflow(JavaThread* thread, int bci, methodOopDesc* method);
static void counter_overflow(JavaThread* thread, int bci);
#endif // TIERED
static void unimplemented_entry (JavaThread* thread, StubID id); static void unimplemented_entry (JavaThread* thread, StubID id);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -185,11 +185,11 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ } void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
void do_RoundFP (RoundFP* x) { /* nothing to do */ } void do_RoundFP (RoundFP* x) { /* nothing to do */ }
void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ } void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ }
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ } void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ } void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ } void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ } void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }
void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ }
}; };

View file

@ -25,12 +25,6 @@
// //
// Defines all global flags used by the client compiler. // Defines all global flags used by the client compiler.
// //
#ifndef TIERED
#define NOT_TIERED(x) x
#else
#define NOT_TIERED(x)
#endif
#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \ #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
\ \
/* Printing */ \ /* Printing */ \
@ -55,7 +49,7 @@
notproduct(bool, PrintIRDuringConstruction, false, \ notproduct(bool, PrintIRDuringConstruction, false, \
"Print IR as it's being constructed (helpful for debugging frontend)")\ "Print IR as it's being constructed (helpful for debugging frontend)")\
\ \
notproduct(bool, PrintPhiFunctions, false, \ notproduct(bool, PrintPhiFunctions, false, \
"Print phi functions when they are created and simplified") \ "Print phi functions when they are created and simplified") \
\ \
notproduct(bool, PrintIR, false, \ notproduct(bool, PrintIR, false, \
@ -279,41 +273,29 @@
product_pd(intx, SafepointPollOffset, \ product_pd(intx, SafepointPollOffset, \
"Offset added to polling address (Intel only)") \ "Offset added to polling address (Intel only)") \
\ \
product(bool, UseNewFeature1, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature2, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature3, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature4, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
develop(bool, ComputeExactFPURegisterUsage, true, \ develop(bool, ComputeExactFPURegisterUsage, true, \
"Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \ "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
\ \
product(bool, Tier1ProfileCalls, true, \ product(bool, C1ProfileCalls, true, \
"Profile calls when generating code for updating MDOs") \ "Profile calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileVirtualCalls, true, \ product(bool, C1ProfileVirtualCalls, true, \
"Profile virtual calls when generating code for updating MDOs") \ "Profile virtual calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileInlinedCalls, true, \ product(bool, C1ProfileInlinedCalls, true, \
"Profile inlined calls when generating code for updating MDOs") \ "Profile inlined calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileBranches, true, \ product(bool, C1ProfileBranches, true, \
"Profile branches when generating code for updating MDOs") \ "Profile branches when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileCheckcasts, true, \ product(bool, C1ProfileCheckcasts, true, \
"Profile checkcasts when generating code for updating MDOs") \ "Profile checkcasts when generating code for updating MDOs") \
\ \
product(bool, Tier1OptimizeVirtualCallProfiling, true, \ product(bool, C1OptimizeVirtualCallProfiling, true, \
"Use CHA and exact type results at call sites when updating MDOs") \ "Use CHA and exact type results at call sites when updating MDOs")\
\ \
develop(bool, Tier1CountOnly, false, \ product(bool, C1UpdateMethodData, trueInTiered, \
"Don't schedule tier 2 compiles. Enter VM only") \ "Update methodDataOops in Tier1-generated code") \
\ \
develop(bool, PrintCFGToFile, false, \ develop(bool, PrintCFGToFile, false, \
"print control flow graph to a separate file during compilation") \ "print control flow graph to a separate file during compilation") \

View file

@ -956,18 +956,18 @@ void ciEnv::register_method(ciMethod* target,
if (task() != NULL) task()->set_code(nm); if (task() != NULL) task()->set_code(nm);
if (entry_bci == InvocationEntryBci) { if (entry_bci == InvocationEntryBci) {
#ifdef TIERED if (TieredCompilation) {
// If there is an old version we're done with it // If there is an old version we're done with it
nmethod* old = method->code(); nmethod* old = method->code();
if (TraceMethodReplacement && old != NULL) { if (TraceMethodReplacement && old != NULL) {
ResourceMark rm; ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string(); char *method_name = method->name_and_sig_as_C_string();
tty->print_cr("Replacing method %s", method_name); tty->print_cr("Replacing method %s", method_name);
}
if (old != NULL ) {
old->make_not_entrant();
}
} }
if (old != NULL ) {
old->make_not_entrant();
}
#endif // TIERED
if (TraceNMethodInstalls ) { if (TraceNMethodInstalls ) {
ResourceMark rm; ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string(); char *method_name = method->name_and_sig_as_C_string();
@ -1011,7 +1011,7 @@ ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciEnv::comp_level // ciEnv::comp_level
int ciEnv::comp_level() { int ciEnv::comp_level() {
if (task() == NULL) return CompLevel_full_optimization; if (task() == NULL) return CompLevel_highest_tier;
return task()->comp_level(); return task()->comp_level();
} }

View file

@ -49,7 +49,8 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_handler_count = h_m()->exception_table()->length() / 4; _handler_count = h_m()->exception_table()->length() / 4;
_uses_monitors = h_m()->access_flags().has_monitor_bytecodes(); _uses_monitors = h_m()->access_flags().has_monitor_bytecodes();
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching(); _balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
_is_compilable = !h_m()->is_not_compilable(); _is_c1_compilable = !h_m()->is_not_c1_compilable();
_is_c2_compilable = !h_m()->is_not_c2_compilable();
// Lazy fields, filled in on demand. Require allocation. // Lazy fields, filled in on demand. Require allocation.
_code = NULL; _code = NULL;
_exception_handlers = NULL; _exception_handlers = NULL;
@ -61,11 +62,12 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
#endif // COMPILER2 || SHARK #endif // COMPILER2 || SHARK
ciEnv *env = CURRENT_ENV; ciEnv *env = CURRENT_ENV;
if (env->jvmti_can_hotswap_or_post_breakpoint() && _is_compilable) { if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
// 6328518 check hotswap conditions under the right lock. // 6328518 check hotswap conditions under the right lock.
MutexLocker locker(Compile_lock); MutexLocker locker(Compile_lock);
if (Dependencies::check_evol_method(h_m()) != NULL) { if (Dependencies::check_evol_method(h_m()) != NULL) {
_is_compilable = false; _is_c1_compilable = false;
_is_c2_compilable = false;
} }
} else { } else {
CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
@ -93,7 +95,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_signature = new (env->arena()) ciSignature(_holder, sig_symbol); _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
_method_data = NULL; _method_data = NULL;
// Take a snapshot of these values, so they will be commensurate with the MDO. // Take a snapshot of these values, so they will be commensurate with the MDO.
if (ProfileInterpreter) { if (ProfileInterpreter || TieredCompilation) {
int invcnt = h_m()->interpreter_invocation_count(); int invcnt = h_m()->interpreter_invocation_count();
// if the value overflowed report it as max int // if the value overflowed report it as max int
_interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ; _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
@ -437,11 +439,26 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
// In addition, virtual call sites have receiver type information // In addition, virtual call sites have receiver type information
int receivers_count_total = 0; int receivers_count_total = 0;
int morphism = 0; int morphism = 0;
// Precompute morphism for the possible fixup
for (uint i = 0; i < call->row_limit(); i++) { for (uint i = 0; i < call->row_limit(); i++) {
ciKlass* receiver = call->receiver(i); ciKlass* receiver = call->receiver(i);
if (receiver == NULL) continue; if (receiver == NULL) continue;
morphism += 1; morphism++;
int rcount = call->receiver_count(i); }
int epsilon = 0;
if (TieredCompilation && ProfileInterpreter) {
// Interpreter and C1 treat final and special invokes differently.
// C1 will record a type, whereas the interpreter will just
// increment the count. Detect this case.
if (morphism == 1 && count > 0) {
epsilon = count;
count = 0;
}
}
for (uint i = 0; i < call->row_limit(); i++) {
ciKlass* receiver = call->receiver(i);
if (receiver == NULL) continue;
int rcount = call->receiver_count(i) + epsilon;
if (rcount == 0) rcount = 1; // Should be valid value if (rcount == 0) rcount = 1; // Should be valid value
receivers_count_total += rcount; receivers_count_total += rcount;
// Add the receiver to result data. // Add the receiver to result data.
@ -687,10 +704,17 @@ int ciMethod::interpreter_call_site_count(int bci) {
// invocation counts in methods. // invocation counts in methods.
int ciMethod::scale_count(int count, float prof_factor) { int ciMethod::scale_count(int count, float prof_factor) {
if (count > 0 && method_data() != NULL) { if (count > 0 && method_data() != NULL) {
int current_mileage = method_data()->current_mileage(); int counter_life;
int creation_mileage = method_data()->creation_mileage();
int counter_life = current_mileage - creation_mileage;
int method_life = interpreter_invocation_count(); int method_life = interpreter_invocation_count();
if (TieredCompilation) {
// In tiered the MDO's life is measured directly, so just use the snapshotted counters
counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());
} else {
int current_mileage = method_data()->current_mileage();
int creation_mileage = method_data()->creation_mileage();
counter_life = current_mileage - creation_mileage;
}
// counter_life due to backedge_counter could be > method_life // counter_life due to backedge_counter could be > method_life
if (counter_life > method_life) if (counter_life > method_life)
counter_life = method_life; counter_life = method_life;
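Condensed, the hunk above just switches which clock measures the MDO's age; a sketch with illustrative names:

// Illustrative: how counter_life is chosen in scale_count above.
int mdo_counter_life(bool tiered,
                     int mdo_invocations, int mdo_backedges,
                     int current_mileage, int creation_mileage) {
  if (tiered) {
    // In tiered the MDO carries its own counters, so measure its life directly.
    return mdo_invocations > mdo_backedges ? mdo_invocations : mdo_backedges;
  }
  // Otherwise, age is the mileage accumulated since the MDO was created.
  return current_mileage - creation_mileage;
}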
@ -778,7 +802,8 @@ ciMethodData* ciMethod::method_data() {
Thread* my_thread = JavaThread::current(); Thread* my_thread = JavaThread::current();
methodHandle h_m(my_thread, get_methodOop()); methodHandle h_m(my_thread, get_methodOop());
if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) { // Create an MDO for the inlinee
if (TieredCompilation && is_c1_compile(env->comp_level())) {
build_method_data(h_m); build_method_data(h_m);
} }
@ -885,7 +910,11 @@ bool ciMethod::has_option(const char* option) {
// Have previous compilations of this method succeeded? // Have previous compilations of this method succeeded?
bool ciMethod::can_be_compiled() { bool ciMethod::can_be_compiled() {
check_is_loaded(); check_is_loaded();
return _is_compilable; ciEnv* env = CURRENT_ENV;
if (is_c1_compile(env->comp_level())) {
return _is_c1_compilable;
}
return _is_c2_compilable;
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -895,8 +924,13 @@ bool ciMethod::can_be_compiled() {
void ciMethod::set_not_compilable() { void ciMethod::set_not_compilable() {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;
_is_compilable = false; ciEnv* env = CURRENT_ENV;
get_methodOop()->set_not_compilable(); if (is_c1_compile(env->comp_level())) {
_is_c1_compilable = false;
} else {
_is_c2_compilable = false;
}
get_methodOop()->set_not_compilable(env->comp_level());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -910,7 +944,8 @@ void ciMethod::set_not_compilable() {
bool ciMethod::can_be_osr_compiled(int entry_bci) { bool ciMethod::can_be_osr_compiled(int entry_bci) {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;
return !get_methodOop()->access_flags().is_not_osr_compilable(); ciEnv* env = CURRENT_ENV;
return !get_methodOop()->is_not_osr_compilable(env->comp_level());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -920,6 +955,14 @@ bool ciMethod::has_compiled_code() {
return get_methodOop()->code() != NULL; return get_methodOop()->code() != NULL;
} }
int ciMethod::comp_level() {
check_is_loaded();
VM_ENTRY_MARK;
nmethod* nm = get_methodOop()->code();
if (nm != NULL) return nm->comp_level();
return 0;
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciMethod::instructions_size // ciMethod::instructions_size
// //
@ -928,18 +971,13 @@ bool ciMethod::has_compiled_code() {
// junk like exception handler, stubs, and constant table, which are // junk like exception handler, stubs, and constant table, which are
// not highly relevant to an inlined method. So we use the more // not highly relevant to an inlined method. So we use the more
// specific accessor nmethod::insts_size. // specific accessor nmethod::insts_size.
int ciMethod::instructions_size() { int ciMethod::instructions_size(int comp_level) {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
nmethod* code = get_methodOop()->code(); nmethod* code = get_methodOop()->code();
// if there's no compiled code or the code was produced by the if (code != NULL && (comp_level == CompLevel_any || comp_level == code->comp_level())) {
// tier1 profiler return 0 for the code size. This should return code->code_end() - code->verified_entry_point();
// probably be based on the compilation level of the nmethod but
// that currently isn't properly recorded.
if (code == NULL ||
(TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) {
return 0;
} }
return code->insts_end() - code->verified_entry_point(); return 0;
) )
} }

View file

@ -61,7 +61,8 @@ class ciMethod : public ciObject {
bool _uses_monitors; bool _uses_monitors;
bool _balanced_monitors; bool _balanced_monitors;
bool _is_compilable; bool _is_c1_compilable;
bool _is_c2_compilable;
bool _can_be_statically_bound; bool _can_be_statically_bound;
// Lazy fields, filled in on demand // Lazy fields, filled in on demand
@ -127,6 +128,8 @@ class ciMethod : public ciObject {
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; } int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
int comp_level();
Bytecodes::Code java_code_at_bci(int bci) { Bytecodes::Code java_code_at_bci(int bci) {
address bcp = code() + bci; address bcp = code() + bci;
return Bytecodes::java_code_at(bcp); return Bytecodes::java_code_at(bcp);
@ -209,7 +212,7 @@ class ciMethod : public ciObject {
bool can_be_osr_compiled(int entry_bci); bool can_be_osr_compiled(int entry_bci);
void set_not_compilable(); void set_not_compilable();
bool has_compiled_code(); bool has_compiled_code();
int instructions_size(); int instructions_size(int comp_level = CompLevel_any);
void log_nmethod_identity(xmlStream* log); void log_nmethod_identity(xmlStream* log);
bool is_not_reached(int bci); bool is_not_reached(int bci);
bool was_executed_more_than(int times); bool was_executed_more_than(int times);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,8 @@ ciMethodData::ciMethodData(methodDataHandle h_md) : ciObject(h_md) {
_data_size = 0; _data_size = 0;
_extra_data_size = 0; _extra_data_size = 0;
_current_mileage = 0; _current_mileage = 0;
_invocation_counter = 0;
_backedge_counter = 0;
_state = empty_state; _state = empty_state;
_saw_free_extra_data = false; _saw_free_extra_data = false;
// Set an initial hint. Don't use set_hint_di() because // Set an initial hint. Don't use set_hint_di() because
@ -56,6 +58,8 @@ ciMethodData::ciMethodData() : ciObject() {
_data_size = 0; _data_size = 0;
_extra_data_size = 0; _extra_data_size = 0;
_current_mileage = 0; _current_mileage = 0;
_invocation_counter = 0;
_backedge_counter = 0;
_state = empty_state; _state = empty_state;
_saw_free_extra_data = false; _saw_free_extra_data = false;
// Set an initial hint. Don't use set_hint_di() because // Set an initial hint. Don't use set_hint_di() because
@ -99,6 +103,8 @@ void ciMethodData::load_data() {
} }
// Note: Extra data are all BitData, and do not need translation. // Note: Extra data are all BitData, and do not need translation.
_current_mileage = methodDataOopDesc::mileage_of(mdo->method()); _current_mileage = methodDataOopDesc::mileage_of(mdo->method());
_invocation_counter = mdo->invocation_count();
_backedge_counter = mdo->backedge_count();
_state = mdo->is_mature()? mature_state: immature_state; _state = mdo->is_mature()? mature_state: immature_state;
_eflags = mdo->eflags(); _eflags = mdo->eflags();
@ -253,6 +259,23 @@ void ciMethodData::update_escape_info() {
} }
} }
void ciMethodData::set_compilation_stats(short loops, short blocks) {
VM_ENTRY_MARK;
methodDataOop mdo = get_methodDataOop();
if (mdo != NULL) {
mdo->set_num_loops(loops);
mdo->set_num_blocks(blocks);
}
}
void ciMethodData::set_would_profile(bool p) {
VM_ENTRY_MARK;
methodDataOop mdo = get_methodDataOop();
if (mdo != NULL) {
mdo->set_would_profile(p);
}
}
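These two setters are how the compiler pushes its view of method triviality back into the methodDataOop; a hypothetical call site after IR construction (only the two setters come from this change, the surrounding function and its arguments are assumptions):

// Hypothetical use: record shape stats so the policy can later treat
// straight-line methods with few blocks as trivial.
void record_method_shape(ciMethodData* md, int num_loops, int num_blocks,
                         bool profiling_would_help) {
  md->set_compilation_stats((short)num_loops, (short)num_blocks);
  md->set_would_profile(profiling_would_help);
}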
bool ciMethodData::has_escape_info() { bool ciMethodData::has_escape_info() {
return eflag_set(methodDataOopDesc::estimated); return eflag_set(methodDataOopDesc::estimated);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -162,6 +162,12 @@ private:
// Maturity of the oop when the snapshot is taken. // Maturity of the oop when the snapshot is taken.
int _current_mileage; int _current_mileage;
// These counters hold the age of the MDO in tiered. In tiered we can have the same method
// running at different compilation levels concurrently. So, in order to precisely measure
// its maturity we need separate counters.
int _invocation_counter;
int _backedge_counter;
// Coherent snapshot of original header. // Coherent snapshot of original header.
methodDataOopDesc _orig; methodDataOopDesc _orig;
@ -223,6 +229,16 @@ public:
int creation_mileage() { return _orig.creation_mileage(); } int creation_mileage() { return _orig.creation_mileage(); }
int current_mileage() { return _current_mileage; } int current_mileage() { return _current_mileage; }
int invocation_count() { return _invocation_counter; }
int backedge_count() { return _backedge_counter; }
// Transfer information about the method to methodDataOop.
// would_profile means we would like to profile this method,
// meaning it's not trivial.
void set_would_profile(bool p);
// Also set the number of loops and blocks in the method.
// Again, this is used to determine if a method is trivial.
void set_compilation_stats(short loops, short blocks);
void load_data(); void load_data();
// Convert a dp (data pointer) to a di (data index). // Convert a dp (data pointer) to a di (data index).

View file

@ -1292,7 +1292,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
// Iterate over all methods in class // Iterate over all methods in class
for (int n = 0; n < k->methods()->length(); n++) { for (int n = 0; n < k->methods()->length(); n++) {
methodHandle m (THREAD, methodOop(k->methods()->obj_at(n))); methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
if (CompilationPolicy::canBeCompiled(m)) { if (CompilationPolicy::can_be_compiled(m)) {
if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
// Give sweeper a chance to keep up with CTW // Give sweeper a chance to keep up with CTW
@ -1301,7 +1301,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
_codecache_sweep_counter = 0; _codecache_sweep_counter = 0;
} }
// Force compilation // Force compilation
CompileBroker::compile_method(m, InvocationEntryBci, CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
methodHandle(), 0, "CTW", THREAD); methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
@ -1315,7 +1315,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
nm->make_not_entrant(); nm->make_not_entrant();
m->clear_code(); m->clear_code();
} }
CompileBroker::compile_method(m, InvocationEntryBci, CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
methodHandle(), 0, "CTW", THREAD); methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;

View file

@ -867,9 +867,9 @@ void nmethod::log_identity(xmlStream* log) const {
if (compiler() != NULL) { if (compiler() != NULL) {
log->print(" compiler='%s'", compiler()->name()); log->print(" compiler='%s'", compiler()->name());
} }
#ifdef TIERED if (TieredCompilation) {
log->print(" level='%d'", comp_level()); log->print(" level='%d'", comp_level());
#endif // TIERED }
} }
@ -908,35 +908,71 @@ void nmethod::log_new_nmethod() const {
#undef LOG_OFFSET #undef LOG_OFFSET
void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
bool is_synchronized = false, has_xhandler = false, is_native = false;
int code_size = -1;
if (method != NULL) {
is_synchronized = method->is_synchronized();
has_xhandler = method->has_exception_handler();
is_native = method->is_native();
code_size = method->code_size();
}
// print timestamp and compilation number
st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
// print method attributes
const bool is_osr = bci != InvocationEntryBci;
const char blocking_char = is_blocking ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = is_synchronized ? 's' : ' ';
const char exception_char = has_xhandler ? '!' : ' ';
const char native_char = is_native ? 'n' : ' ';
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
if (TieredCompilation) {
st->print("%d ", comp_level);
}
// print optional title
bool do_nl = false;
if (title != NULL) {
int tlen = (int) strlen(title);
if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
st->print("%.*s", tlen, title);
} else {
do_nl = true;
}
// print method name string if given
if (method_name != NULL) {
st->print(method_name);
} else {
// otherwise ask the method to print itself
if (method != NULL && !Universe::heap()->is_gc_active()) {
method->print_short_name(st);
} else {
st->print("(method)");
}
}
if (method != NULL) {
// print osr_bci if any
if (is_osr) st->print(" @ %d", bci);
// print method size
st->print(" (%d bytes)", code_size);
}
if (do_nl) st->cr();
}
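For reference, a line produced by this helper would look roughly as follows (illustrative values; per the format strings above, the five attribute characters are osr '%', synchronized 's', exception handler '!', blocking 'b', native 'n', and the tier digit appears only under TieredCompilation):

//   12345  42 %s!b  3 java.lang.String::hashCode @ 12 (55 bytes)
//   ^time  ^id ^flags ^tier ^method name, osr bci, and bytecode size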
// Print out more verbose output usually for a newly created nmethod. // Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* title) const { void nmethod::print_on(outputStream* st, const char* title) const {
if (st != NULL) { if (st != NULL) {
ttyLocker ttyl; ttyLocker ttyl;
// Print a little tag line that looks like +PrintCompilation output: print_compilation(st, /*method_name*/NULL, title,
int tlen = (int) strlen(title); method(), /*is_blocking*/false,
bool do_nl = false; compile_id(), osr_entry_bci(), comp_level());
if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
st->print("%3d%c %.*s",
compile_id(),
is_osr_method() ? '%' :
method() != NULL &&
is_native_method() ? 'n' : ' ',
tlen, title);
#ifdef TIERED
st->print(" (%d) ", comp_level());
#endif // TIERED
if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
if (Universe::heap()->is_gc_active() && method() != NULL) {
st->print("(method)");
} else if (method() != NULL) {
method()->print_short_name(st);
if (is_osr_method())
st->print(" @ %d", osr_entry_bci());
if (method()->code_size() > 0)
st->print(" (%d bytes)", method()->code_size());
}
if (do_nl) st->cr();
} }
} }
@ -1137,6 +1173,7 @@ bool nmethod::can_not_entrant_be_converted() {
} }
void nmethod::inc_decompile_count() { void nmethod::inc_decompile_count() {
if (!is_compiled_by_c2()) return;
// Could be gated by ProfileTraps, but do not bother... // Could be gated by ProfileTraps, but do not bother...
methodOop m = method(); methodOop m = method();
if (m == NULL) return; if (m == NULL) return;

View file

@ -599,6 +599,10 @@ public:
void verify_scopes(); void verify_scopes();
void verify_interrupt_point(address interrupt_point); void verify_interrupt_point(address interrupt_point);
// print compilation helper
static void print_compilation(outputStream *st, const char *method_name, const char *title,
methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
// printing support // printing support
void print() const; void print() const;
void print_code(); void print_code();

View file

@ -123,20 +123,12 @@ int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0; int CompileBroker::_sum_nmethod_code_size = 0;
CompileQueue* CompileBroker::_method_queue = NULL; CompileQueue* CompileBroker::_c2_method_queue = NULL;
CompileQueue* CompileBroker::_c1_method_queue = NULL;
CompileTask* CompileBroker::_task_free_list = NULL; CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL; GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
// CompileTaskWrapper
//
// Assign this task to the current thread. Deallocate the task
// when the compilation is complete.
class CompileTaskWrapper : StackObj {
public:
CompileTaskWrapper(CompileTask* task);
~CompileTaskWrapper();
};
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) { CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
CompilerThread* thread = CompilerThread::current(); CompilerThread* thread = CompilerThread::current();
@ -246,6 +238,12 @@ void CompileTask::print() {
bool_to_str(_is_complete), bool_to_str(_is_success)); bool_to_str(_is_complete), bool_to_str(_is_success));
} }
void CompileTask::print_compilation(outputStream *st, methodOop method, char* method_name) {
nmethod::print_compilation(st, method_name,/*title*/ NULL, method,
is_blocking(), compile_id(), osr_bci(), comp_level());
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileTask::print_line_on_error // CompileTask::print_line_on_error
// //
@ -258,32 +256,13 @@ void CompileTask::print() {
// //
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) { void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
methodOop method = (methodOop)JNIHandles::resolve(_method); methodOop method = (methodOop)JNIHandles::resolve(_method);
// print compiler name // print compiler name
st->print("%s:", CompileBroker::compiler(comp_level())->name()); st->print("%s:", CompileBroker::compiler(comp_level())->name());
char* method_name = NULL;
// print compilation number if (method != NULL) {
st->print("%3d", compile_id()); method_name = method->name_and_sig_as_C_string(buf, buflen);
// print method attributes
const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
{ const char blocking_char = is_blocking() ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handler() ? '!' : ' ';
const char tier_char =
is_highest_tier_compile(comp_level()) ? ' ' : ('0' + comp_level());
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
} }
print_compilation(st, method, method_name);
// Use buf to get method name and signature
if (method != NULL) st->print("%s", method->name_and_sig_as_C_string(buf, buflen));
// print osr_bci if any
if (is_osr) st->print(" @ %d", osr_bci());
// print method size
st->print_cr(" (%d bytes)", method->code_size());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -298,29 +277,7 @@ void CompileTask::print_line() {
// print compiler name if requested // print compiler name if requested
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name()); if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name());
print_compilation(tty, method(), NULL);
// print compilation number
tty->print("%3d", compile_id());
// print method attributes
const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
{ const char blocking_char = is_blocking() ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handler() ? '!' : ' ';
const char tier_char =
is_highest_tier_compile(comp_level()) ? ' ' : ('0' + comp_level());
tty->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
}
// print method name
method->print_short_name(tty);
// print osr_bci if any
if (is_osr) tty->print(" @ %d", osr_bci());
// print method size
tty->print_cr(" (%d bytes)", method->code_size());
} }
@ -427,6 +384,7 @@ void CompileQueue::add(CompileTask* task) {
assert(lock()->owned_by_self(), "must own lock"); assert(lock()->owned_by_self(), "must own lock");
task->set_next(NULL); task->set_next(NULL);
task->set_prev(NULL);
if (_last == NULL) { if (_last == NULL) {
// The compile queue is empty. // The compile queue is empty.
@ -437,8 +395,10 @@ void CompileQueue::add(CompileTask* task) {
// Append the task to the queue. // Append the task to the queue.
assert(_last->next() == NULL, "not last"); assert(_last->next() == NULL, "not last");
_last->set_next(task); _last->set_next(task);
task->set_prev(_last);
_last = task; _last = task;
} }
++_size;
// Mark the method as being in the compile queue. // Mark the method as being in the compile queue.
((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation(); ((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation();
@ -452,10 +412,9 @@ void CompileQueue::add(CompileTask* task) {
} }
// Notify CompilerThreads that a task is available. // Notify CompilerThreads that a task is available.
lock()->notify(); lock()->notify_all();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileQueue::get // CompileQueue::get
// //
@ -464,7 +423,6 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep(); NMethodSweeper::possibly_sweep();
MutexLocker locker(lock()); MutexLocker locker(lock());
// Wait for an available CompileTask. // Wait for an available CompileTask.
while (_first == NULL) { while (_first == NULL) {
// There is no work to be done right now. Wait. // There is no work to be done right now. Wait.
@ -481,19 +439,31 @@ CompileTask* CompileQueue::get() {
lock()->wait(); lock()->wait();
} }
} }
CompileTask* task = CompilationPolicy::policy()->select_task(this);
CompileTask* task = _first; remove(task);
// Update queue first and last
_first =_first->next();
if (_first == NULL) {
_last = NULL;
}
return task; return task;
} }
void CompileQueue::remove(CompileTask* task) {
assert(lock()->owned_by_self(), "must own lock");
if (task->prev() != NULL) {
task->prev()->set_next(task->next());
} else {
// task is the first element
assert(task == _first, "Sanity");
_first = task->next();
}
if (task->next() != NULL) {
task->next()->set_prev(task->prev());
} else {
// task is the last element
assert(task == _last, "Sanity");
_last = task->prev();
}
--_size;
}
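The new prev links and remove() exist because get() no longer pops the head: select_task() may hand back any element of the queue, which then has to be unlinked from the middle in O(1). A sketch of such a selector under assumed accessors (hot_count() here is an assumption):

// Illustrative policy: scan for the hottest task; the caller then invokes
// queue->remove(best), which is why the list is doubly linked.
CompileTask* select_hottest(CompileQueue* queue) {
  CompileTask* best = queue->first();
  for (CompileTask* t = queue->first(); t != NULL; t = t->next()) {
    if (t->hot_count() > best->hot_count())   // assumed accessor
      best = t;
  }
  return best;
}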
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileQueue::print // CompileQueue::print
@ -545,7 +515,6 @@ CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS)
} }
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::compilation_init // CompileBroker::compilation_init
// //
@ -554,18 +523,18 @@ void CompileBroker::compilation_init() {
_last_method_compiled[0] = '\0'; _last_method_compiled[0] = '\0';
// Set the interface to the current compiler(s). // Set the interface to the current compiler(s).
int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
#ifdef COMPILER1 #ifdef COMPILER1
_compilers[0] = new Compiler(); if (c1_count > 0) {
#ifndef COMPILER2 _compilers[0] = new Compiler();
_compilers[1] = _compilers[0]; }
#endif
#endif // COMPILER1 #endif // COMPILER1
#ifdef COMPILER2 #ifdef COMPILER2
_compilers[1] = new C2Compiler(); if (c2_count > 0) {
#ifndef COMPILER1 _compilers[1] = new C2Compiler();
_compilers[0] = _compilers[1]; }
#endif
#endif // COMPILER2 #endif // COMPILER2
#ifdef SHARK #ifdef SHARK
@ -580,9 +549,7 @@ void CompileBroker::compilation_init() {
_task_free_list = NULL; _task_free_list = NULL;
// Start the CompilerThreads // Start the CompilerThreads
init_compiler_threads(compiler_count()); init_compiler_threads(c1_count, c2_count);
// totalTime performance counter is always created as it is required // totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean. // by the implementation of java.lang.management.CompilationMBean.
{ {
@ -770,23 +737,38 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// CompileBroker::init_compiler_threads // CompileBroker::init_compiler_threads
// //
// Initialize the compilation queue // Initialize the compilation queue
void CompileBroker::init_compiler_threads(int compiler_count) { void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK; EXCEPTION_MARK;
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
if (c2_compiler_count > 0) {
_c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock);
}
if (c1_compiler_count > 0) {
_c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock);
}
int compiler_count = c1_compiler_count + c2_compiler_count;
_method_queue = new CompileQueue("MethodQueue", MethodCompileQueue_lock);
_method_threads = _method_threads =
new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true); new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true);
char name_buffer[256]; char name_buffer[256];
int i; for (int i = 0; i < c2_compiler_count; i++) {
for (i = 0; i < compiler_count; i++) {
// Create a name for our thread. // Create a name for our thread.
sprintf(name_buffer, "CompilerThread%d", i); sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _method_queue, counters, CHECK);
_method_threads->append(new_thread); _method_threads->append(new_thread);
} }
for (int i = c2_compiler_count; i < compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
_method_threads->append(new_thread);
}
if (UsePerfData) { if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
compiler_count, CHECK); compiler_count, CHECK);
@ -796,7 +778,9 @@ void CompileBroker::init_compiler_threads(int compiler_count) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::is_idle // CompileBroker::is_idle
bool CompileBroker::is_idle() { bool CompileBroker::is_idle() {
if (!_method_queue->is_empty()) { if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
return false;
} else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
return false; return false;
} else { } else {
int num_threads = _method_threads->length(); int num_threads = _method_threads->length();
@ -859,6 +843,7 @@ void CompileBroker::compile_method_base(methodHandle method,
return; return;
} }
// If this method is already in the compile queue, then // If this method is already in the compile queue, then
// we do not block the current thread. // we do not block the current thread.
if (compilation_is_in_queue(method, osr_bci)) { if (compilation_is_in_queue(method, osr_bci)) {
@ -876,10 +861,11 @@ void CompileBroker::compile_method_base(methodHandle method,
// Outputs from the following MutexLocker block: // Outputs from the following MutexLocker block:
CompileTask* task = NULL; CompileTask* task = NULL;
bool blocking = false; bool blocking = false;
CompileQueue* queue = compile_queue(comp_level);
// Acquire our lock. // Acquire our lock.
{ {
MutexLocker locker(_method_queue->lock(), THREAD); MutexLocker locker(queue->lock(), THREAD);
// Make sure the method has not slipped into the queues since // Make sure the method has not slipped into the queues since
// last we checked; note that those checks were "fast bail-outs". // last we checked; note that those checks were "fast bail-outs".
@ -945,7 +931,7 @@ void CompileBroker::compile_method_base(methodHandle method,
// and in that case it's best to protect both the testing (here) of // and in that case it's best to protect both the testing (here) of
// these bits, and their updating (here and elsewhere) under a // these bits, and their updating (here and elsewhere) under a
// common lock. // common lock.
task = create_compile_task(_method_queue, task = create_compile_task(queue,
compile_id, method, compile_id, method,
osr_bci, comp_level, osr_bci, comp_level,
hot_method, hot_count, comment, hot_method, hot_count, comment,
@ -959,6 +945,7 @@ void CompileBroker::compile_method_base(methodHandle method,
nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
int comp_level,
methodHandle hot_method, int hot_count, methodHandle hot_method, int hot_count,
const char* comment, TRAPS) { const char* comment, TRAPS) {
// make sure arguments make sense // make sure arguments make sense
@ -967,26 +954,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized"); assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized");
int comp_level = CompilationPolicy::policy()->compilation_level(method, osr_bci); if (!TieredCompilation) {
comp_level = CompLevel_highest_tier;
#ifdef TIERED
if (TieredCompilation && StressTieredRuntime) {
static int flipper = 0;
if (is_even(flipper++)) {
comp_level = CompLevel_fast_compile;
} else {
comp_level = CompLevel_full_optimization;
}
} }
#ifdef SPARC
// QQQ FIX ME
// C2 only returns long results in G1 and c1 doesn't understand so disallow c2
// compiles of long results
if (TieredCompilation && method()->result_type() == T_LONG) {
comp_level = CompLevel_fast_compile;
}
#endif // SPARC
#endif // TIERED
// return quickly if possible // return quickly if possible
@ -1000,12 +970,10 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
if (osr_bci == InvocationEntryBci) { if (osr_bci == InvocationEntryBci) {
// standard compilation // standard compilation
nmethod* method_code = method->code(); nmethod* method_code = method->code();
if (method_code != NULL if (method_code != NULL) {
#ifdef TIERED if (compilation_is_complete(method, osr_bci, comp_level)) {
&& ( method_code->is_compiled_by_c2() || comp_level == CompLevel_fast_compile ) return method_code;
#endif // TIERED }
) {
return method_code;
} }
if (method->is_not_compilable(comp_level)) return NULL; if (method->is_not_compilable(comp_level)) return NULL;
@ -1021,10 +989,11 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// osr compilation // osr compilation
#ifndef TIERED #ifndef TIERED
// seems like an assert of dubious value // seems like an assert of dubious value
assert(comp_level == CompLevel_full_optimization, assert(comp_level == CompLevel_highest_tier,
"all OSR compiles are assumed to be at a single compilation lavel"); "all OSR compiles are assumed to be at a single compilation lavel");
#endif // TIERED #endif // TIERED
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci); // We accept a higher level osr method
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
if (nm != NULL) return nm; if (nm != NULL) return nm;
if (method->is_not_osr_compilable()) return NULL; if (method->is_not_osr_compilable()) return NULL;
} }
@ -1071,8 +1040,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// If the compiler is shut off due to code cache flushing or otherwise, // If the compiler is shut off due to code cache flushing or otherwise,
// fail out now so blocking compiles dont hang the java thread // fail out now so blocking compiles dont hang the java thread
if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) { if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
method->invocation_counter()->decay(); CompilationPolicy::policy()->delay_compilation(method());
method->backedge_counter()->decay();
return NULL; return NULL;
} }
@ -1088,7 +1056,8 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
} }
// return requested nmethod // return requested nmethod
return osr_bci == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci); // We accept a higher level osr method
return osr_bci == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
} }
@ -1104,7 +1073,7 @@ bool CompileBroker::compilation_is_complete(methodHandle method,
if (method->is_not_osr_compilable()) { if (method->is_not_osr_compilable()) {
return true; return true;
} else { } else {
nmethod* result = method->lookup_osr_nmethod_for(osr_bci); nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
return (result != NULL); return (result != NULL);
} }
} else { } else {
@ -1113,15 +1082,7 @@ bool CompileBroker::compilation_is_complete(methodHandle method,
} else { } else {
nmethod* result = method->code(); nmethod* result = method->code();
if (result == NULL) return false; if (result == NULL) return false;
#ifdef TIERED return comp_level == result->comp_level();
if (comp_level == CompLevel_fast_compile) {
// At worst the code is from c1
return true;
}
// comp level must be full opt
return result->is_compiled_by_c2();
#endif // TIERED
return true;
} }
} }
} }
@ -1139,11 +1100,10 @@ bool CompileBroker::compilation_is_complete(methodHandle method,
// versa). This can be remedied by a full queue search to disambiguate // cases. If it is deemed profitable, this may be done.
// cases. If it is deemed profitible, this may be done. // cases. If it is deemed profitible, this may be done.
bool CompileBroker::compilation_is_in_queue(methodHandle method, bool CompileBroker::compilation_is_in_queue(methodHandle method,
int osr_bci) { int osr_bci) {
return method->queued_for_compilation(); return method->queued_for_compilation();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited // CompileBroker::compilation_is_prohibited
// //
@ -1151,11 +1111,9 @@ bool CompileBroker::compilation_is_in_queue(methodHandle method,
bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) { bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) {
bool is_native = method->is_native(); bool is_native = method->is_native();
// Some compilers may not support the compilation of natives. // Some compilers may not support the compilation of natives.
// QQQ this needs some work ought to only record not compilable at
// the specified level
if (is_native && if (is_native &&
(!CICompileNatives || !compiler(comp_level)->supports_native())) { (!CICompileNatives || !compiler(comp_level)->supports_native())) {
method->set_not_compilable_quietly(); method->set_not_compilable_quietly(comp_level);
return true; return true;
} }
@ -1194,7 +1152,7 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
// compilations may be numbered separately from regular compilations // compilations may be numbered separately from regular compilations
// if certain debugging flags are used. // if certain debugging flags are used.
uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) { uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
assert(_method_queue->lock()->owner() == JavaThread::current(), assert(MethodCompileQueue_lock->owner() == Thread::current(),
"must hold the compilation queue lock"); "must hold the compilation queue lock");
bool is_osr = (osr_bci != standard_entry_bci); bool is_osr = (osr_bci != standard_entry_bci);
assert(!method->is_native(), "no longer compile natives"); assert(!method->is_native(), "no longer compile natives");
@ -1643,7 +1601,6 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
#endif #endif
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::handle_full_code_cache // CompileBroker::handle_full_code_cache
// //
@ -1883,12 +1840,12 @@ void CompileBroker::print_times() {
CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count); CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count); tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
if (compiler(CompLevel_fast_compile)) { if (compiler(CompLevel_simple) != NULL) {
compiler(CompLevel_fast_compile)->print_timers(); compiler(CompLevel_simple)->print_timers();
if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier)) }
compiler(CompLevel_highest_tier)->print_timers(); if (compiler(CompLevel_full_optimization) != NULL) {
compiler(CompLevel_full_optimization)->print_timers();
} }
tty->cr(); tty->cr();
int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled; int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb); tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class CompileTask : public CHeapObj {
int _comp_level; int _comp_level;
int _num_inlined_bytecodes; int _num_inlined_bytecodes;
nmethodLocker* _code_handle; // holder of eventual result nmethodLocker* _code_handle; // holder of eventual result
CompileTask* _next; CompileTask* _next, *_prev;
// Fields used for logging why the compilation was initiated: // Fields used for logging why the compilation was initiated:
jlong _time_queued; // in units of os::elapsed_counter() jlong _time_queued; // in units of os::elapsed_counter()
@ -49,6 +49,7 @@ class CompileTask : public CHeapObj {
int _hot_count; // information about its invocation counter int _hot_count; // information about its invocation counter
const char* _comment; // more info about the task const char* _comment; // more info about the task
void print_compilation(outputStream *st, methodOop method, char* method_name);
public: public:
CompileTask() { CompileTask() {
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock"); _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
@ -85,15 +86,17 @@ class CompileTask : public CHeapObj {
CompileTask* next() const { return _next; } CompileTask* next() const { return _next; }
void set_next(CompileTask* next) { _next = next; } void set_next(CompileTask* next) { _next = next; }
CompileTask* prev() const { return _prev; }
void set_prev(CompileTask* prev) { _prev = prev; }
void print(); void print();
void print_line(); void print_line();
void print_line_on_error(outputStream* st, char* buf, int buflen); void print_line_on_error(outputStream* st, char* buf, int buflen);
void log_task(xmlStream* log); void log_task(xmlStream* log);
void log_task_queued(); void log_task_queued();
void log_task_start(CompileLog* log); void log_task_start(CompileLog* log);
void log_task_done(CompileLog* log); void log_task_done(CompileLog* log);
}; };
// CompilerCounters // CompilerCounters
@ -141,7 +144,6 @@ class CompilerCounters : public CHeapObj {
PerfCounter* compile_counter() { return _perf_compiles; } PerfCounter* compile_counter() { return _perf_compiles; }
}; };
// CompileQueue // CompileQueue
// //
// A list of CompileTasks. // A list of CompileTasks.
@ -153,26 +155,42 @@ class CompileQueue : public CHeapObj {
CompileTask* _first; CompileTask* _first;
CompileTask* _last; CompileTask* _last;
int _size;
public: public:
CompileQueue(const char* name, Monitor* lock) { CompileQueue(const char* name, Monitor* lock) {
_name = name; _name = name;
_lock = lock; _lock = lock;
_first = NULL; _first = NULL;
_last = NULL; _last = NULL;
_size = 0;
} }
const char* name() const { return _name; } const char* name() const { return _name; }
Monitor* lock() const { return _lock; } Monitor* lock() const { return _lock; }
void add(CompileTask* task); void add(CompileTask* task);
void remove(CompileTask* task);
CompileTask* first() { return _first; }
CompileTask* last() { return _last; }
CompileTask* get(); CompileTask* get();
bool is_empty() const { return _first == NULL; } bool is_empty() const { return _first == NULL; }
int size() const { return _size; }
void print(); void print();
}; };
// CompileTaskWrapper
//
// Assign this task to the current thread. Deallocate the task
// when the compilation is complete.
class CompileTaskWrapper : StackObj {
public:
CompileTaskWrapper(CompileTask* task);
~CompileTaskWrapper();
};
// Compilation // Compilation
// //
@ -208,7 +226,8 @@ class CompileBroker: AllStatic {
static int _last_compile_level; static int _last_compile_level;
static char _last_method_compiled[name_buffer_length]; static char _last_method_compiled[name_buffer_length];
static CompileQueue* _method_queue; static CompileQueue* _c2_method_queue;
static CompileQueue* _c1_method_queue;
static CompileTask* _task_free_list; static CompileTask* _task_free_list;
static GrowableArray<CompilerThread*>* _method_threads; static GrowableArray<CompilerThread*>* _method_threads;
@ -256,19 +275,9 @@ class CompileBroker: AllStatic {
static int _sum_nmethod_size; static int _sum_nmethod_size;
static int _sum_nmethod_code_size; static int _sum_nmethod_code_size;
static int compiler_count() {
return CICompilerCountPerCPU
// Example: if CICompilerCountPerCPU is true, then we get
// max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
// May help big-app startup time.
? (MAX2(log2_intptr(os::active_processor_count())-1,1))
: CICompilerCount;
}
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
static void init_compiler_threads(int compiler_count); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_in_queue (methodHandle method, int osr_bci);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level); static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
static uint assign_compile_id (methodHandle method, int osr_bci); static uint assign_compile_id (methodHandle method, int osr_bci);
static bool is_compile_blocking (methodHandle method, int osr_bci); static bool is_compile_blocking (methodHandle method, int osr_bci);
@ -301,23 +310,35 @@ class CompileBroker: AllStatic {
int hot_count, int hot_count,
const char* comment, const char* comment,
TRAPS); TRAPS);
static CompileQueue* compile_queue(int comp_level) {
if (is_c2_compile(comp_level)) return _c2_method_queue;
if (is_c1_compile(comp_level)) return _c1_method_queue;
return NULL;
}
public: public:
enum { enum {
// The entry bci used for non-OSR compilations. // The entry bci used for non-OSR compilations.
standard_entry_bci = InvocationEntryBci standard_entry_bci = InvocationEntryBci
}; };
static AbstractCompiler* compiler(int level ) { static AbstractCompiler* compiler(int comp_level) {
if (level == CompLevel_fast_compile) return _compilers[0]; if (is_c2_compile(comp_level)) return _compilers[1]; // C2
assert(level == CompLevel_highest_tier, "what level?"); if (is_c1_compile(comp_level)) return _compilers[0]; // C1
return _compilers[1]; return NULL;
} }
static bool compilation_is_in_queue(methodHandle method, int osr_bci);
static int queue_size(int comp_level) {
CompileQueue *q = compile_queue(comp_level);
return q != NULL ? q->size() : 0;
}
static void compilation_init(); static void compilation_init();
static void init_compiler_thread_log(); static void init_compiler_thread_log();
static nmethod* compile_method(methodHandle method, int osr_bci, static nmethod* compile_method(methodHandle method,
methodHandle hot_method, int hot_count, int osr_bci,
int comp_level,
methodHandle hot_method,
int hot_count,
const char* comment, TRAPS); const char* comment, TRAPS);
static void compiler_thread_loop(); static void compiler_thread_loop();
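With two queues, a request is routed by compilation level: the C1 levels feed _c1_method_queue and the C2 level feeds _c2_method_queue. A hedged sketch of that dispatch using the compile_queue() helper above; enqueue_task is a hypothetical name used only for illustration:

// Illustration only (written as if it were a CompileBroker member):
// put a task on whichever queue matches its compilation level.
void CompileBroker::enqueue_task(CompileTask* task, int comp_level) {
  CompileQueue* queue = compile_queue(comp_level);  // _c1_method_queue or _c2_method_queue
  assert(queue != NULL, "comp_level must map to a compiler queue");
  MutexLocker locker(queue->lock());
  queue->add(task);  // a compiler thread bound to this queue picks it up
}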
View file
@ -20,7 +20,6 @@
// or visit www.oracle.com if you need additional information or have any // or visit www.oracle.com if you need additional information or have any
// questions. // questions.
// //
//
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
View file
@ -1,5 +1,5 @@
// //
// Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
View file
@ -1081,6 +1081,8 @@ compilationPolicy.cpp nativeLookup.hpp
compilationPolicy.cpp nmethod.hpp compilationPolicy.cpp nmethod.hpp
compilationPolicy.cpp oop.inline.hpp compilationPolicy.cpp oop.inline.hpp
compilationPolicy.cpp rframe.hpp compilationPolicy.cpp rframe.hpp
compilationPolicy.cpp scopeDesc.hpp
compilationPolicy.cpp simpleThresholdPolicy.hpp
compilationPolicy.cpp stubRoutines.hpp compilationPolicy.cpp stubRoutines.hpp
compilationPolicy.cpp thread.hpp compilationPolicy.cpp thread.hpp
compilationPolicy.cpp timer.hpp compilationPolicy.cpp timer.hpp
@ -1451,6 +1453,7 @@ defaultStream.hpp xmlstream.hpp
deoptimization.cpp allocation.inline.hpp deoptimization.cpp allocation.inline.hpp
deoptimization.cpp biasedLocking.hpp deoptimization.cpp biasedLocking.hpp
deoptimization.cpp bytecode.hpp deoptimization.cpp bytecode.hpp
deoptimization.cpp compilationPolicy.hpp
deoptimization.cpp debugInfoRec.hpp deoptimization.cpp debugInfoRec.hpp
deoptimization.cpp deoptimization.hpp deoptimization.cpp deoptimization.hpp
deoptimization.cpp events.hpp deoptimization.cpp events.hpp
@ -2172,6 +2175,7 @@ interpreterRT_<arch>.hpp generate_platform_dependent_include
interpreterRuntime.cpp biasedLocking.hpp interpreterRuntime.cpp biasedLocking.hpp
interpreterRuntime.cpp collectedHeap.hpp interpreterRuntime.cpp collectedHeap.hpp
interpreterRuntime.cpp compileBroker.hpp
interpreterRuntime.cpp compilationPolicy.hpp interpreterRuntime.cpp compilationPolicy.hpp
interpreterRuntime.cpp constantPoolOop.hpp interpreterRuntime.cpp constantPoolOop.hpp
interpreterRuntime.cpp cpCacheOop.hpp interpreterRuntime.cpp cpCacheOop.hpp
@ -2829,6 +2833,7 @@ methodDataKlass.hpp klass.hpp
methodDataOop.cpp bytecode.hpp methodDataOop.cpp bytecode.hpp
methodDataOop.cpp bytecodeStream.hpp methodDataOop.cpp bytecodeStream.hpp
methodDataOop.cpp compilationPolicy.hpp
methodDataOop.cpp deoptimization.hpp methodDataOop.cpp deoptimization.hpp
methodDataOop.cpp handles.inline.hpp methodDataOop.cpp handles.inline.hpp
methodDataOop.cpp linkResolver.hpp methodDataOop.cpp linkResolver.hpp
@ -2841,6 +2846,7 @@ methodDataOop.hpp bytecodes.hpp
methodDataOop.hpp oop.hpp methodDataOop.hpp oop.hpp
methodDataOop.hpp orderAccess.hpp methodDataOop.hpp orderAccess.hpp
methodDataOop.hpp universe.hpp methodDataOop.hpp universe.hpp
methodDataOop.hpp methodOop.hpp
methodHandleWalk.hpp methodHandles.hpp methodHandleWalk.hpp methodHandles.hpp
@ -2906,6 +2912,7 @@ methodOop.cpp bytecodeStream.hpp
methodOop.cpp bytecodeTracer.hpp methodOop.cpp bytecodeTracer.hpp
methodOop.cpp bytecodes.hpp methodOop.cpp bytecodes.hpp
methodOop.cpp collectedHeap.inline.hpp methodOop.cpp collectedHeap.inline.hpp
methodOop.cpp compilationPolicy.hpp
methodOop.cpp debugInfoRec.hpp methodOop.cpp debugInfoRec.hpp
methodOop.cpp frame.inline.hpp methodOop.cpp frame.inline.hpp
methodOop.cpp gcLocker.hpp methodOop.cpp gcLocker.hpp
@ -3655,6 +3662,7 @@ runtimeService.hpp timer.hpp
safepoint.cpp codeCache.hpp safepoint.cpp codeCache.hpp
safepoint.cpp collectedHeap.hpp safepoint.cpp collectedHeap.hpp
safepoint.cpp compilationPolicy.hpp
safepoint.cpp deoptimization.hpp safepoint.cpp deoptimization.hpp
safepoint.cpp events.hpp safepoint.cpp events.hpp
safepoint.cpp frame.inline.hpp safepoint.cpp frame.inline.hpp
@ -3799,6 +3807,17 @@ signature.hpp allocation.hpp
signature.hpp methodOop.hpp signature.hpp methodOop.hpp
signature.hpp top.hpp signature.hpp top.hpp
simpleThresholdPolicy.cpp arguments.hpp
simpleThresholdPolicy.cpp compileBroker.hpp
simpleThresholdPolicy.cpp resourceArea.hpp
simpleThresholdPolicy.cpp simpleThresholdPolicy.hpp
simpleThresholdPolicy.cpp simpleThresholdPolicy.inline.hpp
simpleThresholdPolicy.hpp compilationPolicy.hpp
simpleThresholdPolicy.hpp globalDefinitions.hpp
simpleThresholdPolicy.hpp methodDataOop.hpp
simpleThresholdPolicy.hpp nmethod.hpp
sizes.cpp sizes.hpp sizes.cpp sizes.hpp
sizes.hpp allocation.hpp sizes.hpp allocation.hpp
@ -3977,6 +3996,7 @@ stubs.hpp os_<os_family>.inline.hpp
sweeper.cpp atomic.hpp sweeper.cpp atomic.hpp
sweeper.cpp codeCache.hpp sweeper.cpp codeCache.hpp
sweeper.cpp compilationPolicy.hpp
sweeper.cpp compileBroker.hpp sweeper.cpp compileBroker.hpp
sweeper.cpp events.hpp sweeper.cpp events.hpp
sweeper.cpp methodOop.hpp sweeper.cpp methodOop.hpp
View file
@ -777,43 +777,6 @@ IRT_END
// Miscellaneous // Miscellaneous
#ifndef PRODUCT
static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
if (TraceInvocationCounterOverflow) {
InvocationCounter* ic = m->invocation_counter();
InvocationCounter* bc = m->backedge_counter();
ResourceMark rm;
const char* msg =
branch_bcp == NULL
? "comp-policy cntr ovfl @ %d in entry of "
: "comp-policy cntr ovfl @ %d in loop of ";
tty->print(msg, bci);
m->print_value();
tty->cr();
ic->print();
bc->print();
if (ProfileInterpreter) {
if (branch_bcp != NULL) {
methodDataOop mdo = m->method_data();
if (mdo != NULL) {
int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
tty->print_cr("back branch count = %d", count);
}
}
}
}
}
static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
if (TraceOnStackReplacement) {
ResourceMark rm;
tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
method->print_short_name(tty);
tty->print_cr(" at bci %d", bci);
}
}
#endif // !PRODUCT
nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) { nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp); nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests"); assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
@ -826,7 +789,7 @@ nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, addr
frame fr = thread->last_frame(); frame fr = thread->last_frame();
methodOop method = fr.interpreter_frame_method(); methodOop method = fr.interpreter_frame_method();
int bci = method->bci_from(fr.interpreter_frame_bcp()); int bci = method->bci_from(fr.interpreter_frame_bcp());
nm = method->lookup_osr_nmethod_for(bci); nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
} }
return nm; return nm;
} }
@ -840,74 +803,32 @@ IRT_ENTRY(nmethod*,
frame fr = thread->last_frame(); frame fr = thread->last_frame();
assert(fr.is_interpreted_frame(), "must come from interpreter"); assert(fr.is_interpreted_frame(), "must come from interpreter");
methodHandle method(thread, fr.interpreter_frame_method()); methodHandle method(thread, fr.interpreter_frame_method());
const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0; const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
const int bci = method->bci_from(fr.interpreter_frame_bcp()); const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
if (JvmtiExport::can_post_interpreter_events()) { nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
if (thread->is_interp_only_mode()) {
// If certain JVMTI events (e.g. frame pop event) are requested then the
// thread is forced to remain in interpreted code. This is
// implemented partly by a check in the run_compiled_code
// section of the interpreter whether we should skip running
// compiled code, and partly by skipping OSR compiles for
// interpreted-only threads.
if (branch_bcp != NULL) {
CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
return NULL;
}
}
}
if (branch_bcp == NULL) { if (osr_nm != NULL) {
// when code cache is full, compilation gets switched off, UseCompiler // We may need to do on-stack replacement which requires that no
// is set to false // monitors in the activation are biased because their
if (!method->has_compiled_code() && UseCompiler) { // BasicObjectLocks will need to migrate during OSR. Force
CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL); // unbiasing of all monitors in the activation now (even though
} else { // the OSR nmethod might be invalidated) because we don't have a
// Force counter overflow on method entry, even if no compilation // safepoint opportunity later once the migration begins.
// happened. (The method_invocation_event call does this also.) if (UseBiasedLocking) {
CompilationPolicy::policy()->reset_counter_for_invocation_event(method); ResourceMark rm;
} GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
// compilation at an invocation overflow no longer goes and retries test for for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
// compiled method. We always run the loser of the race as interpreted. kptr < fr.interpreter_frame_monitor_begin();
// so return NULL kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
return NULL; if( kptr->obj() != NULL ) {
} else { objects_to_revoke->append(Handle(THREAD, kptr->obj()));
// counter overflow in a loop => try to do on-stack-replacement
nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
// when code cache is full, we should not compile any more...
if (osr_nm == NULL && UseCompiler) {
const int branch_bci = method->bci_from(branch_bcp);
CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
osr_nm = method->lookup_osr_nmethod_for(bci);
}
if (osr_nm == NULL) {
CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
return NULL;
} else {
// We may need to do on-stack replacement which requires that no
// monitors in the activation are biased because their
// BasicObjectLocks will need to migrate during OSR. Force
// unbiasing of all monitors in the activation now (even though
// the OSR nmethod might be invalidated) because we don't have a
// safepoint opportunity later once the migration begins.
if (UseBiasedLocking) {
ResourceMark rm;
GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
kptr < fr.interpreter_frame_monitor_begin();
kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
if( kptr->obj() != NULL ) {
objects_to_revoke->append(Handle(THREAD, kptr->obj()));
}
} }
BiasedLocking::revoke(objects_to_revoke);
} }
return osr_nm; BiasedLocking::revoke(objects_to_revoke);
} }
} }
return osr_nm;
IRT_END IRT_END
IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp)) IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -40,8 +40,7 @@ void InvocationCounter::reset() {
} }
void InvocationCounter::set_carry() { void InvocationCounter::set_carry() {
_counter |= carry_mask; set_carry_flag();
// The carry bit now indicates that this counter had achieved a very // The carry bit now indicates that this counter had achieved a very
// large value. Now reduce the value, so that the method can be // large value. Now reduce the value, so that the method can be
// executed many more times before re-entering the VM. // executed many more times before re-entering the VM.
@ -52,7 +51,6 @@ void InvocationCounter::set_carry() {
if (old_count != new_count) set(state(), new_count); if (old_count != new_count) set(state(), new_count);
} }
void InvocationCounter::set_state(State state) { void InvocationCounter::set_state(State state) {
assert(0 <= state && state < number_of_states, "illegal state"); assert(0 <= state && state < number_of_states, "illegal state");
int init = _init[state]; int init = _init[state];
@ -82,11 +80,6 @@ int InvocationCounter::InterpreterInvocationLimit;
int InvocationCounter::InterpreterBackwardBranchLimit; int InvocationCounter::InterpreterBackwardBranchLimit;
int InvocationCounter::InterpreterProfileLimit; int InvocationCounter::InterpreterProfileLimit;
// Tier1 limits
int InvocationCounter::Tier1InvocationLimit;
int InvocationCounter::Tier1BackEdgeLimit;
const char* InvocationCounter::state_as_string(State state) { const char* InvocationCounter::state_as_string(State state) {
switch (state) { switch (state) {
@ -146,8 +139,6 @@ void InvocationCounter::reinitialize(bool delay_overflow) {
InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits; InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits;
InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits; InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits;
Tier1InvocationLimit = Tier2CompileThreshold << number_of_noncount_bits;
Tier1BackEdgeLimit = Tier2BackEdgeThreshold << number_of_noncount_bits;
// When methodData is collected, the backward branch limit is compared against a // When methodData is collected, the backward branch limit is compared against a
// methodData counter, rather than an InvocationCounter. In the former case, we // methodData counter, rather than an InvocationCounter. In the former case, we
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,6 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
number_of_count_bits = BitsPerInt - number_of_noncount_bits, number_of_count_bits = BitsPerInt - number_of_noncount_bits,
state_limit = nth_bit(number_of_state_bits), state_limit = nth_bit(number_of_state_bits),
count_grain = nth_bit(number_of_state_bits + number_of_carry_bits), count_grain = nth_bit(number_of_state_bits + number_of_carry_bits),
count_limit = nth_bit(number_of_count_bits - 1),
carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits, carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits,
state_mask = right_n_bits(number_of_state_bits), state_mask = right_n_bits(number_of_state_bits),
status_mask = right_n_bits(number_of_state_bits + number_of_carry_bits), status_mask = right_n_bits(number_of_state_bits + number_of_carry_bits),
@ -52,18 +51,16 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
public: public:
static int InterpreterInvocationLimit; // CompileThreshold scaled for interpreter use static int InterpreterInvocationLimit; // CompileThreshold scaled for interpreter use
static int Tier1InvocationLimit; // CompileThreshold scaled for tier1 use
static int Tier1BackEdgeLimit; // BackEdgeThreshold scaled for tier1 use
static int InterpreterBackwardBranchLimit; // A separate threshold for on stack replacement static int InterpreterBackwardBranchLimit; // A separate threshold for on stack replacement
static int InterpreterProfileLimit; // Profiling threshold scaled for interpreter use static int InterpreterProfileLimit; // Profiling threshold scaled for interpreter use
typedef address (*Action)(methodHandle method, TRAPS); typedef address (*Action)(methodHandle method, TRAPS);
enum PublicConstants { enum PublicConstants {
count_increment = count_grain, // use this value to increment the 32bit _counter word count_increment = count_grain, // use this value to increment the 32bit _counter word
count_mask_value = count_mask // use this value to mask the backedge counter count_mask_value = count_mask, // use this value to mask the backedge counter
count_shift = number_of_noncount_bits,
count_limit = nth_bit(number_of_count_bits - 1)
}; };
enum State { enum State {
@ -79,6 +76,7 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
inline void set(State state, int count); // sets state and counter inline void set(State state, int count); // sets state and counter
inline void decay(); // decay counter (divide by two) inline void decay(); // decay counter (divide by two)
void set_carry(); // set the sticky carry bit void set_carry(); // set the sticky carry bit
void set_carry_flag() { _counter |= carry_mask; }
// Accessors // Accessors
State state() const { return (State)(_counter & state_mask); } State state() const { return (State)(_counter & state_mask); }
@ -135,3 +133,4 @@ inline void InvocationCounter::decay() {
if (c > 0 && new_count == 0) new_count = 1; if (c > 0 && new_count == 0) new_count = 1;
set(state(), new_count); set(state(), new_count);
} }
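With the usual HotSpot bit split (2 state bits plus 1 carry bit in the low end of a 32-bit counter word), the constants above work out as below; a worked sketch under that assumption:

// Worked example, assuming number_of_state_bits == 2 and
// number_of_carry_bits == 1 on a 32-bit int:
//   number_of_noncount_bits = 3
//   number_of_count_bits    = 32 - 3 = 29
//   count_increment         = 1 << 3 = 8     // adds 1 to the real count
//   carry_mask              = 1 << 2         // the sticky overflow bit
//   count_limit             = 1 << 28        // saturation value reported
//                                            // once a carry is seen
// The raw word is laid out as (count << 3) | carry | state, so
// count() is simply _counter >> number_of_noncount_bits.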
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -83,12 +83,12 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
_resolved_method = resolved_method; _resolved_method = resolved_method;
_selected_method = selected_method; _selected_method = selected_method;
_vtable_index = vtable_index; _vtable_index = vtable_index;
if (CompilationPolicy::mustBeCompiled(selected_method)) { if (CompilationPolicy::must_be_compiled(selected_method)) {
// This path is unusual, mostly used by the '-Xcomp' stress test mode. // This path is unusual, mostly used by the '-Xcomp' stress test mode.
// Note: with several active threads, the mustBeCompiled may be true // Note: with several active threads, the must_be_compiled may be true
// while canBeCompiled is false; remove assert // while can_be_compiled is false; remove assert
// assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile"); // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
if (THREAD->is_Compiler_thread()) { if (THREAD->is_Compiler_thread()) {
// don't force compilation, resolve was on behalf of compiler // don't force compilation, resolve was on behalf of compiler
return; return;
@ -104,7 +104,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
return; return;
} }
CompileBroker::compile_method(selected_method, InvocationEntryBci, CompileBroker::compile_method(selected_method, InvocationEntryBci,
methodHandle(), 0, "mustBeCompiled", CHECK); CompLevel_initial_compile,
methodHandle(), 0, "must_be_compiled", CHECK);
} }
} }
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2200,8 +2200,23 @@ void instanceKlass::add_osr_nmethod(nmethod* n) {
assert(n->is_osr_method(), "wrong kind of nmethod"); assert(n->is_osr_method(), "wrong kind of nmethod");
n->set_osr_link(osr_nmethods_head()); n->set_osr_link(osr_nmethods_head());
set_osr_nmethods_head(n); set_osr_nmethods_head(n);
// Raise the highest osr level if necessary
if (TieredCompilation) {
methodOop m = n->method();
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
}
// Remember to unlock again // Remember to unlock again
OsrList_lock->unlock(); OsrList_lock->unlock();
// Get rid of the osr methods for the same bci that have lower levels.
if (TieredCompilation) {
for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
if (inv != NULL && inv->is_in_use()) {
inv->make_not_entrant();
}
}
}
} }
@ -2211,39 +2226,79 @@ void instanceKlass::remove_osr_nmethod(nmethod* n) {
assert(n->is_osr_method(), "wrong kind of nmethod"); assert(n->is_osr_method(), "wrong kind of nmethod");
nmethod* last = NULL; nmethod* last = NULL;
nmethod* cur = osr_nmethods_head(); nmethod* cur = osr_nmethods_head();
int max_level = CompLevel_none; // Find the max comp level excluding n
methodOop m = n->method();
// Search for match // Search for match
while(cur != NULL && cur != n) { while(cur != NULL && cur != n) {
if (TieredCompilation) {
// Find max level before n
max_level = MAX2(max_level, cur->comp_level());
}
last = cur; last = cur;
cur = cur->osr_link(); cur = cur->osr_link();
} }
nmethod* next = NULL;
if (cur == n) { if (cur == n) {
next = cur->osr_link();
if (last == NULL) { if (last == NULL) {
// Remove first element // Remove first element
set_osr_nmethods_head(osr_nmethods_head()->osr_link()); set_osr_nmethods_head(next);
} else { } else {
last->set_osr_link(cur->osr_link()); last->set_osr_link(next);
} }
} }
n->set_osr_link(NULL); n->set_osr_link(NULL);
if (TieredCompilation) {
cur = next;
while (cur != NULL) {
// Find max level after n
max_level = MAX2(max_level, cur->comp_level());
cur = cur->osr_link();
}
m->set_highest_osr_comp_level(max_level);
}
// Remember to unlock again // Remember to unlock again
OsrList_lock->unlock(); OsrList_lock->unlock();
} }
nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const { nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
// This is a short non-blocking critical region, so the no safepoint check is ok. // This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check(); OsrList_lock->lock_without_safepoint_check();
nmethod* osr = osr_nmethods_head(); nmethod* osr = osr_nmethods_head();
nmethod* best = NULL;
while (osr != NULL) { while (osr != NULL) {
assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
// There can be a window when a C1 OSR method exists but we are waiting
// for a C2 version. When C2 completes its OSR nmethod we will trash
// the C1 version and only be able to find the C2 version. However,
// while we overflow in the C1 code at back branches, we don't want to
// try to switch to the same code we are already running.
if (osr->method() == m && if (osr->method() == m &&
(bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) { (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
// Found a match - return it. if (match_level) {
OsrList_lock->unlock(); if (osr->comp_level() == comp_level) {
return osr; // Found a match - return it.
OsrList_lock->unlock();
return osr;
}
} else {
if (best == NULL || (osr->comp_level() > best->comp_level())) {
if (osr->comp_level() == CompLevel_highest_tier) {
// Found the best possible - return it.
OsrList_lock->unlock();
return osr;
}
best = osr;
}
}
} }
osr = osr->osr_link(); osr = osr->osr_link();
} }
OsrList_lock->unlock(); OsrList_lock->unlock();
if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
return best;
}
return NULL; return NULL;
} }
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -588,7 +588,7 @@ class instanceKlass: public Klass {
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; }; void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n); void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n); void remove_osr_nmethod(nmethod* n);
nmethod* lookup_osr_nmethod(const methodOop m, int bci) const; nmethod* lookup_osr_nmethod(const methodOop m, int bci, int level, bool match_level) const;
// Breakpoint support (see methods on methodOop for details) // Breakpoint support (see methods on methodOop for details)
BreakpointInfo* breakpoints() const { return _breakpoints; }; BreakpointInfo* breakpoints() const { return _breakpoints; };
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -283,11 +283,17 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
if (receiver(row) != NULL) entries++; if (receiver(row) != NULL) entries++;
} }
st->print_cr("count(%u) entries(%u)", count(), entries); st->print_cr("count(%u) entries(%u)", count(), entries);
int total = count();
for (row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) {
total += receiver_count(row);
}
}
for (row = 0; row < row_limit(); row++) { for (row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) { if (receiver(row) != NULL) {
tab(st); tab(st);
receiver(row)->print_value_on(st); receiver(row)->print_value_on(st);
st->print_cr("(%u)", receiver_count(row)); st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
} }
} }
} }
@ -743,9 +749,18 @@ void methodDataOopDesc::post_initialize(BytecodeStream* stream) {
// Initialize the methodDataOop corresponding to a given method. // Initialize the methodDataOop corresponding to a given method.
void methodDataOopDesc::initialize(methodHandle method) { void methodDataOopDesc::initialize(methodHandle method) {
ResourceMark rm; ResourceMark rm;
// Set the method back-pointer. // Set the method back-pointer.
_method = method(); _method = method();
if (TieredCompilation) {
_invocation_counter.init();
_backedge_counter.init();
_num_loops = 0;
_num_blocks = 0;
_highest_comp_level = 0;
_highest_osr_comp_level = 0;
_would_profile = false;
}
set_creation_mileage(mileage_of(method())); set_creation_mileage(mileage_of(method()));
// Initialize flags and trap history. // Initialize flags and trap history.
@ -798,32 +813,25 @@ void methodDataOopDesc::initialize(methodHandle method) {
// Get a measure of how much mileage the method has on it. // Get a measure of how much mileage the method has on it.
int methodDataOopDesc::mileage_of(methodOop method) { int methodDataOopDesc::mileage_of(methodOop method) {
int mileage = 0; int mileage = 0;
int iic = method->interpreter_invocation_count(); if (TieredCompilation) {
if (mileage < iic) mileage = iic; mileage = MAX2(method->invocation_count(), method->backedge_count());
} else {
InvocationCounter* ic = method->invocation_counter(); int iic = method->interpreter_invocation_count();
InvocationCounter* bc = method->backedge_counter(); if (mileage < iic) mileage = iic;
InvocationCounter* ic = method->invocation_counter();
int icval = ic->count(); InvocationCounter* bc = method->backedge_counter();
if (ic->carry()) icval += CompileThreshold; int icval = ic->count();
if (mileage < icval) mileage = icval; if (ic->carry()) icval += CompileThreshold;
int bcval = bc->count(); if (mileage < icval) mileage = icval;
if (bc->carry()) bcval += CompileThreshold; int bcval = bc->count();
if (mileage < bcval) mileage = bcval; if (bc->carry()) bcval += CompileThreshold;
if (mileage < bcval) mileage = bcval;
}
return mileage; return mileage;
} }
bool methodDataOopDesc::is_mature() const { bool methodDataOopDesc::is_mature() const {
uint current = mileage_of(_method); return CompilationPolicy::policy()->is_mature(_method);
uint initial = creation_mileage();
if (current < initial)
return true; // some sort of overflow
uint target;
if (ProfileMaturityPercentage <= 0)
target = (uint) -ProfileMaturityPercentage; // absolute value
else
target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
return (current >= initial + target);
} }
// Translate a bci to its corresponding data index (di). // Translate a bci to its corresponding data index (di).
View file
@ -1206,7 +1206,25 @@ private:
intx _arg_stack; // bit set of stack-allocatable arguments intx _arg_stack; // bit set of stack-allocatable arguments
intx _arg_returned; // bit set of returned arguments intx _arg_returned; // bit set of returned arguments
int _creation_mileage; // method mileage at MDO creation int _creation_mileage; // method mileage at MDO creation
// How many invocations has this MDO seen?
// These counters are used to determine the exact age of the MDO.
// We need them because with tiered compilation a method can be
// executed concurrently at different levels.
InvocationCounter _invocation_counter;
// Same for backedges.
InvocationCounter _backedge_counter;
// The number of loops and blocks is computed during the first
// C1 compilation. It is used to determine whether the method is trivial.
short _num_loops;
short _num_blocks;
// Highest compile level this method has ever seen.
u1 _highest_comp_level;
// Same for OSR level
u1 _highest_osr_comp_level;
// Does this method contain anything worth profiling?
bool _would_profile;
// Size of _data array in bytes. (Excludes header and extra_data fields.) // Size of _data array in bytes. (Excludes header and extra_data fields.)
int _data_size; int _data_size;
@ -1292,6 +1310,36 @@ public:
int creation_mileage() const { return _creation_mileage; } int creation_mileage() const { return _creation_mileage; }
void set_creation_mileage(int x) { _creation_mileage = x; } void set_creation_mileage(int x) { _creation_mileage = x; }
int invocation_count() {
if (invocation_counter()->carry()) {
return InvocationCounter::count_limit;
}
return invocation_counter()->count();
}
int backedge_count() {
if (backedge_counter()->carry()) {
return InvocationCounter::count_limit;
}
return backedge_counter()->count();
}
InvocationCounter* invocation_counter() { return &_invocation_counter; }
InvocationCounter* backedge_counter() { return &_backedge_counter; }
void set_would_profile(bool p) { _would_profile = p; }
bool would_profile() const { return _would_profile; }
int highest_comp_level() { return _highest_comp_level; }
void set_highest_comp_level(int level) { _highest_comp_level = level; }
int highest_osr_comp_level() { return _highest_osr_comp_level; }
void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; }
int num_loops() const { return _num_loops; }
void set_num_loops(int n) { _num_loops = n; }
int num_blocks() const { return _num_blocks; }
void set_num_blocks(int n) { _num_blocks = n; }
bool is_mature() const; // consult mileage and ProfileMaturityPercentage bool is_mature() const; // consult mileage and ProfileMaturityPercentage
static int mileage_of(methodOop m); static int mileage_of(methodOop m);
@ -1413,7 +1461,7 @@ public:
void inc_decompile_count() { void inc_decompile_count() {
_nof_decompiles += 1; _nof_decompiles += 1;
if (decompile_count() > (uint)PerMethodRecompilationCutoff) { if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
method()->set_not_compilable(); method()->set_not_compilable(CompLevel_full_optimization);
} }
} }
@ -1422,6 +1470,13 @@ public:
return byte_offset_of(methodDataOopDesc, _data[0]); return byte_offset_of(methodDataOopDesc, _data[0]);
} }
static ByteSize invocation_counter_offset() {
return byte_offset_of(methodDataOopDesc, _invocation_counter);
}
static ByteSize backedge_counter_offset() {
return byte_offset_of(methodDataOopDesc, _backedge_counter);
}
// GC support // GC support
oop* adr_method() const { return (oop*)&_method; } oop* adr_method() const { return (oop*)&_method; }
bool object_is_parsable() const { return _size != 0; } bool object_is_parsable() const { return _size != 0; }
View file
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,6 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
// Fix and bury in methodOop // Fix and bury in methodOop
m->set_interpreter_entry(NULL); // sets i2i entry and from_int m->set_interpreter_entry(NULL); // sets i2i entry and from_int
m->set_highest_tier_compile(CompLevel_none);
m->set_adapter_entry(NULL); m->set_adapter_entry(NULL);
m->clear_code(); // from_c/from_i get set to c2i/i2i m->clear_code(); // from_c/from_i get set to c2i/i2i
@ -89,6 +88,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
m->invocation_counter()->init(); m->invocation_counter()->init();
m->backedge_counter()->init(); m->backedge_counter()->init();
m->clear_number_of_breakpoints(); m->clear_number_of_breakpoints();
assert(m->is_parsable(), "must be parsable here."); assert(m->is_parsable(), "must be parsable here.");
assert(m->size() == size, "wrong size for object"); assert(m->size() == size, "wrong size for object");
// We should not publish an unparsable object's reference
@ -246,8 +246,8 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) {
st->print_cr(" - method size: %d", m->method_size()); st->print_cr(" - method size: %d", m->method_size());
if (m->intrinsic_id() != vmIntrinsics::_none) if (m->intrinsic_id() != vmIntrinsics::_none)
st->print_cr(" - intrinsic id: %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id())); st->print_cr(" - intrinsic id: %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
if (m->highest_tier_compile() != CompLevel_none) if (m->highest_comp_level() != CompLevel_none)
st->print_cr(" - highest tier: %d", m->highest_tier_compile()); st->print_cr(" - highest level: %d", m->highest_comp_level());
st->print_cr(" - vtable index: %d", m->_vtable_index); st->print_cr(" - vtable index: %d", m->_vtable_index);
st->print_cr(" - i2i entry: " INTPTR_FORMAT, m->interpreter_entry()); st->print_cr(" - i2i entry: " INTPTR_FORMAT, m->interpreter_entry());
st->print_cr(" - adapter: " INTPTR_FORMAT, m->adapter()); st->print_cr(" - adapter: " INTPTR_FORMAT, m->adapter());
View file
@ -233,7 +233,7 @@ void methodOopDesc::remove_unshareable_info() {
} }
bool methodOopDesc::was_executed_more_than(int n) const { bool methodOopDesc::was_executed_more_than(int n) {
// Invocation counter is reset when the methodOop is compiled. // Invocation counter is reset when the methodOop is compiled.
// If the method has compiled code we therefore assume it has // If the method has compiled code we therefore assume it has
// been executed more than n times. // been executed more than n times.
@ -241,7 +241,8 @@ bool methodOopDesc::was_executed_more_than(int n) const {
// interpreter doesn't bump invocation counter of trivial methods // interpreter doesn't bump invocation counter of trivial methods
// compiler does not bump invocation counter of compiled methods // compiler does not bump invocation counter of compiled methods
return true; return true;
} else if (_invocation_counter.carry()) { }
else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) {
// The carry bit is set when the counter overflows and causes // The carry bit is set when the counter overflows and causes
// a compilation to occur. We don't know how many times // a compilation to occur. We don't know how many times
// the counter has been reset, so we simply assume it has // the counter has been reset, so we simply assume it has
@ -253,7 +254,7 @@ bool methodOopDesc::was_executed_more_than(int n) const {
} }
#ifndef PRODUCT #ifndef PRODUCT
void methodOopDesc::print_invocation_count() const { void methodOopDesc::print_invocation_count() {
if (is_static()) tty->print("static "); if (is_static()) tty->print("static ");
if (is_final()) tty->print("final "); if (is_final()) tty->print("final ");
if (is_synchronized()) tty->print("synchronized "); if (is_synchronized()) tty->print("synchronized ");
@ -574,16 +575,19 @@ bool methodOopDesc::is_not_compilable(int comp_level) const {
// compilers must recognize this method specially, or not at all // compilers must recognize this method specially, or not at all
return true; return true;
} }
if (number_of_breakpoints() > 0) {
#ifdef COMPILER2 return true;
if (is_tier1_compile(comp_level)) {
if (is_not_tier1_compilable()) {
return true;
}
} }
#endif // COMPILER2 if (comp_level == CompLevel_any) {
return (_invocation_counter.state() == InvocationCounter::wait_for_nothing) return is_not_c1_compilable() || is_not_c2_compilable();
|| (number_of_breakpoints() > 0); }
if (is_c1_compile(comp_level)) {
return is_not_c1_compilable();
}
if (is_c2_compile(comp_level)) {
return is_not_c2_compilable();
}
return false;
} }
// call this when compiler finds that this method is not compilable // call this when compiler finds that this method is not compilable
@ -604,15 +608,18 @@ void methodOopDesc::set_not_compilable(int comp_level, bool report) {
xtty->stamp(); xtty->stamp();
xtty->end_elem(); xtty->end_elem();
} }
#ifdef COMPILER2 if (comp_level == CompLevel_all) {
if (is_tier1_compile(comp_level)) { set_not_c1_compilable();
set_not_tier1_compilable(); set_not_c2_compilable();
return; } else {
if (is_c1_compile(comp_level)) {
set_not_c1_compilable();
} else
if (is_c2_compile(comp_level)) {
set_not_c2_compilable();
}
} }
#endif /* COMPILER2 */ CompilationPolicy::policy()->disable_compilation(this);
assert(comp_level == CompLevel_highest_tier, "unexpected compilation level");
invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
} }
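Both paths above pivot on the is_c1_compile/is_c2_compile predicates, which are defined outside this file. A sketch consistent with how the levels are used in this change (C1 covers everything between interpreter-only and full optimization, C2 is full optimization alone):

// Sketch, assuming the tiered ordering
// CompLevel_none < simple < limited_profile < full_profile < full_optimization:
inline bool is_c1_compile(int comp_level) {
  return comp_level > CompLevel_none &&
         comp_level < CompLevel_full_optimization;  // the C1 levels
}
inline bool is_c2_compile(int comp_level) {
  return comp_level == CompLevel_full_optimization; // C2 only
}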
// Revert to using the interpreter and clear out the nmethod // Revert to using the interpreter and clear out the nmethod
@ -649,7 +656,6 @@ void methodOopDesc::unlink_method() {
set_method_data(NULL); set_method_data(NULL);
set_interpreter_throwout_count(0); set_interpreter_throwout_count(0);
set_interpreter_invocation_count(0); set_interpreter_invocation_count(0);
_highest_tier_compile = CompLevel_none;
} }
// Called when the method_holder is getting linked. Setup entrypoints so the method // Called when the method_holder is getting linked. Setup entrypoints so the method
@ -746,8 +752,8 @@ void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
int comp_level = code->comp_level(); int comp_level = code->comp_level();
// In theory there could be a race here. In practice it is unlikely // In theory there could be a race here. In practice it is unlikely
// and not worth worrying about. // and not worth worrying about.
if (comp_level > mh->highest_tier_compile()) { if (comp_level > mh->highest_comp_level()) {
mh->set_highest_tier_compile(comp_level); mh->set_highest_comp_level(comp_level);
} }
OrderAccess::storestore(); OrderAccess::storestore();
@ -1442,6 +1448,64 @@ void methodOopDesc::clear_all_breakpoints() {
} }
int methodOopDesc::invocation_count() {
if (TieredCompilation) {
const methodDataOop mdo = method_data();
if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
return InvocationCounter::count_limit;
} else {
return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
}
} else {
return invocation_counter()->count();
}
}
int methodOopDesc::backedge_count() {
if (TieredCompilation) {
const methodDataOop mdo = method_data();
if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
return InvocationCounter::count_limit;
} else {
return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
}
} else {
return backedge_counter()->count();
}
}
int methodOopDesc::highest_comp_level() const {
methodDataOop mdo = method_data();
if (mdo != NULL) {
return mdo->highest_comp_level();
} else {
return CompLevel_none;
}
}
int methodOopDesc::highest_osr_comp_level() const {
methodDataOop mdo = method_data();
if (mdo != NULL) {
return mdo->highest_osr_comp_level();
} else {
return CompLevel_none;
}
}
void methodOopDesc::set_highest_comp_level(int level) {
methodDataOop mdo = method_data();
if (mdo != NULL) {
mdo->set_highest_comp_level(level);
}
}
void methodOopDesc::set_highest_osr_comp_level(int level) {
methodDataOop mdo = method_data();
if (mdo != NULL) {
mdo->set_highest_osr_comp_level(level);
}
}
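Under tiering the method's own counter and the MDO counter each record part of the execution history (the MDO counter keeps ticking while the method runs profiled), so the accessors above add the two and saturate once either carry bit is set. A small illustration of the intended arithmetic:

// Illustration only:
//   method counter: count = 200, carry = false
//   MDO counter:    count = 300, carry = false
//     => invocation_count() == 500
//   method counter: carry = true (overflowed at some point)
//     => invocation_count() == InvocationCounter::count_limit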
BreakpointInfo::BreakpointInfo(methodOop m, int bci) { BreakpointInfo::BreakpointInfo(methodOop m, int bci) {
_bci = bci; _bci = bci;
_name_index = m->name_index(); _name_index = m->name_index();
View file
@ -62,9 +62,9 @@
// | method_size | max_stack | // | method_size | max_stack |
// | max_locals | size_of_parameters | // | max_locals | size_of_parameters |
// |------------------------------------------------------| // |------------------------------------------------------|
// | intrinsic_id, highest_tier | (unused) | // | intrinsic_id, (unused) | throwout_count |
// |------------------------------------------------------| // |------------------------------------------------------|
// | throwout_count | num_breakpoints | // | num_breakpoints | (unused) |
// |------------------------------------------------------| // |------------------------------------------------------|
// | invocation_counter | // | invocation_counter |
// | backedge_counter | // | backedge_counter |
@ -83,7 +83,6 @@
class CheckedExceptionElement; class CheckedExceptionElement;
class LocalVariableTableElement; class LocalVariableTableElement;
class AdapterHandlerEntry; class AdapterHandlerEntry;
class methodDataOopDesc; class methodDataOopDesc;
class methodOopDesc : public oopDesc { class methodOopDesc : public oopDesc {
@ -93,7 +92,7 @@ class methodOopDesc : public oopDesc {
constMethodOop _constMethod; // Method read-only data. constMethodOop _constMethod; // Method read-only data.
constantPoolOop _constants; // Constant pool constantPoolOop _constants; // Constant pool
methodDataOop _method_data; methodDataOop _method_data;
int _interpreter_invocation_count; // Count of times invoked int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
AccessFlags _access_flags; // Access flags AccessFlags _access_flags; // Access flags
int _vtable_index; // vtable index of this method (see VtableIndexFlag) int _vtable_index; // vtable index of this method (see VtableIndexFlag)
// note: can have vtables with >2**16 elements (because of inheritance) // note: can have vtables with >2**16 elements (because of inheritance)
@ -105,11 +104,11 @@ class methodOopDesc : public oopDesc {
u2 _max_locals; // Number of local variables used by this method u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _highest_tier_compile; // Highest compile level this method has ever seen.
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support u2 _number_of_breakpoints; // fullspeed debugging support
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
#ifndef PRODUCT #ifndef PRODUCT
int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
#endif #endif
@ -221,8 +220,11 @@ class methodOopDesc : public oopDesc {
// max locals // max locals
int max_locals() const { return _max_locals; } int max_locals() const { return _max_locals; }
void set_max_locals(int size) { _max_locals = size; } void set_max_locals(int size) { _max_locals = size; }
int highest_tier_compile() { return _highest_tier_compile;}
void set_highest_tier_compile(int level) { _highest_tier_compile = level;} int highest_comp_level() const;
void set_highest_comp_level(int level);
int highest_osr_comp_level() const;
void set_highest_osr_comp_level(int level);
// Count of times method was exited via exception while interpreting // Count of times method was exited via exception while interpreting
void interpreter_throwout_increment() { void interpreter_throwout_increment() {
@ -276,21 +278,29 @@ class methodOopDesc : public oopDesc {
} }
// invocation counter // invocation counter
InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* invocation_counter() { return &_invocation_counter; }
InvocationCounter* backedge_counter() { return &_backedge_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; }
int invocation_count() const { return _invocation_counter.count(); }
int backedge_count() const { return _backedge_counter.count(); } int invocation_count();
bool was_executed_more_than(int n) const; int backedge_count();
bool was_never_executed() const { return !was_executed_more_than(0); }
bool was_executed_more_than(int n);
bool was_never_executed() { return !was_executed_more_than(0); }
static void build_interpreter_method_data(methodHandle method, TRAPS); static void build_interpreter_method_data(methodHandle method, TRAPS);
int interpreter_invocation_count() const { return _interpreter_invocation_count; } int interpreter_invocation_count() {
if (TieredCompilation) return invocation_count();
else return _interpreter_invocation_count;
}
void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; } void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
int increment_interpreter_invocation_count() { return ++_interpreter_invocation_count; } int increment_interpreter_invocation_count() {
if (TieredCompilation) ShouldNotReachHere();
return ++_interpreter_invocation_count;
}
#ifndef PRODUCT #ifndef PRODUCT
int compiled_invocation_count() const { return _compiled_invocation_count; } int compiled_invocation_count() const { return _compiled_invocation_count; }
void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; } void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#endif // not PRODUCT #endif // not PRODUCT
@ -361,7 +371,7 @@ class methodOopDesc : public oopDesc {
#ifndef PRODUCT #ifndef PRODUCT
// operations on invocation counter // operations on invocation counter
void print_invocation_count() const; void print_invocation_count();
#endif #endif
// byte codes // byte codes
@ -587,8 +597,13 @@ class methodOopDesc : public oopDesc {
static vmSymbols::SID klass_id_for_intrinsics(klassOop holder); static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
// On-stack replacement support // On-stack replacement support
bool has_osr_nmethod() { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; } bool has_osr_nmethod(int level, bool match_level) {
nmethod* lookup_osr_nmethod_for(int bci) { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); } return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
}
nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci, level, match_level);
}
// Inline cache support // Inline cache support
void cleanup_inline_caches(); void cleanup_inline_caches();
@ -600,22 +615,24 @@ class methodOopDesc : public oopDesc {
// Indicates whether compilation failed earlier for this method, or // Indicates whether compilation failed earlier for this method, or
// whether it is not compilable for another reason like having a // whether it is not compilable for another reason like having a
// breakpoint set in it. // breakpoint set in it.
bool is_not_compilable(int comp_level = CompLevel_highest_tier) const; bool is_not_compilable(int comp_level = CompLevel_any) const;
void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true); void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) { void set_not_compilable_quietly(int comp_level = CompLevel_all) {
set_not_compilable(comp_level, false); set_not_compilable(comp_level, false);
} }
bool is_not_osr_compilable(int comp_level = CompLevel_any) const {
bool is_not_osr_compilable() const { return is_not_compilable() || access_flags().is_not_osr_compilable(); } return is_not_compilable(comp_level) || access_flags().is_not_osr_compilable();
void set_not_osr_compilable() { _access_flags.set_not_osr_compilable(); } }
void set_not_osr_compilable() { _access_flags.set_not_osr_compilable(); }
bool is_not_tier1_compilable() const { return access_flags().is_not_tier1_compilable(); } bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
void set_not_tier1_compilable() { _access_flags.set_not_tier1_compilable(); } void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
// Background compilation support // Background compilation support
bool queued_for_compilation() const { return access_flags().queued_for_compilation(); } bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); } void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); } void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }
static methodOop method_from_bcp(address bcp); static methodOop method_from_bcp(address bcp);
View file
@@ -140,7 +140,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m
   } else {
     // Not hot. Check for medium-sized pre-existing nmethod at cold sites.
     if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size() > InlineSmallCode/4)
+        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4)
       return "already compiled into a medium method";
   }
   if (size > max_size) {
@@ -180,7 +180,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
     }
   }
-  if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
+  if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
     wci_result->set_profit(wci_result->profit() * 0.1);
     // %%% adjust wci_result->size()?
   }
@@ -206,7 +206,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
   // Now perform checks which are heuristic
-  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode )
+  if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode )
     return "already compiled into a big method";

   // don't inline exception code unless the top method belongs to an
View file
@@ -850,25 +850,13 @@ void Compile::Init(int aliaslevel) {
   set_decompile_count(0);

   set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
-  // Compilation level related initialization
-  if (env()->comp_level() == CompLevel_fast_compile) {
-    set_num_loop_opts(Tier1LoopOptsCount);
-    set_do_inlining(Tier1Inline != 0);
-    set_max_inline_size(Tier1MaxInlineSize);
-    set_freq_inline_size(Tier1FreqInlineSize);
-    set_do_scheduling(false);
-    set_do_count_invocations(Tier1CountInvocations);
-    set_do_method_data_update(Tier1UpdateMethodData);
-  } else {
-    assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level");
-    set_num_loop_opts(LoopOptsCount);
-    set_do_inlining(Inline);
-    set_max_inline_size(MaxInlineSize);
-    set_freq_inline_size(FreqInlineSize);
-    set_do_scheduling(OptoScheduling);
-    set_do_count_invocations(false);
-    set_do_method_data_update(false);
-  }
+  set_num_loop_opts(LoopOptsCount);
+  set_do_inlining(Inline);
+  set_max_inline_size(MaxInlineSize);
+  set_freq_inline_size(FreqInlineSize);
+  set_do_scheduling(OptoScheduling);
+  set_do_count_invocations(false);
+  set_do_method_data_update(false);

   if (debug_info()->recording_non_safepoints()) {
     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
View file
@@ -979,7 +979,7 @@ MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,

   // Inline the method.
   InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry();
+  ic->set_carry_flag();

   for (int i = 0; i < argc; i++) {
     ArgToken arg = argv[i];
@@ -1209,7 +1209,7 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
   // Set the carry bit of the invocation counter to force inlining of
   // the adapter.
   InvocationCounter* ic = m->invocation_counter();
-  ic->set_carry();
+  ic->set_carry_flag();

   // Rewrite the method and set up the constant pool cache.
   objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
View file
@@ -50,7 +50,6 @@ bool Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods;
 bool   Arguments::_UseOnStackReplacement       = UseOnStackReplacement;
 bool   Arguments::_BackgroundCompilation       = BackgroundCompilation;
 bool   Arguments::_ClipInlining                = ClipInlining;
-intx   Arguments::_Tier2CompileThreshold       = Tier2CompileThreshold;

 char*  Arguments::SharedArchivePath            = NULL;
@@ -913,7 +912,6 @@ void Arguments::set_mode_flags(Mode mode) {
   AlwaysCompileLoopMethods = Arguments::_AlwaysCompileLoopMethods;
   UseOnStackReplacement    = Arguments::_UseOnStackReplacement;
   BackgroundCompilation    = Arguments::_BackgroundCompilation;
-  Tier2CompileThreshold    = Arguments::_Tier2CompileThreshold;

   // Change from defaults based on mode
   switch (mode) {
@@ -950,6 +948,31 @@ static void no_shared_spaces() {
   }
 }

+void Arguments::set_tiered_flags() {
+  if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
+    FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
+  }
+
+  if (CompilationPolicyChoice < 2) {
+    vm_exit_during_initialization(
+      "Incompatible compilation policy selected", NULL);
+  }
+
+#ifdef _LP64
+  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
+    UseCompressedOops = false;
+  }
+  if (UseCompressedOops) {
+    vm_exit_during_initialization(
+      "Tiered compilation is not supported with compressed oops yet", NULL);
+  }
+#endif
+
+  // Increase the code cache size - tiered compiles a lot more.
+  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
+    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
+  }
+}
+
 #ifndef KERNEL
 // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
 // if it's not explictly set or unset. If the user has chosen
@@ -1299,7 +1322,7 @@ void Arguments::set_ergonomics_flags() {
     // Check that UseCompressedOops can be set with the max heap size allocated
     // by ergonomics.
     if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-#ifndef COMPILER1
+#if !defined(COMPILER1) || defined(TIERED)
       if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
         FLAG_SET_ERGO(bool, UseCompressedOops, true);
       }
@@ -1933,7 +1956,6 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs* args) {
   Arguments::_UseOnStackReplacement    = UseOnStackReplacement;
   Arguments::_ClipInlining             = ClipInlining;
   Arguments::_BackgroundCompilation    = BackgroundCompilation;
-  Arguments::_Tier2CompileThreshold    = Tier2CompileThreshold;

   // Parse JAVA_TOOL_OPTIONS environment variable (if present)
   jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
@@ -2651,23 +2673,6 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
     set_mode_flags(_int);
   }

-#ifdef TIERED
-  // If we are using tiered compilation in the tiered vm then c1 will
-  // do the profiling and we don't want to waste that time in the
-  // interpreter.
-  if (TieredCompilation) {
-    ProfileInterpreter = false;
-  } else {
-    // Since we are running vanilla server we must adjust the compile threshold
-    // unless the user has already adjusted it because the default threshold assumes
-    // we will run tiered.
-    if (FLAG_IS_DEFAULT(CompileThreshold)) {
-      CompileThreshold = Tier2CompileThreshold;
-    }
-  }
-#endif // TIERED
-
 #ifndef COMPILER2
   // Don't degrade server performance for footprint
   if (FLAG_IS_DEFAULT(UseLargePages) &&
@@ -2682,7 +2687,6 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
   // Tiered compilation is undefined with C1.
   TieredCompilation = false;
 #else
   if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
     FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
   }
@@ -2939,7 +2943,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     PrintGC = true;
   }

-#if defined(_LP64) && defined(COMPILER1)
+#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
   UseCompressedOops = false;
 #endif

@@ -2970,6 +2974,16 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
     return JNI_EINVAL;
   }

+  if (TieredCompilation) {
+    set_tiered_flags();
+  } else {
+    // Check if the policy is valid. Policies 0 and 1 are valid for non-tiered setup.
+    if (CompilationPolicyChoice >= 2) {
+      vm_exit_during_initialization(
+        "Incompatible compilation policy selected", NULL);
+    }
+  }
+
 #ifndef KERNEL
   if (UseConcMarkSweepGC) {
     // Set flags for CMS and ParNew. Check UseConcMarkSweep first
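The ergonomics introduced above, restated as a minimal standalone sketch (plain fields stand in for the VM flag machinery; values and exit behavior mirror the hunks, not the full option parser):

#include <cstdio>
#include <cstdlib>

struct Flags {
  bool tiered;                  // TieredCompilation
  int  policy_choice;           // CompilationPolicyChoice
  bool policy_is_default;
  long reserved_code_cache;     // ReservedCodeCacheSize
  bool code_cache_is_default;
};

void apply_tiered_ergonomics(Flags& f) {
  if (f.tiered) {
    if (f.policy_is_default) f.policy_choice = 2;   // SimpleThresholdPolicy
    if (f.policy_choice < 2) {
      fprintf(stderr, "Incompatible compilation policy selected\n");
      exit(1);
    }
    // Tiered compiles a lot more, so double the default code cache.
    if (f.code_cache_is_default) f.reserved_code_cache *= 2;
  } else if (f.policy_choice >= 2) {
    // Policies 0 and 1 are the only valid non-tiered choices.
    fprintf(stderr, "Incompatible compilation policy selected\n");
    exit(1);
  }
}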
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,8 +288,9 @@ class Arguments : AllStatic {
   static bool _BackgroundCompilation;
   static bool _ClipInlining;
   static bool _CIDynamicCompilePriority;
-  static intx _Tier2CompileThreshold;

+  // Tiered
+  static void set_tiered_flags();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
View file
@@ -45,10 +45,17 @@ void compilationPolicy_init() {
     Unimplemented();
 #endif
     break;
+  case 2:
+#ifdef TIERED
+    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
+#else
+    Unimplemented();
+#endif
+    break;
   default:
-    fatal("CompilationPolicyChoice must be in the range: [0-1]");
+    fatal("CompilationPolicyChoice must be in the range: [0-2]");
   }
+  CompilationPolicy::policy()->initialize();
 }

 void CompilationPolicy::completed_vm_startup() {
@@ -61,16 +68,16 @@ void CompilationPolicy::completed_vm_startup() {
 // Returns true if m must be compiled before executing it
 // This is intended to force compiles for methods (usually for
 // debugging) that would otherwise be interpreted for some reason.
-bool CompilationPolicy::mustBeCompiled(methodHandle m) {
+bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
   if (m->has_compiled_code()) return false;       // already compiled
-  if (!canBeCompiled(m)) return false;
+  if (!can_be_compiled(m, comp_level)) return false;
   return !UseInterpreter ||                        // must compile all methods
          (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }

 // Returns true if m is allowed to be compiled
-bool CompilationPolicy::canBeCompiled(methodHandle m) {
+bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
@@ -83,8 +90,16 @@ bool CompilationPolicy::can_be_compiled(methodHandle m) {
   if (!AbstractInterpreter::can_be_compiled(m)) {
     return false;
   }
-
-  return !m->is_not_compilable();
+  if (comp_level == CompLevel_all) {
+    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
+  } else {
+    return !m->is_not_compilable(comp_level);
+  }
+}
+
+bool CompilationPolicy::is_compilation_enabled() {
+  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
+  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
 }

 #ifndef PRODUCT
@@ -94,7 +109,7 @@ void CompilationPolicy::print_time() {
   tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
 }

-static void trace_osr_completion(nmethod* osr_nm) {
+void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
   if (TraceOnStackReplacement) {
     if (osr_nm == NULL) tty->print_cr("compilation failed");
     else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm);
@@ -102,7 +117,35 @@ static void trace_osr_completion(nmethod* osr_nm) {
   }
 }
 #endif // !PRODUCT

-void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) {
+void NonTieredCompPolicy::initialize() {
+  // Setup the compiler thread numbers
+  if (CICompilerCountPerCPU) {
+    // Example: if CICompilerCountPerCPU is true, then we get
+    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
+    // May help big-app startup time.
+    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
+  } else {
+    _compiler_count = CICompilerCount;
+  }
+}
+
+int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
+#ifdef COMPILER1
+  if (is_c1_compile(comp_level)) {
+    return _compiler_count;
+  }
+#endif
+
+#ifdef COMPILER2
+  if (is_c2_compile(comp_level)) {
+    return _compiler_count;
+  }
+#endif
+  return 0;
+}
+
+void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
   // Make sure invocation and backedge counter doesn't overflow again right away
   // as would be the case for native methods.
@@ -114,7 +157,7 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
   assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
 }

-void CompilationPolicy::reset_counter_for_back_branch_event(methodHandle m) {
+void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
   // Delay next back-branch event but pump up invocation counter to triger
   // whole method compilation.
   InvocationCounter* i = m->invocation_counter();
@@ -128,6 +171,185 @@ void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
   b->set(b->state(), CompileThreshold / 2);
 }
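A quick check of the CICompilerCountPerCPU sizing rule quoted in the comment above (standalone; log2_intptr is reimplemented here as a stand-in for the HotSpot helper):

#include <cstdio>

static int log2_intptr(long x) { int i = -1; while (x) { x >>= 1; i++; } return i; }

int main() {
  for (int cpus = 1; cpus <= 64; cpus *= 2) {
    int threads = log2_intptr(cpus) - 1;
    if (threads < 1) threads = 1;                 // MAX2(log2(n)-1, 1)
    printf("%2d-way machine -> %d compiler thread(s)\n", cpus, threads);
  }
  return 0;                                       // 8-way prints 2, as documented
}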
//
// CounterDecay
//
// Iterates through invocation counters and decrements them. This
// is done at each safepoint.
//
class CounterDecay : public AllStatic {
static jlong _last_timestamp;
static void do_method(methodOop m) {
m->invocation_counter()->decay();
}
public:
static void decay();
static bool is_decay_needed() {
return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
}
};
jlong CounterDecay::_last_timestamp = 0;
void CounterDecay::decay() {
_last_timestamp = os::javaTimeMillis();
// This operation is going to be performed only at the end of a safepoint
// and hence GC's will not be going on, all Java mutators are suspended
// at this point and hence SystemDictionary_lock is also not needed.
assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
int nclasses = SystemDictionary::number_of_classes();
double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
CounterHalfLifeTime);
for (int i = 0; i < classes_per_tick; i++) {
klassOop k = SystemDictionary::try_get_next_class();
if (k != NULL && k->klass_part()->oop_is_instance()) {
instanceKlass::cast(k)->methods_do(do_method);
}
}
}
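A worked example of the decay pacing above, assuming the usual flag defaults (CounterDecayMinIntervalLength = 500 ms, CounterHalfLifeTime = 30 s; both are assumptions here, not values set by this changeset):

#include <cstdio>

int main() {
  const double interval_ms = 500.0;   // CounterDecayMinIntervalLength (assumed)
  const double half_life_s = 30.0;    // CounterHalfLifeTime (assumed)
  const int    nclasses    = 12000;   // loaded classes (example)
  double classes_per_tick = nclasses * (interval_ms * 1e-3 / half_life_s);
  // Each safepoint decays counters in ~200 classes, so the full set is
  // visited roughly once per half-life.
  printf("decay %.0f classes per tick\n", classes_per_tick);
  return 0;
}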
// Called at the end of the safepoint
void NonTieredCompPolicy::do_safepoint_work() {
if(UseCounterDecay && CounterDecay::is_decay_needed()) {
CounterDecay::decay();
}
}
void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
ScopeDesc* sd = trap_scope;
for (; !sd->is_top(); sd = sd->sender()) {
// Reset ICs of inlined methods, since they can trigger compilations also.
sd->method()->invocation_counter()->reset();
}
InvocationCounter* c = sd->method()->invocation_counter();
if (is_osr) {
// It was an OSR method, so bump the count higher.
c->set(c->state(), CompileThreshold);
} else {
c->reset();
}
sd->method()->backedge_counter()->reset();
}
// This method can be called by any component of the runtime to notify the policy
// that it's recommended to delay the compilation of this method.
void NonTieredCompPolicy::delay_compilation(methodOop method) {
method->invocation_counter()->decay();
method->backedge_counter()->decay();
}
void NonTieredCompPolicy::disable_compilation(methodOop method) {
method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
}
CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
return compile_queue->first();
}
bool NonTieredCompPolicy::is_mature(methodOop method) {
methodDataOop mdo = method->method_data();
assert(mdo != NULL, "Should be");
uint current = mdo->mileage_of(method);
uint initial = mdo->creation_mileage();
if (current < initial)
return true; // some sort of overflow
uint target;
if (ProfileMaturityPercentage <= 0)
target = (uint) -ProfileMaturityPercentage; // absolute value
else
target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
return (current >= initial + target);
}
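The maturity test, restated as a standalone function with a worked example (plain integers replace the methodDataOop accessors):

// Returns true once the method has accumulated 'target' more counter ticks
// than it had when its MDO was created.
static bool is_mature(unsigned current, unsigned initial,
                      long maturity_pct, long compile_threshold) {
  if (current < initial) return true;                       // counter overflow
  unsigned target = (maturity_pct <= 0)
      ? (unsigned) -maturity_pct                            // absolute count
      : (unsigned) (maturity_pct * compile_threshold / 100);
  return current >= initial + target;
}
// e.g. with ProfileMaturityPercentage = 20 and CompileThreshold = 10000,
// is_mature(12100, 10000, 20, 10000) is true: 2000 extra ticks suffice.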
nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
if (JvmtiExport::can_post_interpreter_events()) {
assert(THREAD->is_Java_thread(), "Wrong type of thread");
if (((JavaThread*)THREAD)->is_interp_only_mode()) {
// If certain JVMTI events (e.g. frame pop event) are requested then the
// thread is forced to remain in interpreted code. This is
// implemented partly by a check in the run_compiled_code
// section of the interpreter whether we should skip running
// compiled code, and partly by skipping OSR compiles for
// interpreted-only threads.
if (bci != InvocationEntryBci) {
reset_counter_for_back_branch_event(method);
return NULL;
}
}
}
if (bci == InvocationEntryBci) {
// when code cache is full, compilation gets switched off, UseCompiler
// is set to false
if (!method->has_compiled_code() && UseCompiler) {
method_invocation_event(method, CHECK_NULL);
} else {
// Force counter overflow on method entry, even if no compilation
// happened. (The method_invocation_event call does this also.)
reset_counter_for_invocation_event(method);
}
// compilation at an invocation overflow no longer goes and retries test for
// compiled method. We always run the loser of the race as interpreted.
// so return NULL
return NULL;
} else {
// counter overflow in a loop => try to do on-stack-replacement
nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
// when code cache is full, we should not compile any more...
if (osr_nm == NULL && UseCompiler) {
method_back_branch_event(method, bci, CHECK_NULL);
osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
}
if (osr_nm == NULL) {
reset_counter_for_back_branch_event(method);
return NULL;
}
return osr_nm;
}
return NULL;
}
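A runnable mini-model of the event() contract above (all types are stand-ins; the real caller is the interpreter's frequency-counter-overflow handler, which is not part of this hunk): entry overflows always return NULL, loop overflows return an OSR nmethod when one is available.

#include <cstdio>
#include <cstddef>

struct nmethod { int osr_entry_bci; };
static const int InvocationEntryBci = -1;

nmethod* policy_event(int bci, nmethod* existing_osr) {
  if (bci == InvocationEntryBci) return NULL;  // losers of the race stay interpreted
  return existing_osr;                         // backedge: OSR if code exists
}

int main() {
  nmethod osr = { 42 };
  printf("entry -> %s, backedge -> %s\n",
         policy_event(InvocationEntryBci, &osr) ? "OSR" : "interpreted",
         policy_event(42, &osr)                 ? "OSR" : "interpreted");
  return 0;
}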
#ifndef PRODUCT
void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
if (TraceInvocationCounterOverflow) {
InvocationCounter* ic = m->invocation_counter();
InvocationCounter* bc = m->backedge_counter();
ResourceMark rm;
const char* msg =
bci == InvocationEntryBci
? "comp-policy cntr ovfl @ %d in entry of "
: "comp-policy cntr ovfl @ %d in loop of ";
tty->print(msg, bci);
m->print_value();
tty->cr();
ic->print();
bc->print();
if (ProfileInterpreter) {
if (bci != InvocationEntryBci) {
methodDataOop mdo = m->method_data();
if (mdo != NULL) {
int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
tty->print_cr("back branch count = %d", count);
}
}
}
}
}
void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
if (TraceOnStackReplacement) {
ResourceMark rm;
tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
method->print_short_name(tty);
tty->print_cr(" at bci %d", bci);
}
}
#endif // !PRODUCT
 // SimpleCompPolicy - compile current method

 void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
@@ -137,59 +359,28 @@ void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
   reset_counter_for_invocation_event(m);
   const char* comment = "count";

-  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
+  if (is_compilation_enabled() && can_be_compiled(m)) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       const char* comment = "count";
-      CompileBroker::compile_method(m, InvocationEntryBci,
-                                    m, hot_count, comment, CHECK);
-    } else {
-#ifdef TIERED
-      if (nm->is_compiled_by_c1()) {
-        const char* comment = "tier1 overflow";
-        CompileBroker::compile_method(m, InvocationEntryBci,
-                                      m, hot_count, comment, CHECK);
-      }
-#endif // TIERED
+      CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
+                                    m, hot_count, comment, CHECK);
     }
   }
 }

-void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");

   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";

-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
-    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
-
-    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
+                                  m, hot_count, comment, CHECK);
+    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }

-int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci)
-{
-#ifdef TIERED
-  if (!TieredCompilation) {
-    return CompLevel_highest_tier;
-  }
-  if (/* m()->tier1_compile_done() && */
-      // QQQ HACK FIX ME set tier1_compile_done!!
-      !m()->is_native()) {
-    // Grab the nmethod so it doesn't go away while it's being queried
-    nmethod* code = m()->code();
-    if (code != NULL && code->is_compiled_by_c1()) {
-      return CompLevel_highest_tier;
-    }
-  }
-  return CompLevel_fast_compile;
-#else
-  return CompLevel_highest_tier;
-#endif // TIERED
-}
-
 // StackWalkCompPolicy - walk up stack to find a suitable method to compile

 #ifdef COMPILER2
@@ -204,7 +395,7 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
   reset_counter_for_invocation_event(m);
   const char* comment = "count";

-  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
+  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
     ResourceMark rm(THREAD);
     JavaThread *thread = (JavaThread*)THREAD;
     frame fr = thread->last_frame();
@@ -224,10 +415,6 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
     if (first->top_method()->code() != NULL) {
       // called obsolete method/nmethod -- no need to recompile
       if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
-    } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
-      // Tier1 compilation policy avaoids stack walking.
-      CompileBroker::compile_method(m, InvocationEntryBci,
-                                    m, hot_count, comment, CHECK);
     } else {
       if (TimeCompilationPolicy) accumulated_time()->start();
       GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
@@ -236,53 +423,25 @@ void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) {
       if (TimeCompilationPolicy) accumulated_time()->stop();
       assert(top != NULL, "findTopInlinableFrame returned null");
       if (TraceCompilationPolicy) top->print();
-      CompileBroker::compile_method(top->top_method(), InvocationEntryBci,
+      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
                                     m, hot_count, comment, CHECK);
     }
   }
 }

-void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
+void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");

   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";

-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
-    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
-
-    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
+  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
+    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
+    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   }
 }

-int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci)
-{
-  int comp_level = CompLevel_full_optimization;
-  if (TieredCompilation && osr_bci == InvocationEntryBci) {
-    if (CompileTheWorld) {
-      // Under CTW, the first compile is tier1, the second tier2
-      if (m->highest_tier_compile() == CompLevel_none) {
-        comp_level = CompLevel_fast_compile;
-      }
-    } else if (!m->has_osr_nmethod()) {
-      // Before tier1 is done, use invocation_count + backedge_count to
-      // compare against the threshold. After that, the counters may/will
-      // be reset, so rely on the straight interpreter_invocation_count.
-      if (m->highest_tier_compile() == CompLevel_initial_compile) {
-        if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
-          comp_level = CompLevel_fast_compile;
-        }
-      } else if (m->invocation_count() + m->backedge_count() <
-                 Tier2CompileThreshold) {
-        comp_level = CompLevel_fast_compile;
-      }
-    }
-  }
-  return comp_level;
-}
-
 RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
   // go up the stack until finding a frame that (probably) won't be inlined
   // into its caller
@@ -372,7 +531,7 @@ RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack
       // If the caller method is too big or something then we do not want to
       // compile it just to inline a method
-      if (!canBeCompiled(next_m)) {
+      if (!can_be_compiled(next_m)) {
        msg = "caller cannot be compiled";
        break;
      }
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,53 +25,91 @@
 // The CompilationPolicy selects which method (if any) should be compiled.
 // It also decides which methods must always be compiled (i.e., are never
 // interpreted).
+class CompileTask;
+class CompileQueue;

 class CompilationPolicy : public CHeapObj {
- private:
   static CompilationPolicy* _policy;
   // Accumulated time
   static elapsedTimer       _accumulated_time;
   static bool               _in_vm_startup;
-
- public:
-  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
-  virtual void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) = 0;
-  virtual int compilation_level(methodHandle m, int branch_bci) = 0;
-  void reset_counter_for_invocation_event(methodHandle method);
-  void reset_counter_for_back_branch_event(methodHandle method);
-
+public:
   static  void set_in_vm_startup(bool in_vm_startup) { _in_vm_startup = in_vm_startup; }
   static  void completed_vm_startup();
-  static  bool delayCompilationDuringStartup() { return _in_vm_startup; }
+  static  bool delay_compilation_during_startup() { return _in_vm_startup; }

-  static bool mustBeCompiled(methodHandle m);      // m must be compiled before executing it
-  static bool canBeCompiled(methodHandle m);       // m is allowed to be compiled
+  // m must be compiled before executing it
+  static bool must_be_compiled(methodHandle m, int comp_level = CompLevel_all);
+  // m is allowed to be compiled
+  static bool can_be_compiled(methodHandle m, int comp_level = CompLevel_all);
+  static bool is_compilation_enabled();

   static void set_policy(CompilationPolicy* policy) { _policy = policy; }
   static CompilationPolicy* policy()                { return _policy; }

   // Profiling
   elapsedTimer* accumulated_time() { return &_accumulated_time; }
   void print_time() PRODUCT_RETURN;
+  virtual int compiler_count(CompLevel comp_level) = 0;
+  // main notification entry, return a pointer to an nmethod if the OSR is required,
+  // returns NULL otherwise.
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0;
+  // safepoint() is called at the end of the safepoint
+  virtual void do_safepoint_work() = 0;
+  // reprofile request
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
+  // delay_compilation(method) can be called by any component of the runtime to notify the policy
+  // that it's recommended to delay the compilation of this method.
+  virtual void delay_compilation(methodOop method) = 0;
+  // disable_compilation() is called whenever the runtime decides to disable compilation of the
+  // specified method.
+  virtual void disable_compilation(methodOop method) = 0;
+  // Select task is called by CompileBroker. The queue is guaranteed to have at least one
+  // element and is locked. The function should select one and return it.
+  virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
+  // Tell the runtime if we think a given method is adequately profiled.
+  virtual bool is_mature(methodOop method) = 0;
+  // Do policy initialization
+  virtual void initialize() = 0;
 };

-class SimpleCompPolicy : public CompilationPolicy {
+// A base class for baseline policies.
+class NonTieredCompPolicy : public CompilationPolicy {
+  int _compiler_count;
+ protected:
+  static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci);
+  static void trace_osr_request(methodHandle method, nmethod* osr, int bci);
+  static void trace_osr_completion(nmethod* osr_nm);
+  void reset_counter_for_invocation_event(methodHandle method);
+  void reset_counter_for_back_branch_event(methodHandle method);
+ public:
+  NonTieredCompPolicy() : _compiler_count(0) { }
+  virtual int compiler_count(CompLevel comp_level);
+  virtual void do_safepoint_work();
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
+  virtual void delay_compilation(methodOop method);
+  virtual void disable_compilation(methodOop method);
+  virtual bool is_mature(methodOop method);
+  virtual void initialize();
+  virtual CompileTask* select_task(CompileQueue* compile_queue);
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS);
+  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
+};
+
+class SimpleCompPolicy : public NonTieredCompPolicy {
  public:
-  void method_invocation_event( methodHandle m, TRAPS);
-  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
-  int compilation_level(methodHandle m, int branch_bci);
+  virtual void method_invocation_event(methodHandle m, TRAPS);
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
 };

 // StackWalkCompPolicy - existing C2 policy

 #ifdef COMPILER2
-class StackWalkCompPolicy : public CompilationPolicy {
+class StackWalkCompPolicy : public NonTieredCompPolicy {
  public:
-  void method_invocation_event(methodHandle m, TRAPS);
-  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
-  int compilation_level(methodHandle m, int branch_bci);
+  virtual void method_invocation_event(methodHandle m, TRAPS);
+  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);

  private:
   RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
View file
@@ -1301,7 +1301,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
     bool update_trap_state = true;
     bool make_not_entrant = false;
     bool make_not_compilable = false;
-    bool reset_counters = false;
+    bool reprofile = false;
     switch (action) {
     case Action_none:
       // Keep the old code.
@@ -1328,7 +1328,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
       // had been traps taken from compiled code. This will update
       // the MDO trap history so that the next compilation will
       // properly detect hot trap sites.
-      reset_counters = true;
+      reprofile = true;
       break;
     case Action_make_not_entrant:
       // Request immediate recompilation, and get rid of the old code.
@@ -1422,7 +1422,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
       // this trap point already, run the method in the interpreter
       // for a while to exercise it more thoroughly.
       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
-        reset_counters = true;
+        reprofile = true;
       }
     }
@@ -1452,24 +1452,21 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
         if (trap_method() == nm->method()) {
           make_not_compilable = true;
         } else {
-          trap_method->set_not_compilable();
+          trap_method->set_not_compilable(CompLevel_full_optimization);
           // But give grace to the enclosing nm->method().
         }
       }
     }

-    // Reset invocation counters
-    if (reset_counters) {
-      if (nm->is_osr_method())
-        reset_invocation_counter(trap_scope, CompileThreshold);
-      else
-        reset_invocation_counter(trap_scope);
+    // Reprofile
+    if (reprofile) {
+      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
     }

     // Give up compiling
-    if (make_not_compilable && !nm->method()->is_not_compilable()) {
+    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
       assert(make_not_entrant, "consistent");
-      nm->method()->set_not_compilable();
+      nm->method()->set_not_compilable(CompLevel_full_optimization);
     }
   } // Free marked resources
@@ -1569,22 +1566,6 @@ Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, i
                              ignore_maybe_prior_recompile);
 }

-void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
-  ScopeDesc* sd = trap_scope;
-  for (; !sd->is_top(); sd = sd->sender()) {
-    // Reset ICs of inlined methods, since they can trigger compilations also.
-    sd->method()->invocation_counter()->reset();
-  }
-  InvocationCounter* c = sd->method()->invocation_counter();
-  if (top_count != _no_count) {
-    // It was an OSR method, so bump the count higher.
-    c->set(c->state(), top_count);
-  } else {
-    c->reset();
-  }
-  sd->method()->backedge_counter()->reset();
-}

 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
   // Still in Java no safepoints
View file
@@ -311,12 +311,6 @@ class Deoptimization : AllStatic {
   static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);

 private:
-  enum {
-    _no_count = -1
-  };
-  static void reset_invocation_counter(ScopeDesc* trap_scope, jint count = _no_count);
-
   static methodDataOop get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
   // Update the mdo's count and per-BCI reason bits, returning previous state:
   static ProfileData* query_update_method_data(methodDataHandle trap_mdo,
View file
@@ -65,7 +65,7 @@ jlong DTraceJSDT::activate(
       THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
         "Unable to register DTrace probes (CodeCache: no room for DTrace nmethods).");
     }
-    h_method()->set_not_compilable(CompLevel_highest_tier);
+    h_method()->set_not_compilable();
     h_method()->set_code(h_method, nm);
     probes->nmethod_at_put(count++, nm);
   }
View file
@@ -35,14 +35,7 @@ define_pd_global(bool, ProfileTraps, false);
 define_pd_global(bool, TieredCompilation, false);

 define_pd_global(intx, CompileThreshold, 0);
-define_pd_global(intx, Tier2CompileThreshold, 0);
-define_pd_global(intx, Tier3CompileThreshold, 0);
-define_pd_global(intx, Tier4CompileThreshold, 0);
-
 define_pd_global(intx, BackEdgeThreshold, 0);
-define_pd_global(intx, Tier2BackEdgeThreshold, 0);
-define_pd_global(intx, Tier3BackEdgeThreshold, 0);
-define_pd_global(intx, Tier4BackEdgeThreshold, 0);

 define_pd_global(intx, OnStackReplacePercentage, 0);
 define_pd_global(bool, ResizeTLAB, false);
@@ -1971,7 +1964,7 @@ class CommandLineFlags {
   product(uintx, TenuredGenerationSizeSupplementDecay, 2,                   \
          "Decay factor to TenuredGenerationSizeIncrement")                  \
                                                                             \
   product(uintx, MaxGCPauseMillis, max_uintx,                               \
          "Adaptive size policy maximum GC pause time goal in msec, "        \
          "or (G1 Only) the max. GC time per MMU time slice")                \
                                                                             \
@@ -2366,9 +2359,6 @@ class CommandLineFlags {
   develop(bool, EagerInitialization, false,                                 \
          "Eagerly initialize classes if possible")                          \
                                                                             \
-  product(bool, Tier1UpdateMethodData, trueInTiered,                        \
-         "Update methodDataOops in Tier1-generated code")                   \
-                                                                            \
   develop(bool, TraceMethodReplacement, false,                              \
          "Print when methods are replaced do to recompilation")             \
                                                                             \
@@ -2898,7 +2888,7 @@ class CommandLineFlags {
          "if non-zero, start verifying C heap after Nth call to "           \
          "malloc/realloc/free")                                             \
                                                                             \
   product(intx, TypeProfileWidth, 2,                                        \
          "number of receiver types to record in call/cast profile")         \
                                                                             \
   develop(intx, BciProfileWidth, 2,                                         \
@@ -3306,30 +3296,98 @@ class CommandLineFlags {
   product_pd(intx, BackEdgeThreshold,                                       \
          "Interpreter Back edge threshold at which an OSR compilation is invoked")\
                                                                             \
-  product(intx, Tier1BytecodeLimit, 10,                                     \
-         "Must have at least this many bytecodes before tier1"              \
-         "invocation counters are used")                                    \
+  product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
+         "Interpreter (tier 0) invocation notification frequency.")         \
                                                                             \
-  product_pd(intx, Tier2CompileThreshold,                                   \
-         "threshold at which a tier 2 compilation is invoked")              \
+  product(intx, Tier2InvokeNotifyFreqLog, 11,                               \
+         "C1 without MDO (tier 2) invocation notification frequency.")      \
                                                                             \
-  product_pd(intx, Tier2BackEdgeThreshold,                                  \
-         "Back edge threshold at which a tier 2 compilation is invoked")    \
+  product(intx, Tier3InvokeNotifyFreqLog, 10,                               \
+         "C1 with MDO profiling (tier 3) invocation notification "          \
+         "frequency.")                                                      \
                                                                             \
-  product_pd(intx, Tier3CompileThreshold,                                   \
-         "threshold at which a tier 3 compilation is invoked")              \
+  product(intx, Tier0BackedgeNotifyFreqLog, 10,                             \
+         "Interpreter (tier 0) invocation notification frequency.")         \
                                                                             \
-  product_pd(intx, Tier3BackEdgeThreshold,                                  \
-         "Back edge threshold at which a tier 3 compilation is invoked")    \
+  product(intx, Tier2BackedgeNotifyFreqLog, 14,                             \
+         "C1 without MDO (tier 2) invocation notification frequency.")      \
                                                                             \
-  product_pd(intx, Tier4CompileThreshold,                                   \
-         "threshold at which a tier 4 compilation is invoked")              \
+  product(intx, Tier3BackedgeNotifyFreqLog, 13,                             \
+         "C1 with MDO profiling (tier 3) invocation notification "          \
+         "frequency.")                                                      \
                                                                             \
-  product_pd(intx, Tier4BackEdgeThreshold,                                  \
-         "Back edge threshold at which a tier 4 compilation is invoked")    \
+  product(intx, Tier2CompileThreshold, 0,                                   \
+         "threshold at which tier 2 compilation is invoked")                \
+                                                                            \
+  product(intx, Tier2BackEdgeThreshold, 0,                                  \
+         "Back edge threshold at which tier 2 compilation is invoked")      \
+                                                                            \
+  product(intx, Tier3InvocationThreshold, 200,                              \
+         "Compile if number of method invocations crosses this "            \
+         "threshold")                                                       \
+                                                                            \
+  product(intx, Tier3MinInvocationThreshold, 100,                           \
+         "Minimum invocation to compile at tier 3")                         \
+                                                                            \
+  product(intx, Tier3CompileThreshold, 2000,                                \
+         "Threshold at which tier 3 compilation is invoked (invocation "    \
+         "minimum must be satisfied.")                                      \
+                                                                            \
+  product(intx, Tier3BackEdgeThreshold, 7000,                               \
+         "Back edge threshold at which tier 3 OSR compilation is invoked")  \
+                                                                            \
+  product(intx, Tier4InvocationThreshold, 5000,                             \
+         "Compile if number of method invocations crosses this "            \
+         "threshold")                                                       \
+                                                                            \
+  product(intx, Tier4MinInvocationThreshold, 600,                           \
+         "Minimum invocation to compile at tier 4")                         \
+                                                                            \
+  product(intx, Tier4CompileThreshold, 15000,                               \
+         "Threshold at which tier 4 compilation is invoked (invocation "    \
+         "minimum must be satisfied.")                                      \
+                                                                            \
+  product(intx, Tier4BackEdgeThreshold, 40000,                              \
+         "Back edge threshold at which tier 4 OSR compilation is invoked")  \
+                                                                            \
+  product(intx, Tier3DelayOn, 5,                                            \
+         "If C2 queue size grows over this amount per compiler thread "     \
+         "stop compiling at tier 3 and start compiling at tier 2")          \
+                                                                            \
+  product(intx, Tier3DelayOff, 2,                                           \
+         "If C2 queue size is less than this amount per compiler thread "   \
+         "allow methods compiled at tier 2 transition to tier 3")           \
+                                                                            \
+  product(intx, Tier3LoadFeedback, 5,                                       \
+         "Tier 3 thresholds will increase twofold when C1 queue size "      \
+         "reaches this amount per compiler thread")                         \
+                                                                            \
+  product(intx, Tier4LoadFeedback, 3,                                       \
+         "Tier 4 thresholds will increase twofold when C2 queue size "      \
+         "reaches this amount per compiler thread")                         \
+                                                                            \
+  product(intx, TieredCompileTaskTimeout, 50,                               \
+         "Kill compile task if method was not used within "                 \
+         "given timeout in milliseconds")                                   \
+                                                                            \
+  product(intx, TieredStopAtLevel, 4,                                       \
+         "Stop at given compilation level")                                 \
+                                                                            \
+  product(intx, Tier0ProfilingStartPercentage, 200,                         \
+         "Start profiling in interpreter if the counters exceed tier 3"     \
+         "thresholds by the specified percentage")                          \
+                                                                            \
+  product(intx, TieredRateUpdateMinTime, 1,                                 \
+         "Minimum rate sampling interval (in milliseconds)")                \
+                                                                            \
+  product(intx, TieredRateUpdateMaxTime, 25,                                \
+         "Maximum rate sampling interval (in milliseconds)")                \
                                                                             \
   product_pd(bool, TieredCompilation,                                       \
-         "Enable two-tier compilation")                                     \
+         "Enable tiered compilation")                                       \
+                                                                            \
+  product(bool, PrintTieredEvents, false,                                   \
+         "Print tiered events notifications")                               \
                                                                             \
   product(bool, StressTieredRuntime, false,                                 \
          "Alternate client and server compiler on compile requests")        \
View file
@@ -198,7 +198,7 @@ void print_statistics() {
   if (CountCompiledCalls) {
     print_method_invocation_histogram();
   }
-  if (ProfileInterpreter || Tier1UpdateMethodData) {
+  if (ProfileInterpreter || C1UpdateMethodData) {
     print_method_profiling_data();
   }
   if (TimeCompiler) {
View file
@@ -329,9 +329,10 @@ void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArgument
   assert(!thread->is_Compiler_thread(), "cannot compile from the compiler");
-  if (CompilationPolicy::mustBeCompiled(method)) {
+  if (CompilationPolicy::must_be_compiled(method)) {
     CompileBroker::compile_method(method, InvocationEntryBci,
-                                  methodHandle(), 0, "mustBeCompiled", CHECK);
+                                  CompLevel_initial_compile,
+                                  methodHandle(), 0, "must_be_compiled", CHECK);
   }

   // Since the call stub sets up like the interpreter we call the from_interpreted_entry
View file
@@ -430,29 +430,7 @@ bool SafepointSynchronize::is_cleanup_needed() {
   return false;
 }

-jlong CounterDecay::_last_timestamp = 0;
-
-static void do_method(methodOop m) {
-  m->invocation_counter()->decay();
-}
-
-void CounterDecay::decay() {
-  _last_timestamp = os::javaTimeMillis();
-
-  // This operation is going to be performed only at the end of a safepoint
-  // and hence GC's will not be going on, all Java mutators are suspended
-  // at this point and hence SystemDictionary_lock is also not needed.
-  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
-  int nclasses = SystemDictionary::number_of_classes();
-  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
-                                        CounterHalfLifeTime);
-  for (int i = 0; i < classes_per_tick; i++) {
-    klassOop k = SystemDictionary::try_get_next_class();
-    if (k != NULL && k->klass_part()->oop_is_instance()) {
-      instanceKlass::cast(k)->methods_do(do_method);
-    }
-  }
-}
-
 // Various cleaning tasks that should be done periodically at safepoints
 void SafepointSynchronize::do_cleanup_tasks() {
@@ -465,10 +443,9 @@ void SafepointSynchronize::do_cleanup_tasks() {
     TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
     InlineCacheBuffer::update_inline_caches();
   }
-
-  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
-    TraceTime t3("decaying counter", TraceSafepointCleanupTime);
-    CounterDecay::decay();
+  {
+    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
+    CompilationPolicy::policy()->do_safepoint_work();
   }

   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,6 +147,9 @@ public:
   static long last_non_safepoint_interval() {
     return os::javaTimeMillis() - _end_of_last_safepoint;
   }
+  static long end_of_last_safepoint() {
+    return _end_of_last_safepoint;
+  }

   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
@@ -228,15 +231,4 @@ class ThreadSafepointState: public CHeapObj {
   }
 };

-//
-// CounterDecay
-//
-// Interates through invocation counters and decrements them. This
-// is done at each safepoint.
-//
-class CounterDecay : public AllStatic {
-  static jlong _last_timestamp;
- public:
-  static void decay();
-  static bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }
-};
View file
@@ -0,0 +1,377 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_simpleThresholdPolicy.cpp.incl"
// Print an event.
void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
int bci, CompLevel level) {
bool inlinee_event = mh() != imh();
ttyLocker tty_lock;
tty->print("%lf: [", os::elapsedTime());
int invocation_count = mh->invocation_count();
int backedge_count = mh->backedge_count();
switch(type) {
case CALL:
tty->print("call");
break;
case LOOP:
tty->print("loop");
break;
case COMPILE:
tty->print("compile");
}
tty->print(" level: %d ", level);
ResourceMark rm;
char *method_name = mh->name_and_sig_as_C_string();
tty->print("[%s", method_name);
// We can have an inlinee, although currently we don't generate any notifications for the inlined methods.
if (inlinee_event) {
char *inlinee_name = imh->name_and_sig_as_C_string();
tty->print(" [%s]] ", inlinee_name);
}
else tty->print("] ");
tty->print("@%d queues: %d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
CompileBroker::queue_size(CompLevel_full_optimization));
print_specific(type, mh, imh, bci, level);
if (type != COMPILE) {
methodDataHandle mdh = mh->method_data();
int mdo_invocations = 0, mdo_backedges = 0;
if (mdh() != NULL) {
mdo_invocations = mdh->invocation_count();
mdo_backedges = mdh->backedge_count();
}
tty->print(" total: %d,%d mdo: %d,%d",
invocation_count, backedge_count,
mdo_invocations, mdo_backedges);
tty->print(" max levels: %d,%d",
mh->highest_comp_level(), mh->highest_osr_comp_level());
if (inlinee_event) {
tty->print(" inlinee max levels: %d,%d", imh->highest_comp_level(), imh->highest_osr_comp_level());
}
tty->print(" compilable: ");
bool need_comma = false;
if (!mh->is_not_compilable(CompLevel_full_profile)) {
tty->print("c1");
need_comma = true;
}
if (!mh->is_not_compilable(CompLevel_full_optimization)) {
if (need_comma) tty->print(", ");
tty->print("c2");
need_comma = true;
}
if (!mh->is_not_osr_compilable()) {
if (need_comma) tty->print(", ");
tty->print("osr");
}
tty->print(" status:");
if (mh->queued_for_compilation()) {
tty->print(" in queue");
} else tty->print(" idle");
}
tty->print_cr("]");
}
void SimpleThresholdPolicy::initialize() {
if (FLAG_IS_DEFAULT(CICompilerCount)) {
FLAG_SET_DEFAULT(CICompilerCount, 3);
}
int count = CICompilerCount;
if (CICompilerCountPerCPU) {
count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
}
set_c1_count(MAX2(count / 3, 1));
set_c2_count(MAX2(count - count / 3, 1));
}
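A worked example of the compiler-thread split above (log2_intptr reimplemented as a stand-in for the HotSpot helper):

#include <cstdio>

static int log2_intptr(long x) { int i = -1; while (x) { x >>= 1; i++; } return i; }

int main() {
  const int cpus = 16;                               // example machine
  int count = log2_intptr(cpus);                     // 4
  if (count < 1) count = 1;
  count = count * 3 / 2;                             // 6 with CICompilerCountPerCPU
  int c1 = count / 3;         if (c1 < 1) c1 = 1;    // set_c1_count -> 2
  int c2 = count - count / 3; if (c2 < 1) c2 = 1;    // set_c2_count -> 4
  printf("c1 threads: %d, c2 threads: %d\n", c1, c2);
  return 0;
}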
void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
counter->set_carry_flag();
}
}
// Set carry flags on the counters if necessary
void SimpleThresholdPolicy::handle_counter_overflow(methodOop method) {
set_carry_if_necessary(method->invocation_counter());
set_carry_if_necessary(method->backedge_counter());
methodDataOop mdo = method->method_data();
if (mdo != NULL) {
set_carry_if_necessary(mdo->invocation_counter());
set_carry_if_necessary(mdo->backedge_counter());
}
}
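// Illustrative sketch: a toy model of the carry rule above. Once a counter
// passes half of its limit, the carry bit is set and never cleared, so the
// periodic counter decay can no longer drop a hot method back below its
// thresholds. The limit here is an assumed value, not InvocationCounter::count_limit.
struct ToyCounter {
  int  count;
  bool carry;
  static const int limit = 1 << 20;        // assumed stand-in for count_limit
  ToyCounter() : count(0), carry(false) {}
  void set_carry_if_necessary() {
    if (!carry && count > limit / 2) carry = true;
  }
  int effective_count() const { return carry ? limit : count; } // carry == "infinity"
};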
// Called with the queue locked and with at least one element
CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) {
return compile_queue->first();
}
nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
int branch_bci, int bci, CompLevel comp_level, TRAPS) {
if (comp_level == CompLevel_none &&
JvmtiExport::can_post_interpreter_events()) {
assert(THREAD->is_Java_thread(), "Should be java thread");
if (((JavaThread*)THREAD)->is_interp_only_mode()) {
return NULL;
}
}
nmethod *osr_nm = NULL;
handle_counter_overflow(method());
if (method() != inlinee()) {
handle_counter_overflow(inlinee());
}
if (PrintTieredEvents) {
print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
}
if (bci == InvocationEntryBci) {
method_invocation_event(method, inlinee, comp_level, THREAD);
} else {
method_back_branch_event(method, inlinee, bci, comp_level, THREAD);
int highest_level = method->highest_osr_comp_level();
if (highest_level > comp_level) {
osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false);
}
}
return osr_nm;
}
// Check if the method can be compiled, change level if necessary
void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
// Take the given ceiling into account.
// NOTE: You can set TieredStopAtLevel to 1 to get a pure C1 version.
if ((CompLevel)TieredStopAtLevel < level) {
level = (CompLevel)TieredStopAtLevel;
}
if (level == CompLevel_none) {
return;
}
// Check if the method can be compiled, if not - try different levels.
if (!can_be_compiled(mh, level)) {
if (level < CompLevel_full_optimization && can_be_compiled(mh, CompLevel_full_optimization)) {
compile(mh, bci, CompLevel_full_optimization, THREAD);
}
if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
compile(mh, bci, CompLevel_simple, THREAD);
}
return;
}
if (bci != InvocationEntryBci && mh->is_not_osr_compilable()) {
return;
}
if (PrintTieredEvents) {
print_event(COMPILE, mh, mh, bci, level);
}
if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
submit_compile(mh, bci, level, THREAD);
}
}
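// Illustrative sketch: the retry ladder that the recursion in compile()
// implements, flattened into a single function. Level numbers follow the
// CompLevel enum (0 = none, 1 = simple, 4 = full optimization) and ok() is
// a stand-in for can_be_compiled().
int fallback_level(int requested, bool (*ok)(int)) {
  const int simple = 1, full_opt = 4;
  if (ok(requested))                        return requested; // compile as asked
  if (requested < full_opt && ok(full_opt)) return full_opt;  // escalate to C2
  if (requested == full_opt && ok(simple))  return simple;    // C2 refused, settle for C1
  return 0;                                                   // CompLevel_none: give up
}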
// Tell the broker to compile the method
void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
}
// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to the common() transition function).
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
}
case CompLevel_full_profile: {
return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
}
default:
return true;
}
}
bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
switch(cur_level) {
case CompLevel_none:
case CompLevel_limited_profile: {
return call_predicate_helper<CompLevel_none>(i, b, 1.0);
}
case CompLevel_full_profile: {
return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
}
default:
return true;
}
}
// Determine if a method is mature.
bool SimpleThresholdPolicy::is_mature(methodOop method) {
if (is_trivial(method)) return true;
methodDataOop mdo = method->method_data();
if (mdo != NULL) {
int i = mdo->invocation_count();
int b = mdo->backedge_count();
double k = ProfileMaturityPercentage / 100.0;
return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
loop_predicate_helper<CompLevel_full_profile>(i, b, k);
}
return false;
}
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel SimpleThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
CompLevel next_level = cur_level;
int i = method->invocation_count();
int b = method->backedge_count();
switch(cur_level) {
case CompLevel_none:
{
methodDataOop mdo = method->method_data();
if (mdo != NULL) {
int mdo_i = mdo->invocation_count();
int mdo_b = mdo->backedge_count();
// If we were at full profile level, would we switch to full opt?
if ((this->*p)(mdo_i, mdo_b, CompLevel_full_profile)) {
next_level = CompLevel_full_optimization;
}
}
}
if (next_level == cur_level && (this->*p)(i, b, cur_level)) {
if (is_trivial(method)) {
next_level = CompLevel_simple;
} else {
next_level = CompLevel_full_profile;
}
}
break;
case CompLevel_limited_profile:
case CompLevel_full_profile:
if (is_trivial(method)) {
next_level = CompLevel_simple;
} else {
methodDataOop mdo = method->method_data();
guarantee(mdo != NULL, "MDO should always exist");
if (mdo->would_profile()) {
int mdo_i = mdo->invocation_count();
int mdo_b = mdo->backedge_count();
if ((this->*p)(mdo_i, mdo_b, cur_level)) {
next_level = CompLevel_full_optimization;
}
} else {
next_level = CompLevel_full_optimization;
}
}
break;
}
return next_level;
}
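// Illustrative sketch: the level ladder that common() walks, with the MDO
// shortcut from the interpreter straight to full optimization omitted and
// the would_profile() check folded into the predicate flag. Level numbers
// follow the CompLevel enum (0 = none, 1 = simple, 3 = full profile,
// 4 = full optimization).
int next_level_model(int cur, bool predicate_fires, bool trivial) {
  const int none = 0, simple = 1, full_profile = 3, full_opt = 4;
  if (cur == none)
    return predicate_fires ? (trivial ? simple : full_profile) : none;
  if (cur == full_profile)          // the limited-profile case behaves the same
    return trivial ? simple : (predicate_fires ? full_opt : cur);
  return cur;                       // simple and full_opt do not transition here
}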
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel SimpleThresholdPolicy::call_event(methodOop method, CompLevel cur_level) {
CompLevel highest_level = (CompLevel)method->highest_comp_level();
if (cur_level == CompLevel_none && highest_level > cur_level) {
// TODO: We may want to try to do more extensive reprofiling in this case.
return highest_level;
}
CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
// If OSR method level is greater than the regular method level, the levels should be
// equalized by raising the regular method level in order to avoid OSRs during each
// invocation of the method.
if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
methodDataOop mdo = method->method_data();
guarantee(mdo != NULL, "MDO should not be NULL");
if (mdo->invocation_count() >= 1) {
next_level = CompLevel_full_optimization;
}
} else {
next_level = MAX2(osr_level, next_level);
}
return next_level;
}
// Determine if we should do an OSR compilation of a given method.
CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
if (cur_level == CompLevel_none) {
// If there is a live OSR method, that means we deopted to the interpreter
// for the transition.
CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
if (osr_level > CompLevel_none) {
return osr_level;
}
}
return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
}
// Handle the invocation event.
void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
CompLevel level, TRAPS) {
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
CompLevel next_level = call_event(mh(), level);
if (next_level != level) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
}
}
// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
int bci, CompLevel level, TRAPS) {
// If the method is already compiling, quickly bail out.
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
// Use the loop event as an opportunity to also check that there have been
// enough calls.
CompLevel cur_level = comp_level(mh());
CompLevel next_level = call_event(mh(), cur_level);
CompLevel next_osr_level = loop_event(mh(), level);
next_level = MAX2(next_level,
next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
bool is_compiling = false;
if (next_level != cur_level) {
compile(mh, InvocationEntryBci, next_level, THREAD);
is_compiling = true;
}
// Do the OSR version
if (!is_compiling && next_osr_level != level) {
compile(mh, bci, next_osr_level, THREAD);
}
}
}
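// Illustrative sketch: the MAX2 expression above, unfolded. An OSR request
// below full optimization may pull the regular entry level up with it, while
// a full-optimization OSR request may not; in that case the regular level
// only moves if call_event() itself asked for it.
int combined_regular_level(int call_level, int osr_level, int cur_level) {
  const int full_opt = 4;                                  // CompLevel_full_optimization
  int osr_influence = (osr_level < full_opt) ? osr_level : cur_level;
  return (call_level > osr_influence) ? call_level : osr_influence;
}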
View file
@@ -0,0 +1,107 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
class CompileTask;
class CompileQueue;
class SimpleThresholdPolicy : public CompilationPolicy {
int _c1_count, _c2_count;
// Check if the counter is big enough and, if so, set the carry flag (making the count effectively infinite).
inline void set_carry_if_necessary(InvocationCounter *counter);
// Set carry flags in the counters (in methodOop and MDO).
inline void handle_counter_overflow(methodOop method);
// Call and loop predicates determine whether a transition to a higher compilation
// level should be performed (pointers to predicate functions are passed to common()).
// Predicates also take compiler load into account.
typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
bool call_predicate(int i, int b, CompLevel cur_level);
bool loop_predicate(int i, int b, CompLevel cur_level);
// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel common(Predicate p, methodOop method, CompLevel cur_level);
// Transition functions.
// call_event determines if a method should be compiled at a different
// level with a regular invocation entry.
CompLevel call_event(methodOop method, CompLevel cur_level);
// loop_event checks if a method should be OSR compiled at a different
// level.
CompLevel loop_event(methodOop method, CompLevel cur_level);
protected:
int c1_count() const { return _c1_count; }
int c2_count() const { return _c2_count; }
void set_c1_count(int x) { _c1_count = x; }
void set_c2_count(int x) { _c2_count = x; }
enum EventType { CALL, LOOP, COMPILE };
void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
// Print policy-specific information if necessary
virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
// Check if the method can be compiled, change level if necessary
void compile(methodHandle mh, int bci, CompLevel level, TRAPS);
// Submit a given method for compilation
virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
// Simple methods are just as good compiled with C1 as with C2.
// This function tells whether a given method is such a case.
inline bool is_trivial(methodOop method);
// Predicate helpers are used by .*_predicate() methods as well as others.
// They check the given counter values, multiplied by the scale, against the thresholds.
template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
// Get a compilation level for a given method.
static CompLevel comp_level(methodOop method) {
nmethod *nm = method->code();
if (nm != NULL && nm->is_in_use()) {
return (CompLevel)nm->comp_level();
}
return CompLevel_none;
}
virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
CompLevel level, TRAPS);
virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
int bci, CompLevel level, TRAPS);
public:
SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
virtual int compiler_count(CompLevel comp_level) {
if (is_c1_compile(comp_level)) return c1_count();
if (is_c2_compile(comp_level)) return c2_count();
return 0;
}
virtual void do_safepoint_work() { }
virtual void delay_compilation(methodOop method) { }
virtual void disable_compilation(methodOop method) { }
// TODO: we should honour reprofiling requests in the future. Currently reprofiling
// would happen but not to the extent we would ideally like.
virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) { }
virtual nmethod* event(methodHandle method, methodHandle inlinee,
int branch_bci, int bci, CompLevel comp_level, TRAPS);
// Select task is called by CompileBroker. We should return a task or NULL.
virtual CompileTask* select_task(CompileQueue* compile_queue);
// Tell the runtime if we think a given method is adequately profiled.
virtual bool is_mature(methodOop method);
// Initialize: set compiler thread count
virtual void initialize();
};
View file
@@ -0,0 +1,64 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
template<CompLevel level>
bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
switch(level) {
case CompLevel_none:
case CompLevel_limited_profile:
return (i > Tier3InvocationThreshold * scale) ||
(i > Tier3MinInvocationThreshold * scale && i + b > Tier3CompileThreshold * scale);
case CompLevel_full_profile:
return (i > Tier4InvocationThreshold * scale) ||
(i > Tier4MinInvocationThreshold * scale && i + b > Tier4CompileThreshold * scale);
}
return true;
}
template<CompLevel level>
bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
switch(level) {
case CompLevel_none:
case CompLevel_limited_profile:
return b > Tier3BackEdgeThreshold * scale;
case CompLevel_full_profile:
return b > Tier4BackEdgeThreshold * scale;
}
return true;
}
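// Illustrative sketch: the call predicate above, worked through with sample
// numbers. The real Tier3* values are product flags; the constants here are
// assumptions chosen only to show the two ways the predicate can fire.
#include <cstdio>
static bool tier3_call_fires(int i, int b, double scale) {
  const int invocation_threshold     = 200;  // assumed Tier3InvocationThreshold
  const int min_invocation_threshold = 100;  // assumed Tier3MinInvocationThreshold
  const int compile_threshold        = 2000; // assumed Tier3CompileThreshold
  return (i > invocation_threshold * scale) ||
         (i > min_invocation_threshold * scale &&
          i + b > compile_threshold * scale);
}
int main() {
  std::printf("%d\n", tier3_call_fires(250, 0, 1.0));    // 1: invocations alone suffice
  std::printf("%d\n", tier3_call_fires(150, 1900, 1.0)); // 1: calls plus backedges
  std::printf("%d\n", tier3_call_fires(50, 5000, 1.0));  // 0: loop-only; see loop_predicate_helper
  return 0;
}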
// Simple methods are just as good compiled with C1 as with C2.
// Determine if a given method is such a case.
bool SimpleThresholdPolicy::is_trivial(methodOop method) {
if (method->is_accessor()) return true;
if (method->code() != NULL) {
methodDataOop mdo = method->method_data();
if (mdo != NULL && mdo->num_loops() == 0 &&
    (method->code_size() < 5 || ((mdo->num_blocks() < 4) && (method->code_size() < 15)))) {
return !mdo->would_profile();
}
}
return false;
}
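// Illustrative sketch: the triviality test above, restated as a pure function
// over its inputs. The parameter names are stand-ins for the methodOop and
// methodDataOop accessors used by the real code.
bool is_trivial_model(bool accessor, bool has_code, bool has_mdo,
                      int num_loops, int num_blocks, int code_size,
                      bool would_profile) {
  if (accessor) return true;                      // accessors are always trivial
  if (!has_code || !has_mdo) return false;        // need compiled code and an MDO
  if (num_loops != 0) return false;               // any loop disqualifies
  bool tiny  = code_size < 5;                     // nearly empty body
  bool small = num_blocks < 4 && code_size < 15;  // short, straight-line body
  return (tiny || small) && !would_profile;       // and profiling would add nothing
}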
Some files were not shown because too many files have changed in this diff.