Keith McGuigan 2010-09-22 12:54:51 -04:00
commit 9e9551fdd8
126 changed files with 8422 additions and 1977 deletions

View file

@@ -61,11 +61,9 @@ include $(GAMMADIR)/make/defs.make
 endif

 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif

 ifdef LP64
 ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

View file

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,9 @@ include $(GAMMADIR)/make/defs.make
 endif

 include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
-ifndef LP64
 ifndef CC_INTERP
 FORCE_TIERED=1
 endif
-endif

 ifdef LP64
 ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")

File diff suppressed because it is too large

View file

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -72,13 +72,11 @@ BUILDARCH=ia64
 !endif
 !endif

-!if "$(BUILDARCH)" != "amd64"
 !if "$(BUILDARCH)" != "ia64"
 !ifndef CC_INTERP
 FORCE_TIERED=1
 !endif
 !endif
-!endif

 !if "$(BUILDARCH)" == "amd64"
 Platform_arch=x86

View file

@@ -57,13 +57,12 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
 #endif
 }

-#ifdef TIERED

 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ set(_bci, G4);
   __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
-  __ delayed()->nop();
+  __ delayed()->mov_or_nop(_method->as_register(), G5);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -71,7 +70,6 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ delayed()->nop();
 }

-#endif // TIERED

 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
   if (_offset != -1) {

View file

@@ -73,6 +73,7 @@ FloatRegister FrameMap::_fpu_regs [FrameMap::nof_fpu_regs];
 // some useful constant RInfo's:
 LIR_Opr FrameMap::in_long_opr;
 LIR_Opr FrameMap::out_long_opr;
+LIR_Opr FrameMap::g1_long_single_opr;

 LIR_Opr FrameMap::F0_opr;
 LIR_Opr FrameMap::F0_double_opr;
@@ -238,6 +239,7 @@ void FrameMap::initialize() {
   in_long_opr    = as_long_opr(I0);
   out_long_opr   = as_long_opr(O0);
+  g1_long_single_opr = as_long_single_opr(G1);

   G0_opr = as_opr(G0);
   G1_opr = as_opr(G1);

View file

@@ -103,6 +103,7 @@
   static LIR_Opr in_long_opr;
   static LIR_Opr out_long_opr;
+  static LIR_Opr g1_long_single_opr;

   static LIR_Opr F0_opr;
   static LIR_Opr F0_double_opr;
@@ -113,18 +114,25 @@
  private:
   static FloatRegister _fpu_regs [nof_fpu_regs];

+  static LIR_Opr as_long_single_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+  static LIR_Opr as_long_pair_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+  }
+
  public:

 #ifdef _LP64
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+    return as_long_single_opr(r);
   }
 #else
   static LIR_Opr as_long_opr(Register r) {
-    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
+    return as_long_pair_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
     return as_opr(r);
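Aside on the new helpers: as_long_single_opr names the LP64/TIERED case where a jlong lives in one 64-bit register (both halves of double_cpu point at the same register), while as_long_pair_opr names the 32-bit case where the low word sits in r->successor(). A standalone C++ sketch of the two encodings (the register numbers are made up for illustration):

#include <cstdio>

// Mimics LIR_OprFact::double_cpu(lo, hi): just records two register numbers.
struct LongOpr { int lo; int hi; };

LongOpr as_long_single_opr(int r) { return { r, r };     }  // one 64-bit reg holds both words
LongOpr as_long_pair_opr(int r)   { return { r + 1, r }; }  // successor holds the low word

int main() {
  LongOpr g1 = as_long_single_opr(1);  // e.g. G1 on LP64
  LongOpr o0 = as_long_pair_opr(8);    // e.g. O0/O1 on 32-bit
  printf("single: lo=r%d hi=r%d\n", g1.lo, g1.hi);
  printf("pair:   lo=r%d hi=r%d\n", o0.lo, o0.hi);
  return 0;
}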

View file

@@ -1625,13 +1625,18 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,

 void LIR_Assembler::return_op(LIR_Opr result) {
   // the poll may need a register so just pick one that isn't the return register
-#ifdef TIERED
+#if defined(TIERED) && !defined(_LP64)
   if (result->type_field() == LIR_OprDesc::long_type) {
     // Must move the result to G1
     // Must leave proper result in O0,O1 and G1 (TIERED only)
     __ sllx(I0, 32, G1);          // Shift bits into high G1
     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
     __ or3 (I1, G1, G1);          // OR 64 bits into G1
+#ifdef ASSERT
+    // mangle it so any problems will show up
+    __ set(0xdeadbeef, I0);
+    __ set(0xdeadbeef, I1);
+#endif
   }
 #endif // TIERED
   __ set((intptr_t)os::get_polling_page(), L0);
@@ -2424,6 +2429,192 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
 }

+void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
+                                        ciMethodData *md, ciProfileData *data,
+                                        Register recv, Register tmp1, Label* update_done) {
+  uint i;
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    // See if the receiver is receiver[n].
+    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                          mdo_offset_bias);
+    __ ld_ptr(receiver_addr, tmp1);
+    __ verify_oop(tmp1);
+    __ cmp(recv, tmp1);
+    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+                      mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ add(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+
+  // Didn't find receiver; find next empty slot and fill it in
+  for (i = 0; i < VirtualCallData::row_limit(); i++) {
+    Label next_test;
+    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
+                      mdo_offset_bias);
+    load(recv_addr, tmp1, T_OBJECT);
+    __ br_notnull(tmp1, false, Assembler::pt, next_test);
+    __ delayed()->nop();
+    __ st_ptr(recv, recv_addr);
+    __ set(DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
+              mdo_offset_bias);
+    __ ba(false, *update_done);
+    __ delayed()->nop();
+    __ bind(next_test);
+  }
+}
+
+void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
+                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
+  md = method->method_data();
+  if (md == NULL) {
+    bailout("out of memory building methodDataOop");
+    return;
+  }
+  data = md->bci_to_data(bci);
+  assert(data != NULL,                "need data for checkcast");
+  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
+  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
+    // The offset is large so bias the mdo by the base of the slot so
+    // that the ld can use simm13s to reference the slots of the data
+    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
+  }
+}
+
+void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
+  // we always need a stub for the failure case.
+  CodeStub* stub = op->stub();
+  Register obj = op->object()->as_register();
+  Register k_RInfo = op->tmp1()->as_register();
+  Register klass_RInfo = op->tmp2()->as_register();
+  Register dst = op->result_opr()->as_register();
+  Register Rtmp1 = op->tmp3()->as_register();
+  ciKlass* k = op->klass();
+
+  if (obj == k_RInfo) {
+    k_RInfo = klass_RInfo;
+    klass_RInfo = obj;
+  }
+
+  ciMethodData* md;
+  ciProfileData* data;
+  int mdo_offset_bias = 0;
+  if (op->should_profile()) {
+    ciMethod* method = op->profiled_method();
+    assert(method != NULL, "Should have method");
+    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
+
+    Label not_null;
+    __ br_notnull(obj, false, Assembler::pn, not_null);
+    __ delayed()->nop();
+    Register mdo      = k_RInfo;
+    Register data_val = Rtmp1;
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, data_val);
+      __ add(mdo, data_val, mdo);
+    }
+    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
+    __ ldub(flags_addr, data_val);
+    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
+    __ stb(data_val, flags_addr);
+    __ ba(false, *obj_is_null);
+    __ delayed()->nop();
+    __ bind(not_null);
+  } else {
+    __ br_null(obj, false, Assembler::pn, *obj_is_null);
+    __ delayed()->nop();
+  }
+
+  Label profile_cast_failure, profile_cast_success;
+  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
+  Label *success_target = op->should_profile() ? &profile_cast_success : success;
+
+  // patching may screw with our temporaries on sparc,
+  // so let's do it before loading the class
+  if (k->is_loaded()) {
+    jobject2reg(k->constant_encoding(), k_RInfo);
+  } else {
+    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+  }
+  assert(obj != k_RInfo, "must be different");
+
+  // get object class
+  // not a safepoint as obj null check happens earlier
+  load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
+  if (op->fast_check()) {
+    assert_different_registers(klass_RInfo, k_RInfo);
+    __ cmp(k_RInfo, klass_RInfo);
+    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
+    __ delayed()->nop();
+  } else {
+    bool need_slow_path = true;
+    if (k->is_loaded()) {
+      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
+        need_slow_path = false;
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
+                                       (need_slow_path ? success_target : NULL),
+                                       failure_target, NULL,
+                                       RegisterOrConstant(k->super_check_offset()));
+    } else {
+      // perform the fast part of the checking logic
+      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
+                                       failure_target, NULL);
+    }
+    if (need_slow_path) {
+      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
+      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
+      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ delayed()->nop();
+      __ cmp(G3, 0);
+      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
+      __ delayed()->nop();
+      // Fall through to success case
+    }
+  }
+
+  if (op->should_profile()) {
+    Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
+    assert_different_registers(obj, mdo, recv, tmp1);
+    __ bind(profile_cast_success);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
+    // Jump over the failure case
+    __ ba(false, *success);
+    __ delayed()->nop();
+    // Cast failure case
+    __ bind(profile_cast_failure);
+    jobject2reg(md->constant_encoding(), mdo);
+    if (mdo_offset_bias > 0) {
+      __ set(mdo_offset_bias, tmp1);
+      __ add(mdo, tmp1, mdo);
+    }
+    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
+    __ ld_ptr(data_addr, tmp1);
+    __ sub(tmp1, DataLayout::counter_increment, tmp1);
+    __ st_ptr(tmp1, data_addr);
+    __ ba(false, *failure);
+    __ delayed()->nop();
+  }
+  __ ba(false, *success);
+  __ delayed()->nop();
+}
+
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
   LIR_Code code = op->code();
   if (code == lir_store_check) {
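The helper added above emits two linear scans over the receiver rows of a ReceiverTypeData cell: bump the count for a known receiver, else claim the first empty row. A standalone C++ model of that update (row_limit and counter_increment are stand-in values; the real ones come from VirtualCallData/DataLayout):

#include <cstdio>

enum { row_limit = 2, counter_increment = 1 };  // representative, not HotSpot's values

struct ReceiverRows {
  const void* receiver[row_limit] = {};
  long        count[row_limit]    = {};
  long        polymorphic_count   = 0;  // bumped by the caller when no row matches or is free

  void profile(const void* recv) {
    for (int i = 0; i < row_limit; i++) {        // first loop: known receiver?
      if (receiver[i] == recv) { count[i] += counter_increment; return; }
    }
    for (int i = 0; i < row_limit; i++) {        // second loop: claim an empty row
      if (receiver[i] == nullptr) {
        receiver[i] = recv;
        count[i] = counter_increment;
        return;
      }
    }
    polymorphic_count += counter_increment;      // caller's fall-through path
  }
};

int main() {
  ReceiverRows rows;
  int a, b, c;
  rows.profile(&a); rows.profile(&a); rows.profile(&b); rows.profile(&c);
  printf("row0=%ld row1=%ld poly=%ld\n", rows.count[0], rows.count[1], rows.polymorphic_count);
  return 0;
}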
@@ -2434,193 +2625,106 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     Register Rtmp1 = op->tmp3()->as_register();

     __ verify_oop(value);
     CodeStub* stub = op->stub();
-    Label done;
-    __ cmp(value, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
+    // check if it needs to be profiled
+    ciMethodData* md;
+    ciProfileData* data;
+    int mdo_offset_bias = 0;
+    if (op->should_profile()) {
+      ciMethod* method = op->profiled_method();
+      assert(method != NULL, "Should have method");
+      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
+    }
+    Label profile_cast_success, profile_cast_failure, done;
+    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
+    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
+
+    if (op->should_profile()) {
+      Label not_null;
+      __ br_notnull(value, false, Assembler::pn, not_null);
+      __ delayed()->nop();
+      Register mdo      = k_RInfo;
+      Register data_val = Rtmp1;
+      jobject2reg(md->constant_encoding(), mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, data_val);
+        __ add(mdo, data_val, mdo);
+      }
+      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
+      __ ldub(flags_addr, data_val);
+      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
+      __ stb(data_val, flags_addr);
+      __ ba(false, done);
+      __ delayed()->nop();
+      __ bind(not_null);
+    } else {
+      __ br_null(value, false, Assembler::pn, done);
+      __ delayed()->nop();
+    }
     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);

     // get instance klass
     load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
     // perform the fast part of the checking logic
-    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
+    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
     __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ delayed()->nop();
     __ cmp(G3, 0);
-    __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
+    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
     __ delayed()->nop();
-    __ bind(done);
-  } else if (op->code() == lir_checkcast) {
-    // we always need a stub for the failure case.
-    CodeStub* stub = op->stub();
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    Register Rtmp1 = op->tmp3()->as_register();
-    ciKlass* k = op->klass();
-
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    if (op->profiled_method() != NULL) {
-      ciMethod* method = op->profiled_method();
-      int bci          = op->profiled_bci();
-
-      // We need two temporaries to perform this operation on SPARC,
-      // so to keep things simple we perform a redundant test here
-      Label profile_done;
-      __ cmp(obj, 0);
-      __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
-      __ delayed()->nop();
-      // Object is null; update methodDataOop
-      ciMethodData* md = method->method_data();
-      if (md == NULL) {
-        bailout("out of memory building methodDataOop");
-        return;
-      }
-      ciProfileData* data = md->bci_to_data(bci);
-      assert(data != NULL,       "need data for checkcast");
-      assert(data->is_BitData(), "need BitData for checkcast");
-      Register mdo      = k_RInfo;
-      Register data_val = Rtmp1;
-      jobject2reg(md->constant_encoding(), mdo);
-
-      int mdo_offset_bias = 0;
-      if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
-        // The offset is large so bias the mdo by the base of the slot so
-        // that the ld can use simm13s to reference the slots of the data
-        mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
-        __ set(mdo_offset_bias, data_val);
-        __ add(mdo, data_val, mdo);
-      }
-
-      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
-      __ ldub(flags_addr, data_val);
-      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
-      __ stb(data_val, flags_addr);
-      __ bind(profile_done);
-    }
-
-    Label done;
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (k->is_loaded()) {
-      jobject2reg(k->constant_encoding(), k_RInfo);
-    } else {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    }
-    assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, false, Assembler::pn, done);
-    __ delayed()->nop();
-
-    // get object class
-    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
-    if (op->fast_check()) {
-      assert_different_registers(klass_RInfo, k_RInfo);
-      __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
-      __ delayed()->nop();
-      __ bind(done);
-    } else {
-      bool need_slow_path = true;
-      if (k->is_loaded()) {
-        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
-          need_slow_path = false;
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
-                                         (need_slow_path ? &done : NULL),
-                                         stub->entry(), NULL,
-                                         RegisterOrConstant(k->super_check_offset()));
-      } else {
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
-                                         &done, stub->entry(), NULL);
-      }
-      if (need_slow_path) {
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
-        __ delayed()->nop();
-        __ cmp(G3, 0);
-        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
-        __ delayed()->nop();
-      }
-      __ bind(done);
-    }
-    __ mov(obj, dst);
-  } else if (code == lir_instanceof) {
-    Register obj = op->object()->as_register();
-    Register k_RInfo = op->tmp1()->as_register();
-    Register klass_RInfo = op->tmp2()->as_register();
-    Register dst = op->result_opr()->as_register();
-    Register Rtmp1 = op->tmp3()->as_register();
-    ciKlass* k = op->klass();
-
-    Label done;
-    if (obj == k_RInfo) {
-      k_RInfo = klass_RInfo;
-      klass_RInfo = obj;
-    }
-    // patching may screw with our temporaries on sparc,
-    // so let's do it before loading the class
-    if (k->is_loaded()) {
-      jobject2reg(k->constant_encoding(), k_RInfo);
-    } else {
-      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
-    }
-    assert(obj != k_RInfo, "must be different");
-    __ cmp(obj, 0);
-    __ br(Assembler::equal, true, Assembler::pn, done);
-    __ delayed()->set(0, dst);
-
-    // get object class
-    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
-    if (op->fast_check()) {
-      __ cmp(k_RInfo, klass_RInfo);
-      __ br(Assembler::equal, true, Assembler::pt, done);
-      __ delayed()->set(1, dst);
-      __ set(0, dst);
-      __ bind(done);
-    } else {
-      bool need_slow_path = true;
-      if (k->is_loaded()) {
-        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
-          need_slow_path = false;
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
-                                         (need_slow_path ? &done : NULL),
-                                         (need_slow_path ? &done : NULL), NULL,
-                                         RegisterOrConstant(k->super_check_offset()),
-                                         dst);
-      } else {
-        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
-        // perform the fast part of the checking logic
-        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
-                                         &done, &done, NULL,
-                                         RegisterOrConstant(-1),
-                                         dst);
-      }
-      if (need_slow_path) {
-        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
-        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
-        __ delayed()->nop();
-        __ mov(G3, dst);
-      }
-      __ bind(done);
-    }
+    // fall through to the success case
+
+    if (op->should_profile()) {
+      Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
+      assert_different_registers(value, mdo, recv, tmp1);
+      __ bind(profile_cast_success);
+      jobject2reg(md->constant_encoding(), mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, tmp1);
+        __ add(mdo, tmp1, mdo);
+      }
+      load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
+      __ ba(false, done);
+      __ delayed()->nop();
+      // Cast failure case
+      __ bind(profile_cast_failure);
+      jobject2reg(md->constant_encoding(), mdo);
+      if (mdo_offset_bias > 0) {
+        __ set(mdo_offset_bias, tmp1);
+        __ add(mdo, tmp1, mdo);
+      }
+      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
+      __ ld_ptr(data_addr, tmp1);
+      __ sub(tmp1, DataLayout::counter_increment, tmp1);
+      __ st_ptr(tmp1, data_addr);
+      __ ba(false, *stub->entry());
+      __ delayed()->nop();
+    }
+    __ bind(done);
+  } else if (code == lir_checkcast) {
+    Register obj = op->object()->as_register();
+    Register dst = op->result_opr()->as_register();
+    Label success;
+    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
+    __ bind(success);
+    __ mov(obj, dst);
+  } else if (code == lir_instanceof) {
+    Register obj = op->object()->as_register();
+    Register dst = op->result_opr()->as_register();
+    Label success, failure, done;
+    emit_typecheck_helper(op, &success, &failure, &failure);
+    __ bind(failure);
+    __ set(0, dst);
+    __ ba(false, done);
+    __ delayed()->nop();
+    __ bind(success);
+    __ set(1, dst);
+    __ bind(done);
   } else {
     ShouldNotReachHere();
   }
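The rewritten checkcast and instanceof cases above differ only in how they wire the three labels they hand to emit_typecheck_helper: checkcast treats a null object as success (failure goes to the deopt stub), while instanceof folds both null and a failed check into the same failure label. A standalone C++ model of just that routing (the outcome enum is invented for illustration):

#include <cstdio>

enum Outcome { OBJ_IS_NULL, CHECK_FAILED, CHECK_PASSED };

// emit_typecheck_helper(op, &success, &failure, &failure)
static int lir_instanceof(Outcome o) {
  return (o == CHECK_PASSED) ? 1 : 0;
}

// emit_typecheck_helper(op, &success, op->stub()->entry(), &success)
static bool lir_checkcast_ok(Outcome o) {
  return (o == CHECK_PASSED) || (o == OBJ_IS_NULL);  // a real failure jumps to the stub
}

int main() {
  const char* names[] = { "null", "failed", "passed" };
  for (int i = 0; i < 3; i++) {
    Outcome o = (Outcome)i;
    printf("%-6s instanceof=%d checkcast_ok=%d\n", names[i], lir_instanceof(o), lir_checkcast_ok(o));
  }
  return 0;
}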
@@ -2776,9 +2880,14 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   ciProfileData* data = md->bci_to_data(bci);
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
-  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   Register mdo  = op->mdo()->as_register();
+#ifdef _LP64
+  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
+  Register tmp1 = op->tmp1()->as_register_lo();
+#else
+  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   Register tmp1 = op->tmp1()->as_register();
+#endif
   jobject2reg(md->constant_encoding(), mdo);
   int mdo_offset_bias = 0;
   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -2795,13 +2904,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {

   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
-      Tier1ProfileVirtualCalls) {
+      C1ProfileVirtualCalls) {
     assert(op->recv()->is_single_cpu(), "recv must be allocated");
     Register recv = op->recv()->as_register();
     assert_different_registers(mdo, tmp1, recv);
     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
     ciKlass* known_klass = op->known_holder();
-    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
+    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
       // We know the type that will be seen at this call site; we can
       // statically update the methodDataOop rather than needing to do
       // dynamic tests on the receiver type
@@ -2816,9 +2925,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
       Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                      VirtualCallData::receiver_count_offset(i)) -
                         mdo_offset_bias);
-      __ lduw(data_addr, tmp1);
+      __ ld_ptr(data_addr, tmp1);
       __ add(tmp1, DataLayout::counter_increment, tmp1);
-      __ stw(tmp1, data_addr);
+      __ st_ptr(tmp1, data_addr);
       return;
     }
   }
@@ -2837,70 +2946,32 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
           __ st_ptr(tmp1, recv_addr);
           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                             mdo_offset_bias);
-          __ lduw(data_addr, tmp1);
+          __ ld_ptr(data_addr, tmp1);
           __ add(tmp1, DataLayout::counter_increment, tmp1);
-          __ stw(tmp1, data_addr);
+          __ st_ptr(tmp1, data_addr);
           return;
         }
       }
     } else {
       load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
       Label update_done;
-      uint i;
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        // See if the receiver is receiver[n].
-        Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                              mdo_offset_bias);
-        __ ld_ptr(receiver_addr, tmp1);
-        __ verify_oop(tmp1);
-        __ cmp(recv, tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                          mdo_offset_bias);
-        __ lduw(data_addr, tmp1);
-        __ add(tmp1, DataLayout::counter_increment, tmp1);
-        __ stw(tmp1, data_addr);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
-
-      // Didn't find receiver; find next empty slot and fill it in
-      for (i = 0; i < VirtualCallData::row_limit(); i++) {
-        Label next_test;
-        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
-                          mdo_offset_bias);
-        load(recv_addr, tmp1, T_OBJECT);
-        __ tst(tmp1);
-        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
-        __ delayed()->nop();
-        __ st_ptr(recv, recv_addr);
-        __ set(DataLayout::counter_increment, tmp1);
-        __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
-                  mdo_offset_bias);
-        __ br(Assembler::always, false, Assembler::pt, update_done);
-        __ delayed()->nop();
-        __ bind(next_test);
-      }
+      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
       // Receiver did not match any saved receiver and there is no empty row for it.
       // Increment total counter to indicate polymorphic case.
-      __ lduw(counter_addr, tmp1);
+      __ ld_ptr(counter_addr, tmp1);
       __ add(tmp1, DataLayout::counter_increment, tmp1);
-      __ stw(tmp1, counter_addr);
+      __ st_ptr(tmp1, counter_addr);
       __ bind(update_done);
     }
   } else {
     // Static call
-    __ lduw(counter_addr, tmp1);
+    __ ld_ptr(counter_addr, tmp1);
     __ add(tmp1, DataLayout::counter_increment, tmp1);
-    __ stw(tmp1, counter_addr);
+    __ st_ptr(tmp1, counter_addr);
   }
 }

 void LIR_Assembler::align_backward_branch_target() {
   __ align(OptoLoopAlignment);
 }
@@ -3093,31 +3164,36 @@ void LIR_Assembler::membar_release() {
   // no-op on TSO
 }

-// Macro to Pack two sequential registers containing 32 bit values
+// Pack two sequential registers containing 32 bit values
 // into a single 64 bit register.
-// rs and rs->successor() are packed into rd
-// rd and rs may be the same register.
-// Note: rs and rs->successor() are destroyed.
-void LIR_Assembler::pack64( Register rs, Register rd ) {
+// src and src->successor() are packed into dst
+// src and dst may be the same register.
+// Note: src is destroyed
+void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register();
+  Register rd = dst->as_register_lo();
   __ sllx(rs, 32, rs);
   __ srl(rs->successor(), 0, rs->successor());
   __ or3(rs, rs->successor(), rd);
 }

-// Macro to unpack a 64 bit value in a register into
+// Unpack a 64 bit value in a register into
 // two sequential registers.
-// rd is unpacked into rd and rd->successor()
-void LIR_Assembler::unpack64( Register rd ) {
-  __ mov(rd, rd->successor());
-  __ srax(rd, 32, rd);
-  __ sra(rd->successor(), 0, rd->successor());
+// src is unpacked into dst and dst->successor()
+void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
+  Register rs = src->as_register_lo();
+  Register rd = dst->as_register_hi();
+  assert_different_registers(rs, rd, rd->successor());
+  __ srlx(rs, 32, rd);
+  __ srl (rs,  0, rd->successor());
 }

 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
   LIR_Address* addr = addr_opr->as_address_ptr();
   assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
-  __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
+  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
 }
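pack64/unpack64 now take LIR_Oprs, but the underlying bit movement is unchanged: shift the high word up, zero-extend the low word, OR them together; unpacking shifts back down. A standalone sketch of that arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t hi = 0x12345678, lo = 0x9abcdef0;

  // pack64: sllx(hi, 32), srl(lo, 0) to zero-extend, then or3
  uint64_t packed = ((uint64_t)hi << 32) | (uint64_t)lo;

  // unpack64: srlx by 32 recovers the high word, srl by 0 keeps the low word
  uint32_t hi2 = (uint32_t)(packed >> 32);
  uint32_t lo2 = (uint32_t)packed;

  printf("%08x %08x -> %016llx -> %08x %08x\n",
         hi, lo, (unsigned long long)packed, hi2, lo2);
  return 0;
}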
@@ -3188,11 +3264,36 @@ void LIR_Assembler::peephole(LIR_List* lir) {
           tty->cr();
         }
 #endif
-        continue;
-      }
+      } else {
         LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
         inst->insert_before(i + 1, delay_op);
+        i++;
+      }
+
+#if defined(TIERED) && !defined(_LP64)
+      // fixup the return value from G1 to O0/O1 for long returns.
+      // It's done here instead of in LIRGenerator because there's
+      // such a mismatch between the single reg and double reg
+      // calling convention.
+      LIR_OpJavaCall* callop = op->as_OpJavaCall();
+      if (callop->result_opr() == FrameMap::out_long_opr) {
+        LIR_OpJavaCall* call;
+        LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
+        for (int a = 0; a < arguments->length(); a++) {
+          arguments[a] = callop->arguments()[a];
+        }
+        if (op->code() == lir_virtual_call) {
+          call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                    callop->vtable_offset(), arguments, callop->info());
+        } else {
+          call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
+                                    callop->addr(), arguments, callop->info());
+        }
+        inst->at_put(i - 1, call);
+        inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
+                                               T_LONG, lir_patch_none, NULL));
+      }
+#endif
+
       break;
     }
   }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,9 +71,16 @@
   static bool is_single_instruction(LIR_Op* op);

+  // Record the type of the receiver in ReceiverTypeData
+  void type_profile_helper(Register mdo, int mdo_offset_bias,
+                           ciMethodData *md, ciProfileData *data,
+                           Register recv, Register tmp1, Label* update_done);
+  // Setup pointers to MDO, MDO slot, also compute offset bias to access the slot.
+  void setup_md_access(ciMethod* method, int bci,
+                       ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
  public:
-  void   pack64( Register rs, Register rd );
-  void unpack64( Register rd );
+  void   pack64(LIR_Opr src, LIR_Opr dst);
+  void unpack64(LIR_Opr src, LIR_Opr dst);

   enum {
 #ifdef _LP64

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,29 +227,37 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
   }
 }

+LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
+  LIR_Opr r;
+  if (type == T_LONG) {
+    r = LIR_OprFact::longConst(x);
+  } else if (type == T_INT) {
+    r = LIR_OprFact::intConst(x);
+  } else {
+    ShouldNotReachHere();
+  }
+  if (!Assembler::is_simm13(x)) {
+    LIR_Opr tmp = new_register(type);
+    __ move(r, tmp);
+    return tmp;
+  }
+  return r;
+}
+
-void LIRGenerator::increment_counter(address counter, int step) {
+void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   LIR_Opr pointer = new_pointer_register();
   __ move(LIR_OprFact::intptrConst(counter), pointer);
-  LIR_Address* addr = new LIR_Address(pointer, T_INT);
+  LIR_Address* addr = new LIR_Address(pointer, type);
   increment_counter(addr, step);
 }

 void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
-  LIR_Opr temp = new_register(T_INT);
+  LIR_Opr temp = new_register(addr->type());
   __ move(addr, temp);
-  LIR_Opr c = LIR_OprFact::intConst(step);
-  if (Assembler::is_simm13(step)) {
-    __ add(temp, c, temp);
-  } else {
-    LIR_Opr temp2 = new_register(T_INT);
-    __ move(c, temp2);
-    __ add(temp, temp2, temp);
-  }
+  __ add(temp, load_immediate(step, addr->type()), temp);
   __ move(temp, addr);
 }

 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   LIR_Opr o7opr = FrameMap::O7_opr;
   __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
@@ -611,7 +619,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
   left.load_item();
   right.load_item();
   LIR_Opr reg = rlock_result(x);
-
   if (x->x()->type()->is_float_kind()) {
     Bytecodes::Code code = x->op();
     __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
@@ -1040,7 +1047,9 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
   LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   LIR_Opr tmp3 = FrameMap::G4_oop_opr;
-  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(), patching_info);
+  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
+                x->direct_compare(), patching_info,
+                x->profiled_method(), x->profiled_bci());
 }
@@ -1089,12 +1098,12 @@ void LIRGenerator::do_If(If* x) {
   // add safepoint before generating condition code so it can be recomputed
   if (x->is_safepoint()) {
     // increment backedge counter if needed
-    increment_backedge_counter(state_for(x, x->state_before()));
+    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
     __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
   }

   __ cmp(lir_cond(cond), left, right);
+  // Generate branch profiling. Profiling code doesn't kill flags.
   profile_branch(x, cond);
   move_to_phi(x->state());
   if (x->x()->type()->is_float_kind()) {
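The new load_immediate exists because SPARC arithmetic immediates are 13-bit signed (simm13), so any counter step outside that range must be materialized into a scratch register before the add. A standalone check mirroring what Assembler::is_simm13 tests:

#include <cstdio>

static bool is_simm13(int x) { return -4096 <= x && x <= 4095; }  // 13-bit signed range

int main() {
  int steps[] = { 1, 4095, 4096, -4096, -4097 };
  for (int s : steps) {
    printf("step %6d: %s\n", s,
           is_simm13(s) ? "fits in the add immediate"
                        : "needs a scratch register (load_immediate path)");
  }
  return 0;
}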

View file

@@ -465,12 +465,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       break;

-#ifdef TIERED
     case counter_overflow_id:
-        // G4 contains bci
-        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
+        // G4 contains bci, G5 contains method
+        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
       break;
-#endif // TIERED

     case new_type_array_id:
     case new_object_array_id:

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,14 +34,7 @@ define_pd_global(bool, ProfileTraps, false);
 define_pd_global(bool, UseOnStackReplacement, true );
 define_pd_global(bool, TieredCompilation,     false);
 define_pd_global(intx, CompileThreshold,      1000 ); // Design center runs on 1.3.1
-define_pd_global(intx, Tier2CompileThreshold, 1500 );
-define_pd_global(intx, Tier3CompileThreshold, 2000 );
-define_pd_global(intx, Tier4CompileThreshold, 2500 );
-
 define_pd_global(intx, BackEdgeThreshold,     100000);
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);

 define_pd_global(intx, OnStackReplacePercentage, 1400 );
 define_pd_global(bool, UseTLAB,               true );

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,21 +37,8 @@ define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(bool, ProfileInterpreter,  true);
 #endif // CC_INTERP
 define_pd_global(bool, TieredCompilation, false);
-#ifdef TIERED
-define_pd_global(intx, CompileThreshold, 1000);
-define_pd_global(intx, BackEdgeThreshold, 14000);
-#else
 define_pd_global(intx, CompileThreshold, 10000);
 define_pd_global(intx, BackEdgeThreshold, 140000);
-#endif // TIERED
-
-define_pd_global(intx, Tier2CompileThreshold, 10000); // unused level
-define_pd_global(intx, Tier3CompileThreshold, 10000);
-define_pd_global(intx, Tier4CompileThreshold, 40000);
-
-define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
-define_pd_global(intx, Tier4BackEdgeThreshold, 100000);

 define_pd_global(intx, OnStackReplacePercentage, 140);
 define_pd_global(intx, ConditionalMoveLimit, 4);

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,8 +263,7 @@
   };

  private:
-
-  constantPoolCacheOop* frame::interpreter_frame_cpoolcache_addr() const;
+  constantPoolCacheOop* interpreter_frame_cpoolcache_addr() const;

 #ifndef CC_INTERP

View file

@@ -2431,3 +2431,20 @@ void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_na
   }
 #endif // CC_INTERP
 }
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+                                                        int increment, int mask,
+                                                        Register scratch1, Register scratch2,
+                                                        Condition cond, Label *where) {
+  ld(counter_addr, scratch1);
+  add(scratch1, increment, scratch1);
+  if (is_simm13(mask)) {
+    andcc(scratch1, mask, G0);
+  } else {
+    set(mask, scratch2);
+    andcc(scratch1, scratch2, G0);
+  }
+  br(cond, false, Assembler::pn, *where);
+  delayed()->st(scratch1, counter_addr);
+}
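The counters keep their state flags in the low count_shift bits, so increment_mask_and_jump adds count_increment and tests only the masked bits, which yields a runtime notification at a power-of-two frequency. A runnable model of that arithmetic (the shift and frequency values below are representative stand-ins, not taken from this commit):

#include <cstdio>
#include <cstdint>

int main() {
  const int count_shift = 3;                 // low bits hold flags/state
  const int count_increment = 1 << count_shift;
  const int notify_freq_log = 7;             // pretend Tier0InvokeNotifyFreqLog == 7
  const int mask = ((1 << notify_freq_log) - 1) << count_shift;

  uint32_t counter = 0;
  for (int invocation = 1; invocation <= 300; invocation++) {
    counter += count_increment;
    if ((counter & mask) == 0) {             // the Assembler::zero condition the code jumps on
      printf("notify runtime at invocation %d\n", invocation);  // fires at 128, 256, ...
    }
  }
  return 0;
}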

View file

@@ -278,6 +278,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void increment_mdp_data_at(Register reg, int constant,
                              Register bumped_count, Register scratch2,
                              bool decrement = false);
+  void increment_mask_and_jump(Address counter_addr,
+                               int increment, int mask,
+                               Register scratch1, Register scratch2,
+                               Condition cond, Label *where);
   void set_mdp_flag_at(int flag_constant, Register scratch);
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
@@ -321,4 +325,5 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void save_return_value(TosState state, bool is_native_call);
   void restore_return_value(TosState state, bool is_native_call);
+
 };

View file

@@ -3331,10 +3331,8 @@ void SharedRuntime::generate_deopt_blob() {
   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
 #if !defined(_LP64)
 #if defined(COMPILER2)
-  if (!TieredCompilation) {
-    // 32-bit 1-register longs return longs in G1
-    __ stx(Greturn1, saved_Greturn1_addr);
-  }
+  // 32-bit 1-register longs return longs in G1
+  __ stx(Greturn1, saved_Greturn1_addr);
 #endif
   __ set_last_Java_frame(SP, noreg);
   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
@@ -3347,24 +3345,15 @@ void SharedRuntime::generate_deopt_blob() {
   __ reset_last_Java_frame();
   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

-  // In tiered we never use C2 to compile methods returning longs so
-  // the result is where we expect it already.
 #if !defined(_LP64) && defined(COMPILER2)
   // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
-  // I0/I1 if the return value is long. In the tiered world there is
-  // a mismatch between how C1 and C2 return longs compiles and so
-  // currently compilation of methods which return longs is disabled
-  // for C2 and so is this code. Eventually C1 and C2 will do the
-  // same thing for longs in the tiered world.
-  if (!TieredCompilation) {
-    Label not_long;
-    __ cmp(O0,T_LONG);
-    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
-    __ delayed()->nop();
-    __ ldd(saved_Greturn1_addr,I0);
-    __ bind(not_long);
-  }
+  // I0/I1 if the return value is long.
+  Label not_long;
+  __ cmp(O0,T_LONG);
+  __ br(Assembler::notEqual, false, Assembler::pt, not_long);
+  __ delayed()->nop();
+  __ ldd(saved_Greturn1_addr,I0);
+  __ bind(not_long);
 #endif
   __ ret();
   __ delayed()->restore();

View file

@@ -1609,7 +1609,7 @@ class StubGenerator: public StubCodeGenerator {
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.

     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
-    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_32_bytes;
+    Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;

     int shift = -1;
     switch (t) {
@@ -1635,8 +1635,8 @@ class StubGenerator: public StubCodeGenerator {
     }
     if (t == T_SHORT) {
       // Zero extend value
-      __ sethi(0xffff0000, O3);
-      __ andn(value, O3, value);
+      __ sllx(value, 48, value);
+      __ srlx(value, 48, value);
     }
     if (t == T_BYTE || t == T_SHORT) {
       __ sllx(value, 16, O3);
@@ -1644,8 +1644,8 @@ class StubGenerator: public StubCodeGenerator {
     }

     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
-    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_4_bytes); // use unsigned cmp
-    __ delayed()->andcc(count, 1<<shift, G0);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
+    __ delayed()->andcc(count, 1, G0);

     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
       // align source address at 4 bytes address boundary
@@ -1683,12 +1683,6 @@ class StubGenerator: public StubCodeGenerator {
     }
 #endif

-    Label L_check_fill_8_bytes;
-    // Fill 32-byte chunks
-    __ subcc(count, 8 << shift, count);
-    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
-    __ delayed()->nop();
-
     if (t == T_INT) {
       // Zero extend value
       __ srl(value, 0, value);
@@ -1698,7 +1692,13 @@ class StubGenerator: public StubCodeGenerator {
       __ or3(value, O3, value);
     }

-    Label L_fill_32_bytes_loop;
+    Label L_check_fill_8_bytes;
+    // Fill 32-byte chunks
+    __ subcc(count, 8 << shift, count);
+    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
+    __ delayed()->nop();
+
+    Label L_fill_32_bytes_loop, L_fill_4_bytes;
     __ align(16);
     __ BIND(L_fill_32_bytes_loop);
@@ -1730,6 +1730,9 @@ class StubGenerator: public StubCodeGenerator {

     // fill trailing 4 bytes
     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
+    if (t == T_INT) {
+      __ BIND(L_fill_elements);
+    }
     __ BIND(L_fill_4_bytes);
     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
     if (t == T_BYTE || t == T_SHORT) {
@@ -1762,7 +1765,48 @@ class StubGenerator: public StubCodeGenerator {
     }

     __ BIND(L_exit);
     __ retl();
-    __ delayed()->mov(G0, O0); // return 0
+    __ delayed()->nop();
+
+    // Handle copies less than 8 bytes.  Int is handled elsewhere.
+    if (t == T_BYTE) {
+      __ BIND(L_fill_elements);
+      Label L_fill_2, L_fill_4;
+      // in delay slot __ andcc(count, 1, G0);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
+      __ delayed()->andcc(count, 2, G0);
+      __ stb(value, to, 0);
+      __ inc(to, 1);
+      __ BIND(L_fill_2);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
+      __ delayed()->andcc(count, 4, G0);
+      __ stb(value, to, 0);
+      __ stb(value, to, 1);
+      __ inc(to, 2);
+      __ BIND(L_fill_4);
+      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
+      __ delayed()->nop();
+      __ stb(value, to, 0);
+      __ stb(value, to, 1);
+      __ stb(value, to, 2);
+      __ retl();
+      __ delayed()->stb(value, to, 3);
+    }
+
+    if (t == T_SHORT) {
+      Label L_fill_2;
+      __ BIND(L_fill_elements);
+      // in delay slot __ andcc(count, 1, G0);
+      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
+      __ delayed()->andcc(count, 2, G0);
+      __ sth(value, to, 0);
+      __ inc(to, 2);
+      __ BIND(L_fill_2);
+      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
+      __ delayed()->nop();
+      __ sth(value, to, 0);
+      __ retl();
+      __ delayed()->sth(value, to, 2);
+    }
+
     return start;
   }
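The new L_fill_elements tails use the low bits of count (1, 2, 4) to select which residual stores run, with the final store issued in the retl delay slot. A standalone C model of the T_BYTE variant (not HotSpot code; buffer and value are invented for the demo):

#include <cstdio>
#include <cstring>

static void fill_tail_bytes(unsigned char* to, unsigned char value, int count) {
  if (count & 1) { to[0] = value; to += 1; }                           // odd byte
  if (count & 2) { to[0] = value; to[1] = value; to += 2; }            // byte pair
  if (count & 4) { to[0] = value; to[1] = value; to[2] = value; to[3] = value; }
}

int main() {
  for (int count = 0; count < 8; count++) {   // the stub only takes this path for < 8 elements
    unsigned char buf[8];
    memset(buf, 0, sizeof(buf));
    fill_tail_bytes(buf, 0xAB, count);
    printf("count=%d:", count);
    for (unsigned char b : buf) printf(" %02x", b);
    printf("\n");
  }
  return 0;
}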

View file

@@ -43,7 +43,7 @@ enum /* platform_dependent_constants */ {

 // MethodHandles adapters
 enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 6000
+  method_handles_adapters_code_size = 12000
 };

 class Sparc {

View file

@@ -294,10 +294,41 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
 // ??: invocation counter
 //
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
+  if (TieredCompilation) {
+    const int increment = InvocationCounter::count_increment;
+    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
+    Label no_mdo, done;
+    if (ProfileInterpreter) {
+      // If no method data exists, go to profile_continue.
+      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
+      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
+      __ delayed()->nop();
+      // Increment counter
+      Address mdo_invocation_counter(G4_scratch,
+                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
+                                     in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
+                                 G3_scratch, Lscratch,
+                                 Assembler::zero, overflow);
+      __ ba(false, done);
+      __ delayed()->nop();
+    }
+    // Increment counter in methodOop
+    __ bind(no_mdo);
+    Address invocation_counter(Lmethod,
+                               in_bytes(methodOopDesc::invocation_counter_offset()) +
+                               in_bytes(InvocationCounter::counter_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask,
+                               G3_scratch, Lscratch,
+                               Assembler::zero, overflow);
+    __ bind(done);
+  } else {
   // Update standard invocation counters
   __ increment_invocation_counter(O0, G3_scratch);
   if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
-    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
+    Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
     __ ld(interpreter_invocation_counter, G3_scratch);
     __ inc(G3_scratch);
     __ st(G3_scratch, interpreter_invocation_counter);
@@ -305,9 +336,8 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
   if (ProfileInterpreter && profile_method != NULL) {
     // Test to see if we should create a method data oop
-    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
-    __ sethi(profile_limit, G3_scratch);
-    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
+    AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
+    __ load_contents(profile_limit, G3_scratch);
     __ cmp(O0, G3_scratch);
     __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
     __ delayed()->nop();
@@ -316,12 +346,12 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
     __ test_method_data_pointer(*profile_method);
   }

-  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
-  __ sethi(invocation_limit, G3_scratch);
-  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
+  AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
+  __ load_contents(invocation_limit, G3_scratch);
   __ cmp(O0, G3_scratch);
   __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
   __ delayed()->nop();
+  }
 }
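load_contents folds the former two-instruction sequence into one call: sethi materializes the high 22 bits of the address and the dependent ld supplies the low 10 bits via its immediate. A standalone model of that hi/lo split (the sample address is arbitrary):

#include <cstdio>
#include <cstdint>

int main() {
  uint32_t addr = 0x12345678;
  uint32_t hi22  = addr & ~0x3ffu;   // sethi %hi(addr): bits 31..10
  uint32_t low10 = addr &  0x3ffu;   // %lo(addr): bits 9..0, the ld's displacement
  printf("addr=%08x hi=%08x lo=%03x reassembled=%08x\n",
         addr, hi22, low10, hi22 | low10);
  return 0;
}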

View file

@@ -1580,6 +1580,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
const Register O0_cur_bcp = O0;
__ mov( Lbcp, O0_cur_bcp );
bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
if ( increment_invocation_counter_for_backward_branches ) {
Label Lforward;
@@ -1588,6 +1589,72 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Bump bytecode pointer by displacement (take the branch)
__ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
if (TieredCompilation) {
Label Lno_mdo, Loverflow;
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// If no method data exists, go to profile_continue.
__ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
__ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
__ delayed()->nop();
// Increment backedge counter in the MDO
Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
Assembler::notZero, &Lforward);
__ ba(false, Loverflow);
__ delayed()->nop();
}
// If there's no MDO, increment counter in methodOop
__ bind(Lno_mdo);
Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
Assembler::notZero, &Lforward);
__ bind(Loverflow);
// notify point for loop, pass branch bytecode
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
// Was an OSR adapter generated?
// O0 = osr nmethod
__ br_null(O0, false, Assembler::pn, Lforward);
__ delayed()->nop();
// Has the nmethod been invalidated already?
__ ld(O0, nmethod::entry_bci_offset(), O2);
__ cmp(O2, InvalidOSREntryBci);
__ br(Assembler::equal, false, Assembler::pn, Lforward);
__ delayed()->nop();
// migrate the interpreter frame off of the stack
__ mov(G2_thread, L7);
// save nmethod
__ mov(O0, L6);
__ set_last_Java_frame(SP, noreg);
__ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
__ reset_last_Java_frame();
__ mov(L7, G2_thread);
// move OSR nmethod to I1
__ mov(L6, I1);
// OSR buffer to I0
__ mov(O0, I0);
// remove the interpreter frame
__ restore(I5_savedSP, 0, SP);
// Jump to the osr code.
__ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
__ jmp(O2, G0);
__ delayed()->nop();
} else {
// Update Backedge branch separately from invocations
const Register G4_invoke_ctr = G4;
__ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
@@ -1601,6 +1668,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
}
}
}
__ bind(Lforward);
} else
@@ -4993,19 +4993,22 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
ttyLocker ttyl;
tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
if ((WizardMode || Verbose) && PrintMiscellaneous) {
tty->cr();
findpc(eip);
tty->cr();
}
#endif
tty->print_cr("rax, = 0x%08x", rax);
tty->print_cr("rbx, = 0x%08x", rbx);
tty->print_cr("rax = 0x%08x", rax);
tty->print_cr("rbx = 0x%08x", rbx);
tty->print_cr("rcx = 0x%08x", rcx);
tty->print_cr("rdx = 0x%08x", rdx);
tty->print_cr("rdi = 0x%08x", rdi);
tty->print_cr("rsi = 0x%08x", rsi);
tty->print_cr("rbp, = 0x%08x", rbp);
tty->print_cr("rbp = 0x%08x", rbp);
tty->print_cr("rsp = 0x%08x", rsp);
BREAKPOINT;
assert(false, "start up GDB");
}
} else {
ttyLocker ttyl;
@@ -7677,11 +7680,19 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
movptr(tmp, ExternalAddress((address) delayed_value_addr));
#ifdef ASSERT
Label L;
{ Label L;
testptr(tmp, tmp);
if (WizardMode) {
jcc(Assembler::notZero, L);
char* buf = new char[40];
sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
stop(buf);
} else {
jccb(Assembler::notZero, L);
hlt();
}
bind(L);
}
#endif
if (offset != 0)
@@ -68,19 +68,15 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
__ jmp(_continuation);
}
#ifdef TIERED
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_method->as_register(), 1);
ce->store_parameter(_bci, 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ jmp(_continuation);
}
#endif // TIERED
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
@@ -1613,40 +1613,35 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
__ bind(*op->stub()->continuation());
}
void LIR_Assembler::type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done) {
uint i;
for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
__ jccb(Assembler::notEqual, next_test);
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
__ addptr(data_addr, DataLayout::counter_increment);
__ jmp(*update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
__ cmpptr(recv_addr, (intptr_t)NULL_WORD);
__ jccb(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
__ jmp(*update_done);
__ bind(next_test);
}
}
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
LIR_Code code = op->code();
if (code == lir_store_check) {
Register value = op->object()->as_register();
Register array = op->array()->as_register();
Register k_RInfo = op->tmp1()->as_register();
Register klass_RInfo = op->tmp2()->as_register();
Register Rtmp1 = op->tmp3()->as_register();
CodeStub* stub = op->stub();
Label done;
__ cmpptr(value, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
add_debug_info_for_null_check_here(op->info_for_exception());
__ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
__ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
// get instance klass
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(k_RInfo);
// result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ bind(done);
} else if (op->code() == lir_checkcast) {
// we always need a stub for the failure case.
CodeStub* stub = op->stub();
Register obj = op->object()->as_register();
@@ -1656,7 +1651,27 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
ciKlass* k = op->klass();
Register Rtmp1 = noreg;
Label done;
// check if it needs to be profiled
ciMethodData* md;
ciProfileData* data;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
int bci = op->profiled_bci();
md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
Label profile_cast_success, profile_cast_failure;
Label *success_target = op->should_profile() ? &profile_cast_success : success;
Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
if (obj == k_RInfo) {
k_RInfo = dst;
} else if (obj == klass_RInfo) {
@@ -1675,41 +1690,29 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
} else {
#ifdef _LP64
__ movoop(k_RInfo, k->constant_encoding());
#else
k_RInfo = noreg;
#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->profiled_method() != NULL) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
Label profile_done;
__ jcc(Assembler::notEqual, profile_done);
// Object is null; update methodDataOop
ciMethodData* md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL, "need data for checkcast");
assert(data->is_BitData(), "need BitData for checkcast");
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->should_profile()) {
Label not_null;
__ jccb(Assembler::notEqual, not_null);
// Object is null; update MDO and exit
Register mdo = klass_RInfo;
__ movoop(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
__ jmp(done);
__ bind(profile_done);
__ jmp(*obj_is_null);
__ bind(not_null);
} else {
__ jcc(Assembler::equal, done);
__ jcc(Assembler::equal, *obj_is_null);
}
__ verify_oop(obj);
if (op->fast_check()) {
// get object classo
// get object class
// not a safepoint as obj null check happens earlier
if (k->is_loaded()) {
#ifdef _LP64
@@ -1719,10 +1722,9 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
#endif // _LP64
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::notEqual, *stub->entry());
__ bind(done);
__ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump
} else {
// get object class
// not a safepoint as obj null check happens earlier
@@ -1735,17 +1737,18 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *stub->entry());
__ jcc(Assembler::notEqual, *failure_target);
// successful cast, fall through to profile or jump
} else {
// See if we get an immediate positive hit
__ jcc(Assembler::equal, done);
__ jcc(Assembler::equal, *success_target);
// check for self
#ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo);
#else
__ cmpoop(klass_RInfo, k->constant_encoding());
#endif // _LP64
__ jcc(Assembler::equal, done);
__ jcc(Assembler::equal, *success_target);
__ push(klass_RInfo);
#ifdef _LP64
@@ -1758,12 +1761,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ pop(klass_RInfo);
// result is a boolean
__ cmpl(klass_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ jcc(Assembler::equal, *failure_target);
// successful cast, fall through to profile or jump
}
__ bind(done);
} else {
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
@@ -1772,95 +1775,133 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ pop(k_RInfo);
// result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ jcc(Assembler::equal, *failure_target);
__ bind(done);
// successful cast, fall through to profile or jump
}
}
if (op->should_profile()) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ movoop(mdo, md->constant_encoding());
__ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
Label update_done;
type_profile_helper(mdo, md, data, recv, success);
__ jmp(*success);
__ bind(profile_cast_failure);
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ subptr(counter_addr, DataLayout::counter_increment);
__ jmp(*failure);
}
__ jmp(*success);
}
if (dst != obj) {
__ mov(dst, obj);
}
} else if (code == lir_instanceof) {
Register obj = op->object()->as_register();
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
LIR_Code code = op->code();
if (code == lir_store_check) {
Register value = op->object()->as_register();
Register array = op->array()->as_register();
Register k_RInfo = op->tmp1()->as_register();
Register klass_RInfo = op->tmp2()->as_register();
Register dst = op->result_opr()->as_register();
ciKlass* k = op->klass();
Label done;
Label zero;
Label one;
if (obj == k_RInfo) {
k_RInfo = klass_RInfo;
klass_RInfo = obj;
Register Rtmp1 = op->tmp3()->as_register();
CodeStub* stub = op->stub();
// check if it needs to be profiled
ciMethodData* md;
ciProfileData* data;
if (op->should_profile()) {
ciMethod* method = op->profiled_method();
assert(method != NULL, "Should have method");
int bci = op->profiled_bci();
md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
data = md->bci_to_data(bci);
assert(data != NULL, "need data for type check");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
// patching may screw with our temporaries on sparc,
// so let's do it before loading the class
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
} }
assert(obj != k_RInfo, "must be different");
Label profile_cast_success, profile_cast_failure, done;
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
__ verify_oop(obj);
if (op->fast_check()) {
__ cmpptr(obj, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, zero);
// get object class
// not a safepoint as obj null check happens earlier
if (LP64_ONLY(false &&) k->is_loaded()) {
NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
k_RInfo = noreg;
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::equal, one);
} else {
// get object class
// not a safepoint as obj null check happens earlier
__ cmpptr(obj, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, zero);
__ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
#ifndef _LP64
if (k->is_loaded()) {
// See if we get an immediate positive hit
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
__ jcc(Assembler::equal, one);
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
// check for self
__ cmpoop(klass_RInfo, k->constant_encoding());
__ jcc(Assembler::equal, one);
__ push(klass_RInfo);
__ pushoop(k->constant_encoding());
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(dst);
__ jmp(done);
__ cmpptr(value, (int32_t)NULL_WORD);
if (op->should_profile()) {
Label not_null;
__ jccb(Assembler::notEqual, not_null);
// Object is null; update MDO and exit
Register mdo = klass_RInfo;
__ movoop(mdo, md->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
__ orl(data_addr, header_bits);
__ jmp(done);
__ bind(not_null);
} else {
__ jcc(Assembler::equal, done);
} }
}
else // next block is unconditional if LP64:
#endif // LP64
{
assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
add_debug_info_for_null_check_here(op->info_for_exception());
__ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
__ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
// get instance klass
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL);
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(dst);
__ jmp(done);
__ pop(k_RInfo);
// result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *failure_target);
// fall through to the success case
if (op->should_profile()) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ movoop(mdo, md->constant_encoding());
__ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
Label update_done;
type_profile_helper(mdo, md, data, recv, &done);
__ jmpb(done);
__ bind(profile_cast_failure);
__ movoop(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
__ subptr(counter_addr, DataLayout::counter_increment);
__ jmp(*stub->entry());
} }
__ bind(done);
} else
if (code == lir_checkcast) {
Register obj = op->object()->as_register();
Register dst = op->result_opr()->as_register();
Label success;
emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
__ bind(success);
if (dst != obj) {
__ mov(dst, obj);
}
__ bind(zero);
} else
if (code == lir_instanceof) {
Register obj = op->object()->as_register();
Register dst = op->result_opr()->as_register();
Label success, failure, done;
emit_typecheck_helper(op, &success, &failure, &failure);
__ bind(failure);
__ xorptr(dst, dst);
__ jmp(done);
__ bind(one);
__ jmpb(done);
__ bind(success);
__ movptr(dst, 1);
__ bind(done);
} else {
@@ -1922,7 +1963,6 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
}
}
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
Assembler::Condition acond, ncond;
switch (condition) {
@@ -2014,11 +2054,11 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
jint c = right->as_constant_ptr()->as_jint();
switch (code) {
case lir_add: {
__ increment(lreg, c);
__ incrementl(lreg, c);
break;
}
case lir_sub: {
__ decrement(lreg, c);
__ decrementl(lreg, c);
break;
}
default: ShouldNotReachHere();
@@ -3253,13 +3293,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
Tier1ProfileVirtualCalls) {
C1ProfileVirtualCalls) {
assert(op->recv()->is_single_cpu(), "recv must be allocated");
Register recv = op->recv()->as_register();
assert_different_registers(mdo, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
// We know the type that will be seen at this call site; we can
// statically update the methodDataOop rather than needing to do
// dynamic tests on the receiver type
@@ -3272,7 +3312,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addl(data_addr, DataLayout::counter_increment);
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
@@ -3288,49 +3328,26 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ movoop(recv_addr, known_klass->constant_encoding());
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addl(data_addr, DataLayout::counter_increment);
__ addptr(data_addr, DataLayout::counter_increment);
return;
}
}
} else {
__ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
Label update_done;
uint i;
type_profile_helper(mdo, md, data, recv, &update_done);
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
__ jcc(Assembler::notEqual, next_test);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addl(data_addr, DataLayout::counter_increment);
__ jmp(update_done);
__ bind(next_test);
}
// Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ cmpptr(recv_addr, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, next_test);
__ movptr(recv_addr, recv);
__ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
__ jmp(update_done);
__ bind(next_test);
}
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ addl(counter_addr, DataLayout::counter_increment);
__ addptr(counter_addr, DataLayout::counter_increment);
__ bind(update_done);
}
} else {
// Static call
__ addl(counter_addr, DataLayout::counter_increment);
__ addptr(counter_addr, DataLayout::counter_increment);
}
}
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
Unimplemented();
}
@@ -42,7 +42,10 @@
// method.
Address as_Address(LIR_Address* addr, Register tmp);
// Record the type of the receiver in ReceiverTypeData
void type_profile_helper(Register mdo,
ciMethodData *md, ciProfileData *data,
Register recv, Label* update_done);
public:
void store_parameter(Register r, int offset_from_esp_in_words);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -182,10 +182,22 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
}
void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
LIR_Opr r;
if (type == T_LONG) {
r = LIR_OprFact::longConst(x);
} else if (type == T_INT) {
r = LIR_OprFact::intConst(x);
} else {
ShouldNotReachHere();
}
return r;
}
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
LIR_Opr pointer = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, T_INT);
LIR_Address* addr = new LIR_Address(pointer, type);
increment_counter(addr, step);
}
@@ -194,7 +206,6 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
__ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
__ cmp_mem_int(condition, base, disp, c, info);
}
@@ -1145,10 +1156,10 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
patching_info = state_for(x, x->state_before());
}
obj.load_item();
LIR_Opr tmp = new_register(objectType);
__ instanceof(reg, obj.result(), x->klass(),
tmp, new_register(objectType), LIR_OprFact::illegalOpr,
x->direct_compare(), patching_info);
new_register(objectType), new_register(objectType),
!x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
@@ -1188,8 +1199,7 @@ void LIRGenerator::do_If(If* x) {
// add safepoint before generating condition code so it can be recomputed
if (x->is_safepoint()) {
// increment backedge counter if needed
increment_backedge_counter(state_for(x, x->state_before()));
increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
set_no_result(x);
@@ -1197,6 +1207,7 @@ void LIRGenerator::do_If(If* x) {
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
__ cmp(lir_cond(cond), left, right);
// Generate branch profiling. Profiling code doesn't kill flags.
profile_branch(x, cond);
move_to_phi(x->state());
if (x->x()->type()->is_float_kind()) {
@@ -1068,15 +1068,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#ifdef TIERED
case counter_overflow_id:
{
Register bci = rax;
Register bci = rax, method = rbx;
__ enter();
OopMap* map = save_live_registers(sasm, 2);
OopMap* map = save_live_registers(sasm, 3);
// Retrieve bci
__ movl(bci, Address(rbp, 2*BytesPerWord));
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
// And a pointer to the methodOop
__ movptr(method, Address(rbp, 3*BytesPerWord));
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
@@ -1084,7 +1085,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ ret(0);
}
break;
#endif // TIERED
case new_type_array_id:
case new_object_array_id:
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,14 +35,7 @@ define_pd_global(bool, ProfileTraps, false);
define_pd_global(bool, UseOnStackReplacement, true );
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 1500 );
define_pd_global(intx, Tier2CompileThreshold, 1500 );
define_pd_global(intx, Tier3CompileThreshold, 2500 );
define_pd_global(intx, Tier4CompileThreshold, 4500 );
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,19 +39,8 @@ define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, false);
#ifdef TIERED
define_pd_global(intx, CompileThreshold, 1000);
#else
define_pd_global(intx, CompileThreshold, 10000);
#endif // TIERED
define_pd_global(intx, Tier2CompileThreshold, 10000);
define_pd_global(intx, Tier3CompileThreshold, 20000);
define_pd_global(intx, Tier4CompileThreshold, 40000);
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
@@ -1397,3 +1397,17 @@ void InterpreterMacroAssembler::notify_method_exit(
NOT_CC_INTERP(pop(state));
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}
@@ -185,6 +185,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
@@ -1480,3 +1480,17 @@ void InterpreterMacroAssembler::notify_method_exit(
NOT_CC_INTERP(pop(state));
}
}
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where) {
if (!preloaded) {
movl(scratch, counter_addr);
}
incrementl(scratch, increment);
movl(counter_addr, scratch);
andl(scratch, mask);
jcc(cond, *where);
}
@@ -194,6 +194,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr,
int increment, int mask,
Register scratch, bool preloaded,
Condition cond, Label* where);
void set_mdp_flag_at(Register mdp_in, int flag_constant);
void test_mdp_data_at(Register mdp_in, int offset, Register value,
Register test_value_out,
@@ -27,6 +27,14 @@
#define __ _masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
address interpreted_entry) {
// Just before the actual machine code entry point, allocate space
@@ -64,6 +72,7 @@ static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) {
// Verify that argslot lies within (rsp, rbp].
Label L_ok, L_bad;
BLOCK_COMMENT("{ verify_argslot");
__ cmpptr(argslot_reg, rbp);
__ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, argslot_reg);
@@ -71,6 +80,7 @@ static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
BLOCK_COMMENT("} verify_argslot");
}
#endif
@@ -80,16 +90,21 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// rbx: methodOop
// rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
// rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
// rdx: garbage temp, blown away
// rdx, rdi: garbage temp, blown away
Register rbx_method = rbx;
Register rcx_recv = rcx;
Register rax_mtype = rax;
Register rdx_temp = rdx;
Register rdi_temp = rdi;
// emit WrongMethodType path first, to enable jccb back-branch from main path
Label wrong_method_type;
__ bind(wrong_method_type);
Label invoke_generic_slow_path;
assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
__ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
__ jcc(Assembler::notEqual, invoke_generic_slow_path);
__ push(rax_mtype); // required mtype
__ push(rcx_recv); // bad mh (1st stacked argument)
__ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
@@ -106,17 +121,68 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
tem = rax_mtype; // in case there is another indirection
}
}
Register rbx_temp = rbx_method; // done with incoming methodOop
// given the MethodType, find out where the MH argument is buried
__ movptr(rdx_temp, Address(rax_mtype,
__ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
__ movl(rdx_temp, Address(rdx_temp,
__ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
__ movptr(rcx_recv, __ argument_address(rdx_temp));
__ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
__ jump_to_method_handle_entry(rcx_recv, rdx_temp);
__ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
Register rdx_vmslots = rdx_temp;
__ movl(rdx_vmslots, Address(rdx_temp,
__ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
__ movptr(rcx_recv, __ argument_address(rdx_vmslots));
trace_method_handle(_masm, "invokeExact");
__ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
__ jump_to_method_handle_entry(rcx_recv, rdi_temp);
// for invokeGeneric (only), apply argument and result conversions on the fly
__ bind(invoke_generic_slow_path);
#ifdef ASSERT
{ Label L;
__ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
__ jcc(Assembler::equal, L);
__ stop("bad methodOop::intrinsic_id");
__ bind(L);
}
#endif //ASSERT
Register rbx_temp = rbx_method; // don't need it now
// make room on the stack for another pointer:
Register rcx_argslot = rcx_recv;
__ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
rcx_argslot, rbx_temp, rdx_temp);
// load up an adapter from the calling type (Java weaves this)
__ movptr(rdx_temp, Address(rax_mtype,
__ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
Register rdx_adapter = rdx_temp;
// movptr(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
// deal with old JDK versions:
__ lea(rdi_temp, Address(rdx_temp,
__ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
__ cmpptr(rdi_temp, rdx_temp);
Label sorry_no_invoke_generic;
__ jccb(Assembler::below, sorry_no_invoke_generic);
__ movptr(rdx_adapter, Address(rdi_temp, 0));
__ testptr(rdx_adapter, rdx_adapter);
__ jccb(Assembler::zero, sorry_no_invoke_generic);
__ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
// As a trusted first argument, pass the type being called, so the adapter knows
// the actual types of the arguments and return values.
// (Generic invokers are shared among form-families of method-type.)
__ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
// FIXME: assert that rdx_adapter is of the right method-type.
__ mov(rcx, rdx_adapter);
trace_method_handle(_masm, "invokeGeneric");
__ jump_to_method_handle_entry(rcx, rdi_temp);
__ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
__ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize)); // recover original MH
__ push(rax_mtype); // required mtype
__ push(rcx_recv); // bad mh (1st stacked argument)
__ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
return entry_point;
}
@@ -164,11 +230,12 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
// for (rdx = rsp + size; rdx < argslot; rdx++)
// rdx[-size] = rdx[0]
// argslot -= size;
BLOCK_COMMENT("insert_arg_slots {");
__ mov(rdx_temp, rsp); // source pointer for copy
__ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
{
Label loop;
__ bind(loop);
__ BIND(loop);
// pull one word down each time through the loop
__ movptr(rbx_temp, Address(rdx_temp, 0));
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
@@ -179,6 +246,7 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
// Now move the argslot down, to point to the opened-up space.
__ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
BLOCK_COMMENT("} insert_arg_slots");
}
// Helper to remove argument slots from the stack.
@@ -218,6 +286,7 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
}
#endif
BLOCK_COMMENT("remove_arg_slots {");
// Pull up everything shallower than rax_argslot.
// Then remove the excess space on the stack.
// The stacked return address gets pulled up with everything else.
@@ -229,7 +298,7 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
__ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
{
Label loop;
__ bind(loop);
__ BIND(loop);
// pull one word up each time through the loop
__ movptr(rbx_temp, Address(rdx_temp, 0));
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
@@ -242,12 +311,14 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
__ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
// And adjust the argslot address to point at the deletion point.
__ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
BLOCK_COMMENT("} remove_arg_slots");
}
#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
oop mh,
intptr_t* saved_regs,
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
@@ -256,9 +327,47 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
if (last_sp != saved_sp && last_sp != NULL)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
if (Verbose) print_method_handle(mh);
if (Verbose) {
printf(" reg dump: ");
int saved_regs_count = (entry_sp-1) - saved_regs;
// 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
int i;
for (i = 0; i <= saved_regs_count; i++) {
if (i > 0 && i % 4 == 0 && i != saved_regs_count)
printf("\n + dump: ");
printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
}
printf("\n");
int stack_dump_count = 16;
if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
stack_dump_count = (int)(saved_bp + 2 - saved_sp);
if (stack_dump_count > 64) stack_dump_count = 48;
for (i = 0; i < stack_dump_count; i += 4) {
printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
i, &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
}
print_method_handle(mh);
}
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
__ push(rax);
__ lea(rax, Address(rsp, wordSize*6)); // entry_sp
__ pusha();
// arguments:
__ push(rbp); // interpreter frame pointer
__ push(rsi); // saved_sp
__ push(rax); // entry_sp
__ push(rcx); // mh
__ push(rcx);
__ movptr(Address(rsp, 0), (intptr_t) adaptername);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
__ popa();
__ pop(rax);
BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
@@ -324,21 +433,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
address interp_entry = __ pc();
if (UseCompressedOops) __ unimplemented("UseCompressedOops");
#ifndef PRODUCT
if (TraceMethodHandles) {
__ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
__ lea(rax, Address(rsp, wordSize*6)); // entry_sp
// arguments:
__ push(rbp); // interpreter frame pointer
__ push(rsi); // saved_sp
__ push(rax); // entry_sp
__ push(rcx); // mh
__ push(rcx);
__ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
__ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
}
#endif //PRODUCT
trace_method_handle(_masm, entry_name(ek));
BLOCK_COMMENT(entry_name(ek));
switch ((int) ek) {
case _raise_exception:
@@ -33,7 +33,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
method_handles_adapters_code_size = 10000
};
class x86 {
@@ -35,7 +35,7 @@ enum platform_dependent_constants {
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 13000
method_handles_adapters_code_size = 26000
};
class x86 {
@@ -359,9 +359,31 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
// Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo, done;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
__ testptr(rax, rax);
__ jccb(Assembler::zero, no_mdo);
// Increment counter in the MDO
const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmpb(done);
}
__ bind(no_mdo);
// Increment counter in methodOop (we don't need to load it, it's in rcx).
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
__ bind(done);
} else {
const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
if (ProfileInterpreter) { // %%% Merge this into methodDataOop
__ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
@@ -392,7 +414,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
__ cmp32(rcx,
ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
__ jcc(Assembler::aboveEqual, *overflow);
}
}
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
@@ -310,10 +310,29 @@ void InterpreterGenerator::generate_counter_incr(
Label* overflow,
Label* profile_method,
Label* profile_method_continue) {
const Address invocation_counter(rbx,
methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
// Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
if (TieredCompilation) {
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
Label no_mdo, done;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
__ testptr(rax, rax);
__ jccb(Assembler::zero, no_mdo);
// Increment counter in the MDO
const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
__ jmpb(done);
}
__ bind(no_mdo);
// Increment counter in methodOop (we don't need to load it, it's in ecx).
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
__ bind(done);
} else {
const Address backedge_counter(rbx,
methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
@@ -326,8 +345,7 @@ void InterpreterGenerator::generate_counter_incr(
__ movl(rax, backedge_counter); // load backedge counter
__ incrementl(rcx, InvocationCounter::count_increment);
__ andl(rax, InvocationCounter::count_mask_value); // mask out the
// status bits
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
__ movl(invocation_counter, rcx); // save invocation count
__ addl(rcx, rax); // add both counters
@ -346,6 +364,7 @@ void InterpreterGenerator::generate_counter_incr(
__ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
__ jcc(Assembler::aboveEqual, *overflow);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
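Both interpreter variants now share one shape: when a methodDataOop is attached, the invocation counter in the MDO is incremented, otherwise the one in the methodOop, and both feed the same overflow label. A minimal stand-alone sketch of that selection (MDO and Method are stand-in structs, not the VM types):

    #include <cstdint>

    struct MDO    { uint32_t invocation_counter = 0; };
    struct Method { uint32_t invocation_counter = 0; MDO* mdo = nullptr; };

    // Mirrors the no_mdo/done control flow above: profiled methods count
    // in the MDO, unprofiled ones in the methodOop itself.
    uint32_t& counter_to_bump(Method& m) {
      return (m.mdo != nullptr) ? m.mdo->invocation_counter
                                : m.invocation_counter;
    }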


@ -1558,6 +1558,27 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ testl(rdx, rdx);                      // check if forward or backward branch
__ jcc(Assembler::positive, dispatch);   // count only if backward branch
if (TieredCompilation) {
Label no_mdo;
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
__ testptr(rbx, rbx);
__ jccb(Assembler::zero, no_mdo);
// Increment the MDO backedge counter
const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
}
__ bind(no_mdo);
// Increment backedge counter in methodOop
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
} else {
// increment counter
__ movl(rax, Address(rcx, be_offset));                  // load backedge counter
__ incrementl(rax, InvocationCounter::count_increment); // increment counter
@ -1590,7 +1611,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
        const int overflow_frequency = 1024;
        __ andptr(rbx, overflow_frequency-1);
        __ jcc(Assembler::zero, backedge_counter_overflow);
      }
    } else {
      if (UseOnStackReplacement) {
@ -1601,6 +1621,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
        }
      }
    }
    __ bind(dispatch);
  }
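The backedge path uses the same mask trick with Tier0BackedgeNotifyFreqLog. As a worked example under assumed values (count_shift of 3, frequency log of 10): the mask is ((1 << 10) - 1) << 3 = 0x1FF8, the counter grows by 8 per backedge, so the masked bits return to zero, and backedge_counter_overflow is reached, once every 1024 counted backedges.

    // Illustrative values only, not the real flag settings.
    uint32_t mask = ((1u << 10) - 1) << 3;   // 0x1FF8: fires every 1024 backedges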


@ -1583,11 +1583,30 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// r14: locals pointer
__ testl(rdx, rdx);                      // check if forward or backward branch
__ jcc(Assembler::positive, dispatch);   // count only if backward branch
if (TieredCompilation) {
Label no_mdo;
int increment = InvocationCounter::count_increment;
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// Are we profiling?
__ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
__ testptr(rbx, rbx);
__ jccb(Assembler::zero, no_mdo);
// Increment the MDO backedge counter
const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
in_bytes(InvocationCounter::counter_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
__ jmp(dispatch);
}
__ bind(no_mdo);
// Increment backedge counter in methodOop
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
rax, false, Assembler::zero, &backedge_counter_overflow);
} else {
// increment counter
__ movl(rax, Address(rcx, be_offset));                  // load backedge counter
__ incrementl(rax, InvocationCounter::count_increment); // increment counter
__ movl(Address(rcx, be_offset), rax);                  // store counter
__ movl(rax, Address(rcx, inv_offset));                 // load invocation counter
@ -1630,6 +1649,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      }
    }
  }
  __ bind(dispatch);
}
@ -2912,7 +2932,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  assert_different_registers(index, recv, rax, rdx); // Uses temporary registers rax, rdx

  // Test for an invoke of a final method
  Label notFinal;


@ -296,14 +296,14 @@ protected:
  result |= CPU_CX8;
if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
  result |= CPU_CMOV;
if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
    _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
  result |= CPU_FXSR;
// HT flag is set for multi-core processors also.
if (threads_per_core() > 1)
  result |= CPU_HT;
if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
    _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
  result |= CPU_MMX;
if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
  result |= CPU_SSE;
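These paired CPU-feature hunks are a warning fix rather than a behavior change: && already binds tighter than ||, so the added parentheses keep the existing meaning while making the grouping explicit. The same fix in miniature, as a stand-alone sketch:

    bool std_fxsr = false, amd = true, ext_fxsr = true;
    bool before = std_fxsr || amd && ext_fxsr;    // parses as std_fxsr || (amd && ext_fxsr)
    bool after  = std_fxsr || (amd && ext_fxsr);  // identical value, intent now explicit
    // before == after for every combination of inputs.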


@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -209,7 +209,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
           (UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
  } else {
    // Itable stub size
    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
           (UseCompressedOops ? 32 : 0);  // 2 leaqs
  }
  // In order to tune these parameters, run the JVM with VM options


@ -656,6 +656,16 @@ void Canonicalizer::do_If(If* x) {
if (cmp->x() == cmp->y()) {
  do_If(canon);
} else {
  if (compilation()->profile_branches()) {
    // TODO: If profiling, leave floating point comparisons unoptimized.
    // We currently do not support profiling of the unordered case.
    switch (cmp->op()) {
      case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
      case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
        set_canonical(x);
        return;
    }
  }
  set_canonical(canon);
  set_bci(cmp->bci());
}
@ -663,6 +673,8 @@ void Canonicalizer::do_If(If* x) {
} else if (l->as_InstanceOf() != NULL) {
  // NOTE: Code permanently disabled for now since it leaves the old InstanceOf
  // instruction in the graph (it is pinned). Need to fix this at some point.
  // It should also be left in the graph when generating a profiled method version;
  // otherwise the Goto has to know that it was an InstanceOf.
  return;
  // pattern: If ((obj instanceof klass) cond rc) => simplify to: IfInstanceOf or: Goto
  InstanceOf* inst = l->as_InstanceOf();
@ -881,4 +893,5 @@ void Canonicalizer::do_UnsafePutObject(UnsafePutObject* x) {}
void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
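The new early-out for fcmpl/fcmpg/dcmpl/dcmpg exists because branch profiling has no representation for the unordered (NaN) case, so rewriting such an If during canonicalization could misattribute its counts. A small stand-alone illustration of why neither branch direction "wins" under NaN:

    #include <cmath>

    double x  = std::nan("");
    bool   lt = (x <  1.0);   // false
    bool   ge = (x >= 1.0);   // also false: the compare is unordered, so
                              // flipping the condition is not a safe rewrite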


@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,11 @@
class Canonicalizer: InstructionVisitor {
 private:
  Compilation *_compilation;
  Instruction* _canonical;
  int _bci;

  Compilation *compilation() { return _compilation; }
  void set_canonical(Value x);
  void set_bci(int bci) { _bci = bci; }
  void set_constant(jint x) { set_canonical(new Constant(new IntConstant(x))); }
@ -43,7 +45,9 @@ class Canonicalizer: InstructionVisitor {
  int* scale);
 public:
  Canonicalizer(Compilation* c, Value x, int bci) : _compilation(c), _canonical(x), _bci(bci) {
    if (CanonicalizeNodes) x->visit(this);
  }
  Value canonical() const { return _canonical; }
  int bci() const { return _bci; }
@ -92,5 +96,5 @@ class Canonicalizer: InstructionVisitor {
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  virtual void do_ProfileCall    (ProfileCall*    x);
  virtual void do_ProfileInvoke  (ProfileInvoke*  x);
};


@ -80,20 +80,21 @@ class CodeStubList: public _CodeStubList {
  }
};

class CounterOverflowStub: public CodeStub {
 private:
  CodeEmitInfo* _info;
  int           _bci;
  LIR_Opr       _method;

 public:
  CounterOverflowStub(CodeEmitInfo* info, int bci, LIR_Opr method) : _info(info), _bci(bci), _method(method) {
  }

  virtual void emit_code(LIR_Assembler* e);

  virtual void visit(LIR_OpVisitState* visitor) {
    visitor->do_slow_case(_info);
    visitor->do_input(_method);
  }

#ifndef PRODUCT
@ -101,7 +102,6 @@ public:
#endif // PRODUCT
};

class ConversionStub: public CodeStub {
 private:


@ -290,6 +290,10 @@ int Compilation::compile_java_method() {
CHECK_BAILOUT_(no_frame_size);

if (is_profiling()) {
  method()->build_method_data();
}

{
  PhaseTraceTime timeit(_t_buildIR);
  build_hir();
@ -447,6 +451,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _masm(NULL)
, _has_exception_handlers(false)
, _has_fpu_code(true)   // pessimistic assumption
, _would_profile(false)
, _has_unsafe_access(false)
, _has_method_handle_invokes(false)
, _bailout_msg(NULL)
@ -461,12 +466,16 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
#endif // PRODUCT
{
  PhaseTraceTime timeit(_t_compile);

  _arena = Thread::current()->resource_area();
  _env->set_compiler_data(this);
  _exception_info_list = new ExceptionInfoList();
  _implicit_exception_table.set_size(0);
  compile_method();
  if (is_profiling() && _would_profile) {
    ciMethodData *md = method->method_data();
    assert (md != NULL, "Should have MDO");
    md->set_would_profile(_would_profile);
  }
}

Compilation::~Compilation() {
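The flow added here is a two-step handshake: the front end raises _would_profile while building the IR, and once compile_method() returns, the flag is persisted on the method's MDO for the tiered policy to consult later. A minimal model of the handshake; the struct names are stand-ins for the ci* types:

    struct MDOModel { bool would_profile = false; };

    struct CompilationModel {
      bool profiling     = false;  // is_profiling()
      bool would_profile = false;  // raised by the builders at profile points

      void finish(MDOModel& md) {
        if (profiling && would_profile) md.would_profile = true;
      }
    };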


@ -69,6 +69,7 @@ class Compilation: public StackObj {
bool          _has_exception_handlers;
bool          _has_fpu_code;
bool          _has_unsafe_access;
bool          _would_profile;
bool          _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
const char*   _bailout_msg;
ExceptionInfoList* _exception_info_list;
@ -143,6 +144,7 @@ class Compilation: public StackObj {
void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
void set_would_profile(bool f)                 { _would_profile = f; }
// Add a set of exception handlers covering the given PC offset
void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
// Statistics gathering
@ -202,6 +204,30 @@ class Compilation: public StackObj {
void compile_only_this_scope(outputStream* st, IRScope* scope);
void exclude_this_method();
#endif // PRODUCT

bool is_profiling() {
  return env()->comp_level() == CompLevel_full_profile ||
         env()->comp_level() == CompLevel_limited_profile;
}

bool count_invocations() { return is_profiling(); }
bool count_backedges()   { return is_profiling(); }

// Helpers for generation of profile information
bool profile_branches() {
  return env()->comp_level() == CompLevel_full_profile &&
         C1UpdateMethodData && C1ProfileBranches;
}
bool profile_calls() {
  return env()->comp_level() == CompLevel_full_profile &&
         C1UpdateMethodData && C1ProfileCalls;
}
bool profile_inlined_calls() {
  return profile_calls() && C1ProfileInlinedCalls;
}
bool profile_checkcasts() {
  return env()->comp_level() == CompLevel_full_profile &&
         C1UpdateMethodData && C1ProfileCheckcasts;
}
};
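These helpers separate "compiling at a profiled tier at all" (is_profiling, which also drives invocation and backedge counting) from the individual C1Profile* switches. The intended call pattern, which the GraphBuilder hunks below repeat, in a stand-alone sketch with stand-in types:

    struct Node { const void* method = nullptr; int bci = -1; bool profile = false; };

    void maybe_profile(bool is_profiling, bool profile_branches,
                       Node& n, const void* method, int bci, bool& would_profile) {
      if (!is_profiling) return;
      would_profile = true;        // always record that profiling was possible
      if (profile_branches) {      // attach metadata only when the switch is on
        n.method  = method;
        n.bci     = bci;
        n.profile = true;
      }
    }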


@ -39,9 +39,7 @@ class Compiler: public AbstractCompiler {
// Name of this compiler
virtual const char* name()                     { return "C1"; }

virtual bool is_c1() { return true; };

BufferBlob* build_buffer_blob();


@ -967,6 +967,17 @@ void GraphBuilder::store_indexed(BasicType type) {
StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
append(result);
_memory->store_value(value);

if (type == T_OBJECT && is_profiling()) {
  // Note that we'd collect profile data in this method if we wanted it.
  compilation()->set_would_profile(true);

  if (profile_checkcasts()) {
    result->set_profiled_method(method());
    result->set_profiled_bci(bci());
    result->set_should_profile(true);
  }
}
}
@ -1144,8 +1155,16 @@ void GraphBuilder::increment() {
void GraphBuilder::_goto(int from_bci, int to_bci) {
  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  if (is_profiling()) {
    compilation()->set_would_profile(true);
  }
  if (profile_branches()) {
    x->set_profiled_method(method());
    x->set_profiled_bci(bci());
    x->set_should_profile(true);
  }
  append(x);
}
@ -1153,11 +1172,45 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta
BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));

if (is_profiling()) {
  If* if_node = i->as_If();
  if (if_node != NULL) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);
    // At level 2 we need the proper bci to count backedges
    if_node->set_profiled_bci(bci());
    if (profile_branches()) {
      // Successors can be rotated by the canonicalizer, check for this case.
      if_node->set_profiled_method(method());
      if_node->set_should_profile(true);
      if (if_node->tsux() == fsux) {
        if_node->set_swapped(true);
      }
    }
    return;
  }

  // Check if this If was reduced to Goto.
  Goto *goto_node = i->as_Goto();
  if (goto_node != NULL) {
    compilation()->set_would_profile(true);
    if (profile_branches()) {
      goto_node->set_profiled_method(method());
      goto_node->set_profiled_bci(bci());
      goto_node->set_should_profile(true);
      // Find out which successor is used.
      if (goto_node->default_sux() == tsux) {
        goto_node->set_direction(Goto::taken);
      } else if (goto_node->default_sux() == fsux) {
        goto_node->set_direction(Goto::not_taken);
      } else {
        ShouldNotReachHere();
      }
    }
    return;
  }
}
}
@ -1698,8 +1751,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (recv != NULL &&
    (code == Bytecodes::_invokespecial ||
     !is_loaded || target->is_final())) {
  // invokespecial always needs a NULL check.  invokevirtual where
  // the target is final or where it's not known that whether the
  // target is final requires a NULL check.  Otherwise normal
@ -1709,6 +1761,13 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
  null_check(recv);
}

if (is_profiling()) {
  if (recv != NULL && profile_calls()) {
    null_check(recv);
  }
  // Note that we'd collect profile data in this method if we wanted it.
  compilation()->set_would_profile(true);

  if (profile_calls()) {
    assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
    ciKlass* target_klass = NULL;
@ -1719,6 +1778,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
    }
    profile_call(recv, target_klass);
  }
}

Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
// push result
@ -1782,11 +1842,17 @@ void GraphBuilder::check_cast(int klass_index) {
CheckCast* c = new CheckCast(klass, apop(), state_before);
apush(append_split(c));
c->set_direct_compare(direct_compare(klass));

if (is_profiling()) {
  // Note that we'd collect profile data in this method if we wanted it.
  compilation()->set_would_profile(true);

  if (profile_checkcasts()) {
    c->set_profiled_method(method());
    c->set_profiled_bci(bci());
    c->set_should_profile(true);
  }
}
}
@ -1797,6 +1863,17 @@ void GraphBuilder::instance_of(int klass_index) {
InstanceOf* i = new InstanceOf(klass, apop(), state_before);
ipush(append_split(i));
i->set_direct_compare(direct_compare(klass));

if (is_profiling()) {
  // Note that we'd collect profile data in this method if we wanted it.
  compilation()->set_would_profile(true);

  if (profile_checkcasts()) {
    i->set_profiled_method(method());
    i->set_profiled_bci(bci());
    i->set_should_profile(true);
  }
}
}
@ -1868,7 +1945,7 @@ Value GraphBuilder::round_fp(Value fp_value) {
Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  Canonicalizer canon(compilation(), instr, bci);
  Instruction* i1 = canon.canonical();
  if (i1->bci() != -99) {
    // Canonicalizer returned an instruction which was already
@ -2651,18 +2728,6 @@ BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, Va
h->set_depth_first_number(0); h->set_depth_first_number(0);
Value l = h; Value l = h;
if (profile_branches()) {
// Increment the invocation count on entry to the method. We
// can't use profile_invocation here because append isn't setup to
// work properly at this point. The instruction have to be
// appended to the instruction stream by hand.
Value m = new Constant(new ObjectConstant(compilation()->method()));
h->set_next(m, 0);
Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
m->set_next(p, 0);
l = p;
}
BlockEnd* g = new Goto(entry, false);
l->set_next(g, entry->bci());
h->set_end(g);
@ -2688,10 +2753,10 @@ BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry,
// also necessary when profiling so that there's a single block that
// can increment the interpreter_invocation_count.
BlockBegin* new_header_block;
if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
  new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
} else {
  new_header_block = std_entry;
}
// setup start block (root for the IR graph) // setup start block (root for the IR graph)
@ -3115,10 +3180,14 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
Values* args = state()->pop_arguments(callee->arg_size()); Values* args = state()->pop_arguments(callee->arg_size());
ValueStack* locks = lock_stack(); ValueStack* locks = lock_stack();
if (profile_calls()) {
if (is_profiling()) {
// Don't profile in the special case where the root method
// is the intrinsic
if (callee != method()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_calls()) {
Value recv = NULL;
if (has_receiver) {
  recv = args->at(0);
@ -3127,6 +3196,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
      profile_call(recv, NULL);
    }
  }
}
Intrinsic* result = new Intrinsic(result_type, id, args, has_receiver, lock_stack(),
                                  preserves_state, cantrap);
@ -3296,7 +3366,9 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  assert(!callee->is_native(), "callee must not be native");
if (count_backedges() && callee->has_loops()) {
INLINE_BAILOUT("too complex for tiered");
}
// first perform tests of things it's not possible to inline
if (callee->has_exception_handlers() &&
    !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
@ -3365,11 +3437,18 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  null_check(recv);
}

if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
// this may be redundant here...
compilation()->set_would_profile(true);
if (profile_calls()) {
    profile_call(recv, holder_known ? callee->holder() : NULL);
  }
if (profile_inlined_calls()) {
    profile_invocation(callee, state(), 0);
}
}
// Introduce a new callee continuation point - if the callee has
// more than one return instruction or the return does not allow
@ -3755,30 +3834,10 @@ void GraphBuilder::print_stats() {
}
#endif // PRODUCT

void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  append(new ProfileCall(method(), bci(), recv, known_holder));
}
void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state, int bci) {
  append(new ProfileInvoke(callee, state, bci));
if (profile_calls()) {
// increment the interpreter_invocation_count for the inlinee
Value m = append(new Constant(new ObjectConstant(callee)));
append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
}
}
void GraphBuilder::profile_bci(int bci) {
if (profile_branches()) {
ciMethodData* md = method()->method_data();
if (md == NULL) {
BAILOUT("out of memory building methodDataOop");
}
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
Value mdo = append(new Constant(new ObjectConstant(md)));
append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
}
}
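The if_node changes above handle two canonicalizer outcomes: the If survives with rotated successors (recorded via set_swapped), or it collapses into a Goto (recorded via Goto::Direction). A compact sketch of the classification, where tsux/fsux are the original true/false successors and the pointer parameters are stand-ins for BlockBegin*:

    enum Direction { none, taken, not_taken };
    struct Classified { bool swapped; Direction dir; };

    // i_tsux: the If's true successor after canonicalization (null if it
    // was reduced to a Goto); g_default: the Goto's single successor.
    Classified classify(const void* tsux, const void* fsux,
                        const void* i_tsux, const void* g_default) {
      if (i_tsux != nullptr)                      // still an If
        return { i_tsux == fsux, none };          // rotated iff true edge moved
      return { false, g_default == tsux ? taken : not_taken };
    }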


@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -342,27 +342,17 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)
// methodDataOop profiling helpers
void profile_call(Value recv, ciKlass* predicted_holder);
void profile_invocation(ciMethod* inlinee, ValueStack* state, int bci);

// Shortcuts to profiling control.
bool is_profiling()          { return _compilation->is_profiling();          }
bool count_invocations()     { return _compilation->count_invocations();     }
bool count_backedges()       { return _compilation->count_backedges();       }
bool profile_branches()      { return _compilation->profile_branches();      }
bool profile_calls()         { return _compilation->profile_calls();         }
bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
 public:
  NOT_PRODUCT(void print_stats();)


@ -296,6 +296,7 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
void IR::optimize() {
  Optimizer opt(this);
if (!compilation()->profile_branches()) {
if (DoCEE) {
  opt.eliminate_conditional_expressions();
#ifndef PRODUCT
@ -310,6 +311,7 @@ void IR::optimize() {
  if (PrintIR || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
#endif
}
}
if (EliminateNullChecks) {
  opt.eliminate_null_checks();
#ifndef PRODUCT
@ -484,6 +486,8 @@ class ComputeLinearScanOrder : public StackObj {
BitMap2D  _loop_map;    // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list;   // temporary list (used in mark_loops and compute_order)

Compilation* _compilation;

// accessors for _visited_blocks and _active_blocks
void init_visited()                  { _active_blocks.clear(); _visited_blocks.clear(); }
bool is_visited(BlockBegin* b) const { return _visited_blocks.at(b->block_id()); }
@ -526,8 +530,9 @@ class ComputeLinearScanOrder : public StackObj {
NOT_PRODUCT(void print_blocks();)
DEBUG_ONLY(void verify();)

Compilation* compilation() const { return _compilation; }

 public:
  ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);

  // accessors for final result
  BlockList* linear_scan_order() const { return _linear_scan_order; }
@ -535,7 +540,7 @@ class ComputeLinearScanOrder : public StackObj {
};

ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
  _max_block_id(BlockBegin::number_of_blocks()),
  _num_blocks(0),
  _num_loops(0),
@ -547,13 +552,18 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) :
  _loop_end_blocks(8),
  _work_list(8),
  _linear_scan_order(NULL), // initialized later with correct size
  _loop_map(0, 0),          // initialized later with correct size
  _compilation(c)
{
  TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");

  init_visited();
  count_edges(start_block, NULL);

  if (compilation()->is_profiling()) {
    compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
  }

  if (_num_loops > 0) {
    mark_loops();
    clear_non_natural_loops(start_block);
@ -1130,7 +1140,7 @@ void ComputeLinearScanOrder::verify() {
void IR::compute_code() {
  assert(is_valid(), "IR must be valid");

  ComputeLinearScanOrder compute_order(compilation(), start());
  _num_loops = compute_order.num_loops();
  _code = compute_order.linear_scan_order();
}


@ -1011,3 +1011,7 @@ int Phi::operand_count() const {
void Throw::state_values_do(ValueVisitor* f) {
  BlockEnd::state_values_do(f);
}
void ProfileInvoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);
}


@ -98,7 +98,7 @@ class UnsafePrefetch;
class UnsafePrefetchRead;
class UnsafePrefetchWrite;
class ProfileCall;
class ProfileInvoke;

// A Value is a reference to the instruction creating the value
typedef Instruction* Value;
@ -195,7 +195,7 @@ class InstructionVisitor: public StackObj {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
virtual void do_ProfileCall    (ProfileCall*    x) = 0;
virtual void do_ProfileInvoke  (ProfileInvoke*  x) = 0;
};
@ -906,11 +906,13 @@ LEAF(StoreIndexed, AccessIndexed)
 private:
  Value _value;
ciMethod* _profiled_method;
int _profiled_bci;
 public:
  // creation
  StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* lock_stack)
  : AccessIndexed(array, index, length, elt_type, lock_stack)
  , _value(value), _profiled_method(NULL), _profiled_bci(0)
  {
    set_flag(NeedsWriteBarrierFlag, (as_ValueType(elt_type)->is_object()));
    set_flag(NeedsStoreCheckFlag, (as_ValueType(elt_type)->is_object()));
@ -923,7 +925,13 @@ LEAF(StoreIndexed, AccessIndexed)
IRScope* scope() const;            // the state's scope
bool needs_write_barrier() const   { return check_flag(NeedsWriteBarrierFlag); }
bool needs_store_check() const     { return check_flag(NeedsStoreCheckFlag); }
// Helpers for methodDataOop profiling
void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); }
void set_profiled_method(ciMethod* method) { _profiled_method = method; }
void set_profiled_bci(int bci) { _profiled_bci = bci; }
bool should_profile() const { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
  // generic
  virtual void input_values_do(ValueVisitor* f) { AccessIndexed::input_values_do(f); f->visit(&_value); }
};
@ -1297,9 +1305,14 @@ BASE(TypeCheck, StateSplit)
Value       _obj;
ValueStack* _state_before;
ciMethod* _profiled_method;
int _profiled_bci;
 public:
  // creation
  TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before)
  : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before),
    _profiled_method(NULL), _profiled_bci(0) {
    ASSERT_VALUES
    set_direct_compare(false);
  }
@ -1318,27 +1331,6 @@ BASE(TypeCheck, StateSplit)
virtual bool can_trap() const                 { return true; }
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_obj); }
virtual void other_values_do(ValueVisitor* f);
};
LEAF(CheckCast, TypeCheck)
private:
ciMethod* _profiled_method;
int _profiled_bci;
public:
// creation
CheckCast(ciKlass* klass, Value obj, ValueStack* state_before)
: TypeCheck(klass, obj, objectType, state_before)
, _profiled_method(NULL)
, _profiled_bci(0) {}
void set_incompatible_class_change_check() {
set_flag(ThrowIncompatibleClassChangeErrorFlag, true);
}
bool is_incompatible_class_change_check() const {
return check_flag(ThrowIncompatibleClassChangeErrorFlag);
}
// Helpers for methodDataOop profiling
void set_should_profile(bool value)  { set_flag(ProfileMDOFlag, value); }
@ -1347,10 +1339,24 @@ LEAF(CheckCast, TypeCheck)
bool      should_profile() const     { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const    { return _profiled_method; }
int       profiled_bci() const       { return _profiled_bci; }
};
LEAF(CheckCast, TypeCheck)
public:
// creation
CheckCast(ciKlass* klass, Value obj, ValueStack* state_before)
: TypeCheck(klass, obj, objectType, state_before) {}
void set_incompatible_class_change_check() {
set_flag(ThrowIncompatibleClassChangeErrorFlag, true);
}
bool is_incompatible_class_change_check() const {
return check_flag(ThrowIncompatibleClassChangeErrorFlag);
}
  ciType* declared_type() const;
  ciType* exact_type() const;
};
@ -1733,20 +1739,45 @@ BASE(BlockEnd, StateSplit)
LEAF(Goto, BlockEnd)
public:
enum Direction {
none, // Just a regular goto
taken, not_taken // Goto produced from If
};
private:
ciMethod* _profiled_method;
int _profiled_bci;
Direction _direction;
 public:
  // creation
  Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false)
    : BlockEnd(illegalType, state_before, is_safepoint)
    , _direction(none)
    , _profiled_method(NULL)
    , _profiled_bci(0) {
    BlockList* s = new BlockList(1);
    s->append(sux);
    set_sux(s);
  }

  Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint)
                                           , _direction(none)
                                           , _profiled_method(NULL)
                                           , _profiled_bci(0) {
    BlockList* s = new BlockList(1);
    s->append(sux);
    set_sux(s);
  }
bool should_profile() const { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches
int profiled_bci() const { return _profiled_bci; }
Direction direction() const { return _direction; }
void set_should_profile(bool value) { set_flag(ProfileMDOFlag, value); }
void set_profiled_method(ciMethod* method) { _profiled_method = method; }
void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_direction(Direction d) { _direction = d; }
}; };
@ -1757,6 +1788,8 @@ LEAF(If, BlockEnd)
Value       _y;
ciMethod*   _profiled_method;
int         _profiled_bci;   // Canonicalizer may alter bci of If node
bool _swapped; // Is the order reversed with respect to the original If in the
// bytecode stream?
 public:
  // creation
  // unordered_is_true is valid for float/double compares only
@ -1767,6 +1800,7 @@ LEAF(If, BlockEnd)
, _y(y)
, _profiled_method(NULL)
, _profiled_bci(0)
, _swapped(false)
{
  ASSERT_VALUES
  set_flag(UnorderedIsTrueFlag, unordered_is_true);
@ -1788,7 +1822,8 @@ LEAF(If, BlockEnd)
BlockBegin* usux() const          { return sux_for(unordered_is_true()); }
bool should_profile() const       { return check_flag(ProfileMDOFlag); }
ciMethod* profiled_method() const { return _profiled_method; } // set only for profiled branches
int profiled_bci() const          { return _profiled_bci; }    // set for profiled branches and tiered
bool is_swapped() const { return _swapped; }
// manipulation
void swap_operands() {
@ -1807,7 +1842,7 @@ LEAF(If, BlockEnd)
void set_should_profile(bool value)        { set_flag(ProfileMDOFlag, value); }
void set_profiled_method(ciMethod* method) { _profiled_method = method; }
void set_profiled_bci(int bci)             { _profiled_bci = bci; }
void set_swapped(bool value) { _swapped = value; }
  // generic
  virtual void input_values_do(ValueVisitor* f) { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
};
@ -2235,7 +2270,6 @@ LEAF(UnsafePrefetchWrite, UnsafePrefetch)
  }
};

LEAF(ProfileCall, Instruction)
 private:
  ciMethod* _method;
@ -2263,35 +2297,32 @@ LEAF(ProfileCall, Instruction)
  virtual void input_values_do(ValueVisitor* f) { if (_recv != NULL) f->visit(&_recv); }
};
// Use to trip invocation counter of an inlined method

LEAF(ProfileInvoke, Instruction)
 private:
  ciMethod*   _inlinee;
  ValueStack* _state;
  int         _bci_of_invoke;

 public:
  ProfileInvoke(ciMethod* inlinee, ValueStack* state, int bci)
    : Instruction(voidType)
    , _inlinee(inlinee)
    , _bci_of_invoke(bci)
    , _state(state)
  {
    // The ProfileInvoke has side-effects and must occur precisely where located QQQ???
    pin();
  }

  ciMethod* inlinee()     { return _inlinee; }
  ValueStack* state()     { return _state; }
  int bci_of_invoke()     { return _bci_of_invoke; }
  virtual void input_values_do(ValueVisitor*)   {}
  virtual void state_values_do(ValueVisitor*);
};
class BlockPair: public CompilationResourceObj {
 private:
  BlockBegin* _from;
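ProfileInvoke replaces the generic ProfileCounter with a node carrying exactly what an inlined invocation needs: the inlinee, the ValueStack at the call, and the bci of the invoke. What one executed ProfileInvoke amounts to at runtime is roughly the following; the struct is a stand-in, not a VM type:

    struct InlineeModel { unsigned interpreter_invocation_count = 0; };

    // Trip the inlinee's invocation counter for one inlined call.
    void profile_invoke(InlineeModel& inlinee) {
      ++inlinee.interpreter_invocation_count;
    }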


@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -819,7 +819,6 @@ void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  output()->put(')');
}

void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
  output()->print("profile ");
  print_value(x->recv());
@ -831,20 +830,11 @@ void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
  output()->put(')');
}
void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
output()->print("profile_invoke ");
output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
output()->put(')');
void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) {
ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant();
if (oc != NULL && oc->value()->is_method() &&
x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) {
print_value(x->mdo());
output()->print(".interpreter_invocation_count += %d", x->increment());
} else {
output()->print("counter [");
print_value(x->mdo());
output()->print(" + %d] += %d", x->offset(), x->increment());
}
}

#endif // PRODUCT


@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -123,6 +123,6 @@ class InstructionPrinter: public InstructionVisitor {
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  virtual void do_ProfileCall    (ProfileCall*    x);
  virtual void do_ProfileInvoke  (ProfileInvoke*  x);
};

#endif // PRODUCT


@ -345,9 +345,8 @@ void LIR_OpBranch::negate_cond() {
LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                                 LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3,
                                 bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch,
                                 CodeStub* stub)
  : LIR_Op(code, result, NULL)
  , _object(object)
  , _array(LIR_OprFact::illegalOpr)
@ -359,8 +358,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object,
  , _stub(stub)
  , _info_for_patch(info_for_patch)
  , _info_for_exception(info_for_exception)
  , _profiled_method(NULL)
  , _profiled_bci(-1)
  , _should_profile(false)
{
  if (code == lir_checkcast) {
    assert(info_for_exception != NULL, "checkcast throws exceptions");
  } else if (code == lir_instanceof) {
@ -372,7 +373,7 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object,
LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception)
  : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)
  , _object(object)
  , _array(array)
@ -384,8 +385,10 @@ LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, L
  , _stub(NULL)
  , _info_for_patch(NULL)
  , _info_for_exception(info_for_exception)
  , _profiled_method(NULL)
  , _profiled_bci(-1)
  , _should_profile(false)
{
  if (code == lir_store_check) {
    _stub = new ArrayStoreExceptionStub(info_for_exception);
    assert(info_for_exception != NULL, "store_check throws exceptions");
@ -495,6 +498,8 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_monaddr:        // input and result always valid, info always invalid
case lir_null_check:     // input and info always valid, result always invalid
case lir_move:           // input and result always valid, may have info
case lir_pack64:         // input and result always valid
case lir_unpack64:       // input and result always valid
case lir_prefetchr:      // input always valid, result and info always invalid
case lir_prefetchw:      // input always valid, result and info always invalid
{
@ -903,7 +908,6 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
      assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
      break;
    }

  default:
    ShouldNotReachHere();
  }
@ -1041,12 +1045,10 @@ void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
  masm->emit_delay(this);
}

void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) {
  masm->emit_profile_call(this);
}

// LIR_List
LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
  : _operations(8)
@ -1364,19 +1366,29 @@ void LIR_List::checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
                          LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                          CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
                          ciMethod* profiled_method, int profiled_bci) {
  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
                                           tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub);
  if (profiled_method != NULL) {
    c->set_profiled_method(profiled_method);
    c->set_profiled_bci(profiled_bci);
    c->set_should_profile(true);
  }
  append(c);
}

void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci) {
  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL);
  if (profiled_method != NULL) {
    c->set_profiled_method(profiled_method);
    c->set_profiled_bci(profiled_bci);
    c->set_should_profile(true);
  }
  append(c);
}

void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) {
  append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception));
}
@ -1611,6 +1623,8 @@ const char * LIR_Op::name() const {
case lir_convert:               s = "convert";       break;
case lir_alloc_object:          s = "alloc_obj";     break;
case lir_monaddr:               s = "mon_addr";      break;
case lir_pack64:                s = "pack64";        break;
case lir_unpack64:              s = "unpack64";      break;
// LIR_Op2
case lir_cmp:                   s = "cmp";           break;
case lir_cmp_l2i:               s = "cmp_l2i";       break;
@ -1664,7 +1678,6 @@ const char * LIR_Op::name() const {
case lir_cas_int:               s = "cas_int";       break;
// LIR_OpProfileCall
case lir_profile_call:          s = "profile_call";  break;
case lir_none:                  ShouldNotReachHere();break;
default:                        s = "illegal_op";    break;
}
@ -1922,7 +1935,6 @@ void LIR_OpProfileCall::print_instr(outputStream* out) const {
tmp1()->print(out); out->print(" "); tmp1()->print(out); out->print(" ");
} }
#endif // PRODUCT #endif // PRODUCT
// Implementation of LIR_InsertionBuffer // Implementation of LIR_InsertionBuffer

View file

@ -849,6 +849,8 @@ enum LIR_Code {
, lir_monaddr , lir_monaddr
, lir_roundfp , lir_roundfp
, lir_safepoint , lir_safepoint
, lir_pack64
, lir_unpack64
, lir_unwind , lir_unwind
, end_op1 , end_op1
, begin_op2 , begin_op2
@ -1464,18 +1466,16 @@ class LIR_OpTypeCheck: public LIR_Op {
CodeEmitInfo* _info_for_patch; CodeEmitInfo* _info_for_patch;
CodeEmitInfo* _info_for_exception; CodeEmitInfo* _info_for_exception;
CodeStub* _stub; CodeStub* _stub;
// Helpers for Tier1UpdateMethodData
ciMethod* _profiled_method; ciMethod* _profiled_method;
int _profiled_bci; int _profiled_bci;
bool _should_profile;
public: public:
LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
ciMethod* profiled_method, int profiled_bci);
LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
ciMethod* profiled_method, int profiled_bci);
LIR_Opr object() const { return _object; } LIR_Opr object() const { return _object; }
LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; } LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
@ -1489,8 +1489,12 @@ public:
CodeStub* stub() const { return _stub; } CodeStub* stub() const { return _stub; }
// methodDataOop profiling // methodDataOop profiling
ciMethod* profiled_method() { return _profiled_method; } void set_profiled_method(ciMethod *method) { _profiled_method = method; }
int profiled_bci() { return _profiled_bci; } void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_should_profile(bool b) { _should_profile = b; }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
virtual void emit_code(LIR_Assembler* masm); virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
@ -1771,7 +1775,6 @@ class LIR_OpProfileCall : public LIR_Op {
virtual void print_instr(outputStream* out) const PRODUCT_RETURN; virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
}; };
class LIR_InsertionBuffer; class LIR_InsertionBuffer;
//--------------------------------LIR_List--------------------------------------------------- //--------------------------------LIR_List---------------------------------------------------
@ -1835,6 +1838,7 @@ class LIR_List: public CompilationResourceObj {
//---------- mutators --------------- //---------- mutators ---------------
void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); } void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); }
void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); } void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); }
void remove_at(int i) { _operations.remove_at(i); }
//---------- printing ------------- //---------- printing -------------
void print_instructions() PRODUCT_RETURN; void print_instructions() PRODUCT_RETURN;
@ -1908,6 +1912,9 @@ class LIR_List: public CompilationResourceObj {
void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); } void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); }
void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); } void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); }
void pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); } void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); }
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
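The new pack64/unpack64 mutators cover SPARC, where a 64-bit long can live as two 32-bit register halves that must be merged into, or split out of, a single 64-bit register; the real work happens in the LIR_Assembler with shift-and-or register moves. A conceptual model only, assuming plain integers rather than registers:

#include <stdint.h>
#include <cstdio>

// Conceptual model: combine two 32-bit halves into one 64-bit value
// and split them back again.
uint64_t pack64(uint32_t hi, uint32_t lo) {
  return ((uint64_t)hi << 32) | lo;
}

void unpack64(uint64_t v, uint32_t* hi, uint32_t* lo) {
  *hi = (uint32_t)(v >> 32);
  *lo = (uint32_t)v;
}

int main() {
  uint32_t hi = 0, lo = 0;
  uint64_t v = pack64(0xDEADBEEFu, 0xCAFEBABEu);
  unpack64(v, &hi, &lo);
  std::printf("%llx -> %x:%x\n", (unsigned long long)v, hi, lo);
  return 0;
}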
@ -2034,15 +2041,17 @@ class LIR_List: public CompilationResourceObj {
void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
ciMethod* profiled_method, int profiled_bci); ciMethod* profiled_method, int profiled_bci);
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
// methodDataOop profiling // methodDataOop profiling
void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); } void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
}
}; };
void print_LIR(BlockList* blocks); void print_LIR(BlockList* blocks);

View file

@ -548,6 +548,16 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
break; break;
#ifdef SPARC
case lir_pack64:
pack64(op->in_opr(), op->result_opr());
break;
case lir_unpack64:
unpack64(op->in_opr(), op->result_opr());
break;
#endif
case lir_unwind: case lir_unwind:
unwind_op(op->in_opr()); unwind_op(op->in_opr());
break; break;

View file

@ -187,6 +187,7 @@ class LIR_Assembler: public CompilationResourceObj {
void emit_alloc_obj(LIR_OpAllocObj* op); void emit_alloc_obj(LIR_OpAllocObj* op);
void emit_alloc_array(LIR_OpAllocArray* op); void emit_alloc_array(LIR_OpAllocArray* op);
void emit_opTypeCheck(LIR_OpTypeCheck* op); void emit_opTypeCheck(LIR_OpTypeCheck* op);
void emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null);
void emit_compare_and_swap(LIR_OpCompareAndSwap* op); void emit_compare_and_swap(LIR_OpCompareAndSwap* op);
void emit_lock(LIR_OpLock* op); void emit_lock(LIR_OpLock* op);
void emit_call(LIR_OpJavaCall* op); void emit_call(LIR_OpJavaCall* op);

View file

@ -480,16 +480,6 @@ void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result
} }
// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
LIR_Address* counter = new LIR_Address(base, offset, T_INT);
LIR_Opr result = new_register(T_INT);
__ load(counter, result);
__ add(result, LIR_OprFact::intConst(increment), result);
__ store(result, counter);
return result;
}
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) { void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
LIR_Opr result_op = result; LIR_Opr result_op = result;
@ -821,7 +811,6 @@ LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
return tmp; return tmp;
} }
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) { void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
if (if_instr->should_profile()) { if (if_instr->should_profile()) {
ciMethod* method = if_instr->profiled_method(); ciMethod* method = if_instr->profiled_method();
@ -836,24 +825,32 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
assert(data->is_BranchData(), "need BranchData for two-way branches"); assert(data->is_BranchData(), "need BranchData for two-way branches");
int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
if (if_instr->is_swapped()) {
int t = taken_count_offset;
taken_count_offset = not_taken_count_offset;
not_taken_count_offset = t;
}
LIR_Opr md_reg = new_register(T_OBJECT); LIR_Opr md_reg = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg); __ oop2reg(md->constant_encoding(), md_reg);
LIR_Opr data_offset_reg = new_register(T_INT);
LIR_Opr data_offset_reg = new_pointer_register();
__ cmove(lir_cond(cond), __ cmove(lir_cond(cond),
LIR_OprFact::intConst(taken_count_offset), LIR_OprFact::intptrConst(taken_count_offset),
LIR_OprFact::intConst(not_taken_count_offset), LIR_OprFact::intptrConst(not_taken_count_offset),
data_offset_reg); data_offset_reg);
LIR_Opr data_reg = new_register(T_INT);
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT); // MDO cells are intptr_t, so the data_reg width is arch-dependent.
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
__ move(LIR_OprFact::address(data_addr), data_reg); __ move(LIR_OprFact::address(data_addr), data_reg);
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
// Use leal instead of add to avoid destroying condition codes on x86 // Use leal instead of add to avoid destroying condition codes on x86
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
__ leal(LIR_OprFact::address(fake_incr_value), data_reg); __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
__ move(data_reg, LIR_OprFact::address(data_addr)); __ move(data_reg, LIR_OprFact::address(data_addr));
} }
} }
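The rewritten profile_branch picks the taken or not-taken MDO cell with a conditional move and bumps it with leal, because on x86 an add would clobber the condition codes the following branch still needs; the cells are now intptr_t-wide, hence the pointer registers. A self-contained model of the update (counter_increment is a stand-in for DataLayout::counter_increment):

#include <stdint.h>
#include <cstdio>

// Stand-in for DataLayout::counter_increment; MDO cells are
// intptr_t-wide, which is why the LIR now uses pointer registers.
static const intptr_t counter_increment = 1;

// Conceptual model of the update emitted above: a conditional move
// selects the cell, and leal (not add) bumps it so the x86 condition
// codes needed by the following branch survive.
void profile_branch(bool cond_true, intptr_t* mdo_cells,
                    int taken_off, int not_taken_off) {
  int off = cond_true ? taken_off : not_taken_off;  // the cmove
  mdo_cells[off] += counter_increment;              // the leal + store
}

int main() {
  intptr_t cells[2] = { 0, 0 };
  profile_branch(true,  cells, 0, 1);
  profile_branch(false, cells, 0, 1);
  std::printf("taken=%ld not_taken=%ld\n", (long)cells[0], (long)cells[1]);
  return 0;
}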
// Phi technique: // Phi technique:
// This is about passing live values from one basic block to the other. // This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one // In code generated with Java it is rather rare that more than one
@ -1305,8 +1302,6 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patc
LIR_Opr flag_val = new_register(T_INT); LIR_Opr flag_val = new_register(T_INT);
__ load(mark_active_flag_addr, flag_val); __ load(mark_active_flag_addr, flag_val);
LabelObj* start_store = new LabelObj();
LIR_PatchCode pre_val_patch_code = LIR_PatchCode pre_val_patch_code =
patch ? lir_patch_normal : lir_patch_none; patch ? lir_patch_normal : lir_patch_none;
@ -1757,7 +1752,7 @@ void LIRGenerator::do_Throw(Throw* x) {
#ifndef PRODUCT #ifndef PRODUCT
if (PrintC1Statistics) { if (PrintC1Statistics) {
increment_counter(Runtime1::throw_count_address()); increment_counter(Runtime1::throw_count_address(), T_INT);
} }
#endif #endif
@ -2191,12 +2186,41 @@ void LIRGenerator::do_Goto(Goto* x) {
ValueStack* state = x->state_before() ? x->state_before() : x->state(); ValueStack* state = x->state_before() ? x->state_before() : x->state();
// increment backedge counter if needed // increment backedge counter if needed
increment_backedge_counter(state_for(x, state)); CodeEmitInfo* info = state_for(x, state);
increment_backedge_counter(info, info->bci());
CodeEmitInfo* safepoint_info = state_for(x, state); CodeEmitInfo* safepoint_info = state_for(x, state);
__ safepoint(safepoint_poll_register(), safepoint_info); __ safepoint(safepoint_poll_register(), safepoint_info);
} }
// Gotos can be folded Ifs, handle this case.
if (x->should_profile()) {
ciMethod* method = x->profiled_method();
assert(method != NULL, "method should be set if branch is profiled");
ciMethodData* md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
ciProfileData* data = md->bci_to_data(x->profiled_bci());
assert(data != NULL, "must have profiling data");
int offset;
if (x->direction() == Goto::taken) {
assert(data->is_BranchData(), "need BranchData for two-way branches");
offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
} else if (x->direction() == Goto::not_taken) {
assert(data->is_BranchData(), "need BranchData for two-way branches");
offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
} else {
assert(data->is_JumpData(), "need JumpData for branches");
offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
}
LIR_Opr md_reg = new_register(T_OBJECT);
__ oop2reg(md->constant_encoding(), md_reg);
increment_counter(new LIR_Address(md_reg, offset,
NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
}
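Because an If can be folded into a Goto, the Goto still carries a branch direction, and the generator must choose between BranchData's two cells and plain JumpData's single taken cell. Schematically (illustrative enums and symbolic offsets, not the ci* classes):

#include <cstdio>
#include <cstdlib>

enum Direction { dir_taken, dir_not_taken, dir_none };
enum DataKind  { branch_data, jump_data };

// Returns which profile cell a Goto should bump, mirroring the
// selection above. The offsets are symbolic, not MDO byte offsets.
int slot_for(Direction d, DataKind kind) {
  switch (d) {
  case dir_taken:
    if (kind != branch_data) std::abort();   // "need BranchData"
    return 0;                                // BranchData::taken_offset()
  case dir_not_taken:
    if (kind != branch_data) std::abort();   // "need BranchData"
    return 1;                                // BranchData::not_taken_offset()
  default:                                   // a plain goto
    if (kind != jump_data) std::abort();     // "need JumpData"
    return 0;                                // JumpData::taken_offset()
  }
}

int main() {
  std::printf("plain goto bumps slot %d\n", slot_for(dir_none, jump_data));
  return 0;
}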
// emit phi-instruction move after safepoint since this simplifies // emit phi-instruction move after safepoint since this simplifies
// describing the state as the safepoint. // describing the state as the safepoint.
move_to_phi(x->state()); move_to_phi(x->state());
@ -2279,7 +2303,10 @@ void LIRGenerator::do_Base(Base* x) {
} }
// increment invocation counters if needed // increment invocation counters if needed
increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL)); if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL);
increment_invocation_counter(info);
}
// all blocks with a successor must end with an unconditional jump // all blocks with a successor must end with an unconditional jump
// to the successor even if they are consecutive // to the successor even if they are consecutive
@ -2613,12 +2640,12 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
} }
} }
void LIRGenerator::do_ProfileCall(ProfileCall* x) { void LIRGenerator::do_ProfileCall(ProfileCall* x) {
// Need recv in a temporary register so it interferes with the other temporaries // Need recv in a temporary register so it interferes with the other temporaries
LIR_Opr recv = LIR_OprFact::illegalOpr; LIR_Opr recv = LIR_OprFact::illegalOpr;
LIR_Opr mdo = new_register(T_OBJECT); LIR_Opr mdo = new_register(T_OBJECT);
LIR_Opr tmp = new_register(T_INT); // tmp is used to hold the counters on SPARC
LIR_Opr tmp = new_pointer_register();
if (x->recv() != NULL) { if (x->recv() != NULL) {
LIRItem value(x->recv(), this); LIRItem value(x->recv(), this);
value.load_item(); value.load_item();
@ -2628,14 +2655,69 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) {
__ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
} }
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
void LIRGenerator::do_ProfileCounter(ProfileCounter* x) { // We can safely ignore accessors here, since c2 will inline them anyway,
LIRItem mdo(x->mdo(), this); // accessors are also always mature.
mdo.load_item(); if (!x->inlinee()->is_accessor()) {
CodeEmitInfo* info = state_for(x, x->state(), true);
increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment()); // Increment the invocation counter; don't notify the runtime, because we don't inline loops.
increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
}
} }
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
int freq_log;
int level = compilation()->env()->comp_level();
if (level == CompLevel_limited_profile) {
freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
} else if (level == CompLevel_full_profile) {
freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
} else {
ShouldNotReachHere();
}
// Increment the appropriate invocation/backedge counter and notify the runtime.
increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, int frequency,
int bci, bool backedge, bool notify) {
assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
int level = _compilation->env()->comp_level();
assert(level > CompLevel_simple, "Shouldn't be here");
int offset = -1;
LIR_Opr counter_holder = new_register(T_OBJECT);
LIR_Opr meth;
if (level == CompLevel_limited_profile) {
offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
methodOopDesc::invocation_counter_offset());
__ oop2reg(method->constant_encoding(), counter_holder);
meth = counter_holder;
} else if (level == CompLevel_full_profile) {
offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
methodDataOopDesc::invocation_counter_offset());
__ oop2reg(method->method_data()->constant_encoding(), counter_holder);
meth = new_register(T_OBJECT);
__ oop2reg(method->constant_encoding(), meth);
} else {
ShouldNotReachHere();
}
LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
LIR_Opr result = new_register(T_INT);
__ load(counter, result);
__ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
__ store(result, counter);
if (notify) {
LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
__ logical_and(result, mask, result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
// The bci in info can point at the cmp emitted for an if; we want the if's own bci
CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
__ branch(lir_cond_equal, T_INT, overflow);
__ branch_destination(overflow->continuation());
}
}
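The notify path only calls into the runtime every 2^freq_log events: the counter is bumped by InvocationCounter::count_increment, masked with frequency << count_shift (where frequency is (1 << freq_log) - 1), and the CounterOverflowStub is entered only when the masked value is zero. A self-contained model of that test, with illustrative stand-ins for the InvocationCounter constants:

#include <cstdio>

// Illustrative stand-ins for InvocationCounter's layout: the low bits
// hold state, the count lives above them, so one event adds
// count_increment rather than 1.
static const int count_shift     = 3;
static const int count_increment = 1 << count_shift;

// Returns true when the runtime should be notified: once every
// 2^freq_log events, because frequency == (1 << freq_log) - 1 and the
// masked counter is zero exactly at those multiples.
bool bump_and_check(int* counter, int freq_log) {
  *counter += count_increment;
  int mask = ((1 << freq_log) - 1) << count_shift;
  return (*counter & mask) == 0;
}

int main() {
  int counter = 0, notifications = 0;
  for (int i = 0; i < 1024; i++) {
    if (bump_and_check(&counter, 4)) notifications++;
  }
  std::printf("notified %d times in 1024 events\n", notifications);  // 64
  return 0;
}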
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1); LIRItemList args(1);
@ -2748,28 +2830,3 @@ LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
return result; return result;
} }
void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
(method()->code_size() >= Tier1BytecodeLimit || backedge)) {
int limit = InvocationCounter::Tier1InvocationLimit;
int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
InvocationCounter::counter_offset());
if (backedge) {
limit = InvocationCounter::Tier1BackEdgeLimit;
offset = in_bytes(methodOopDesc::backedge_counter_offset() +
InvocationCounter::counter_offset());
}
LIR_Opr meth = new_register(T_OBJECT);
__ oop2reg(method()->constant_encoding(), meth);
LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
__ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
CodeStub* overflow = new CounterOverflowStub(info, info->bci());
__ branch(lir_cond_aboveEqual, T_INT, overflow);
__ branch_destination(overflow->continuation());
}
#endif
}

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -196,6 +196,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr load_constant(Constant* x); LIR_Opr load_constant(Constant* x);
LIR_Opr load_constant(LIR_Const* constant); LIR_Opr load_constant(LIR_Const* constant);
// Given an immediate value, return an operand usable in logical ops.
LIR_Opr load_immediate(int x, BasicType type);
void set_result(Value x, LIR_Opr opr) { void set_result(Value x, LIR_Opr opr) {
assert(opr->is_valid(), "must set to valid value"); assert(opr->is_valid(), "must set to valid value");
assert(x->operand()->is_illegal(), "operand should never change"); assert(x->operand()->is_illegal(), "operand should never change");
@ -213,8 +216,6 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr round_item(LIR_Opr opr); LIR_Opr round_item(LIR_Opr opr);
LIR_Opr force_to_spill(LIR_Opr value, BasicType t); LIR_Opr force_to_spill(LIR_Opr value, BasicType t);
void profile_branch(If* if_instr, If::Condition cond);
PhiResolverState& resolver_state() { return _resolver_state; } PhiResolverState& resolver_state() { return _resolver_state; }
void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val); void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
@ -285,12 +286,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args); void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
void increment_counter(address counter, int step = 1); void increment_counter(address counter, BasicType type, int step = 1);
void increment_counter(LIR_Address* addr, int step = 1); void increment_counter(LIR_Address* addr, int step = 1);
// increment a counter returning the incremented value
LIR_Opr increment_and_return_counter(LIR_Opr base, int offset, int increment);
// is_strictfp is only needed for mul and div (and only generates different code on i486) // is_strictfp is only needed for mul and div (and only generates different code on i486)
void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL); void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
// machine dependent. returns true if it emitted code for the multiply // machine dependent. returns true if it emitted code for the multiply
@ -347,9 +345,21 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
bool can_store_as_constant(Value i, BasicType type) const; bool can_store_as_constant(Value i, BasicType type) const;
LIR_Opr safepoint_poll_register(); LIR_Opr safepoint_poll_register();
void increment_invocation_counter(CodeEmitInfo* info, bool backedge = false);
void increment_backedge_counter(CodeEmitInfo* info) { void profile_branch(If* if_instr, If::Condition cond);
increment_invocation_counter(info, true); void increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, int frequency,
int bci, bool backedge, bool notify);
void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
void increment_invocation_counter(CodeEmitInfo *info) {
if (compilation()->count_invocations()) {
increment_event_counter(info, InvocationEntryBci, false);
}
}
void increment_backedge_counter(CodeEmitInfo* info, int bci) {
if (compilation()->count_backedges()) {
increment_event_counter(info, bci, true);
}
} }
CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
@ -503,7 +513,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x); virtual void do_ProfileCall (ProfileCall* x);
virtual void do_ProfileCounter (ProfileCounter* x); virtual void do_ProfileInvoke (ProfileInvoke* x);
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -430,7 +430,7 @@ public:
void do_UnsafePrefetchRead (UnsafePrefetchRead* x); void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
void do_ProfileCall (ProfileCall* x); void do_ProfileCall (ProfileCall* x);
void do_ProfileCounter (ProfileCounter* x); void do_ProfileInvoke (ProfileInvoke* x);
}; };
@ -598,7 +598,7 @@ void NullCheckVisitor::do_UnsafePutObject(UnsafePutObject* x) {}
void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {} void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {} void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); } void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_ProfileCounter (ProfileCounter* x) {} void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckEliminator::visit(Value* p) { void NullCheckEliminator::visit(Value* p) {

View file

@ -140,9 +140,7 @@ void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
case slow_subtype_check_id: case slow_subtype_check_id:
case fpu2long_stub_id: case fpu2long_stub_id:
case unwind_exception_id: case unwind_exception_id:
#ifndef TIERED case counter_overflow_id:
case counter_overflow_id: // Not generated outside the tiered world
#endif
#if defined(SPARC) || defined(PPC) #if defined(SPARC) || defined(PPC)
case handle_exception_nofpu_id: // Unused on sparc case handle_exception_nofpu_id: // Unused on sparc
#endif #endif
@ -322,31 +320,60 @@ JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
} }
JRT_END JRT_END
#ifdef TIERED // This is a helper to allow us to safepoint but allow the outer entry
JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci)) // to be safepoint free if we need to do an osr
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
nmethod* osr_nm = NULL;
methodHandle method(THREAD, m);
RegisterMap map(THREAD, false);
frame fr = THREAD->last_frame().sender(&map);
nmethod* nm = (nmethod*) fr.cb();
assert(nm != NULL && nm->is_nmethod(), "Sanity check");
methodHandle enclosing_method(THREAD, nm->method());
CompLevel level = (CompLevel)nm->comp_level();
int bci = InvocationEntryBci;
if (branch_bci != InvocationEntryBci) {
// Compute destination bci
address pc = method()->code_base() + branch_bci;
Bytecodes::Code branch = Bytecodes::code_at(pc, method());
int offset = 0;
switch (branch) {
case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
case Bytecodes::_if_icmple: case Bytecodes::_ifle:
case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
offset = (int16_t)Bytes::get_Java_u2(pc + 1);
break;
case Bytecodes::_goto_w:
offset = Bytes::get_Java_u4(pc + 1);
break;
default: ;
}
bci = branch_bci + offset;
}
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
return osr_nm;
}
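counter_overflow_helper recovers the destination bci by decoding the branch operand straight from the bytecode stream: a signed 16-bit big-endian offset for the short branch forms, 32 bits for goto_w, with the destination being branch_bci plus that offset. A minimal decoder under those assumptions (get_java_s2/s4 mirror Bytes::get_Java_u2/u4):

#include <stdint.h>
#include <cstdio>

// Java bytecode stores branch offsets big-endian, relative to the
// branch opcode's own bci.
int16_t get_java_s2(const uint8_t* p) {
  return (int16_t)((p[0] << 8) | p[1]);
}

int32_t get_java_s4(const uint8_t* p) {
  return ((int32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int dest_bci(const uint8_t* code, int branch_bci, bool wide /* goto_w */) {
  const uint8_t* pc = code + branch_bci;
  int offset = wide ? get_java_s4(pc + 1) : get_java_s2(pc + 1);
  return branch_bci + offset;
}

int main() {
  // 0: goto +3 (0xA7 is goto; operand 0x0003 is big-endian)
  uint8_t code[] = { 0xA7, 0x00, 0x03, 0x00 };
  std::printf("dest = %d\n", dest_bci(code, 0, false));  // 3
  return 0;
}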
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
nmethod* osr_nm;
JRT_BLOCK
osr_nm = counter_overflow_helper(thread, bci, method);
if (osr_nm != NULL) {
RegisterMap map(thread, false); RegisterMap map(thread, false);
frame fr = thread->last_frame().sender(&map); frame fr = thread->last_frame().sender(&map);
nmethod* nm = (nmethod*) fr.cb(); VM_DeoptimizeFrame deopt(thread, fr.id());
assert(nm!= NULL && nm->is_nmethod(), "what?"); VMThread::execute(&deopt);
methodHandle method(thread, nm->method());
if (bci == 0) {
// invocation counter overflow
if (!Tier1CountOnly) {
CompilationPolicy::policy()->method_invocation_event(method, CHECK);
} else {
method()->invocation_counter()->reset();
}
} else {
if (!Tier1CountOnly) {
// We have a bci but not the destination bci; besides, a backedge
// event is more for OSR, which we don't want here.
CompilationPolicy::policy()->method_invocation_event(method, CHECK);
} else {
method()->backedge_counter()->reset();
}
} }
JRT_BLOCK_END
return NULL;
JRT_END JRT_END
#endif // TIERED
extern void vm_exit(int code); extern void vm_exit(int code);
@ -898,7 +925,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
assert(n_copy->data() == 0 || assert(n_copy->data() == 0 ||
n_copy->data() == (int)Universe::non_oop_word(), n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value"); "illegal init value");
assert(load_klass() != NULL, "klass not set"); assert(load_klass() != NULL, "klass not set");
n_copy->set_data((intx) (load_klass())); n_copy->set_data((intx) (load_klass()));

View file

@ -123,9 +123,7 @@ class Runtime1: public AllStatic {
static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length); static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length);
static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims); static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims);
#ifdef TIERED static address counter_overflow(JavaThread* thread, int bci, methodOopDesc* method);
static void counter_overflow(JavaThread* thread, int bci);
#endif // TIERED
static void unimplemented_entry (JavaThread* thread, StubID id); static void unimplemented_entry (JavaThread* thread, StubID id);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -185,11 +185,11 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ } void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
void do_RoundFP (RoundFP* x) { /* nothing to do */ } void do_RoundFP (RoundFP* x) { /* nothing to do */ }
void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ } void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ }
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ } void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ } void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ } void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ } void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }
void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ }
}; };

View file

@ -25,12 +25,6 @@
// //
// Defines all global flags used by the client compiler. // Defines all global flags used by the client compiler.
// //
#ifndef TIERED
#define NOT_TIERED(x) x
#else
#define NOT_TIERED(x)
#endif
#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \ #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
\ \
/* Printing */ \ /* Printing */ \
@ -279,41 +273,29 @@
product_pd(intx, SafepointPollOffset, \ product_pd(intx, SafepointPollOffset, \
"Offset added to polling address (Intel only)") \ "Offset added to polling address (Intel only)") \
\ \
product(bool, UseNewFeature1, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature2, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature3, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
product(bool, UseNewFeature4, false, \
"Enable new feature for testing. This is a dummy flag.") \
\
develop(bool, ComputeExactFPURegisterUsage, true, \ develop(bool, ComputeExactFPURegisterUsage, true, \
"Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \ "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
\ \
product(bool, Tier1ProfileCalls, true, \ product(bool, C1ProfileCalls, true, \
"Profile calls when generating code for updating MDOs") \ "Profile calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileVirtualCalls, true, \ product(bool, C1ProfileVirtualCalls, true, \
"Profile virtual calls when generating code for updating MDOs") \ "Profile virtual calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileInlinedCalls, true, \ product(bool, C1ProfileInlinedCalls, true, \
"Profile inlined calls when generating code for updating MDOs") \ "Profile inlined calls when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileBranches, true, \ product(bool, C1ProfileBranches, true, \
"Profile branches when generating code for updating MDOs") \ "Profile branches when generating code for updating MDOs") \
\ \
product(bool, Tier1ProfileCheckcasts, true, \ product(bool, C1ProfileCheckcasts, true, \
"Profile checkcasts when generating code for updating MDOs") \ "Profile checkcasts when generating code for updating MDOs") \
\ \
product(bool, Tier1OptimizeVirtualCallProfiling, true, \ product(bool, C1OptimizeVirtualCallProfiling, true, \
"Use CHA and exact type results at call sites when updating MDOs") \ "Use CHA and exact type results at call sites when updating MDOs")\
\ \
develop(bool, Tier1CountOnly, false, \ product(bool, C1UpdateMethodData, trueInTiered, \
"Don't schedule tier 2 compiles. Enter VM only") \ "Update methodDataOops in Tier1-generated code") \
\ \
develop(bool, PrintCFGToFile, false, \ develop(bool, PrintCFGToFile, false, \
"print control flow graph to a separate file during compilation") \ "print control flow graph to a separate file during compilation") \

View file

@ -956,7 +956,7 @@ void ciEnv::register_method(ciMethod* target,
if (task() != NULL) task()->set_code(nm); if (task() != NULL) task()->set_code(nm);
if (entry_bci == InvocationEntryBci) { if (entry_bci == InvocationEntryBci) {
#ifdef TIERED if (TieredCompilation) {
// If there is an old version we're done with it // If there is an old version we're done with it
nmethod* old = method->code(); nmethod* old = method->code();
if (TraceMethodReplacement && old != NULL) { if (TraceMethodReplacement && old != NULL) {
@ -967,7 +967,7 @@ void ciEnv::register_method(ciMethod* target,
if (old != NULL ) { if (old != NULL ) {
old->make_not_entrant(); old->make_not_entrant();
} }
#endif // TIERED }
if (TraceNMethodInstalls ) { if (TraceNMethodInstalls ) {
ResourceMark rm; ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string(); char *method_name = method->name_and_sig_as_C_string();
@ -1011,7 +1011,7 @@ ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciEnv::comp_level // ciEnv::comp_level
int ciEnv::comp_level() { int ciEnv::comp_level() {
if (task() == NULL) return CompLevel_full_optimization; if (task() == NULL) return CompLevel_highest_tier;
return task()->comp_level(); return task()->comp_level();
} }

View file

@ -49,7 +49,8 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_handler_count = h_m()->exception_table()->length() / 4; _handler_count = h_m()->exception_table()->length() / 4;
_uses_monitors = h_m()->access_flags().has_monitor_bytecodes(); _uses_monitors = h_m()->access_flags().has_monitor_bytecodes();
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching(); _balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
_is_compilable = !h_m()->is_not_compilable(); _is_c1_compilable = !h_m()->is_not_c1_compilable();
_is_c2_compilable = !h_m()->is_not_c2_compilable();
// Lazy fields, filled in on demand. Require allocation. // Lazy fields, filled in on demand. Require allocation.
_code = NULL; _code = NULL;
_exception_handlers = NULL; _exception_handlers = NULL;
@ -61,11 +62,12 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
#endif // COMPILER2 || SHARK #endif // COMPILER2 || SHARK
ciEnv *env = CURRENT_ENV; ciEnv *env = CURRENT_ENV;
if (env->jvmti_can_hotswap_or_post_breakpoint() && _is_compilable) { if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
// 6328518 check hotswap conditions under the right lock. // 6328518 check hotswap conditions under the right lock.
MutexLocker locker(Compile_lock); MutexLocker locker(Compile_lock);
if (Dependencies::check_evol_method(h_m()) != NULL) { if (Dependencies::check_evol_method(h_m()) != NULL) {
_is_compilable = false; _is_c1_compilable = false;
_is_c2_compilable = false;
} }
} else { } else {
CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
@ -93,7 +95,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_signature = new (env->arena()) ciSignature(_holder, sig_symbol); _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
_method_data = NULL; _method_data = NULL;
// Take a snapshot of these values, so they will be commensurate with the MDO. // Take a snapshot of these values, so they will be commensurate with the MDO.
if (ProfileInterpreter) { if (ProfileInterpreter || TieredCompilation) {
int invcnt = h_m()->interpreter_invocation_count(); int invcnt = h_m()->interpreter_invocation_count();
// if the value overflowed report it as max int // if the value overflowed report it as max int
_interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ; _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
@ -437,11 +439,26 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
// In addition, virtual call sites have receiver type information // In addition, virtual call sites have receiver type information
int receivers_count_total = 0; int receivers_count_total = 0;
int morphism = 0; int morphism = 0;
// Precompute morphism for the possible fixup
for (uint i = 0; i < call->row_limit(); i++) { for (uint i = 0; i < call->row_limit(); i++) {
ciKlass* receiver = call->receiver(i); ciKlass* receiver = call->receiver(i);
if (receiver == NULL) continue; if (receiver == NULL) continue;
morphism += 1; morphism++;
int rcount = call->receiver_count(i); }
int epsilon = 0;
if (TieredCompilation && ProfileInterpreter) {
// Interpreter and C1 treat final and special invokes differently.
// C1 will record a type, whereas the interpreter will just
// increment the count. Detect this case.
if (morphism == 1 && count > 0) {
epsilon = count;
count = 0;
}
}
for (uint i = 0; i < call->row_limit(); i++) {
ciKlass* receiver = call->receiver(i);
if (receiver == NULL) continue;
int rcount = call->receiver_count(i) + epsilon;
if (rcount == 0) rcount = 1; // Should be valid value if (rcount == 0) rcount = 1; // Should be valid value
receivers_count_total += rcount; receivers_count_total += rcount;
// Add the receiver to result data. // Add the receiver to result data.
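The epsilon fixup above compensates for a tiered-specific skew: on a monomorphic final or special invoke the interpreter only bumps the bare count while C1 records the receiver type, so the bare count is folded into the single receiver row. A sketch of that redistribution over a simplified row layout:

#include <cstdio>

// Simplified receiver-type profile: (klass, count) rows plus the bare
// "no type recorded" count, loosely modeled on VirtualCallData.
struct Row { const char* klass; int count; };

// Mirrors the TieredCompilation && ProfileInterpreter case above: one
// receiver row plus a nonzero bare count means interpreter and C1
// disagreed on a final/special invoke, so fold the bare count in.
void fixup(Row* rows, int nrows, int* bare_count, int morphism) {
  if (morphism == 1 && *bare_count > 0) {
    for (int i = 0; i < nrows; i++) {
      if (rows[i].klass != NULL) rows[i].count += *bare_count;
    }
    *bare_count = 0;
  }
}

int main() {
  Row rows[2] = { { "java/lang/String", 40 }, { NULL, 0 } };
  int bare = 10;
  fixup(rows, 2, &bare, 1 /* morphism */);
  std::printf("row=%d bare=%d\n", rows[0].count, bare);  // row=50 bare=0
  return 0;
}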
@ -687,10 +704,17 @@ int ciMethod::interpreter_call_site_count(int bci) {
// invocation counts in methods. // invocation counts in methods.
int ciMethod::scale_count(int count, float prof_factor) { int ciMethod::scale_count(int count, float prof_factor) {
if (count > 0 && method_data() != NULL) { if (count > 0 && method_data() != NULL) {
int counter_life;
int method_life = interpreter_invocation_count();
if (TieredCompilation) {
// In tiered the MDO's life is measured directly, so just use the snapshotted counters
counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());
} else {
int current_mileage = method_data()->current_mileage(); int current_mileage = method_data()->current_mileage();
int creation_mileage = method_data()->creation_mileage(); int creation_mileage = method_data()->creation_mileage();
int counter_life = current_mileage - creation_mileage; counter_life = current_mileage - creation_mileage;
int method_life = interpreter_invocation_count(); }
// counter_life due to backedge_counter could be > method_life // counter_life due to backedge_counter could be > method_life
if (counter_life > method_life) if (counter_life > method_life)
counter_life = method_life; counter_life = method_life;
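scale_count then stretches the observed count over the method's whole life and applies the caller's prof_factor: if the MDO only covers 2,000 of 10,000 invocations, a raw site count of 50 stands for roughly 250 events. A hedged sketch of the arithmetic (HotSpot's exact rounding may differ):

#include <cstdio>

// Hedged sketch of the scaling in ciMethod::scale_count: stretch the
// observed count over the method's whole life, then apply the
// caller-supplied profile factor.
int scale_count(int count, int counter_life, int method_life, float prof_factor) {
  if (counter_life > method_life) counter_life = method_life;
  if (counter_life <= 0) return count;
  double scaled = (double)count * prof_factor * method_life / counter_life;
  int result = (int)(scaled + 0.5);
  return result > 0 ? result : 1;
}

int main() {
  // The MDO observed 2,000 of 10,000 invocations, so a raw site count
  // of 50 represents roughly 250 events over the method's life.
  std::printf("%d\n", scale_count(50, 2000, 10000, 1.0f));  // 250
  return 0;
}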
@ -778,7 +802,8 @@ ciMethodData* ciMethod::method_data() {
Thread* my_thread = JavaThread::current(); Thread* my_thread = JavaThread::current();
methodHandle h_m(my_thread, get_methodOop()); methodHandle h_m(my_thread, get_methodOop());
if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) { // Create an MDO for the inlinee
if (TieredCompilation && is_c1_compile(env->comp_level())) {
build_method_data(h_m); build_method_data(h_m);
} }
@ -885,7 +910,11 @@ bool ciMethod::has_option(const char* option) {
// Have previous compilations of this method succeeded? // Have previous compilations of this method succeeded?
bool ciMethod::can_be_compiled() { bool ciMethod::can_be_compiled() {
check_is_loaded(); check_is_loaded();
return _is_compilable; ciEnv* env = CURRENT_ENV;
if (is_c1_compile(env->comp_level())) {
return _is_c1_compilable;
}
return _is_c2_compilable;
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -895,8 +924,13 @@ bool ciMethod::can_be_compiled() {
void ciMethod::set_not_compilable() { void ciMethod::set_not_compilable() {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;
_is_compilable = false; ciEnv* env = CURRENT_ENV;
get_methodOop()->set_not_compilable(); if (is_c1_compile(env->comp_level())) {
_is_c1_compilable = false;
} else {
_is_c2_compilable = false;
}
get_methodOop()->set_not_compilable(env->comp_level());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -910,7 +944,8 @@ void ciMethod::set_not_compilable() {
bool ciMethod::can_be_osr_compiled(int entry_bci) { bool ciMethod::can_be_osr_compiled(int entry_bci) {
check_is_loaded(); check_is_loaded();
VM_ENTRY_MARK; VM_ENTRY_MARK;
return !get_methodOop()->access_flags().is_not_osr_compilable(); ciEnv* env = CURRENT_ENV;
return !get_methodOop()->is_not_osr_compilable(env->comp_level());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -920,6 +955,14 @@ bool ciMethod::has_compiled_code() {
return get_methodOop()->code() != NULL; return get_methodOop()->code() != NULL;
} }
int ciMethod::comp_level() {
check_is_loaded();
VM_ENTRY_MARK;
nmethod* nm = get_methodOop()->code();
if (nm != NULL) return nm->comp_level();
return 0;
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciMethod::instructions_size // ciMethod::instructions_size
// //
@ -928,18 +971,13 @@ bool ciMethod::has_compiled_code() {
// junk like exception handler, stubs, and constant table, which are // junk like exception handler, stubs, and constant table, which are
// not highly relevant to an inlined method. So we use the more // not highly relevant to an inlined method. So we use the more
// specific accessor nmethod::insts_size. // specific accessor nmethod::insts_size.
int ciMethod::instructions_size() { int ciMethod::instructions_size(int comp_level) {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
nmethod* code = get_methodOop()->code(); nmethod* code = get_methodOop()->code();
// if there's no compiled code or the code was produced by the if (code != NULL && (comp_level == CompLevel_any || comp_level == code->comp_level())) {
// tier1 profiler return 0 for the code size. This should return code->code_end() - code->verified_entry_point();
// probably be based on the compilation level of the nmethod but
// that currently isn't properly recorded.
if (code == NULL ||
(TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) {
return 0;
} }
return code->insts_end() - code->verified_entry_point(); return 0;
) )
} }

View file

@ -61,7 +61,8 @@ class ciMethod : public ciObject {
bool _uses_monitors; bool _uses_monitors;
bool _balanced_monitors; bool _balanced_monitors;
bool _is_compilable; bool _is_c1_compilable;
bool _is_c2_compilable;
bool _can_be_statically_bound; bool _can_be_statically_bound;
// Lazy fields, filled in on demand // Lazy fields, filled in on demand
@ -127,6 +128,8 @@ class ciMethod : public ciObject {
int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; } int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; }
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
int comp_level();
Bytecodes::Code java_code_at_bci(int bci) { Bytecodes::Code java_code_at_bci(int bci) {
address bcp = code() + bci; address bcp = code() + bci;
return Bytecodes::java_code_at(bcp); return Bytecodes::java_code_at(bcp);
@ -209,7 +212,7 @@ class ciMethod : public ciObject {
bool can_be_osr_compiled(int entry_bci); bool can_be_osr_compiled(int entry_bci);
void set_not_compilable(); void set_not_compilable();
bool has_compiled_code(); bool has_compiled_code();
int instructions_size(); int instructions_size(int comp_level = CompLevel_any);
void log_nmethod_identity(xmlStream* log); void log_nmethod_identity(xmlStream* log);
bool is_not_reached(int bci); bool is_not_reached(int bci);
bool was_executed_more_than(int times); bool was_executed_more_than(int times);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,8 @@ ciMethodData::ciMethodData(methodDataHandle h_md) : ciObject(h_md) {
_data_size = 0; _data_size = 0;
_extra_data_size = 0; _extra_data_size = 0;
_current_mileage = 0; _current_mileage = 0;
_invocation_counter = 0;
_backedge_counter = 0;
_state = empty_state; _state = empty_state;
_saw_free_extra_data = false; _saw_free_extra_data = false;
// Set an initial hint. Don't use set_hint_di() because // Set an initial hint. Don't use set_hint_di() because
@ -56,6 +58,8 @@ ciMethodData::ciMethodData() : ciObject() {
_data_size = 0; _data_size = 0;
_extra_data_size = 0; _extra_data_size = 0;
_current_mileage = 0; _current_mileage = 0;
_invocation_counter = 0;
_backedge_counter = 0;
_state = empty_state; _state = empty_state;
_saw_free_extra_data = false; _saw_free_extra_data = false;
// Set an initial hint. Don't use set_hint_di() because // Set an initial hint. Don't use set_hint_di() because
@ -99,6 +103,8 @@ void ciMethodData::load_data() {
} }
// Note: Extra data are all BitData, and do not need translation. // Note: Extra data are all BitData, and do not need translation.
_current_mileage = methodDataOopDesc::mileage_of(mdo->method()); _current_mileage = methodDataOopDesc::mileage_of(mdo->method());
_invocation_counter = mdo->invocation_count();
_backedge_counter = mdo->backedge_count();
_state = mdo->is_mature()? mature_state: immature_state; _state = mdo->is_mature()? mature_state: immature_state;
_eflags = mdo->eflags(); _eflags = mdo->eflags();
@ -253,6 +259,23 @@ void ciMethodData::update_escape_info() {
} }
} }
void ciMethodData::set_compilation_stats(short loops, short blocks) {
VM_ENTRY_MARK;
methodDataOop mdo = get_methodDataOop();
if (mdo != NULL) {
mdo->set_num_loops(loops);
mdo->set_num_blocks(blocks);
}
}
void ciMethodData::set_would_profile(bool p) {
VM_ENTRY_MARK;
methodDataOop mdo = get_methodDataOop();
if (mdo != NULL) {
mdo->set_would_profile(p);
}
}
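set_would_profile and set_compilation_stats push C1's view of a method back into the methodDataOop so the tiered policy can later decide whether the method is trivial (no loops, few blocks, nothing worth profiling) and can skip the full-profiling tier. A hypothetical predicate in that spirit; the field names mimic the new accessors, but the thresholds are invented for illustration:

#include <cstdio>

// Hypothetical view of the new methodDataOop fields; the thresholds
// below are invented for illustration, not HotSpot's policy values.
struct MethodDataView {
  bool  would_profile;
  short num_loops;
  short num_blocks;
};

// A method that C1 saw no reason to profile, with no loops and only a
// handful of blocks, is a candidate to skip the full-profile tier.
bool looks_trivial(const MethodDataView& md) {
  return !md.would_profile && md.num_loops == 0 && md.num_blocks <= 4;
}

int main() {
  MethodDataView getter = { false, 0, 1 };
  std::printf("trivial: %d\n", looks_trivial(getter) ? 1 : 0);
  return 0;
}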
bool ciMethodData::has_escape_info() { bool ciMethodData::has_escape_info() {
return eflag_set(methodDataOopDesc::estimated); return eflag_set(methodDataOopDesc::estimated);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -162,6 +162,12 @@ private:
// Maturity of the oop when the snapshot is taken. // Maturity of the oop when the snapshot is taken.
int _current_mileage; int _current_mileage;
// These counters hold the age of the MDO in tiered compilation. In tiered we can have the
// same method running at different compilation levels concurrently, so in order to measure
// its maturity precisely we need separate counters.
int _invocation_counter;
int _backedge_counter;
// Coherent snapshot of original header. // Coherent snapshot of original header.
methodDataOopDesc _orig; methodDataOopDesc _orig;
@ -223,6 +229,16 @@ public:
int creation_mileage() { return _orig.creation_mileage(); } int creation_mileage() { return _orig.creation_mileage(); }
int current_mileage() { return _current_mileage; } int current_mileage() { return _current_mileage; }
int invocation_count() { return _invocation_counter; }
int backedge_count() { return _backedge_counter; }
// Transfer information about the method to methodDataOop.
// would_profile means we would like to profile this method,
// meaning it's not trivial.
void set_would_profile(bool p);
// Also set the number of loops and blocks in the method.
// Again, this is used to determine if a method is trivial.
void set_compilation_stats(short loops, short blocks);
void load_data(); void load_data();
// Convert a dp (data pointer) to a di (data index). // Convert a dp (data pointer) to a di (data index).

View file

@ -1292,7 +1292,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
// Iterate over all methods in class // Iterate over all methods in class
for (int n = 0; n < k->methods()->length(); n++) { for (int n = 0; n < k->methods()->length(); n++) {
methodHandle m (THREAD, methodOop(k->methods()->obj_at(n))); methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
if (CompilationPolicy::canBeCompiled(m)) { if (CompilationPolicy::can_be_compiled(m)) {
if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) { if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
// Give sweeper a chance to keep up with CTW // Give sweeper a chance to keep up with CTW
@ -1301,7 +1301,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
_codecache_sweep_counter = 0; _codecache_sweep_counter = 0;
} }
// Force compilation // Force compilation
CompileBroker::compile_method(m, InvocationEntryBci, CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
methodHandle(), 0, "CTW", THREAD); methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
@ -1315,7 +1315,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
nm->make_not_entrant(); nm->make_not_entrant();
m->clear_code(); m->clear_code();
} }
CompileBroker::compile_method(m, InvocationEntryBci, CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
methodHandle(), 0, "CTW", THREAD); methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;

View file

@ -2424,12 +2424,15 @@ int java_dyn_MethodType::ptype_count(oop mt) {
int java_dyn_MethodTypeForm::_vmslots_offset; int java_dyn_MethodTypeForm::_vmslots_offset;
int java_dyn_MethodTypeForm::_erasedType_offset; int java_dyn_MethodTypeForm::_erasedType_offset;
int java_dyn_MethodTypeForm::_genericInvoker_offset;
void java_dyn_MethodTypeForm::compute_offsets() { void java_dyn_MethodTypeForm::compute_offsets() {
klassOop k = SystemDictionary::MethodTypeForm_klass(); klassOop k = SystemDictionary::MethodTypeForm_klass();
if (k != NULL) { if (k != NULL) {
compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true);
compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true); compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value
} }
} }
@ -2443,6 +2446,11 @@ oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
return mtform->obj_field(_erasedType_offset); return mtform->obj_field(_erasedType_offset);
} }
oop java_dyn_MethodTypeForm::genericInvoker(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
return mtform->obj_field(_genericInvoker_offset);
}
// Support for java_dyn_CallSite // Support for java_dyn_CallSite

View file

@ -1048,6 +1048,7 @@ class java_dyn_MethodTypeForm: AllStatic {
private: private:
static int _vmslots_offset; // number of argument slots needed static int _vmslots_offset; // number of argument slots needed
static int _erasedType_offset; // erasedType = canonical MethodType static int _erasedType_offset; // erasedType = canonical MethodType
static int _genericInvoker_offset; // genericInvoker = adapter for invokeGeneric
static void compute_offsets(); static void compute_offsets();
@@ -1055,10 +1056,12 @@ class java_dyn_MethodTypeForm: AllStatic {
// Accessors // Accessors
static int vmslots(oop mtform); static int vmslots(oop mtform);
static oop erasedType(oop mtform); static oop erasedType(oop mtform);
static oop genericInvoker(oop mtform);
// Accessors for code generation: // Accessors for code generation:
static int vmslots_offset_in_bytes() { return _vmslots_offset; } static int vmslots_offset_in_bytes() { return _vmslots_offset; }
static int erasedType_offset_in_bytes() { return _erasedType_offset; } static int erasedType_offset_in_bytes() { return _erasedType_offset; }
static int genericInvoker_offset_in_bytes() { return _genericInvoker_offset; }
}; };

View file

@@ -2361,8 +2361,11 @@ methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
// Must create lots of stuff here, but outside of the SystemDictionary lock. // Must create lots of stuff here, but outside of the SystemDictionary lock.
if (THREAD->is_Compiler_thread()) if (THREAD->is_Compiler_thread())
return NULL; // do not attempt from within compiler return NULL; // do not attempt from within compiler
bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name));
bool found_on_bcp = false; bool found_on_bcp = false;
Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL); Handle mt = find_method_handle_type(signature(), accessing_klass,
for_invokeGeneric,
found_on_bcp, CHECK_NULL);
KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass(); KlassHandle mh_klass = SystemDictionaryHandles::MethodHandle_klass();
methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature, methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
mt, CHECK_NULL); mt, CHECK_NULL);
@@ -2393,6 +2396,7 @@ methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
// consistent with this loader. // consistent with this loader.
Handle SystemDictionary::find_method_handle_type(symbolHandle signature, Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
KlassHandle accessing_klass, KlassHandle accessing_klass,
bool for_invokeGeneric,
bool& return_bcp_flag, bool& return_bcp_flag,
TRAPS) { TRAPS) {
Handle class_loader, protection_domain; Handle class_loader, protection_domain;
@@ -2448,10 +2452,26 @@ Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
vmSymbols::findMethodHandleType_name(), vmSymbols::findMethodHandleType_name(),
vmSymbols::findMethodHandleType_signature(), vmSymbols::findMethodHandleType_signature(),
&args, CHECK_(empty)); &args, CHECK_(empty));
Handle method_type(THREAD, (oop) result.get_jobject());
if (for_invokeGeneric) {
// call sun.dyn.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
JavaCallArguments args(Handle(THREAD, method_type()));
JavaValue no_result(T_VOID);
JavaCalls::call_static(&no_result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::notifyGenericMethodType_name(),
vmSymbols::notifyGenericMethodType_signature(),
&args, THREAD);
if (HAS_PENDING_EXCEPTION) {
// If the notification fails, just kill it.
CLEAR_PENDING_EXCEPTION;
}
}
// report back to the caller with the MethodType and the "on_bcp" flag // report back to the caller with the MethodType and the "on_bcp" flag
return_bcp_flag = is_on_bcp; return_bcp_flag = is_on_bcp;
return Handle(THREAD, (oop) result.get_jobject()); return method_type;
} }
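The notifyGenericMethodType up-call above is deliberately best-effort: the findMethodHandleType result is captured in a Handle first, the void notification is invoked with THREAD rather than a CHECK_ macro, and any pending exception is cleared so the lookup still returns the MethodType. A standalone model of that pattern, with ordinary C++ exceptions standing in for the VM's pending-exception mechanism:

#include <exception>
#include <functional>

// Model only: upcall() stands in for JavaCalls::call_static, and the catch
// block stands in for CLEAR_PENDING_EXCEPTION; a failed notification must
// not make the surrounding resolution fail.
template <typename T>
T resolve_with_notification(T result, const std::function<void(const T&)>& upcall) {
  try {
    upcall(result);
  } catch (const std::exception&) {
    // Swallow: the notification is advisory; the resolved value is still good.
  }
  return result;
}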
// Ask Java code to find or construct a method handle constant. // Ask Java code to find or construct a method handle constant.
@@ -2466,7 +2486,7 @@ Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
Handle type; Handle type;
if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
bool ignore_is_on_bcp = false; bool ignore_is_on_bcp = false;
type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty)); type = find_method_handle_type(signature, caller, false, ignore_is_on_bcp, CHECK_(empty));
} else { } else {
SignatureStream ss(signature(), false); SignatureStream ss(signature(), false);
if (!ss.is_done()) { if (!ss.is_done()) {

View file

@@ -471,6 +471,7 @@ public:
// ask Java to compute a java.dyn.MethodType object for a given signature // ask Java to compute a java.dyn.MethodType object for a given signature
static Handle find_method_handle_type(symbolHandle signature, static Handle find_method_handle_type(symbolHandle signature,
KlassHandle accessing_klass, KlassHandle accessing_klass,
bool for_invokeGeneric,
bool& return_bcp_flag, bool& return_bcp_flag,
TRAPS); TRAPS);
// ask Java to compute a java.dyn.MethodHandle object for a given CP entry // ask Java to compute a java.dyn.MethodHandle object for a given CP entry

View file

@@ -246,6 +246,8 @@
/* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \ /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
template(notifyGenericMethodType_name, "notifyGenericMethodType") \
template(notifyGenericMethodType_signature, "(Ljava/dyn/MethodType;)V") \
template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \ template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \ template(makeDynamicCallSite_name, "makeDynamicCallSite") \
@@ -345,6 +347,7 @@
template(ptypes_name, "ptypes") \ template(ptypes_name, "ptypes") \
template(form_name, "form") \ template(form_name, "form") \
template(erasedType_name, "erasedType") \ template(erasedType_name, "erasedType") \
template(genericInvoker_name, "genericInvoker") \
template(append_name, "append") \ template(append_name, "append") \
\ \
/* non-intrinsic name/signature pairs: */ \ /* non-intrinsic name/signature pairs: */ \

View file

@@ -867,9 +867,9 @@ void nmethod::log_identity(xmlStream* log) const {
if (compiler() != NULL) { if (compiler() != NULL) {
log->print(" compiler='%s'", compiler()->name()); log->print(" compiler='%s'", compiler()->name());
} }
#ifdef TIERED if (TieredCompilation) {
log->print(" level='%d'", comp_level()); log->print(" level='%d'", comp_level());
#endif // TIERED }
} }
@@ -908,35 +908,73 @@ void nmethod::log_new_nmethod() const {
#undef LOG_OFFSET #undef LOG_OFFSET
void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
bool is_synchronized = false, has_xhandler = false, is_native = false;
int code_size = -1;
if (method != NULL) {
is_synchronized = method->is_synchronized();
has_xhandler = method->has_exception_handler();
is_native = method->is_native();
code_size = method->code_size();
}
// print compilation number
st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
// print method attributes
const bool is_osr = bci != InvocationEntryBci;
const char blocking_char = is_blocking ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = is_synchronized ? 's' : ' ';
const char exception_char = has_xhandler ? '!' : ' ';
const char native_char = is_native ? 'n' : ' ';
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
if (TieredCompilation) {
st->print("%d ", comp_level);
}
// print optional title
bool do_nl = false;
if (title != NULL) {
int tlen = (int) strlen(title);
if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
st->print("%.*s", tlen, title);
} else {
do_nl = true;
}
// print method name string if given
if (method_name != NULL) {
st->print("%s", method_name);
} else {
// otherwise ask the method to print itself
if (method != NULL && !Universe::heap()->is_gc_active()) {
method->print_short_name(st);
} else {
st->print("(method)");
}
}
if (method != NULL) {
// print osr_bci if any
if (is_osr) st->print(" @ %d", bci);
// print method size
st->print(" (%d bytes)", code_size);
}
if (do_nl) st->cr();
}
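For reference, a line produced by this helper starts with the timestamp in milliseconds and the compile id, then five one-character attribute columns, then the tier when TieredCompilation is on, e.g. "  71042 412 %s!bn 3 ...". A compilable sketch of just the flag-column logic (parameter names are illustrative):

#include <cstdio>

// Mirrors the five attribute characters emitted by nmethod::print_compilation:
// '%' = OSR compile, 's' = synchronized, '!' = has exception handlers,
// 'b' = blocking compile, 'n' = native method.
void print_attribute_columns(bool is_osr, bool is_synchronized,
                             bool has_xhandler, bool is_blocking,
                             bool is_native) {
  std::printf("%c%c%c%c%c ",
              is_osr          ? '%' : ' ',
              is_synchronized ? 's' : ' ',
              has_xhandler    ? '!' : ' ',
              is_blocking     ? 'b' : ' ',
              is_native       ? 'n' : ' ');
}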
// Print out more verbose output usually for a newly created nmethod. // Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* title) const { void nmethod::print_on(outputStream* st, const char* title) const {
if (st != NULL) { if (st != NULL) {
ttyLocker ttyl; ttyLocker ttyl;
// Print a little tag line that looks like +PrintCompilation output: print_compilation(st, /*method_name*/NULL, title,
int tlen = (int) strlen(title); method(), /*is_blocking*/false,
bool do_nl = false;
if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
st->print("%3d%c %.*s",
compile_id(), compile_id(),
is_osr_method() ? '%' : is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
method() != NULL && comp_level());
is_native_method() ? 'n' : ' ',
tlen, title);
#ifdef TIERED
st->print(" (%d) ", comp_level());
#endif // TIERED
if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
if (Universe::heap()->is_gc_active() && method() != NULL) {
st->print("(method)");
} else if (method() != NULL) {
method()->print_short_name(st);
if (is_osr_method())
st->print(" @ %d", osr_entry_bci());
if (method()->code_size() > 0)
st->print(" (%d bytes)", method()->code_size());
}
if (do_nl) st->cr();
} }
} }
@@ -1137,6 +1175,7 @@ bool nmethod::can_not_entrant_be_converted() {
} }
void nmethod::inc_decompile_count() { void nmethod::inc_decompile_count() {
if (!is_compiled_by_c2()) return;
// Could be gated by ProfileTraps, but do not bother... // Could be gated by ProfileTraps, but do not bother...
methodOop m = method(); methodOop m = method();
if (m == NULL) return; if (m == NULL) return;

View file

@@ -599,6 +599,10 @@ public:
void verify_scopes(); void verify_scopes();
void verify_interrupt_point(address interrupt_point); void verify_interrupt_point(address interrupt_point);
// print compilation helper
static void print_compilation(outputStream *st, const char *method_name, const char *title,
methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
// printing support // printing support
void print() const; void print() const;
void print_code(); void print_code();

View file

@@ -123,20 +123,12 @@ int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0; int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0; int CompileBroker::_sum_nmethod_code_size = 0;
CompileQueue* CompileBroker::_method_queue = NULL; CompileQueue* CompileBroker::_c2_method_queue = NULL;
CompileQueue* CompileBroker::_c1_method_queue = NULL;
CompileTask* CompileBroker::_task_free_list = NULL; CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL; GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
// CompileTaskWrapper
//
// Assign this task to the current thread. Deallocate the task
// when the compilation is complete.
class CompileTaskWrapper : StackObj {
public:
CompileTaskWrapper(CompileTask* task);
~CompileTaskWrapper();
};
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) { CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
CompilerThread* thread = CompilerThread::current(); CompilerThread* thread = CompilerThread::current();
@@ -246,6 +238,12 @@ void CompileTask::print() {
bool_to_str(_is_complete), bool_to_str(_is_success)); bool_to_str(_is_complete), bool_to_str(_is_success));
} }
void CompileTask::print_compilation(outputStream *st, methodOop method, char* method_name) {
nmethod::print_compilation(st, method_name, /*title*/ NULL, method,
is_blocking(), compile_id(), osr_bci(), comp_level());
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileTask::print_line_on_error // CompileTask::print_line_on_error
// //
@@ -258,32 +256,13 @@ void CompileTask::print() {
// //
void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) { void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
methodOop method = (methodOop)JNIHandles::resolve(_method); methodOop method = (methodOop)JNIHandles::resolve(_method);
// print compiler name // print compiler name
st->print("%s:", CompileBroker::compiler(comp_level())->name()); st->print("%s:", CompileBroker::compiler(comp_level())->name());
char* method_name = NULL;
// print compilation number if (method != NULL) {
st->print("%3d", compile_id()); method_name = method->name_and_sig_as_C_string(buf, buflen);
// print method attributes
const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
{ const char blocking_char = is_blocking() ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handler() ? '!' : ' ';
const char tier_char =
is_highest_tier_compile(comp_level()) ? ' ' : ('0' + comp_level());
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
} }
print_compilation(st, method, method_name);
// Use buf to get method name and signature
if (method != NULL) st->print("%s", method->name_and_sig_as_C_string(buf, buflen));
// print osr_bci if any
if (is_osr) st->print(" @ %d", osr_bci());
// print method size
st->print_cr(" (%d bytes)", method->code_size());
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@@ -298,29 +277,7 @@ void CompileTask::print_line() {
// print compiler name if requested // print compiler name if requested
if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name()); if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name());
print_compilation(tty, method(), NULL);
// print compilation number
tty->print("%3d", compile_id());
// print method attributes
const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
{ const char blocking_char = is_blocking() ? 'b' : ' ';
const char compile_type = is_osr ? '%' : ' ';
const char sync_char = method->is_synchronized() ? 's' : ' ';
const char exception_char = method->has_exception_handler() ? '!' : ' ';
const char tier_char =
is_highest_tier_compile(comp_level()) ? ' ' : ('0' + comp_level());
tty->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
}
// print method name
method->print_short_name(tty);
// print osr_bci if any
if (is_osr) tty->print(" @ %d", osr_bci());
// print method size
tty->print_cr(" (%d bytes)", method->code_size());
} }
@@ -427,6 +384,7 @@ void CompileQueue::add(CompileTask* task) {
assert(lock()->owned_by_self(), "must own lock"); assert(lock()->owned_by_self(), "must own lock");
task->set_next(NULL); task->set_next(NULL);
task->set_prev(NULL);
if (_last == NULL) { if (_last == NULL) {
// The compile queue is empty. // The compile queue is empty.
@@ -437,8 +395,10 @@ void CompileQueue::add(CompileTask* task) {
// Append the task to the queue. // Append the task to the queue.
assert(_last->next() == NULL, "not last"); assert(_last->next() == NULL, "not last");
_last->set_next(task); _last->set_next(task);
task->set_prev(_last);
_last = task; _last = task;
} }
++_size;
// Mark the method as being in the compile queue. // Mark the method as being in the compile queue.
((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation(); ((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation();
@@ -452,10 +412,9 @@ void CompileQueue::add(CompileTask* task) {
} }
// Notify CompilerThreads that a task is available. // Notify CompilerThreads that a task is available.
lock()->notify(); lock()->notify_all();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileQueue::get // CompileQueue::get
// //
@@ -464,7 +423,6 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep(); NMethodSweeper::possibly_sweep();
MutexLocker locker(lock()); MutexLocker locker(lock());
// Wait for an available CompileTask. // Wait for an available CompileTask.
while (_first == NULL) { while (_first == NULL) {
// There is no work to be done right now. Wait. // There is no work to be done right now. Wait.
@@ -481,19 +439,31 @@ CompileTask* CompileQueue::get() {
lock()->wait(); lock()->wait();
} }
} }
CompileTask* task = CompilationPolicy::policy()->select_task(this);
CompileTask* task = _first; remove(task);
// Update queue first and last
_first = _first->next();
if (_first == NULL) {
_last = NULL;
}
return task; return task;
} }
void CompileQueue::remove(CompileTask* task)
{
assert(lock()->owned_by_self(), "must own lock");
if (task->prev() != NULL) {
task->prev()->set_next(task->next());
} else {
// task is the first element
assert(task == _first, "Sanity");
_first = task->next();
}
if (task->next() != NULL) {
task->next()->set_prev(task->prev());
} else {
// task is the last element
assert(task == _last, "Sanity");
_last = task->prev();
}
--_size;
}
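CompileQueue::get no longer pops the head: it asks the active CompilationPolicy to choose any queued element (select_task) and then unlinks it with remove(), which is why CompileTask gained a _prev pointer and the queue tracks _size. A standalone sketch of that shape, with a trivial FIFO policy that reproduces the old behavior (illustrative types, not the HotSpot ones):

struct Task { Task* next = nullptr; Task* prev = nullptr; };

struct Queue {
  Task* first = nullptr;
  Task* last  = nullptr;
  int   size  = 0;

  // Doubly-linked unlink, mirroring CompileQueue::remove above.
  void remove(Task* t) {
    if (t->prev) t->prev->next = t->next; else first = t->next;
    if (t->next) t->next->prev = t->prev; else last  = t->prev;
    --size;
  }
};

// Minimal policy: always picking 'first' makes get() behave like the old FIFO.
Task* select_task_fifo(Queue& q) { return q.first; }

// Models CompileQueue::get after the change (the real code delegates the
// choice to CompilationPolicy::policy()->select_task(this)).
Task* get(Queue& q) {
  Task* t = select_task_fifo(q);
  if (t) q.remove(t);
  return t;
}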
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileQueue::print // CompileQueue::print
@@ -545,7 +515,6 @@ CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS)
} }
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::compilation_init // CompileBroker::compilation_init
// //
@@ -554,18 +523,18 @@ void CompileBroker::compilation_init() {
_last_method_compiled[0] = '\0'; _last_method_compiled[0] = '\0';
// Set the interface to the current compiler(s). // Set the interface to the current compiler(s).
int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
#ifdef COMPILER1 #ifdef COMPILER1
if (c1_count > 0) {
_compilers[0] = new Compiler(); _compilers[0] = new Compiler();
#ifndef COMPILER2 }
_compilers[1] = _compilers[0];
#endif
#endif // COMPILER1 #endif // COMPILER1
#ifdef COMPILER2 #ifdef COMPILER2
if (c2_count > 0) {
_compilers[1] = new C2Compiler(); _compilers[1] = new C2Compiler();
#ifndef COMPILER1 }
_compilers[0] = _compilers[1];
#endif
#endif // COMPILER2 #endif // COMPILER2
#ifdef SHARK #ifdef SHARK
@@ -580,9 +549,7 @@ void CompileBroker::compilation_init() {
_task_free_list = NULL; _task_free_list = NULL;
// Start the CompilerThreads // Start the CompilerThreads
init_compiler_threads(compiler_count()); init_compiler_threads(c1_count, c2_count);
// totalTime performance counter is always created as it is required // totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean. // by the implementation of java.lang.management.CompilationMBean.
{ {
@@ -770,23 +737,38 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
// CompileBroker::init_compiler_threads // CompileBroker::init_compiler_threads
// //
// Initialize the compilation queue // Initialize the compilation queue
void CompileBroker::init_compiler_threads(int compiler_count) { void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK; EXCEPTION_MARK;
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
if (c2_compiler_count > 0) {
_c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock);
}
if (c1_compiler_count > 0) {
_c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock);
}
int compiler_count = c1_compiler_count + c2_compiler_count;
_method_queue = new CompileQueue("MethodQueue", MethodCompileQueue_lock);
_method_threads = _method_threads =
new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true); new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true);
char name_buffer[256]; char name_buffer[256];
int i; for (int i = 0; i < c2_compiler_count; i++) {
for (i = 0; i < compiler_count; i++) {
// Create a name for our thread. // Create a name for our thread.
sprintf(name_buffer, "CompilerThread%d", i); sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK); CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _method_queue, counters, CHECK);
_method_threads->append(new_thread); _method_threads->append(new_thread);
} }
for (int i = c2_compiler_count; i < compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
_method_threads->append(new_thread);
}
if (UsePerfData) { if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
compiler_count, CHECK); compiler_count, CHECK);
@@ -796,7 +778,9 @@ void CompileBroker::init_compiler_threads(int compiler_count) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::is_idle // CompileBroker::is_idle
bool CompileBroker::is_idle() { bool CompileBroker::is_idle() {
if (!_method_queue->is_empty()) { if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
return false;
} else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
return false; return false;
} else { } else {
int num_threads = _method_threads->length(); int num_threads = _method_threads->length();
@@ -859,6 +843,7 @@ void CompileBroker::compile_method_base(methodHandle method,
return; return;
} }
// If this method is already in the compile queue, then // If this method is already in the compile queue, then
// we do not block the current thread. // we do not block the current thread.
if (compilation_is_in_queue(method, osr_bci)) { if (compilation_is_in_queue(method, osr_bci)) {
@@ -876,10 +861,11 @@ void CompileBroker::compile_method_base(methodHandle method,
// Outputs from the following MutexLocker block: // Outputs from the following MutexLocker block:
CompileTask* task = NULL; CompileTask* task = NULL;
bool blocking = false; bool blocking = false;
CompileQueue* queue = compile_queue(comp_level);
// Acquire our lock. // Acquire our lock.
{ {
MutexLocker locker(_method_queue->lock(), THREAD); MutexLocker locker(queue->lock(), THREAD);
// Make sure the method has not slipped into the queues since // Make sure the method has not slipped into the queues since
// last we checked; note that those checks were "fast bail-outs". // last we checked; note that those checks were "fast bail-outs".
@@ -945,7 +931,7 @@ void CompileBroker::compile_method_base(methodHandle method,
// and in that case it's best to protect both the testing (here) of // and in that case it's best to protect both the testing (here) of
// these bits, and their updating (here and elsewhere) under a // these bits, and their updating (here and elsewhere) under a
// common lock. // common lock.
task = create_compile_task(_method_queue, task = create_compile_task(queue,
compile_id, method, compile_id, method,
osr_bci, comp_level, osr_bci, comp_level,
hot_method, hot_count, comment, hot_method, hot_count, comment,
@@ -959,6 +945,7 @@ void CompileBroker::compile_method_base(methodHandle method,
nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
int comp_level,
methodHandle hot_method, int hot_count, methodHandle hot_method, int hot_count,
const char* comment, TRAPS) { const char* comment, TRAPS) {
// make sure arguments make sense // make sure arguments make sense
@@ -967,26 +954,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized"); assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized");
int comp_level = CompilationPolicy::policy()->compilation_level(method, osr_bci); if (!TieredCompilation) {
comp_level = CompLevel_highest_tier;
#ifdef TIERED
if (TieredCompilation && StressTieredRuntime) {
static int flipper = 0;
if (is_even(flipper++)) {
comp_level = CompLevel_fast_compile;
} else {
comp_level = CompLevel_full_optimization;
} }
}
#ifdef SPARC
// QQQ FIX ME
// C2 only returns long results in G1 and c1 doesn't understand so disallow c2
// compiles of long results
if (TieredCompilation && method()->result_type() == T_LONG) {
comp_level = CompLevel_fast_compile;
}
#endif // SPARC
#endif // TIERED
// return quickly if possible // return quickly if possible
@@ -1000,13 +970,11 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
if (osr_bci == InvocationEntryBci) { if (osr_bci == InvocationEntryBci) {
// standard compilation // standard compilation
nmethod* method_code = method->code(); nmethod* method_code = method->code();
if (method_code != NULL if (method_code != NULL) {
#ifdef TIERED if (compilation_is_complete(method, osr_bci, comp_level)) {
&& ( method_code->is_compiled_by_c2() || comp_level == CompLevel_fast_compile )
#endif // TIERED
) {
return method_code; return method_code;
} }
}
if (method->is_not_compilable(comp_level)) return NULL; if (method->is_not_compilable(comp_level)) return NULL;
if (UseCodeCacheFlushing) { if (UseCodeCacheFlushing) {
@@ -1021,10 +989,11 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// osr compilation // osr compilation
#ifndef TIERED #ifndef TIERED
// seems like an assert of dubious value // seems like an assert of dubious value
assert(comp_level == CompLevel_full_optimization, assert(comp_level == CompLevel_highest_tier,
"all OSR compiles are assumed to be at a single compilation lavel"); "all OSR compiles are assumed to be at a single compilation lavel");
#endif // TIERED #endif // TIERED
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci); // We accept a higher level osr method
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
if (nm != NULL) return nm; if (nm != NULL) return nm;
if (method->is_not_osr_compilable()) return NULL; if (method->is_not_osr_compilable()) return NULL;
} }
@@ -1071,8 +1040,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
// If the compiler is shut off due to code cache flushing or otherwise, // If the compiler is shut off due to code cache flushing or otherwise,
// fail out now so blocking compiles don't hang the java thread // fail out now so blocking compiles don't hang the java thread
if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) { if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
method->invocation_counter()->decay(); CompilationPolicy::policy()->delay_compilation(method());
method->backedge_counter()->decay();
return NULL; return NULL;
} }
@@ -1088,7 +1056,8 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
} }
// return requested nmethod // return requested nmethod
return osr_bci == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci); // We accept a higher level osr method
return osr_bci == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
} }
@@ -1104,7 +1073,7 @@ bool CompileBroker::compilation_is_complete(methodHandle method,
if (method->is_not_osr_compilable()) { if (method->is_not_osr_compilable()) {
return true; return true;
} else { } else {
nmethod* result = method->lookup_osr_nmethod_for(osr_bci); nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
return (result != NULL); return (result != NULL);
} }
} else { } else {
@@ -1113,15 +1082,7 @@ bool CompileBroker::compilation_is_complete(methodHandle method,
} else { } else {
nmethod* result = method->code(); nmethod* result = method->code();
if (result == NULL) return false; if (result == NULL) return false;
#ifdef TIERED return comp_level == result->comp_level();
if (comp_level == CompLevel_fast_compile) {
// At worst the code is from c1
return true;
}
// comp level must be full opt
return result->is_compiled_by_c2();
#endif // TIERED
return true;
} }
} }
} }
@@ -1143,7 +1104,6 @@ bool CompileBroker::compilation_is_in_queue(methodHandle method,
return method->queued_for_compilation(); return method->queued_for_compilation();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited // CompileBroker::compilation_is_prohibited
// //
@@ -1151,11 +1111,9 @@ bool CompileBroker::compilation_is_in_queue(methodHandle method,
bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) { bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) {
bool is_native = method->is_native(); bool is_native = method->is_native();
// Some compilers may not support the compilation of natives. // Some compilers may not support the compilation of natives.
// QQQ this needs some work ought to only record not compilable at
// the specified level
if (is_native && if (is_native &&
(!CICompileNatives || !compiler(comp_level)->supports_native())) { (!CICompileNatives || !compiler(comp_level)->supports_native())) {
method->set_not_compilable_quietly(); method->set_not_compilable_quietly(comp_level);
return true; return true;
} }
@@ -1194,7 +1152,7 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,
// compilations may be numbered separately from regular compilations // compilations may be numbered separately from regular compilations
// if certain debugging flags are used. // if certain debugging flags are used.
uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) { uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
assert(_method_queue->lock()->owner() == JavaThread::current(), assert(MethodCompileQueue_lock->owner() == Thread::current(),
"must hold the compilation queue lock"); "must hold the compilation queue lock");
bool is_osr = (osr_bci != standard_entry_bci); bool is_osr = (osr_bci != standard_entry_bci);
assert(!method->is_native(), "no longer compile natives"); assert(!method->is_native(), "no longer compile natives");
@@ -1643,7 +1601,6 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
#endif #endif
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// CompileBroker::handle_full_code_cache // CompileBroker::handle_full_code_cache
// //
@@ -1883,12 +1840,12 @@ void CompileBroker::print_times() {
CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count); CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count); tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
if (compiler(CompLevel_fast_compile)) { if (compiler(CompLevel_simple) != NULL) {
compiler(CompLevel_fast_compile)->print_timers(); compiler(CompLevel_simple)->print_timers();
if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier)) }
compiler(CompLevel_highest_tier)->print_timers(); if (compiler(CompLevel_full_optimization) != NULL) {
compiler(CompLevel_full_optimization)->print_timers();
} }
tty->cr(); tty->cr();
int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled; int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb); tty->print_cr(" Total compiled bytecodes : %6d bytes", tcb);

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@ class CompileTask : public CHeapObj {
int _comp_level; int _comp_level;
int _num_inlined_bytecodes; int _num_inlined_bytecodes;
nmethodLocker* _code_handle; // holder of eventual result nmethodLocker* _code_handle; // holder of eventual result
CompileTask* _next; CompileTask* _next, *_prev;
// Fields used for logging why the compilation was initiated: // Fields used for logging why the compilation was initiated:
jlong _time_queued; // in units of os::elapsed_counter() jlong _time_queued; // in units of os::elapsed_counter()
@@ -49,6 +49,7 @@ class CompileTask : public CHeapObj {
int _hot_count; // information about its invocation counter int _hot_count; // information about its invocation counter
const char* _comment; // more info about the task const char* _comment; // more info about the task
void print_compilation(outputStream *st, methodOop method, char* method_name);
public: public:
CompileTask() { CompileTask() {
_lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock"); _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
@@ -85,15 +86,17 @@ class CompileTask : public CHeapObj {
CompileTask* next() const { return _next; } CompileTask* next() const { return _next; }
void set_next(CompileTask* next) { _next = next; } void set_next(CompileTask* next) { _next = next; }
CompileTask* prev() const { return _prev; }
void set_prev(CompileTask* prev) { _prev = prev; }
void print(); void print();
void print_line(); void print_line();
void print_line_on_error(outputStream* st, char* buf, int buflen); void print_line_on_error(outputStream* st, char* buf, int buflen);
void log_task(xmlStream* log); void log_task(xmlStream* log);
void log_task_queued(); void log_task_queued();
void log_task_start(CompileLog* log); void log_task_start(CompileLog* log);
void log_task_done(CompileLog* log); void log_task_done(CompileLog* log);
}; };
// CompilerCounters // CompilerCounters
@@ -141,7 +144,6 @@ class CompilerCounters : public CHeapObj {
PerfCounter* compile_counter() { return _perf_compiles; } PerfCounter* compile_counter() { return _perf_compiles; }
}; };
// CompileQueue // CompileQueue
// //
// A list of CompileTasks. // A list of CompileTasks.
@@ -153,26 +155,42 @@ class CompileQueue : public CHeapObj {
CompileTask* _first; CompileTask* _first;
CompileTask* _last; CompileTask* _last;
int _size;
public: public:
CompileQueue(const char* name, Monitor* lock) { CompileQueue(const char* name, Monitor* lock) {
_name = name; _name = name;
_lock = lock; _lock = lock;
_first = NULL; _first = NULL;
_last = NULL; _last = NULL;
_size = 0;
} }
const char* name() const { return _name; } const char* name() const { return _name; }
Monitor* lock() const { return _lock; } Monitor* lock() const { return _lock; }
void add(CompileTask* task); void add(CompileTask* task);
void remove(CompileTask* task);
CompileTask* first() { return _first; }
CompileTask* last() { return _last; }
CompileTask* get(); CompileTask* get();
bool is_empty() const { return _first == NULL; } bool is_empty() const { return _first == NULL; }
int size() const { return _size; }
void print(); void print();
}; };
// CompileTaskWrapper
//
// Assign this task to the current thread. Deallocate the task
// when the compilation is complete.
class CompileTaskWrapper : StackObj {
public:
CompileTaskWrapper(CompileTask* task);
~CompileTaskWrapper();
};
// Compilation // Compilation
// //
@@ -208,7 +226,8 @@ class CompileBroker: AllStatic {
static int _last_compile_level; static int _last_compile_level;
static char _last_method_compiled[name_buffer_length]; static char _last_method_compiled[name_buffer_length];
static CompileQueue* _method_queue; static CompileQueue* _c2_method_queue;
static CompileQueue* _c1_method_queue;
static CompileTask* _task_free_list; static CompileTask* _task_free_list;
static GrowableArray<CompilerThread*>* _method_threads; static GrowableArray<CompilerThread*>* _method_threads;
@@ -256,19 +275,9 @@ class CompileBroker: AllStatic {
static int _sum_nmethod_size; static int _sum_nmethod_size;
static int _sum_nmethod_code_size; static int _sum_nmethod_code_size;
static int compiler_count() {
return CICompilerCountPerCPU
// Example: if CICompilerCountPerCPU is true, then we get
// max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
// May help big-app startup time.
? (MAX2(log2_intptr(os::active_processor_count())-1,1))
: CICompilerCount;
}
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS); static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
static void init_compiler_threads(int compiler_count); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_in_queue (methodHandle method, int osr_bci);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level); static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
static uint assign_compile_id (methodHandle method, int osr_bci); static uint assign_compile_id (methodHandle method, int osr_bci);
static bool is_compile_blocking (methodHandle method, int osr_bci); static bool is_compile_blocking (methodHandle method, int osr_bci);
@@ -301,23 +310,35 @@ class CompileBroker: AllStatic {
int hot_count, int hot_count,
const char* comment, const char* comment,
TRAPS); TRAPS);
static CompileQueue* compile_queue(int comp_level) {
if (is_c2_compile(comp_level)) return _c2_method_queue;
if (is_c1_compile(comp_level)) return _c1_method_queue;
return NULL;
}
public: public:
enum { enum {
// The entry bci used for non-OSR compilations. // The entry bci used for non-OSR compilations.
standard_entry_bci = InvocationEntryBci standard_entry_bci = InvocationEntryBci
}; };
static AbstractCompiler* compiler(int level ) { static AbstractCompiler* compiler(int comp_level) {
if (level == CompLevel_fast_compile) return _compilers[0]; if (is_c2_compile(comp_level)) return _compilers[1]; // C2
assert(level == CompLevel_highest_tier, "what level?"); if (is_c1_compile(comp_level)) return _compilers[0]; // C1
return _compilers[1]; return NULL;
} }
static bool compilation_is_in_queue(methodHandle method, int osr_bci);
static int queue_size(int comp_level) {
CompileQueue *q = compile_queue(comp_level);
return q != NULL ? q->size() : 0;
}
static void compilation_init(); static void compilation_init();
static void init_compiler_thread_log(); static void init_compiler_thread_log();
static nmethod* compile_method(methodHandle method, int osr_bci, static nmethod* compile_method(methodHandle method,
methodHandle hot_method, int hot_count, int osr_bci,
int comp_level,
methodHandle hot_method,
int hot_count,
const char* comment, TRAPS); const char* comment, TRAPS);
static void compiler_thread_loop(); static void compiler_thread_loop();
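The new compiler() and compile_queue() mappings rely on level predicates instead of comparisons against individual enum values. Under the tiered scheme this change introduces, levels 1-3 are C1 variants and level 4 is C2, so the predicates presumably look like the reconstruction below (a sketch; the authoritative definitions live in globalDefinitions.hpp in this changeset):

// Reconstructed sketch of the level predicates assumed by CompileBroker above.
enum CompLevel {
  CompLevel_none              = 0,   // interpreter only
  CompLevel_simple            = 1,   // C1, no profiling
  CompLevel_limited_profile   = 2,   // C1, invocation/backedge counters
  CompLevel_full_profile      = 3,   // C1, full MDO profiling
  CompLevel_full_optimization = 4    // C2
};

inline bool is_c1_compile(int level) {
  return level > CompLevel_none && level < CompLevel_full_optimization;
}
inline bool is_c2_compile(int level) {
  return level == CompLevel_full_optimization;
}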

View file

@@ -20,7 +20,6 @@
// or visit www.oracle.com if you need additional information or have any // or visit www.oracle.com if you need additional information or have any
// questions. // questions.
// //
//
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps! // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!

View file

@@ -1,5 +1,5 @@
// //
// Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it

View file

@@ -1081,6 +1081,8 @@ compilationPolicy.cpp nativeLookup.hpp
compilationPolicy.cpp nmethod.hpp compilationPolicy.cpp nmethod.hpp
compilationPolicy.cpp oop.inline.hpp compilationPolicy.cpp oop.inline.hpp
compilationPolicy.cpp rframe.hpp compilationPolicy.cpp rframe.hpp
compilationPolicy.cpp scopeDesc.hpp
compilationPolicy.cpp simpleThresholdPolicy.hpp
compilationPolicy.cpp stubRoutines.hpp compilationPolicy.cpp stubRoutines.hpp
compilationPolicy.cpp thread.hpp compilationPolicy.cpp thread.hpp
compilationPolicy.cpp timer.hpp compilationPolicy.cpp timer.hpp
@@ -1451,6 +1453,7 @@ defaultStream.hpp xmlstream.hpp
deoptimization.cpp allocation.inline.hpp deoptimization.cpp allocation.inline.hpp
deoptimization.cpp biasedLocking.hpp deoptimization.cpp biasedLocking.hpp
deoptimization.cpp bytecode.hpp deoptimization.cpp bytecode.hpp
deoptimization.cpp compilationPolicy.hpp
deoptimization.cpp debugInfoRec.hpp deoptimization.cpp debugInfoRec.hpp
deoptimization.cpp deoptimization.hpp deoptimization.cpp deoptimization.hpp
deoptimization.cpp events.hpp deoptimization.cpp events.hpp
@@ -2172,6 +2175,7 @@ interpreterRT_<arch>.hpp generate_platform_dependent_include
interpreterRuntime.cpp biasedLocking.hpp interpreterRuntime.cpp biasedLocking.hpp
interpreterRuntime.cpp collectedHeap.hpp interpreterRuntime.cpp collectedHeap.hpp
interpreterRuntime.cpp compileBroker.hpp
interpreterRuntime.cpp compilationPolicy.hpp interpreterRuntime.cpp compilationPolicy.hpp
interpreterRuntime.cpp constantPoolOop.hpp interpreterRuntime.cpp constantPoolOop.hpp
interpreterRuntime.cpp cpCacheOop.hpp interpreterRuntime.cpp cpCacheOop.hpp
@@ -2829,6 +2833,7 @@ methodDataKlass.hpp klass.hpp
methodDataOop.cpp bytecode.hpp methodDataOop.cpp bytecode.hpp
methodDataOop.cpp bytecodeStream.hpp methodDataOop.cpp bytecodeStream.hpp
methodDataOop.cpp compilationPolicy.hpp
methodDataOop.cpp deoptimization.hpp methodDataOop.cpp deoptimization.hpp
methodDataOop.cpp handles.inline.hpp methodDataOop.cpp handles.inline.hpp
methodDataOop.cpp linkResolver.hpp methodDataOop.cpp linkResolver.hpp
@@ -2841,6 +2846,7 @@ methodDataOop.hpp bytecodes.hpp
methodDataOop.hpp oop.hpp methodDataOop.hpp oop.hpp
methodDataOop.hpp orderAccess.hpp methodDataOop.hpp orderAccess.hpp
methodDataOop.hpp universe.hpp methodDataOop.hpp universe.hpp
methodDataOop.hpp methodOop.hpp
methodHandleWalk.hpp methodHandles.hpp methodHandleWalk.hpp methodHandles.hpp
@@ -2906,6 +2912,7 @@ methodOop.cpp bytecodeStream.hpp
methodOop.cpp bytecodeTracer.hpp methodOop.cpp bytecodeTracer.hpp
methodOop.cpp bytecodes.hpp methodOop.cpp bytecodes.hpp
methodOop.cpp collectedHeap.inline.hpp methodOop.cpp collectedHeap.inline.hpp
methodOop.cpp compilationPolicy.hpp
methodOop.cpp debugInfoRec.hpp methodOop.cpp debugInfoRec.hpp
methodOop.cpp frame.inline.hpp methodOop.cpp frame.inline.hpp
methodOop.cpp gcLocker.hpp methodOop.cpp gcLocker.hpp
@@ -3655,6 +3662,7 @@ runtimeService.hpp timer.hpp
safepoint.cpp codeCache.hpp safepoint.cpp codeCache.hpp
safepoint.cpp collectedHeap.hpp safepoint.cpp collectedHeap.hpp
safepoint.cpp compilationPolicy.hpp
safepoint.cpp deoptimization.hpp safepoint.cpp deoptimization.hpp
safepoint.cpp events.hpp safepoint.cpp events.hpp
safepoint.cpp frame.inline.hpp safepoint.cpp frame.inline.hpp
@@ -3799,6 +3807,17 @@ signature.hpp allocation.hpp
signature.hpp methodOop.hpp signature.hpp methodOop.hpp
signature.hpp top.hpp signature.hpp top.hpp
simpleThresholdPolicy.cpp arguments.hpp
simpleThresholdPolicy.cpp compileBroker.hpp
simpleThresholdPolicy.cpp resourceArea.hpp
simpleThresholdPolicy.cpp simpleThresholdPolicy.hpp
simpleThresholdPolicy.cpp simpleThresholdPolicy.inline.hpp
simpleThresholdPolicy.hpp compilationPolicy.hpp
simpleThresholdPolicy.hpp globalDefinitions.hpp
simpleThresholdPolicy.hpp methodDataOop.hpp
simpleThresholdPolicy.hpp nmethod.hpp
sizes.cpp sizes.hpp sizes.cpp sizes.hpp
sizes.hpp allocation.hpp sizes.hpp allocation.hpp
@@ -3977,6 +3996,7 @@ stubs.hpp os_<os_family>.inline.hpp
sweeper.cpp atomic.hpp sweeper.cpp atomic.hpp
sweeper.cpp codeCache.hpp sweeper.cpp codeCache.hpp
sweeper.cpp compilationPolicy.hpp
sweeper.cpp compileBroker.hpp sweeper.cpp compileBroker.hpp
sweeper.cpp events.hpp sweeper.cpp events.hpp
sweeper.cpp methodOop.hpp sweeper.cpp methodOop.hpp

View file

@@ -200,6 +200,7 @@ IRT_END
void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) { void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
assert(ProfileTraps, "call me only if profiling"); assert(ProfileTraps, "call me only if profiling");
methodHandle trap_method(thread, method(thread)); methodHandle trap_method(thread, method(thread));
if (trap_method.not_null()) { if (trap_method.not_null()) {
methodDataHandle trap_mdo(thread, trap_method->method_data()); methodDataHandle trap_mdo(thread, trap_method->method_data());
if (trap_mdo.is_null()) { if (trap_mdo.is_null()) {
@@ -777,43 +778,6 @@ IRT_END
// Miscellaneous // Miscellaneous
#ifndef PRODUCT
static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
if (TraceInvocationCounterOverflow) {
InvocationCounter* ic = m->invocation_counter();
InvocationCounter* bc = m->backedge_counter();
ResourceMark rm;
const char* msg =
branch_bcp == NULL
? "comp-policy cntr ovfl @ %d in entry of "
: "comp-policy cntr ovfl @ %d in loop of ";
tty->print(msg, bci);
m->print_value();
tty->cr();
ic->print();
bc->print();
if (ProfileInterpreter) {
if (branch_bcp != NULL) {
methodDataOop mdo = m->method_data();
if (mdo != NULL) {
int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
tty->print_cr("back branch count = %d", count);
}
}
}
}
}
static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
if (TraceOnStackReplacement) {
ResourceMark rm;
tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
method->print_short_name(tty);
tty->print_cr(" at bci %d", bci);
}
}
#endif // !PRODUCT
nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) { nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp); nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests"); assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
@@ -826,7 +790,7 @@ nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, addr
frame fr = thread->last_frame(); frame fr = thread->last_frame();
methodOop method = fr.interpreter_frame_method(); methodOop method = fr.interpreter_frame_method();
int bci = method->bci_from(fr.interpreter_frame_bcp()); int bci = method->bci_from(fr.interpreter_frame_bcp());
nm = method->lookup_osr_nmethod_for(bci); nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
} }
return nm; return nm;
} }
@@ -840,53 +804,12 @@ IRT_ENTRY(nmethod*,
frame fr = thread->last_frame(); frame fr = thread->last_frame();
assert(fr.is_interpreted_frame(), "must come from interpreter"); assert(fr.is_interpreted_frame(), "must come from interpreter");
methodHandle method(thread, fr.interpreter_frame_method()); methodHandle method(thread, fr.interpreter_frame_method());
const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0; const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
const int bci = method->bci_from(fr.interpreter_frame_bcp()); const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
if (JvmtiExport::can_post_interpreter_events()) { nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
if (thread->is_interp_only_mode()) {
// If certain JVMTI events (e.g. frame pop event) are requested then the
// thread is forced to remain in interpreted code. This is
// implemented partly by a check in the run_compiled_code
// section of the interpreter whether we should skip running
// compiled code, and partly by skipping OSR compiles for
// interpreted-only threads.
if (branch_bcp != NULL) {
CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
return NULL;
}
}
}
if (branch_bcp == NULL) { if (osr_nm != NULL) {
// when code cache is full, compilation gets switched off, UseCompiler
// is set to false
if (!method->has_compiled_code() && UseCompiler) {
CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
} else {
// Force counter overflow on method entry, even if no compilation
// happened. (The method_invocation_event call does this also.)
CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
}
// compilation at an invocation overflow no longer goes and retries test for
// compiled method. We always run the loser of the race as interpreted.
// so return NULL
return NULL;
} else {
// counter overflow in a loop => try to do on-stack-replacement
nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
// when code cache is full, we should not compile any more...
if (osr_nm == NULL && UseCompiler) {
const int branch_bci = method->bci_from(branch_bcp);
CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
osr_nm = method->lookup_osr_nmethod_for(bci);
}
if (osr_nm == NULL) {
CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
return NULL;
} else {
// We may need to do on-stack replacement which requires that no // We may need to do on-stack replacement which requires that no
// monitors in the activation are biased because their // monitors in the activation are biased because their
// BasicObjectLocks will need to migrate during OSR. Force // BasicObjectLocks will need to migrate during OSR. Force
@@ -905,9 +828,8 @@ IRT_ENTRY(nmethod*,
} }
BiasedLocking::revoke(objects_to_revoke); BiasedLocking::revoke(objects_to_revoke);
} }
}
return osr_nm; return osr_nm;
}
}
IRT_END IRT_END
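The rewritten handler collapses the old special cases (JVMTI interp-only checks, entry vs. back-branch counters, OSR lookup and retry) into one CompilationPolicy::policy()->event(...) call that returns either an OSR nmethod or NULL. A standalone model of that single dispatch point (the sentinel matches HotSpot's InvocationEntryBci; everything else is illustrative):

#include <cstdio>

const int InvocationEntryBci = -1;   // the same "method entry" sentinel HotSpot uses

// Models CompilationPolicy::event(): one entry point for both overflow kinds.
// Returns true when OSR code is available (stand-in for a non-NULL nmethod*).
bool policy_event(int branch_bci, int bci) {
  if (bci == InvocationEntryBci) {
    std::puts("entry-counter overflow: consider a standard compilation");
    return false;                    // standard compiles never yield OSR code here
  }
  std::printf("back-branch overflow at bci %d (branch from %d): try OSR\n",
              bci, branch_bci);
  return true;                       // caller may migrate the activation to OSR code
}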
IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp)) IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,7 @@ void InvocationCounter::reset() {
} }
void InvocationCounter::set_carry() { void InvocationCounter::set_carry() {
_counter |= carry_mask; set_carry_flag();
// The carry bit now indicates that this counter had achieved a very // The carry bit now indicates that this counter had achieved a very
// large value. Now reduce the value, so that the method can be // large value. Now reduce the value, so that the method can be
// executed many more times before re-entering the VM. // executed many more times before re-entering the VM.
@@ -52,7 +51,6 @@ void InvocationCounter::set_carry() {
if (old_count != new_count) set(state(), new_count); if (old_count != new_count) set(state(), new_count);
} }
void InvocationCounter::set_state(State state) { void InvocationCounter::set_state(State state) {
assert(0 <= state && state < number_of_states, "illegal state"); assert(0 <= state && state < number_of_states, "illegal state");
int init = _init[state]; int init = _init[state];
@@ -82,11 +80,6 @@ int InvocationCounter::InterpreterInvocationLimit;
int InvocationCounter::InterpreterBackwardBranchLimit; int InvocationCounter::InterpreterBackwardBranchLimit;
int InvocationCounter::InterpreterProfileLimit; int InvocationCounter::InterpreterProfileLimit;
// Tier1 limits
int InvocationCounter::Tier1InvocationLimit;
int InvocationCounter::Tier1BackEdgeLimit;
const char* InvocationCounter::state_as_string(State state) { const char* InvocationCounter::state_as_string(State state) {
switch (state) { switch (state) {
@@ -146,8 +139,6 @@ void InvocationCounter::reinitialize(bool delay_overflow) {
InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits; InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits;
InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits; InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits;
Tier1InvocationLimit = Tier2CompileThreshold << number_of_noncount_bits;
Tier1BackEdgeLimit = Tier2BackEdgeThreshold << number_of_noncount_bits;
// When methodData is collected, the backward branch limit is compared against a // When methodData is collected, the backward branch limit is compared against a
// methodData counter, rather than an InvocationCounter. In the former case, we // methodData counter, rather than an InvocationCounter. In the former case, we

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
number_of_count_bits = BitsPerInt - number_of_noncount_bits, number_of_count_bits = BitsPerInt - number_of_noncount_bits,
state_limit = nth_bit(number_of_state_bits), state_limit = nth_bit(number_of_state_bits),
count_grain = nth_bit(number_of_state_bits + number_of_carry_bits), count_grain = nth_bit(number_of_state_bits + number_of_carry_bits),
count_limit = nth_bit(number_of_count_bits - 1),
carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits, carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits,
state_mask = right_n_bits(number_of_state_bits), state_mask = right_n_bits(number_of_state_bits),
status_mask = right_n_bits(number_of_state_bits + number_of_carry_bits), status_mask = right_n_bits(number_of_state_bits + number_of_carry_bits),
@@ -52,18 +51,16 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
public: public:
static int InterpreterInvocationLimit; // CompileThreshold scaled for interpreter use static int InterpreterInvocationLimit; // CompileThreshold scaled for interpreter use
static int Tier1InvocationLimit; // CompileThreshold scaled for tier1 use
static int Tier1BackEdgeLimit; // BackEdgeThreshold scaled for tier1 use
static int InterpreterBackwardBranchLimit; // A separate threshold for on stack replacement static int InterpreterBackwardBranchLimit; // A separate threshold for on stack replacement
static int InterpreterProfileLimit; // Profiling threshold scaled for interpreter use static int InterpreterProfileLimit; // Profiling threshold scaled for interpreter use
typedef address (*Action)(methodHandle method, TRAPS); typedef address (*Action)(methodHandle method, TRAPS);
enum PublicConstants { enum PublicConstants {
count_increment = count_grain, // use this value to increment the 32bit _counter word count_increment = count_grain, // use this value to increment the 32bit _counter word
count_mask_value = count_mask // use this value to mask the backedge counter count_mask_value = count_mask, // use this value to mask the backedge counter
count_shift = number_of_noncount_bits,
count_limit = nth_bit(number_of_count_bits - 1)
}; };
enum State { enum State {
@ -79,6 +76,7 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
inline void set(State state, int count); // sets state and counter inline void set(State state, int count); // sets state and counter
inline void decay(); // decay counter (divide by two) inline void decay(); // decay counter (divide by two)
void set_carry(); // set the sticky carry bit void set_carry(); // set the sticky carry bit
void set_carry_flag() { _counter |= carry_mask; }
// Accessors // Accessors
State state() const { return (State)(_counter & state_mask); } State state() const { return (State)(_counter & state_mask); }
@ -135,3 +133,4 @@ inline void InvocationCounter::decay() {
if (c > 0 && new_count == 0) new_count = 1; if (c > 0 && new_count == 0) new_count = 1;
set(state(), new_count); set(state(), new_count);
} }
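To make the packed encoding above concrete, a sketch with assumed field widths (2 state bits and 1 carry bit are an illustration, not values taken from this header): the low bits hold state and carry, the count lives in the remaining bits, so one logical increment adds count_grain and the count is recovered by shifting right by count_shift.

    #include <cstdio>

    int main() {
      // Assumed widths, for illustration only.
      const int number_of_state_bits    = 2;
      const int number_of_carry_bits    = 1;
      const int number_of_noncount_bits = number_of_state_bits + number_of_carry_bits;
      const int number_of_count_bits    = 32 - number_of_noncount_bits;

      const int count_grain      = 1 << number_of_noncount_bits;        // 8: one logical increment
      const int count_shift      = number_of_noncount_bits;             // 3: to extract the count
      const unsigned count_limit = 1u << (number_of_count_bits - 1);    // saturation point

      unsigned counter = (42 * count_grain) | 0x5;  // count=42, carry bit set, state=1
      std::printf("count=%u limit=%u\n", counter >> count_shift, count_limit);
      return 0;
    }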

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,12 +83,12 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
   _resolved_method = resolved_method;
   _selected_method = selected_method;
   _vtable_index    = vtable_index;
-  if (CompilationPolicy::mustBeCompiled(selected_method)) {
+  if (CompilationPolicy::must_be_compiled(selected_method)) {
     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
-    // Note: with several active threads, the mustBeCompiled may be true
-    //       while canBeCompiled is false; remove assert
-    // assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
+    // Note: with several active threads, the must_be_compiled may be true
+    //       while can_be_compiled is false; remove assert
+    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
     if (THREAD->is_Compiler_thread()) {
       // don't force compilation, resolve was on behalf of compiler
       return;
@@ -104,7 +104,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
       return;
     }
     CompileBroker::compile_method(selected_method, InvocationEntryBci,
-                                  methodHandle(), 0, "mustBeCompiled", CHECK);
+                                  CompLevel_initial_compile,
+                                  methodHandle(), 0, "must_be_compiled", CHECK);
   }
 }

@@ -32,7 +32,11 @@ void CollectorPolicy::initialize_flags() {
     MaxPermSize = PermSize;
   }
   PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
-  MaxPermSize = align_size_up(MaxPermSize, max_alignment());
+  // Don't increase Perm size limit above specified.
+  MaxPermSize = align_size_down(MaxPermSize, max_alignment());
+  if (PermSize > MaxPermSize) {
+    PermSize = MaxPermSize;
+  }
   MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
   MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -346,7 +346,8 @@ class NoRefDiscovery: StackObj {
   bool _was_discovering_refs;
  public:
   NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
-    if (_was_discovering_refs = _rp->discovery_enabled()) {
+    _was_discovering_refs = _rp->discovery_enabled();
+    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

@@ -466,6 +466,7 @@ oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, i
     bool ignore_is_on_bcp = false;
     Handle value = SystemDictionary::find_method_handle_type(signature,
                                                              klass,
+                                                             false,
                                                              ignore_is_on_bcp,
                                                              CHECK_NULL);
     result_oop = value();

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2200,8 +2200,23 @@ void instanceKlass::add_osr_nmethod(nmethod* n) {
   assert(n->is_osr_method(), "wrong kind of nmethod");
   n->set_osr_link(osr_nmethods_head());
   set_osr_nmethods_head(n);
+  // Raise the highest osr level if necessary
+  if (TieredCompilation) {
+    methodOop m = n->method();
+    m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
+  }
   // Remember to unlock again
   OsrList_lock->unlock();

+  // Get rid of the osr methods for the same bci that have lower levels.
+  if (TieredCompilation) {
+    for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
+      nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
+      if (inv != NULL && inv->is_in_use()) {
+        inv->make_not_entrant();
+      }
+    }
+  }
 }
@@ -2211,39 +2226,79 @@ void instanceKlass::remove_osr_nmethod(nmethod* n) {
   assert(n->is_osr_method(), "wrong kind of nmethod");
   nmethod* last = NULL;
   nmethod* cur  = osr_nmethods_head();
+  int max_level = CompLevel_none;  // Find the max comp level excluding n
+  methodOop m = n->method();
   // Search for match
   while(cur != NULL && cur != n) {
+    if (TieredCompilation) {
+      // Find max level before n
+      max_level = MAX2(max_level, cur->comp_level());
+    }
     last = cur;
     cur = cur->osr_link();
   }
+  nmethod* next = NULL;
   if (cur == n) {
+    next = cur->osr_link();
     if (last == NULL) {
       // Remove first element
-      set_osr_nmethods_head(osr_nmethods_head()->osr_link());
+      set_osr_nmethods_head(next);
     } else {
-      last->set_osr_link(cur->osr_link());
+      last->set_osr_link(next);
     }
   }
   n->set_osr_link(NULL);
+  if (TieredCompilation) {
+    cur = next;
+    while (cur != NULL) {
+      // Find max level after n
+      max_level = MAX2(max_level, cur->comp_level());
+      cur = cur->osr_link();
+    }
+    m->set_highest_osr_comp_level(max_level);
+  }
   // Remember to unlock again
   OsrList_lock->unlock();
 }

-nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
+nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   OsrList_lock->lock_without_safepoint_check();
   nmethod* osr = osr_nmethods_head();
+  nmethod* best = NULL;
   while (osr != NULL) {
     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
-    // There can be a time when a c1 osr method exists but we are waiting
-    // for a c2 version. When c2 completes its osr nmethod we will trash
-    // the c1 version and only be able to find the c2 version. However
-    // while we overflow in the c1 code at back branches we don't want to
-    // try and switch to the same code as we are already running
     if (osr->method() == m &&
         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
+      if (match_level) {
+        if (osr->comp_level() == comp_level) {
           // Found a match - return it.
           OsrList_lock->unlock();
           return osr;
         }
+      } else {
+        if (best == NULL || (osr->comp_level() > best->comp_level())) {
+          if (osr->comp_level() == CompLevel_highest_tier) {
+            // Found the best possible - return it.
+            OsrList_lock->unlock();
+            return osr;
+          }
+          best = osr;
+        }
+      }
+    }
     osr = osr->osr_link();
   }
   OsrList_lock->unlock();
+  if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
+    return best;
+  }
   return NULL;
 }
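A rough stand-alone model of the selection rule this new lookup implements (hypothetical types; the real method additionally holds the OsrList lock and has the early exit at CompLevel_highest_tier): an exact-level query returns only a level match, while a best-effort query returns the highest-level entry at or above the requested level.

    #include <vector>
    #include <cstddef>

    struct OsrEntry { int bci; int level; };   // stand-in for an OSR nmethod

    const OsrEntry* lookup(const std::vector<OsrEntry>& list,
                           int bci, int comp_level, bool match_level) {
      const OsrEntry* best = NULL;
      for (size_t i = 0; i < list.size(); i++) {
        const OsrEntry& e = list[i];
        if (e.bci != bci) continue;
        if (match_level) {
          if (e.level == comp_level) return &e;          // exact match only
        } else if (best == NULL || e.level > best->level) {
          best = &e;                                     // track the highest level seen
        }
      }
      // Best-effort mode never hands back code below the requested level.
      return (!match_level && best != NULL && best->level >= comp_level) ? best : NULL;
    }

With entries at levels 1 and 3 for the same bci, a best-effort query for level 2 returns the level-3 entry, while an exact query for level 2 returns NULL.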

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -588,7 +588,7 @@ class instanceKlass: public Klass {
   void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
   void remove_osr_nmethod(nmethod* n);
-  nmethod* lookup_osr_nmethod(const methodOop m, int bci) const;
+  nmethod* lookup_osr_nmethod(const methodOop m, int bci, int level, bool match_level) const;

   // Breakpoint support (see methods on methodOop for details)
   BreakpointInfo* breakpoints() const { return _breakpoints; };

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -283,11 +283,17 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
     if (receiver(row) != NULL) entries++;
   }
   st->print_cr("count(%u) entries(%u)", count(), entries);
+  int total = count();
+  for (row = 0; row < row_limit(); row++) {
+    if (receiver(row) != NULL) {
+      total += receiver_count(row);
+    }
+  }
   for (row = 0; row < row_limit(); row++) {
     if (receiver(row) != NULL) {
       tab(st);
       receiver(row)->print_value_on(st);
-      st->print_cr("(%u)", receiver_count(row));
+      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
     }
   }
 }
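A quick illustration of the new frequency column, with assumed counts: if count() is 10 and two receiver rows hold 30 and 60, total is 100 and the rows print as (30 0.30) and (60 0.60).

    #include <cstdio>

    int main() {
      // Assumed profile data for one polymorphic call site.
      unsigned count  = 10;              // calls not attributed to a recorded receiver
      unsigned rows[] = { 30, 60 };      // per-receiver-type counts
      unsigned total  = count + rows[0] + rows[1];
      for (int i = 0; i < 2; i++) {
        std::printf("(%u %4.2f)\n", rows[i], (float) rows[i] / (float) total);
      }
      return 0;
    }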
@@ -743,9 +749,18 @@ void methodDataOopDesc::post_initialize(BytecodeStream* stream) {
 // Initialize the methodDataOop corresponding to a given method.
 void methodDataOopDesc::initialize(methodHandle method) {
   ResourceMark rm;
   // Set the method back-pointer.
   _method = method();
+  if (TieredCompilation) {
+    _invocation_counter.init();
+    _backedge_counter.init();
+    _num_loops = 0;
+    _num_blocks = 0;
+    _highest_comp_level = 0;
+    _highest_osr_comp_level = 0;
+    _would_profile = false;
+  }
   set_creation_mileage(mileage_of(method()));

   // Initialize flags and trap history.
@@ -798,32 +813,25 @@ void methodDataOopDesc::initialize(methodHandle method) {
 // Get a measure of how much mileage the method has on it.
 int methodDataOopDesc::mileage_of(methodOop method) {
   int mileage = 0;
+  if (TieredCompilation) {
+    mileage = MAX2(method->invocation_count(), method->backedge_count());
+  } else {
     int iic = method->interpreter_invocation_count();
     if (mileage < iic) mileage = iic;
     InvocationCounter* ic = method->invocation_counter();
     InvocationCounter* bc = method->backedge_counter();
     int icval = ic->count();
     if (ic->carry()) icval += CompileThreshold;
     if (mileage < icval) mileage = icval;
     int bcval = bc->count();
     if (bc->carry()) bcval += CompileThreshold;
     if (mileage < bcval) mileage = bcval;
+  }
   return mileage;
 }

 bool methodDataOopDesc::is_mature() const {
-  uint current = mileage_of(_method);
-  uint initial = creation_mileage();
-  if (current < initial)
-    return true;  // some sort of overflow
-  uint target;
-  if (ProfileMaturityPercentage <= 0)
-    target = (uint) -ProfileMaturityPercentage;  // absolute value
-  else
-    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
-  return (current >= initial + target);
+  return CompilationPolicy::policy()->is_mature(_method);
 }
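For reference, the retired maturity heuristic in numbers (flag values assumed for illustration): with ProfileMaturityPercentage = 20 and CompileThreshold = 10000, an MDO created at mileage 500 became mature once current mileage reached 500 + (20 * 10000) / 100 = 2500; a negative percentage was taken as an absolute mileage target. The replacement delegates the decision to the active CompilationPolicy.

    #include <cstdio>

    int main() {
      // Assumed flag values, for illustration only.
      const int profile_maturity_percentage = 20;
      const int compile_threshold           = 10000;
      const unsigned initial = 500;    // mileage when the MDO was created
      const unsigned current = 2600;   // mileage now

      unsigned target = (unsigned)((profile_maturity_percentage * compile_threshold) / 100);
      std::printf("mature=%d (threshold=%u)\n", current >= initial + target, initial + target);
      return 0;
    }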
 // Translate a bci to its corresponding data index (di).

@@ -1208,6 +1208,24 @@ private:
   int _creation_mileage;  // method mileage at MDO creation
+  // How many invocations has this MDO seen?
+  // These counters are used to determine the exact age of MDO.
+  // We need those because in tiered a method can be concurrently
+  // executed at different levels.
+  InvocationCounter _invocation_counter;
+  // Same for backedges.
+  InvocationCounter _backedge_counter;
+  // Number of loops and blocks is computed when compiling the first
+  // time with C1. It is used to determine if method is trivial.
+  short _num_loops;
+  short _num_blocks;
+  // Highest compile level this method has ever seen.
+  u1 _highest_comp_level;
+  // Same for OSR level
+  u1 _highest_osr_comp_level;
+  // Does this method contain anything worth profiling?
+  bool _would_profile;

   // Size of _data array in bytes. (Excludes header and extra_data fields.)
   int _data_size;
@@ -1292,6 +1310,36 @@ public:
   int creation_mileage() const     { return _creation_mileage; }
   void set_creation_mileage(int x) { _creation_mileage = x; }
+
+  int invocation_count() {
+    if (invocation_counter()->carry()) {
+      return InvocationCounter::count_limit;
+    }
+    return invocation_counter()->count();
+  }
+  int backedge_count() {
+    if (backedge_counter()->carry()) {
+      return InvocationCounter::count_limit;
+    }
+    return backedge_counter()->count();
+  }
+  InvocationCounter* invocation_counter() { return &_invocation_counter; }
+  InvocationCounter* backedge_counter()   { return &_backedge_counter; }
+
+  void set_would_profile(bool p) { _would_profile = p; }
+  bool would_profile() const     { return _would_profile; }
+
+  int highest_comp_level()                   { return _highest_comp_level; }
+  void set_highest_comp_level(int level)     { _highest_comp_level = level; }
+  int highest_osr_comp_level()               { return _highest_osr_comp_level; }
+  void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; }
+
+  int num_loops() const     { return _num_loops; }
+  void set_num_loops(int n) { _num_loops = n; }
+  int num_blocks() const    { return _num_blocks; }
+  void set_num_blocks(int n) { _num_blocks = n; }
+
   bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
   static int mileage_of(methodOop m);
@@ -1413,7 +1461,7 @@ public:
   void inc_decompile_count() {
     _nof_decompiles += 1;
     if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
-      method()->set_not_compilable();
+      method()->set_not_compilable(CompLevel_full_optimization);
     }
   }
@@ -1422,6 +1470,13 @@ public:
     return byte_offset_of(methodDataOopDesc, _data[0]);
   }
+
+  static ByteSize invocation_counter_offset() {
+    return byte_offset_of(methodDataOopDesc, _invocation_counter);
+  }
+  static ByteSize backedge_counter_offset() {
+    return byte_offset_of(methodDataOopDesc, _backedge_counter);
+  }

   // GC support
   oop* adr_method() const { return (oop*)&_method; }
   bool object_is_parsable() const { return _size != 0; }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
   // Fix and bury in methodOop
   m->set_interpreter_entry(NULL); // sets i2i entry and from_int
-  m->set_highest_tier_compile(CompLevel_none);
   m->set_adapter_entry(NULL);
   m->clear_code(); // from_c/from_i get set to c2i/i2i
@@ -89,6 +88,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
   m->invocation_counter()->init();
   m->backedge_counter()->init();
   m->clear_number_of_breakpoints();

   assert(m->is_parsable(), "must be parsable here.");
   assert(m->size() == size, "wrong size for object");
   // We should not publish an uprasable object's reference
@@ -246,8 +246,8 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) {
   st->print_cr(" - method size: %d", m->method_size());
   if (m->intrinsic_id() != vmIntrinsics::_none)
     st->print_cr(" - intrinsic id: %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
-  if (m->highest_tier_compile() != CompLevel_none)
-    st->print_cr(" - highest tier: %d", m->highest_tier_compile());
+  if (m->highest_comp_level() != CompLevel_none)
+    st->print_cr(" - highest level: %d", m->highest_comp_level());
   st->print_cr(" - vtable index: %d", m->_vtable_index);
   st->print_cr(" - i2i entry: " INTPTR_FORMAT, m->interpreter_entry());
   st->print_cr(" - adapter: " INTPTR_FORMAT, m->adapter());

@@ -233,7 +233,7 @@ void methodOopDesc::remove_unshareable_info() {
 }

-bool methodOopDesc::was_executed_more_than(int n) const {
+bool methodOopDesc::was_executed_more_than(int n) {
   // Invocation counter is reset when the methodOop is compiled.
   // If the method has compiled code we therefore assume it has
   // be excuted more than n times.
@@ -241,7 +241,8 @@ bool methodOopDesc::was_executed_more_than(int n) const {
     // interpreter doesn't bump invocation counter of trivial methods
     // compiler does not bump invocation counter of compiled methods
     return true;
-  } else if (_invocation_counter.carry()) {
+  }
+  else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) {
     // The carry bit is set when the counter overflows and causes
     // a compilation to occur. We don't know how many times
     // the counter has been reset, so we simply assume it has
@@ -253,7 +254,7 @@ bool methodOopDesc::was_executed_more_than(int n) const {
 }

 #ifndef PRODUCT
-void methodOopDesc::print_invocation_count() const {
+void methodOopDesc::print_invocation_count() {
   if (is_static()) tty->print("static ");
   if (is_final()) tty->print("final ");
   if (is_synchronized()) tty->print("synchronized ");
@@ -574,16 +575,19 @@ bool methodOopDesc::is_not_compilable(int comp_level) const {
     // compilers must recognize this method specially, or not at all
     return true;
   }
-#ifdef COMPILER2
-  if (is_tier1_compile(comp_level)) {
-    if (is_not_tier1_compilable()) {
-      return true;
-    }
-  }
-#endif // COMPILER2
-  return (_invocation_counter.state() == InvocationCounter::wait_for_nothing)
-          || (number_of_breakpoints() > 0);
+  if (number_of_breakpoints() > 0) {
+    return true;
+  }
+  if (comp_level == CompLevel_any) {
+    return is_not_c1_compilable() || is_not_c2_compilable();
+  }
+  if (is_c1_compile(comp_level)) {
+    return is_not_c1_compilable();
+  }
+  if (is_c2_compile(comp_level)) {
+    return is_not_c2_compilable();
+  }
+  return false;
 }

 // call this when compiler finds that this method is not compilable
@@ -604,15 +608,18 @@ void methodOopDesc::set_not_compilable(int comp_level, bool report) {
     xtty->stamp();
     xtty->end_elem();
   }
-#ifdef COMPILER2
-  if (is_tier1_compile(comp_level)) {
-    set_not_tier1_compilable();
-    return;
-  }
-#endif /* COMPILER2 */
-  assert(comp_level == CompLevel_highest_tier, "unexpected compilation level");
-  invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
-  backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
+  if (comp_level == CompLevel_all) {
+    set_not_c1_compilable();
+    set_not_c2_compilable();
+  } else {
+    if (is_c1_compile(comp_level)) {
+      set_not_c1_compilable();
+    } else
+    if (is_c2_compile(comp_level)) {
+      set_not_c2_compilable();
+    }
+  }
+  CompilationPolicy::policy()->disable_compilation(this);
 }
 // Revert to using the interpreter and clear out the nmethod
@@ -649,7 +656,6 @@ void methodOopDesc::unlink_method() {
   set_method_data(NULL);
   set_interpreter_throwout_count(0);
   set_interpreter_invocation_count(0);
-  _highest_tier_compile = CompLevel_none;
 }

 // Called when the method_holder is getting linked. Setup entrypoints so the method
@@ -746,8 +752,8 @@ void methodOopDesc::set_code(methodHandle mh, nmethod *code) {
   int comp_level = code->comp_level();
   // In theory there could be a race here. In practice it is unlikely
   // and not worth worrying about.
-  if (comp_level > mh->highest_tier_compile()) {
-    mh->set_highest_tier_compile(comp_level);
+  if (comp_level > mh->highest_comp_level()) {
+    mh->set_highest_comp_level(comp_level);
   }

   OrderAccess::storestore();
@@ -813,11 +819,13 @@ bool methodOopDesc::should_not_be_cached() const {
 bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
   switch (name_sid) {
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):  // FIXME: remove this transitional form
   case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
   case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
     return true;
   }
+  if (AllowTransitionalJSR292
+      && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
+    return true;
   return false;
 }
@@ -905,12 +913,16 @@ methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
   m->set_signature_index(_imcp_invoke_signature);
   assert(is_method_handle_invoke_name(m->name()), "");
   assert(m->signature() == signature(), "");
-  assert(m->is_method_handle_invoke(), "");
 #ifdef CC_INTERP
   ResultTypeFinder rtf(signature());
   m->set_result_index(rtf.type());
 #endif
   m->compute_size_of_parameters(THREAD);
   m->set_exception_table(Universe::the_empty_int_array());
+  m->init_intrinsic_id();
+  assert(m->intrinsic_id() == vmIntrinsics::_invokeExact ||
+         m->intrinsic_id() == vmIntrinsics::_invokeGeneric, "must be an invoker");

   // Finally, set up its entry points.
   assert(m->method_handle_type() == method_type(), "");
@@ -1023,6 +1035,7 @@ void methodOopDesc::init_intrinsic_id() {
   assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
   const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
   assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
+  assert(intrinsic_id_size_in_bytes() == sizeof(_intrinsic_id), "");

   // the klass name is well-known:
   vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
@@ -1032,7 +1045,8 @@ void methodOopDesc::init_intrinsic_id() {
   vmSymbols::SID name_id = vmSymbols::find_sid(name());
   if (name_id == vmSymbols::NO_SID) return;
   vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
-  if (sig_id == vmSymbols::NO_SID) return;
+  if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle)
+      && sig_id == vmSymbols::NO_SID) return;
   jshort flags = access_flags().as_short();

   vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
@@ -1061,11 +1075,14 @@ void methodOopDesc::init_intrinsic_id() {
     if (is_static() || !is_native()) break;
     switch (name_id) {
     case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
-      id = vmIntrinsics::_invokeGeneric; break;
-    default:
-      if (is_method_handle_invoke_name(name()))
+      id = vmIntrinsics::_invokeGeneric;
+      break;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
       id = vmIntrinsics::_invokeExact;
       break;
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):
+      if (AllowTransitionalJSR292) id = vmIntrinsics::_invokeExact;
+      break;
     }
     break;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_InvokeDynamic):
@@ -1442,6 +1459,64 @@ void methodOopDesc::clear_all_breakpoints() {
 }

+int methodOopDesc::invocation_count() {
+  if (TieredCompilation) {
+    const methodDataOop mdo = method_data();
+    if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
+      return InvocationCounter::count_limit;
+    } else {
+      return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
+    }
+  } else {
+    return invocation_counter()->count();
+  }
+}
+
+int methodOopDesc::backedge_count() {
+  if (TieredCompilation) {
+    const methodDataOop mdo = method_data();
+    if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
+      return InvocationCounter::count_limit;
+    } else {
+      return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
+    }
+  } else {
+    return backedge_counter()->count();
+  }
+}
+
+int methodOopDesc::highest_comp_level() const {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    return mdo->highest_comp_level();
+  } else {
+    return CompLevel_none;
+  }
+}
+
+int methodOopDesc::highest_osr_comp_level() const {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    return mdo->highest_osr_comp_level();
+  } else {
+    return CompLevel_none;
+  }
+}
+
+void methodOopDesc::set_highest_comp_level(int level) {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    mdo->set_highest_comp_level(level);
+  }
+}
+
+void methodOopDesc::set_highest_osr_comp_level(int level) {
+  methodDataOop mdo = method_data();
+  if (mdo != NULL) {
+    mdo->set_highest_osr_comp_level(level);
+  }
+}
+
 BreakpointInfo::BreakpointInfo(methodOop m, int bci) {
   _bci = bci;
   _name_index = m->name_index();
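A small stand-alone model (hypothetical types, not the HotSpot implementation) of the counting scheme in invocation_count()/backedge_count() above: under tiered compilation the effective count is the sum of the method's own counter and the MDO's counter, saturating at count_limit as soon as either carry bit is set, because a method can execute at several levels concurrently.

    #include <cstddef>

    struct Counter { int count; bool carry; };  // stand-in for InvocationCounter

    const int kCountLimit = 1 << 28;            // assumed stand-in for InvocationCounter::count_limit

    int effective_count(const Counter& method_counter, const Counter* mdo_counter) {
      bool overflowed = method_counter.carry || (mdo_counter != NULL && mdo_counter->carry);
      if (overflowed) {
        return kCountLimit;                     // either counter hit its limit at some point
      }
      int total = method_counter.count;
      if (mdo_counter != NULL) {
        total += mdo_counter->count;            // combine counts from all execution levels
      }
      return total;
    }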

@@ -62,9 +62,9 @@
 // | method_size          | max_stack                   |
 // | max_locals           | size_of_parameters          |
 // |------------------------------------------------------|
-// | intrinsic_id, highest_tier  | (unused)              |
+// | intrinsic_id, (unused)      | throwout_count        |
 // |------------------------------------------------------|
-// | throwout_count       | num_breakpoints             |
+// | num_breakpoints      | (unused)                    |
 // |------------------------------------------------------|
 // | invocation_counter                                  |
 // | backedge_counter                                    |
@@ -83,7 +83,6 @@
 class CheckedExceptionElement;
 class LocalVariableTableElement;
 class AdapterHandlerEntry;
 class methodDataOopDesc;

 class methodOopDesc : public oopDesc {
@@ -93,7 +92,7 @@ class methodOopDesc : public oopDesc {
   constMethodOop  _constMethod;  // Method read-only data.
   constantPoolOop _constants;    // Constant pool
   methodDataOop   _method_data;
-  int             _interpreter_invocation_count; // Count of times invoked
+  int             _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
   AccessFlags     _access_flags; // Access flags
   int             _vtable_index; // vtable index of this method (see VtableIndexFlag)
                                  // note: can have vtables with >2**16 elements (because of inheritance)
@@ -105,11 +104,11 @@ class methodOopDesc : public oopDesc {
   u2 _max_locals;          // Number of local variables used by this method
   u2 _size_of_parameters;  // size of the parameter block (receiver + arguments) in words
   u1 _intrinsic_id;        // vmSymbols::intrinsic_id (0 == _none)
-  u1 _highest_tier_compile;       // Highest compile level this method has ever seen.
   u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2 _number_of_breakpoints;      // fullspeed debugging support
   InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
   InvocationCounter _backedge_counter;   // Incremented before each backedge taken - used to trigger frequencey-based optimizations
 #ifndef PRODUCT
   int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
 #endif
@@ -221,8 +220,11 @@ class methodOopDesc : public oopDesc {
   // max locals
   int  max_locals() const       { return _max_locals; }
   void set_max_locals(int size) { _max_locals = size; }
-  int  highest_tier_compile()              { return _highest_tier_compile; }
-  void set_highest_tier_compile(int level) { _highest_tier_compile = level; }
+
+  int  highest_comp_level() const;
+  void set_highest_comp_level(int level);
+  int  highest_osr_comp_level() const;
+  void set_highest_osr_comp_level(int level);

   // Count of times method was exited via exception while interpreting
   void interpreter_throwout_increment() {
@@ -278,16 +280,24 @@ class methodOopDesc : public oopDesc {
   // invocation counter
   InvocationCounter* invocation_counter() { return &_invocation_counter; }
   InvocationCounter* backedge_counter()   { return &_backedge_counter; }
-  int  invocation_count() const { return _invocation_counter.count(); }
-  int  backedge_count() const   { return _backedge_counter.count(); }
-  bool was_executed_more_than(int n) const;
-  bool was_never_executed() const { return !was_executed_more_than(0); }
+
+  int invocation_count();
+  int backedge_count();
+
+  bool was_executed_more_than(int n);
+  bool was_never_executed() { return !was_executed_more_than(0); }

   static void build_interpreter_method_data(methodHandle method, TRAPS);

-  int  interpreter_invocation_count() const { return _interpreter_invocation_count; }
+  int interpreter_invocation_count() {
+    if (TieredCompilation) return invocation_count();
+    else return _interpreter_invocation_count;
+  }
   void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
-  int  increment_interpreter_invocation_count() { return ++_interpreter_invocation_count; }
+  int increment_interpreter_invocation_count() {
+    if (TieredCompilation) ShouldNotReachHere();
+    return ++_interpreter_invocation_count;
+  }

 #ifndef PRODUCT
   int compiled_invocation_count() const { return _compiled_invocation_count; }
@@ -361,7 +371,7 @@ class methodOopDesc : public oopDesc {
 #ifndef PRODUCT
   // operations on invocation counter
-  void print_invocation_count() const;
+  void print_invocation_count();
 #endif

   // byte codes
@@ -506,6 +516,8 @@ class methodOopDesc : public oopDesc {
   static int method_data_offset_in_bytes() { return offset_of(methodOopDesc, _method_data); }
   static int interpreter_invocation_counter_offset_in_bytes()
                                            { return offset_of(methodOopDesc, _interpreter_invocation_count); }
+  static int intrinsic_id_offset_in_bytes() { return offset_of(methodOopDesc, _intrinsic_id); }
+  static int intrinsic_id_size_in_bytes()   { return sizeof(u1); }

   // Static methods that are used to implement member methods where an exposed this pointer
   // is needed due to possible GCs
@@ -587,8 +599,13 @@ class methodOopDesc : public oopDesc {
   static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);

   // On-stack replacement support
-  bool has_osr_nmethod()                   { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
-  nmethod* lookup_osr_nmethod_for(int bci) { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); }
+  bool has_osr_nmethod(int level, bool match_level) {
+    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
+  }
+  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
+    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci, level, match_level);
+  }

   // Inline cache support
   void cleanup_inline_caches();
@@ -600,17 +617,19 @@ class methodOopDesc : public oopDesc {
   // Indicates whether compilation failed earlier for this method, or
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
-  bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
-  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
-  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
+  bool is_not_compilable(int comp_level = CompLevel_any) const;
+  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_compilable(comp_level, false);
   }
-  bool is_not_osr_compilable() const { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
+  bool is_not_osr_compilable(int comp_level = CompLevel_any) const {
+    return is_not_compilable(comp_level) || access_flags().is_not_osr_compilable();
+  }
   void set_not_osr_compilable()      { _access_flags.set_not_osr_compilable(); }
-  bool is_not_tier1_compilable() const { return access_flags().is_not_tier1_compilable(); }
-  void set_not_tier1_compilable()      { _access_flags.set_not_tier1_compilable(); }
+  bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
+  void set_not_c1_compilable()      { _access_flags.set_not_c1_compilable(); }
+  bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
+  void set_not_c2_compilable()      { _access_flags.set_not_c2_compilable(); }

   // Background compilation support
   bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }

@@ -140,7 +140,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_m
   } else {
     // Not hot. Check for medium-sized pre-existing nmethod at cold sites.
     if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size() > InlineSmallCode/4)
+        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4)
       return "already compiled into a medium method";
   }
   if (size > max_size) {
@@ -180,7 +180,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
     }
   }

-  if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
+  if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
     wci_result->set_profit(wci_result->profit() * 0.1);
     // %%% adjust wci_result->size()?
   }
@@ -206,7 +206,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
   // Now perform checks which are heuristic

-  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode )
+  if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode )
     return "already compiled into a big method";

   // don't inline exception code unless the top method belongs to an

@@ -850,17 +850,6 @@ void Compile::Init(int aliaslevel) {
   set_decompile_count(0);

   set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
-  // Compilation level related initialization
-  if (env()->comp_level() == CompLevel_fast_compile) {
-    set_num_loop_opts(Tier1LoopOptsCount);
-    set_do_inlining(Tier1Inline != 0);
-    set_max_inline_size(Tier1MaxInlineSize);
-    set_freq_inline_size(Tier1FreqInlineSize);
-    set_do_scheduling(false);
-    set_do_count_invocations(Tier1CountInvocations);
-    set_do_method_data_update(Tier1UpdateMethodData);
-  } else {
-    assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level");
   set_num_loop_opts(LoopOptsCount);
   set_do_inlining(Inline);
   set_max_inline_size(MaxInlineSize);
@@ -868,7 +857,6 @@ void Compile::Init(int aliaslevel) {
   set_do_scheduling(OptoScheduling);
   set_do_count_invocations(false);
   set_do_method_data_update(false);
-  }

   if (debug_info()->recording_non_safepoints()) {
     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>

Some files were not shown because too many files have changed in this diff.