Mirror of https://github.com/openjdk/jdk.git
Synced 2025-09-23 12:34:32 +02:00

Merge
commit bce45e9222
221 changed files with 9836 additions and 1296 deletions
.hgtags
@@ -248,3 +248,4 @@ b32e2219736e42baaf45daf0ad67ed34f6033799 jdk9-b02
 7f655f31f9bcee618cf832f08176ad8c1ed3fdd3 jdk9-b03
 099891b1d86f3719e116ac717ffdafc90d037fb7 jdk9-b04
 dd311791ad6895a3989020dd6c6c46db87972ab8 jdk9-b05
+85dbdc227c5e11429b4fc4a8ba763f50107edd6e jdk9-b06

.hgtags
@@ -248,3 +248,4 @@ cd3825b2983045784d6fc6d1729c799b08215752 jdk8-b120
 fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
 cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
 8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
+d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06

.hgtags
@@ -248,3 +248,4 @@ a7d3638deb2f4e33217b1ecf889479e90f9e5b50 jdk9-b00
 d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03
 1ed19de263e1e0772da0269118cdd9deeb9fff04 jdk9-b04
 167c39eb44731a5d66770d0f00e231164653a2ff jdk9-b05
+a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06

.hgtags
@@ -408,3 +408,4 @@ b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
 b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
 3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
 bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
+52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
@@ -630,11 +630,20 @@ class Assembler : public AbstractAssembler {
   }

 protected:
+  // Insert a nop if the previous is cbcond
+  void insert_nop_after_cbcond() {
+    if (UseCBCond && cbcond_before()) {
+      nop();
+    }
+  }
   // Delay slot helpers
   // cti is called when emitting control-transfer instruction,
   // BEFORE doing the emitting.
   // Only effective when assertion-checking is enabled.
   void cti() {
+    // A cbcond instruction immediately followed by a CTI
+    // instruction introduces pipeline stalls, we need to avoid that.
+    no_cbcond_before();
 #ifdef CHECK_DELAY
     assert_not_delayed("cti should not be in delay slot");
 #endif

@@ -658,7 +667,6 @@ class Assembler : public AbstractAssembler {
   void no_cbcond_before() {
     assert(offset() == 0 || !cbcond_before(), "cbcond should not follow an other cbcond");
   }
-
 public:

   bool use_cbcond(Label& L) {
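Note that the guard appears to track only whether the immediately preceding instruction was a cbcond, so a single nop is always enough padding. A minimal, self-contained C++ sketch of this emit-time bookkeeping (the ToyAssembler class is hypothetical; only the method names and the assertion mirror the HotSpot Assembler code above):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy model of the cbcond/CTI hazard handling -- not HotSpot code.
class ToyAssembler {
  std::vector<uint32_t> buf_;
  bool last_was_cbcond_ = false;

  void emit(uint32_t insn, bool is_cbcond) {
    buf_.push_back(insn);
    last_was_cbcond_ = is_cbcond;
  }

 public:
  bool cbcond_before() const { return last_was_cbcond_; }

  void nop() { emit(0x01000000u, false); }  // SPARC NOP encoding: sethi 0, %g0

  // Mirrors Assembler::insert_nop_after_cbcond(): pad only when needed.
  void insert_nop_after_cbcond() {
    if (cbcond_before()) nop();
  }

  // Stand-in for a cbcond emitter: enforces the "no cbcond after cbcond" rule.
  void cbcond(uint32_t insn) {
    assert(!cbcond_before() && "cbcond should not follow another cbcond");
    emit(insn, true);
  }

  // Stand-in for any other control-transfer emitter (br, bp, call, jmpl, ...).
  void branch(uint32_t insn) {
    insert_nop_after_cbcond();  // a CTI directly after cbcond would stall
    emit(insn, false);
  }

  size_t size() const { return buf_.size(); }
};

int main() {
  ToyAssembler a;
  a.cbcond(0x12345678u);  // compare-and-branch
  a.branch(0x30480000u);  // guard pads with a NOP first
  assert(a.size() == 3);  // cbcond + nop + branch
  return 0;
}

Routing every CTI emitter through the guard means the padding cost is paid only when a cbcond actually precedes; straight-line code is unaffected.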
@@ -54,33 +54,33 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
 inline void Assembler::add(Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
-inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
+inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
+inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { insert_nop_after_cbcond(); bpr( c, a, p, s1, target(L)); }

-inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L)); }
+inline void Assembler::fb( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
+inline void Assembler::fb( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); fb(c, a, target(L)); }

-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
+inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
+inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); fbp(c, a, cc, p, target(L)); }

-inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
-inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
+inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
+inline void Assembler::br( Condition c, bool a, Label& L ) { insert_nop_after_cbcond(); br(c, a, target(L)); }

-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
-inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { bp(c, a, cc, p, target(L)); }
+inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); insert_nop_after_cbcond(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
+inline void Assembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) { insert_nop_after_cbcond(); bp(c, a, cc, p, target(L)); }

 // compare and branch
 inline void Assembler::cbcond(Condition c, CC cc, Register s1, Register s2, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | rs2(s2)); }
 inline void Assembler::cbcond(Condition c, CC cc, Register s1, int simm5, Label& L) { cti(); no_cbcond_before(); emit_data(op(branch_op) | cond_cbcond(c) | op2(bpr_op2) | branchcc(cc) | wdisp10(intptr_t(target(L)), intptr_t(pc())) | rs1(s1) | immed(true) | simm(simm5, 5)); }

-inline void Assembler::call( address d, relocInfo::relocType rt ) { cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
-inline void Assembler::call( Label& L, relocInfo::relocType rt ) { call( target(L), rt); }
+inline void Assembler::call( address d, relocInfo::relocType rt ) { insert_nop_after_cbcond(); cti(); emit_data( op(call_op) | wdisp(intptr_t(d), intptr_t(pc()), 30), rt); has_delay_slot(); assert(rt != relocInfo::virtual_call_type, "must use virtual_call_Relocation::spec"); }
+inline void Assembler::call( Label& L, relocInfo::relocType rt ) { insert_nop_after_cbcond(); call( target(L), rt); }

 inline void Assembler::flush( Register s1, Register s2) { emit_int32( op(arith_op) | op3(flush_op3) | rs1(s1) | rs2(s2)); }
 inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op) | op3(flush_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

-inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
-inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, Register s2, Register d ) { insert_nop_after_cbcond(); cti(); emit_int32( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
+inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { insert_nop_after_cbcond(); cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }

 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
@@ -233,6 +233,7 @@ inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, reloc
 }

 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
+  insert_nop_after_cbcond();
   br(c, a, p, target(L));
 }

@@ -248,6 +249,7 @@ inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relo
 }

 inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
+  insert_nop_after_cbcond();
   brx(c, a, p, target(L));
 }

@@ -269,6 +271,7 @@ inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, reloc
 }

 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
+  insert_nop_after_cbcond();
   fb(c, a, p, target(L));
 }

@@ -318,6 +321,7 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
 }

 inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
+  insert_nop_after_cbcond();
   MacroAssembler::call( target(L), rt);
 }

@@ -1268,7 +1268,7 @@ int MachPrologNode::reloc() const {
 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   Compile* C = ra_->C;

-  if( do_polling() && ra_->C->is_method_compilation() ) {
+  if(do_polling() && ra_->C->is_method_compilation()) {
     st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
 #ifdef _LP64
     st->print("LDX    [L0],G0\t!Poll for Safepointing\n\t");

@@ -1277,8 +1277,12 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
 #endif
   }

-  if( do_polling() )
+  if(do_polling()) {
+    if (UseCBCond && !ra_->C->is_method_compilation()) {
+      st->print("NOP\n\t");
+    }
     st->print("RET\n\t");
+  }

   st->print("RESTORE");
 }

@@ -1291,15 +1295,20 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   __ verify_thread();

   // If this does safepoint polling, then do it here
-  if( do_polling() && ra_->C->is_method_compilation() ) {
+  if(do_polling() && ra_->C->is_method_compilation()) {
     AddressLiteral polling_page(os::get_polling_page());
     __ sethi(polling_page, L0);
     __ relocate(relocInfo::poll_return_type);
-    __ ld_ptr( L0, 0, G0 );
+    __ ld_ptr(L0, 0, G0);
   }

   // If this is a return, then stuff the restore in the delay slot
-  if( do_polling() ) {
+  if(do_polling()) {
+    if (UseCBCond && !ra_->C->is_method_compilation()) {
+      // Insert extra padding for the case when the epilogue is preceded by
+      // a cbcond jump, which can't be followed by a CTI instruction
+      __ nop();
+    }
     __ ret();
     __ delayed()->restore();
   } else {
@@ -3330,7 +3339,18 @@ op_attrib op_cost(1); // Required cost attribute
 //----------Instruction Attributes---------------------------------------------
 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
 ins_attrib ins_size(32);           // Required size attribute (in bits)
-ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
+
+// avoid_back_to_back attribute is an expression that must return
+// one of the following values defined in MachNode:
+// AVOID_NONE   - instruction can be placed anywhere
+// AVOID_BEFORE - instruction cannot be placed after an
+//                instruction with MachNode::AVOID_AFTER
+// AVOID_AFTER  - the next instruction cannot be the one
+//                with MachNode::AVOID_BEFORE
+// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
+//                          the same time
+ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);
+
 ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                    // non-matching short branch variant of some
                                    // long branch?
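As a reading aid, here is a small, self-contained C++ sketch of the adjacency rule those four values encode (the enumerator names mirror the MachNode values quoted in the comment; the bit-flag packing and the checker function are assumptions for illustration, not HotSpot's scheduler):

#include <cassert>

// Names mirror the MachNode values described above; the flag packing
// below is an assumption made for this illustration.
enum AvoidBackToBack {
  AVOID_NONE             = 0,
  AVOID_BEFORE           = 1,  // may not come right after an AVOID_AFTER insn
  AVOID_AFTER            = 2,  // the next insn may not be AVOID_BEFORE
  AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
};

// Hypothetical adjacency check: may `second` directly follow `first`?
bool can_be_adjacent(AvoidBackToBack first, AvoidBackToBack second) {
  return !((first & AVOID_AFTER) && (second & AVOID_BEFORE));
}

int main() {
  // A cbcond short branch is AVOID_BEFORE_AND_AFTER, so two of them can
  // never be emitted back to back, and an ordinary branch (AVOID_BEFORE)
  // cannot directly follow one either.
  assert(!can_be_adjacent(AVOID_BEFORE_AND_AFTER, AVOID_BEFORE_AND_AFTER));
  assert(!can_be_adjacent(AVOID_BEFORE_AND_AFTER, AVOID_BEFORE));
  assert(can_be_adjacent(AVOID_BEFORE, AVOID_AFTER));
  return 0;
}

Under this rule the short-branch instructs below, which are all cbcond-based, are tagged AVOID_BEFORE_AND_AFTER, while ordinary branches and calls only need AVOID_BEFORE.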
@@ -6630,6 +6650,7 @@ instruct encodeHeapOop(iRegN dst, iRegP src) %{
   ins_encode %{
     __ encode_heap_oop($src$$Register, $dst$$Register);
   %}
+  ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
   ins_pipe(ialu_reg);
 %}

@@ -9199,6 +9220,7 @@ instruct branch(label labl) %{
     __ ba(*L);
     __ delayed()->nop();
   %}
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br);
 %}

@@ -9217,7 +9239,7 @@ instruct branch_short(label labl) %{
     __ ba_short(*L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_imm);
 %}

@@ -9231,6 +9253,7 @@ instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
   format %{ "BP$cmp   $icc,$labl" %}
   // Prim = bits 24-22, Secnd = bits 31-30
   ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9242,6 +9265,7 @@ instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
   format %{ "BP$cmp  $icc,$labl" %}
   // Prim = bits 24-22, Secnd = bits 31-30
   ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9260,6 +9284,7 @@ instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
     __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
     __ delayed()->nop();
   %}
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9278,6 +9303,7 @@ instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
     __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
     __ delayed()->nop();
   %}
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_fcc);
 %}

@@ -9290,6 +9316,7 @@ instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
   format %{ "BP$cmp   $icc,$labl\t! Loop end" %}
   // Prim = bits 24-22, Secnd = bits 31-30
   ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9302,6 +9329,7 @@ instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
   format %{ "BP$cmp  $icc,$labl\t! Loop end" %}
   // Prim = bits 24-22, Secnd = bits 31-30
   ins_encode( enc_bp( labl, cmp, icc ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9552,7 +9580,7 @@ instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flag
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9570,7 +9598,7 @@ instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flag
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_imm);
 %}

@@ -9588,7 +9616,7 @@ instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, fla
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9606,7 +9634,7 @@ instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, fla
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_imm);
 %}

@@ -9624,7 +9652,7 @@ instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flag
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9642,7 +9670,7 @@ instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flag
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_imm);
 %}

@@ -9665,7 +9693,7 @@ instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, fla
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9687,7 +9715,7 @@ instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, f
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9705,7 +9733,7 @@ instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flag
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9723,7 +9751,7 @@ instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, fl
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9742,7 +9770,7 @@ instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label lab
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_reg);
 %}

@@ -9760,7 +9788,7 @@ instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label lab
     __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
   %}
   ins_short_branch(1);
-  ins_avoid_back_to_back(1);
+  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
   ins_pipe(cbcond_reg_imm);
 %}

@@ -9777,6 +9805,7 @@ instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_reg);
 %}

@@ -9789,6 +9818,7 @@ instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_reg);
 %}

@@ -9801,6 +9831,7 @@ instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
   ins_cost(BRANCH_COST);
   format %{ "BR$cmp   $op1,$labl" %}
   ins_encode( enc_bpr( labl, cmp, op1 ) );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_reg);
 %}

@@ -9841,6 +9872,7 @@ instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
     __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
     __ delayed()->nop();
   %}
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(br_cc);
 %}

@@ -9968,6 +10000,7 @@ instruct CallStaticJavaDirect( method meth ) %{
   ins_cost(CALL_COST);
   format %{ "CALL,static  ; NOP ==> " %}
   ins_encode( Java_Static_Call( meth ), call_epilog );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(simple_call);
 %}

@@ -10004,6 +10037,7 @@ instruct CallRuntimeDirect(method meth, l7RegP l7) %{
   format %{ "CALL,runtime" %}
   ins_encode( Java_To_Runtime( meth ),
               call_epilog, adjust_long_from_native_call );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(simple_call);
 %}

@@ -10016,6 +10050,7 @@ instruct CallLeafDirect(method meth, l7RegP l7) %{
   ins_encode( Java_To_Runtime( meth ),
               call_epilog,
               adjust_long_from_native_call );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(simple_call);
 %}

@@ -10028,6 +10063,7 @@ instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
   ins_encode( Java_To_Runtime( meth ),
               call_epilog,
               adjust_long_from_native_call );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(simple_call);
 %}

@@ -10041,6 +10077,7 @@ instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
   ins_cost(CALL_COST);
   format %{ "Jmp     $jump_target  ; NOP \t! $method_oop holds method oop" %}
   ins_encode(form_jmpl(jump_target));
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(tail_call);
 %}

@@ -10072,6 +10109,7 @@ instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
   // opcode(Assembler::jmpl_op3, Assembler::arith_op);
   // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
   // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(tail_call);
 %}

@@ -10102,6 +10140,7 @@ instruct RethrowException()
   // use the following format syntax
   format %{ "Jmp     rethrow_stub" %}
   ins_encode(enc_rethrow);
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(tail_call);
 %}

@@ -10130,6 +10169,7 @@ instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP
   ins_cost(DEFAULT_COST*10);
   format %{ "CALL   PartialSubtypeCheck\n\tNOP" %}
   ins_encode( enc_PartialSubtypeCheck() );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(partial_subtype_check_pipe);
 %}

@@ -10139,6 +10179,7 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
   ins_cost(DEFAULT_COST*10);
   format %{ "CALL   PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
   ins_encode( enc_PartialSubtypeCheck() );
+  ins_avoid_back_to_back(AVOID_BEFORE);
   ins_pipe(partial_subtype_check_pipe);
 %}

@@ -162,7 +162,7 @@ define_pd_global(uintx, TypeProfileLevel, 111);
           "Number of milliseconds to wait before start calculating aborts "  \
           "for RTM locking")                                                  \
                                                                               \
-  experimental(bool, UseRTMXendForLockBusy, false,                            \
+  experimental(bool, UseRTMXendForLockBusy, true,                             \
           "Use RTM Xend instead of Xabort when lock busy")                    \
                                                                               \
   /* assembler */                                                             \
@@ -1488,11 +1488,10 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
     bind(L_rtm_retry);
   }
-  if (!UseRTMXendForLockBusy) {
-    movptr(tmpReg, Address(objReg, 0));
-    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
-    jcc(Assembler::notZero, IsInflated);
-  }
+  movptr(tmpReg, Address(objReg, 0));
+  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+  jcc(Assembler::notZero, IsInflated);
+
   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
     Label L_noincrement;
     if (RTMTotalCountIncrRate > 1) {

@@ -1512,10 +1511,7 @@ void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Registe
   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
   if (UseRTMXendForLockBusy) {
     xend();
-    movptr(tmpReg, Address(objReg, 0));
-    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
-    jcc(Assembler::notZero, IsInflated);
-    movptr(abort_status_Reg, 0x1);  // Set the abort status to 1 (as xabort does)
+    movptr(abort_status_Reg, 0x2);  // Set the abort status to 2 (so we can retry)
     jmp(L_decrement_retry);
   }
   else {
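For context, a rough, self-contained sketch of the lock-busy path this enables, written with the RTM intrinsics from <immintrin.h> (compile with -mrtm on RTM-capable hardware; everything other than the intrinsics is a hypothetical model, since HotSpot emits this sequence as generated assembly rather than calling intrinsics):

#include <immintrin.h>  // _xbegin(), _xend(), _XBEGIN_STARTED

// Hypothetical model of "use xend instead of xabort when the lock is busy".
// Returns true if the lock was observed free inside a transaction; the
// transaction is then left open, as in HotSpot's RTM stack locking.
bool rtm_try_lock(volatile long* lock_word, int retry_count) {
  for (int attempt = 0; attempt <= retry_count; ++attempt) {
    unsigned int status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      if (*lock_word == 0) {
        return true;   // lock free: continue running transactionally
      }
      // Lock busy: end the transaction cleanly instead of aborting it
      // (xend avoids an xabort-triggered rollback), then fake an abort
      // status so the retry logic below still runs.
      _xend();
      status = 0x2;    // mirrors "Set the abort status to 2 (so we can retry)"
    }
    // A real implementation would inspect `status` before retrying.
  }
  return false;        // give up and take the conventional locking path
}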
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2811,18 +2811,13 @@ void os::yield() {

 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }

-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
   // Threads on Linux are all with same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }

-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support

@@ -3079,7 +3074,7 @@ static bool do_suspend(OSThread* osthread) {

   for (int n = 0; !osthread->sr.is_suspended(); n++) {
     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+      os::yield_all();
     }

     // timeout, try to cancel the request

@@ -3113,7 +3108,7 @@ static void do_resume(OSThread* osthread) {
   if (sr_notify(osthread) == 0) {
     for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
       for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
-        os::yield_all(i);
+        os::yield_all();
       }
     }
   } else {
@@ -917,9 +917,20 @@ void os::free_thread(OSThread* osthread) {
 //////////////////////////////////////////////////////////////////////////////
 // thread local storage

+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+  Thread* thread = (Thread*) p;
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
 int os::allocate_thread_local_storage() {
   pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
+  int rslt = pthread_key_create(&key, restore_thread_pointer);
   assert(rslt == 0, "cannot allocate thread local storage");
   return (int)key;
 }

@@ -2551,18 +2562,13 @@ void os::yield() {

 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}

-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
   // Threads on Bsd are all with same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }

-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support

@@ -1032,9 +1032,20 @@ void os::free_thread(OSThread* osthread) {
 //////////////////////////////////////////////////////////////////////////////
 // thread local storage

+// Restore the thread pointer if the destructor is called. This is in case
+// someone from JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer we
+// will hang or crash. When detachCurrentThread is called the key will be set
+// to null and we will not be called again. If detachCurrentThread is never
+// called we could loop forever depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+  Thread* thread = (Thread*) p;
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
 int os::allocate_thread_local_storage() {
   pthread_key_t key;
-  int rslt = pthread_key_create(&key, NULL);
+  int rslt = pthread_key_create(&key, restore_thread_pointer);
   assert(rslt == 0, "cannot allocate thread local storage");
   return (int)key;
 }

@@ -3781,18 +3792,13 @@ void os::yield() {

 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}

-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
   // Threads on Linux are all with same priority. The Solaris style
   // os::yield_all() with nanosleep(1ms) is not necessary.
   sched_yield();
 }

-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // thread priority support

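The restore_thread_pointer change above is subtle, so here is a small, self-contained POSIX sketch of the mechanism it relies on (the key and payload below are hypothetical): a pthread key destructor runs at thread exit for every non-null slot, and if it stores a value back, the implementation may invoke it again, which is exactly why the comment warns about looping forever when detachCurrentThread never clears the slot.

#include <pthread.h>
#include <cstdio>

static pthread_key_t key;

// Runs at thread exit for each non-null slot value. Re-storing the value
// keeps it visible to destructors that run later (the JNI
// DetachCurrentThread case), at the cost of being invoked again, up to
// PTHREAD_DESTRUCTOR_ITERATIONS on conforming implementations.
static void restore_pointer(void* p) {
  static int calls = 0;
  if (++calls < 3) {              // cap the demo instead of looping forever
    pthread_setspecific(key, p);  // what restore_thread_pointer does
  }
  std::printf("destructor call %d with %p\n", calls, p);
}

static void* worker(void*) {
  static int payload = 42;        // stands in for the Thread*
  pthread_setspecific(key, &payload);
  return nullptr;                 // destructor fires when the thread exits
}

int main() {
  pthread_key_create(&key, restore_pointer);
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  return 0;
}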
@@ -29,6 +29,7 @@
 #include "services/dtraceAttacher.hpp"

 #include <door.h>
+#include <limits.h>
 #include <string.h>
 #include <signal.h>
 #include <sys/types.h>

@@ -668,11 +669,13 @@ static jint enable_dprobes(AttachOperation* op, outputStream* out) {
     out->print_cr("No probe specified");
     return JNI_ERR;
   } else {
-    int probe_typess = atoi(probe);
-    if (errno) {
+    char *end;
+    long val = strtol(probe, &end, 10);
+    if (end == probe || val < 0 || val > INT_MAX) {
       out->print_cr("invalid probe type");
       return JNI_ERR;
     } else {
+      int probe_typess = (int) val;
       DTrace::enable_dprobes(probe_typess);
       return JNI_OK;
     }

@@ -703,8 +706,9 @@ jint AttachListener::pd_set_flag(AttachOperation* op, outputStream* out) {
   bool flag = true;
   const char* arg1;
   if ((arg1 = op->arg(1)) != NULL) {
-    flag = (atoi(arg1) != 0);
-    if (errno) {
+    char *end;
+    flag = (strtol(arg1, &end, 10) != 0);
+    if (arg1 == end) {
       out->print_cr("flag value has to be an integer");
       return JNI_ERR;
     }
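The atoi-to-strtol change fixes a real bug: atoi has no error reporting, and checking errno after it is meaningless since atoi is not required to set errno. A minimal sketch of the validation pattern the new code uses (the helper name is hypothetical):

#include <cassert>
#include <climits>
#include <cstdlib>

// Parse a non-negative int, rejecting non-numeric input and out-of-range
// values; mirrors the strtol checks added to enable_dprobes above.
bool parse_probe_type(const char* s, int* out) {
  char* end;
  long val = std::strtol(s, &end, 10);
  if (end == s || val < 0 || val > INT_MAX) {
    return false;  // no digits consumed, or value out of range
  }
  *out = (int) val;
  return true;
}

int main() {
  int t;
  assert(parse_probe_type("7", &t) && t == 7);
  assert(!parse_probe_type("abc", &t));  // end == s: nothing parsed
  assert(!parse_probe_type("-1", &t));   // negative values rejected
  return 0;
}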
@@ -49,16 +49,6 @@ void OSThread::pd_destroy() {

 // copied from synchronizer.cpp

-void OSThread::handle_spinlock_contention(int tries) {
-  if (NoYieldsInMicrolock) return;
-
-  if (tries > 10) {
-    os::yield_all(tries); // Yield to threads of any priority
-  } else if (tries > 5) {
-    os::yield();          // Yield to threads of same or higher priority
-  }
-}
-
 void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
   os::Solaris::SR_handler(thread, uc);
 }
@@ -82,8 +82,6 @@
   void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
   static void SR_handler(Thread* thread, ucontext_t* uc);

-  static void handle_spinlock_contention(int tries); // Used for thread local eden locking
-
   // ***************************************************************
   // Platform dependent initialization and cleanup
   // ***************************************************************
@@ -969,9 +969,6 @@ bool os::create_main_thread(JavaThread* thread) {
   return true;
 }

-// _T2_libthread is true if we believe we are running with the newer
-// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
-bool os::Solaris::_T2_libthread = false;

 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
   // Allocate the OSThread object
@@ -1056,71 +1053,10 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
   thread->set_osthread(osthread);

   // Create the Solaris thread
-  // explicit THR_BOUND for T2_libthread case in case
-  // that assumption is not accurate, but our alternate signal stack
-  // handling is based on it which must have bound threads
   thread_t tid = 0;
-  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
-               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
-                   (thr_type == vm_thread) ||
-                   (thr_type == cgc_thread) ||
-                   (thr_type == pgc_thread) ||
-                   (thr_type == compiler_thread && BackgroundCompilation)) ?
-                  THR_BOUND : 0);
+  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
   int status;
-
-  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
-  //
-  // On multiprocessors systems, libthread sometimes under-provisions our
-  // process with LWPs. On a 30-way systems, for instance, we could have
-  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
-  // to our process. This can result in under utilization of PEs.
-  // I suspect the problem is related to libthread's LWP
-  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
-  // upcall policy.
-  //
-  // The following code is palliative -- it attempts to ensure that our
-  // process has sufficient LWPs to take advantage of multiple PEs.
-  // Proper long-term cures include using user-level threads bound to LWPs
-  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
-  // slight timing window with respect to sampling _os_thread_count, but
-  // the race is benign. Also, we should periodically recompute
-  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
-  // the number of PEs in our partition. You might be tempted to use
-  // THR_NEW_LWP here, but I'd recommend against it as that could
-  // result in undesirable growth of the libthread's LWP pool.
-  // The fix below isn't sufficient; for instance, it doesn't take into count
-  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
-  //
-  // Some pathologies this scheme doesn't handle:
-  // * Threads can block, releasing the LWPs. The LWPs can age out.
-  //   When a large number of threads become ready again there aren't
-  //   enough LWPs available to service them. This can occur when the
-  //   number of ready threads oscillates.
-  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
-  //
-  // Finally, we should call thr_setconcurrency() periodically to refresh
-  // the LWP pool and thwart the LWP age-out mechanism.
-  // The "+3" term provides a little slop -- we want to slightly overprovision.
-
-  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
-    if (!(flags & THR_BOUND)) {
-      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
-    }
-  }
-  // Although this doesn't hurt, we should warn of undefined behavior
-  // when using unbound T1 threads with schedctl(). This should never
-  // happen, as the compiler and VM threads are always created bound
-  DEBUG_ONLY(
-      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
-          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
-          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
-           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
-        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
-      }
-  );
-

   // Mark that we don't have an lwp or thread id yet.
   // In case we attempt to set the priority before the thread starts.
   osthread->set_lwp_id(-1);
@@ -1145,13 +1081,6 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
     // Remember that we created this thread so we can set priority on it
     osthread->set_vm_created();

-    // Set the default thread priority. If using bound threads, setting
-    // lwp priority will be delayed until thread start.
-    set_native_priority(thread,
-                        DefaultThreadPriority == -1 ?
-                        java_to_os_priority[NormPriority] :
-                        DefaultThreadPriority);
-
     // Initial thread state is INITIALIZED, not SUSPENDED
     osthread->set_state(INITIALIZED);

@@ -1333,39 +1262,8 @@ void os::initialize_thread(Thread* thr) {
     jt->set_stack_size(stack_size);
   }

-  // 5/22/01: Right now alternate signal stacks do not handle
-  // throwing stack overflow exceptions, see bug 4463178
-  // Until a fix is found for this, T2 will NOT imply alternate signal
-  // stacks.
-  // If using T2 libthread threads, install an alternate signal stack.
-  // Because alternate stacks associate with LWPs on Solaris,
-  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
-  // we prefer to explicitly stack bang.
-  // If not using T2 libthread, but using UseBoundThreads any threads
-  // (primordial thread, jni_attachCurrentThread) we do not create,
-  // probably are not bound, therefore they can not have an alternate
-  // signal stack. Since our stack banging code is generated and
-  // is shared across threads, all threads must be bound to allow
-  // using alternate signal stacks. The alternative is to interpose
-  // on _lwp_create to associate an alt sig stack with each LWP,
-  // and this could be a problem when the JVM is embedded.
-  // We would prefer to use alternate signal stacks with T2
-  // Since there is currently no accurate way to detect T2
-  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
-  // on installing alternate signal stacks
-
-  // 05/09/03: removed alternate signal stack support for Solaris
-  // The alternate signal stack mechanism is no longer needed to
-  // handle stack overflow. This is now handled by allocating
-  // guard pages (red zone) and stackbanging.
-  // Initially the alternate signal stack mechanism was removed because
-  // it did not work with T1 llibthread. Alternate
-  // signal stacks MUST have all threads bound to lwps. Applications
-  // can create their own threads and attach them without their being
-  // bound under T1. This is frequently the case for the primordial thread.
-  // If we were ever to reenable this mechanism we would need to
-  // use the dynamic check for T2 libthread.
-
+  // With the T2 libthread (T1 is no longer supported) threads are always bound
+  // and we use stackbanging in all cases.
   os::Solaris::init_thread_fpu_state();
   std::set_terminate(_handle_uncaught_cxx_exception);
@@ -2092,12 +1990,7 @@ void os::Solaris::print_distro_info(outputStream* st) {
 }

 void os::Solaris::print_libversion_info(outputStream* st) {
-  if (os::Solaris::T2_libthread()) {
   st->print(" (T2 libthread)");
-  }
-  else {
-    st->print(" (T1 libthread)");
-  }
   st->cr();
 }

@@ -3323,48 +3216,20 @@ void os::yield() {

 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }

+void os::yield_all() {
-// On Solaris we found that yield_all doesn't always yield to all other threads.
-// There have been cases where there is a thread ready to execute but it doesn't
-// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
-// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
-// SIGWAITING signal which will cause a new lwp to be created. So we count the
-// number of times yield_all is called in the one loop and increase the sleep
-// time after 8 attempts. If this fails too we increase the concurrency level
-// so that the starving thread would get an lwp
-
-void os::yield_all(int attempts) {
   // Yields to all threads, including threads with lower priorities
-  if (attempts == 0) {
     os::sleep(Thread::current(), 1, false);
-  } else {
-    int iterations = attempts % 30;
-    if (iterations == 0 && !os::Solaris::T2_libthread()) {
-      // thr_setconcurrency and _getconcurrency make sense only under T1.
-      int noofLWPS = thr_getconcurrency();
-      if (noofLWPS < (Threads::number_of_threads() + 2)) {
-        thr_setconcurrency(thr_getconcurrency() + 1);
-      }
-    } else if (iterations < 25) {
-      os::sleep(Thread::current(), 1, false);
-    } else {
-      os::sleep(Thread::current(), 10, false);
-    }
-  }
 }

-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
-  os::yield_all(attempts);
-}
-

 // Interface for setting lwp priorities. If we are using T2 libthread,
 // which forces the use of BoundThreads or we manually set UseBoundThreads,
 // all of our threads will be assigned to real lwp's. Using the thr_setprio
 // function is meaningless in this mode so we must adjust the real lwp's priority
 // The routines below implement the getting and setting of lwp priorities.
 //
+// Note: T2 is now the only supported libthread. UseBoundThreads flag is
+// being deprecated and all threads are now BoundThreads
+//
 // Note: There are three priority scales used on Solaris. Java priotities
 // which range from 1 to 10, libthread "thr_setprio" scale which range
 // from 0 to 127, and the current scheduling class of the process we
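
The hunks that follow touch lwp_priocntl_init() and scale_to_lwp_priority(), the code that maps the three priority scales described above onto one another. As a reading aid, here is a minimal, hypothetical Java sketch of such a linear rescaling; the names, the 128 divisor, and the rounding choice are illustrative assumptions, not a transcription of the HotSpot code:

    // Illustrative only: map x from the libthread scale [0..127] onto an
    // arbitrary lwp class range [rMin..rMax]. Pinning x == 127 to rMax is
    // an assumption made to avoid integer-division rounding at the top.
    final class PriorityScale {
        static int scaleToLwpPriority(int rMin, int rMax, int x) {
            if (x == 127) {
                return rMax;
            }
            return rMin + (x * (rMax - rMin)) / 128;
        }

        public static void main(String[] args) {
            // A Java priority is first table-mapped onto 0..127; assuming it
            // landed at 64, rescale it into a class range of -60..60:
            System.out.println(scaleToLwpPriority(-60, 60, 64)); // prints 0
        }
    }
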
@@ -3437,8 +3302,6 @@ static int lwp_priocntl_init () {

   if (!UseThreadPriorities) return 0;

-  // We are using Bound threads, we need to determine our priority ranges
-  if (os::Solaris::T2_libthread() || UseBoundThreads) {
   // If ThreadPriorityPolicy is 1, switch tables
   if (ThreadPriorityPolicy == 1) {
     for (i = 0 ; i < CriticalPriority+1; i++)

@@ -3453,14 +3316,6 @@ static int lwp_priocntl_init () {
       // Set negative to distinguish from other priorities
       os::java_to_os_priority[MaxPriority] = -criticalPrio;
     }
-  }
-  // Not using Bound Threads, set to ThreadPolicy 1
-  else {
-    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
-      os::java_to_os_priority[i] = prio_policy1[i];
-    }
-    return 0;
-  }

   // Get IDs for a set of well-known scheduling classes.
   // TODO-FIXME: GETCLINFO returns the current # of classes in the

@@ -3583,10 +3438,6 @@ int scale_to_lwp_priority (int rMin, int rMax, int x)


 // set_lwp_class_and_priority
-//
-// Set the class and priority of the lwp. This call should only
-// be made when using bound threads (T2 threads are bound by default).
-//
 int set_lwp_class_and_priority(int ThreadID, int lwpid,
                                int newPrio, int new_class, bool scale) {
   int rslt;

@@ -3812,8 +3663,6 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
     status = thr_setprio(thread->osthread()->thread_id(), newpri);
   }

-  if (os::Solaris::T2_libthread() ||
-      (UseBoundThreads && osthread->is_vm_created())) {
   int lwp_status =
     set_lwp_class_and_priority(osthread->thread_id(),
                                osthread->lwp_id(),

@@ -3828,7 +3677,6 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
                                newpri, myClass, false);
   }
   status |= lwp_status;
-  }
   return (status == 0) ? OS_OK : OS_ERR;
 }

@@ -4495,13 +4343,6 @@ const char* os::exception_name(int exception_code, char* buf, size_t size) {
   }
 }

-// (Static) wrappers for the new libthread API
-int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
-int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
-int_fnP_thread_t_i os::Solaris::_thr_setmutator;
-int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
-int_fnP_thread_t os::Solaris::_thr_continue_mutator;

 // (Static) wrapper for getisax(2) call.
 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

@@ -4536,78 +4377,9 @@ static address resolve_symbol(const char* name) {
   return addr;
 }

-
-// isT2_libthread()
-//
-// Routine to determine if we are currently using the new T2 libthread.
-//
-// We determine if we are using T2 by reading /proc/self/lstatus and
-// looking for a thread with the ASLWP bit set. If we find this status
-// bit set, we must assume that we are NOT using T2. The T2 team
-// has approved this algorithm.
-//
-// We need to determine if we are running with the new T2 libthread
-// since setting native thread priorities is handled differently
-// when using this library. All threads created using T2 are bound
-// threads. Calling thr_setprio is meaningless in this case.
-//
-bool isT2_libthread() {
-  static prheader_t * lwpArray = NULL;
-  static int lwpSize = 0;
-  static int lwpFile = -1;
-  lwpstatus_t * that;
-  char lwpName [128];
-  bool isT2 = false;
-
-#define ADR(x) ((uintptr_t)(x))
-#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
-
-  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
-  if (lwpFile < 0) {
-    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
-    return false;
-  }
-  lwpSize = 16*1024;
-  for (;;) {
-    ::lseek64 (lwpFile, 0, SEEK_SET);
-    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
-    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
-      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
-      break;
-    }
-    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
-      // We got a good snapshot - now iterate over the list.
-      int aslwpcount = 0;
-      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
-        that = LWPINDEX(lwpArray,i);
-        if (that->pr_flags & PR_ASLWP) {
-          aslwpcount++;
-        }
-      }
-      if (aslwpcount == 0) isT2 = true;
-      break;
-    }
-    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
-    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry.
-  }
-
-  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
-  ::close (lwpFile);
-  if (ThreadPriorityVerbose) {
-    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
-    else tty->print_cr("We are not running with a T2 libthread\n");
-  }
-  return isT2;
-}
-
-
 void os::Solaris::libthread_init() {
   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

-  // Determine if we are running with the new T2 libthread
-  os::Solaris::set_T2_libthread(isT2_libthread());
-
   lwp_priocntl_init();

   // RTLD_DEFAULT was not defined on some early versions of 5.5.1

@@ -4618,22 +4390,6 @@ void os::Solaris::libthread_init() {
     guarantee(func != NULL, "libthread.so is too old.");
   }

-  // Initialize the new libthread getstate API wrappers
-  func = resolve_symbol("thr_getstate");
-  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
-
-  func = resolve_symbol("thr_setstate");
-  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
-
-  func = resolve_symbol("thr_setmutator");
-  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
-
-  func = resolve_symbol("thr_suspend_mutator");
-  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
-
-  func = resolve_symbol("thr_continue_mutator");
-  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

   int size;
   void (*handler_info_func)(address *, int *);
   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));

@@ -5536,11 +5292,7 @@ void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
 }

 bool os::is_thread_cpu_time_supported() {
-  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
   return true;
-  } else {
-    return false;
-  }
 }

 // System loadavg support. Returns -1 if load average cannot be obtained.
@@ -41,19 +41,6 @@ class Solaris {
 #define TRS_LWPID 2
 #define TRS_INVALID 3

-  // _T2_libthread is true if we believe we are running with the newer
-  // SunSoft lib/lwp/libthread: default Solaris 9, available Solaris 8
-  // which is a lightweight libthread that also supports all T1
-  static bool _T2_libthread;
-  // These refer to new libthread interface functions
-  // They get intialized if we dynamically detect new libthread
-  static int_fnP_thread_t_iP_uP_stack_tP_gregset_t _thr_getstate;
-  static int_fnP_thread_t_i_gregset_t _thr_setstate;
-  static int_fnP_thread_t_i _thr_setmutator;
-  static int_fnP_thread_t _thr_suspend_mutator;
-  static int_fnP_thread_t _thr_continue_mutator;
-  // libthread_init sets the above, if the new functionality is detected

   // initialized to libthread or lwp synchronization primitives depending on UseLWPSychronization
   static int_fnP_mutex_tP _mutex_lock;
   static int_fnP_mutex_tP _mutex_trylock;

@@ -214,29 +201,6 @@ class Solaris {
   static struct sigaction *get_chained_signal_action(int sig);
   static bool chained_handler(int sig, siginfo_t *siginfo, void *context);

-  // The following allow us to link against both the old and new libthread (2.8)
-  // and exploit the new libthread functionality if available.
-
-  static bool T2_libthread() { return _T2_libthread; }
-  static void set_T2_libthread(bool T2_libthread) { _T2_libthread = T2_libthread; }
-
-  static int thr_getstate(thread_t tid, int *flag, unsigned *lwp, stack_t *ss, gregset_t rs)
-    { return _thr_getstate(tid, flag, lwp, ss, rs); }
-  static void set_thr_getstate(int_fnP_thread_t_iP_uP_stack_tP_gregset_t func)
-    { _thr_getstate = func; }
-
-  static int thr_setstate(thread_t tid, int flag, gregset_t rs) { return _thr_setstate(tid, flag, rs); }
-  static void set_thr_setstate(int_fnP_thread_t_i_gregset_t func) { _thr_setstate = func; }
-
-  static int thr_setmutator(thread_t tid, int enabled) { return _thr_setmutator(tid, enabled); }
-  static void set_thr_setmutator(int_fnP_thread_t_i func) { _thr_setmutator = func; }
-
-  static int thr_suspend_mutator(thread_t tid) { return _thr_suspend_mutator(tid); }
-  static void set_thr_suspend_mutator(int_fnP_thread_t func) { _thr_suspend_mutator = func; }
-
-  static int thr_continue_mutator(thread_t tid) { return _thr_continue_mutator(tid); }
-  static void set_thr_continue_mutator(int_fnP_thread_t func) { _thr_continue_mutator = func; }

   // Allows us to switch between lwp and thread -based synchronization
   static int mutex_lock(mutex_t *mx) { return _mutex_lock(mx); }
   static int mutex_trylock(mutex_t *mx) { return _mutex_trylock(mx); }
@@ -3518,7 +3518,7 @@ os::YieldResult os::NakedYield() {

 void os::yield() { os::NakedYield(); }

-void os::yield_all(int attempts) {
+void os::yield_all() {
   // Yields to all threads, including threads with lower priorities
   Sleep(1);
 }

@@ -3864,12 +3864,6 @@ void os::init(void) {
   win32::setmode_streams();
   init_page_sizes((size_t) win32::vm_page_size());

-  // For better scalability on MP systems (must be called after initialize_system_info)
-#ifndef PRODUCT
-  if (is_MP()) {
-    NoYieldsInMicrolock = true;
-  }
-#endif
   // This may be overridden later when argument processing is done.
   FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                 os::win32::is_windows_2003());

@@ -52,9 +52,6 @@ inline void* os::dll_lookup(void *lib, const char *name) {
   return (void*)::GetProcAddress((HMODULE)lib, name);
 }

-// Used to improve time-sharing on some systems
-inline void os::loop_breaker(int attempts) {}

 inline bool os::obsolete_option(const JavaVMOption *option) {
   return false;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -270,31 +270,6 @@ frame os::current_frame() {
   }
 }

-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
-  char lwpstatusfile[PROCFILE_LENGTH];
-  int lwpfd, err;
-
-  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
-    return (err);
-  if (*flags == TRS_LWPID) {
-    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
-            *lwp);
-    if ((lwpfd = ::open(lwpstatusfile, O_RDONLY)) < 0) {
-      perror("thr_mutator_status: open lwpstatus");
-      return (EINVAL);
-    }
-    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
-        sizeof (lwpstatus_t)) {
-      perror("thr_mutator_status: read lwpstatus");
-      (void) ::close(lwpfd);
-      return (EINVAL);
-    }
-    (void) ::close(lwpfd);
-  }
-  return (0);
-}
-

 bool os::is_allocatable(size_t bytes) {
 #ifdef _LP64
   return true;

@@ -256,30 +256,6 @@ frame os::current_frame() {
   }
 }

-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
-  char lwpstatusfile[PROCFILE_LENGTH];
-  int lwpfd, err;
-
-  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
-    return (err);
-  if (*flags == TRS_LWPID) {
-    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
-            *lwp);
-    if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
-      perror("thr_mutator_status: open lwpstatus");
-      return (EINVAL);
-    }
-    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
-        sizeof (lwpstatus_t)) {
-      perror("thr_mutator_status: read lwpstatus");
-      (void) close(lwpfd);
-      return (EINVAL);
-    }
-    (void) close(lwpfd);
-  }
-  return (0);
-}
-
 #ifndef AMD64

 // Detecting SSE support by OS
@@ -25,6 +25,7 @@
 package com.sun.hotspot.tools.compiler;

 import java.io.PrintStream;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.List;

@@ -40,6 +41,7 @@ public class CallSite {
     private int endNodes;
     private int endLiveNodes;
     private double timeStamp;
+    private long inlineId;

     CallSite() {
     }

@@ -94,7 +96,7 @@ public class CallSite {

     public void print(PrintStream stream, int indent) {
         emit(stream, indent);
-        String m = getMethod().getHolder().replace('/', '.') + "::" + getMethod().getName();
+        String m = getMethod().getHolder() + "::" + getMethod().getName();
         if (getReason() == null) {
             stream.print(" @ " + getBci() + " " + m + " (" + getMethod().getBytes() + " bytes)");

@@ -214,4 +216,45 @@ public class CallSite {
         return timeStamp;
     }

+    private boolean matches(CallSite other) {
+        // Every late inline call site has a unique inline id. If the
+        // call site we're looking for has one then use it, otherwise rely
+        // on method name and bci.
+        if (other.inlineId != 0) {
+            return inlineId == other.inlineId;
+        }
+        return method.equals(other.method) && bci == other.bci;
+    }
+
+    public CallSite findCallSite(ArrayDeque<CallSite> sites) {
+        // Locate a late inline call site. Multiple chains of
+        // identical call sites with the same method name/bci are
+        // possible so we have to try them all until we find the late
+        // inline call site that has a matching inline id.
+        CallSite site = sites.pop();
+        for (CallSite c : calls) {
+            if (c.matches(site)) {
+                if (!sites.isEmpty()) {
+                    CallSite res = c.findCallSite(sites);
+                    if (res != null) {
+                        sites.push(site);
+                        return res;
+                    }
+                } else {
+                    sites.push(site);
+                    return c;
+                }
+            }
+        }
+        sites.push(site);
+        return null;
+    }
+
+    public long getInlineId() {
+        return inlineId;
+    }
+
+    public void setInlineId(long inlineId) {
+        this.inlineId = inlineId;
+    }
 }
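
The findCallSite() walk added above backtracks through sibling chains that look identical by method/bci and commits only when inline ids line up. A self-contained toy model, with invented names and data, that exercises the same pop/push discipline:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;

    // Toy model of the search: an id of 0 means "no inline id recorded",
    // in which case bci decides; a nonzero id must match exactly.
    final class Node {
        final int bci;
        final long id;
        final List<Node> calls = new ArrayList<>();

        Node(int bci, long id) { this.bci = bci; this.id = id; }

        boolean matches(Node other) {
            return other.id != 0 ? id == other.id : bci == other.bci;
        }

        Node find(ArrayDeque<Node> wanted) {
            Node w = wanted.pop();
            try {
                for (Node c : calls) {
                    if (!c.matches(w)) continue;
                    if (wanted.isEmpty()) return c;
                    Node r = c.find(wanted);
                    if (r != null) return r;
                }
                return null;
            } finally {
                wanted.push(w);   // leave the deque as we found it
            }
        }

        public static void main(String[] args) {
            // Two sibling chains identical by bci; only the inline id
            // picks out the chain late inlining actually committed.
            Node root = new Node(-1, 0);
            Node a = new Node(4, 0), b = new Node(4, 0);
            root.calls.add(a); root.calls.add(b);
            a.calls.add(new Node(9, 17));
            b.calls.add(new Node(9, 42));
            ArrayDeque<Node> wanted = new ArrayDeque<>();
            wanted.push(new Node(9, 42));  // deepest frame pushed first,
            wanted.push(new Node(4, 0));   // so the shallowest is on top
            System.out.println(root.find(wanted).id);  // 42
        }
    }
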
@@ -31,6 +31,7 @@ package com.sun.hotspot.tools.compiler;

 import java.io.FileReader;
 import java.io.Reader;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;

@@ -144,9 +145,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
     private Stack<CallSite> scopes = new Stack<CallSite>();
     private Compilation compile;
     private CallSite site;
+    private CallSite methodHandleSite;
     private Stack<Phase> phaseStack = new Stack<Phase>();
     private UncommonTrapEvent currentTrap;
-    private Stack<CallSite> late_inline_scope;
+    private Stack<CallSite> lateInlineScope;
+    private boolean lateInlining;

     long parseLong(String l) {
         try {

@@ -330,18 +334,61 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
             }
             methods.put(id, m);
         } else if (qname.equals("call")) {
-            site = new CallSite(bci, method(search(atts, "method")));
+            if (methodHandleSite != null) {
+                methodHandleSite = null;
+            }
+            Method m = method(search(atts, "method"));
+            if (lateInlining && scopes.size() == 0) {
+                // re-attempting already seen call site (late inlining for MH invokes)
+                if (m != site.getMethod()) {
+                    if (bci != site.getBci()) {
+                        System.out.println(m + " bci: " + bci);
+                        System.out.println(site.getMethod() + " bci: " + site.getBci());
+                        throw new InternalError("bci mismatch after late inlining");
+                    }
+                    site.setMethod(m);
+                }
+            } else {
+                site = new CallSite(bci, m);
+            }
             site.setCount(Integer.parseInt(search(atts, "count", "0")));
             String receiver = atts.getValue("receiver");
             if (receiver != null) {
                 site.setReceiver(type(receiver));
                 site.setReceiver_count(Integer.parseInt(search(atts, "receiver_count")));
             }
+            int methodHandle = Integer.parseInt(search(atts, "method_handle_intrinsic", "0"));
+            if (lateInlining && scopes.size() == 0) {
+                // The call was added before this round of late inlining
+            } else if (methodHandle == 0) {
                 scopes.peek().add(site);
+            } else {
+                // method handle call site can be followed by another
+                // call (in case it is inlined). If that happens we
+                // discard the method handle call site. So we keep
+                // track of it but don't add it to the list yet.
+                methodHandleSite = site;
+            }
         } else if (qname.equals("regalloc")) {
             compile.setAttempts(Integer.parseInt(search(atts, "attempts")));
         } else if (qname.equals("inline_fail")) {
+            if (methodHandleSite != null) {
+                scopes.peek().add(methodHandleSite);
+                methodHandleSite = null;
+            }
+            if (lateInlining && scopes.size() == 0) {
+                site.setReason(search(atts, "reason"));
+                lateInlining = false;
+            } else {
                 scopes.peek().last().setReason(search(atts, "reason"));
+            }
+        } else if (qname.equals("inline_success")) {
+            if (methodHandleSite != null) {
+                throw new InternalError("method handle site should have been replaced");
+            }
+            if (lateInlining && scopes.size() == 0) {
+                site.setReason(null);
+            }
         } else if (qname.equals("failure")) {
             failureReason = search(atts, "reason");
         } else if (qname.equals("task_done")) {

@@ -371,22 +418,30 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
                 // ignore for now
             }
         } else if (qname.equals("late_inline")) {
-            late_inline_scope = new Stack<CallSite>();
+            long inlineId = Long.parseLong(search(atts, "inline_id"));
+            lateInlineScope = new Stack<CallSite>();
             site = new CallSite(-999, method(search(atts, "method")));
-            late_inline_scope.push(site);
+            site.setInlineId(inlineId);
+            lateInlineScope.push(site);
         } else if (qname.equals("jvms")) {
             // <jvms bci='4' method='java/io/DataInputStream readChar ()C' bytes='40' count='5815' iicount='20815'/>
             if (currentTrap != null) {
                 currentTrap.addJVMS(atts.getValue("method"), Integer.parseInt(atts.getValue("bci")));
-            } else if (late_inline_scope != null) {
+            } else if (lateInlineScope != null) {
                 bci = Integer.parseInt(search(atts, "bci"));
                 site = new CallSite(bci, method(search(atts, "method")));
-                late_inline_scope.push(site);
+                lateInlineScope.push(site);
             } else {
                 // Ignore <eliminate_allocation type='667'>,
                 // <eliminate_lock lock='1'>,
                 // <replace_string_concat arguments='2' string_alloc='0' multiple='0'>
             }
+        } else if (qname.equals("inline_id")) {
+            if (methodHandleSite != null) {
+                throw new InternalError("method handle site should have been replaced");
+            }
+            long id = Long.parseLong(search(atts, "id"));
+            site.setInlineId(id);
         } else if (qname.equals("nmethod")) {
             String id = makeId(atts);
             NMethod nm = new NMethod(Double.parseDouble(search(atts, "stamp")),

@@ -396,8 +451,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
             nmethods.put(id, nm);
             events.add(nm);
         } else if (qname.equals("parse")) {
+            if (methodHandleSite != null) {
+                throw new InternalError("method handle site should have been replaced");
+            }
             Method m = method(search(atts, "method"));
-            if (scopes.size() == 0) {
+            if (lateInlining && scopes.size() == 0) {
+                if (site.getMethod() != m) {
+                    System.out.println(site.getMethod());
+                    System.out.println(m);
+                    throw new InternalError("Unexpected method mismatch during late inlining");
+                }
+            }
+            if (scopes.size() == 0 && !lateInlining) {
                 compile.setMethod(m);
                 scopes.push(site);
             } else {

@@ -427,14 +492,19 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
         if (qname.equals("parse")) {
             indent -= 2;
             scopes.pop();
+            if (scopes.size() == 0) {
+                lateInlining = false;
+            }
         } else if (qname.equals("uncommon_trap")) {
             currentTrap = null;
         } else if (qname.equals("late_inline")) {
             // Populate late inlining info.
-            // late_inline scopes are specified in reverse order:
+            if (scopes.size() != 0) {
+                throw new InternalError("scopes should be empty for late inline");
+            }
+            // late inline scopes are specified in reverse order:
             // compiled method should be on top of stack.
-            CallSite caller = late_inline_scope.pop();
+            CallSite caller = lateInlineScope.pop();
             Method m = compile.getMethod();
             if (m != caller.getMethod()) {
                 System.out.println(m);

@@ -444,28 +514,42 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants

             // late_inline contains caller+bci info, convert it
             // to bci+callee info used by LogCompilation.
-            site = compile.getLateInlineCall();
+            CallSite lateInlineSite = compile.getLateInlineCall();
+            ArrayDeque<CallSite> thisCallScopes = new ArrayDeque<CallSite>();
             do {
                 bci = caller.getBci();
                 // Next inlined call.
-                caller = late_inline_scope.pop();
+                caller = lateInlineScope.pop();
                 CallSite callee = new CallSite(bci, caller.getMethod());
-                site.add(callee);
-                site = callee;
-            } while (!late_inline_scope.empty());
+                callee.setInlineId(caller.getInlineId());
+                thisCallScopes.addLast(callee);
+                lateInlineSite.add(callee);
+                lateInlineSite = callee;
+            } while (!lateInlineScope.empty());
+
+            site = compile.getCall().findCallSite(thisCallScopes);
+            if (site == null) {
+                System.out.println(caller.getMethod() + " bci: " + bci);
+                throw new InternalError("couldn't find call site");
+            }
+            lateInlining = true;

             if (caller.getBci() != -999) {
                 System.out.println(caller.getMethod());
                 throw new InternalError("broken late_inline info");
             }
             if (site.getMethod() != caller.getMethod()) {
+                if (site.getInlineId() == caller.getInlineId()) {
+                    site.setMethod(caller.getMethod());
+                } else {
                     System.out.println(site.getMethod());
                     System.out.println(caller.getMethod());
                     throw new InternalError("call site and late_inline info don't match");
                 }
+            }
             // late_inline is followed by parse with scopes.size() == 0,
             // 'site' will be pushed to scopes.
-            late_inline_scope = null;
+            lateInlineScope = null;
         } else if (qname.equals("task")) {
             types.clear();
             methods.clear();
@@ -51,15 +51,15 @@ public class Method implements Constants {

     String format(int osr_bci) {
         if (osr_bci >= 0) {
-            return getHolder().replace('/', '.') + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
+            return getHolder() + "::" + getName() + " @ " + osr_bci + " (" + getBytes() + " bytes)";
         } else {
-            return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
+            return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
         }
     }

     @Override
     public String toString() {
-        return getHolder().replace('/', '.') + "::" + getName() + " (" + getBytes() + " bytes)";
+        return getHolder() + "::" + getName() + " (" + getBytes() + " bytes)";
     }

     public String getHolder() {

@@ -117,4 +117,14 @@ public class Method implements Constants {
     public void setFlags(String flags) {
         this.flags = flags;
     }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o instanceof Method) {
+            Method other = (Method)o;
+            return holder.equals(other.holder) && name.equals(other.name) &&
+                arguments.equals(other.arguments) && returnType.equals(other.returnType);
+        }
+        return false;
+    }
 }
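
One aside on the equals() just added (an observation about the snippet, not something this change addresses): no hashCode() override appears in this hunk, which is harmless for the linear scans in findCallSite() but would matter if Method values were ever used as hash keys. A minimal hypothetical pairing that keeps the two consistent:

    import java.util.Objects;

    // Hypothetical companion to a field-based equals(): hashCode() must be
    // derived from the same four fields so that equal values collide.
    final class MethodKey {
        final String holder, name, arguments, returnType;

        MethodKey(String holder, String name, String arguments, String returnType) {
            this.holder = holder;
            this.name = name;
            this.arguments = arguments;
            this.returnType = returnType;
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof MethodKey)) {
                return false;
            }
            MethodKey other = (MethodKey) o;
            return holder.equals(other.holder) && name.equals(other.name)
                && arguments.equals(other.arguments) && returnType.equals(other.returnType);
        }

        @Override
        public int hashCode() {
            return Objects.hash(holder, name, arguments, returnType);
        }
    }
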
@@ -1613,21 +1613,20 @@ void ArchDesc::declareClasses(FILE *fp) {
   // Each instruction attribute results in a virtual call of same name.
   // The ins_cost is not handled here.
   Attribute *attr = instr->_attribs;
-  bool avoid_back_to_back = false;
+  Attribute *avoid_back_to_back_attr = NULL;
   while (attr != NULL) {
-    if (strcmp (attr->_ident, "ins_cost") != 0 &&
+    if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
+      fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
+    } else if (strcmp (attr->_ident, "ins_cost") != 0 &&
         strncmp(attr->_ident, "ins_field_", 10) != 0 &&
         // Must match function in node.hpp: return type bool, no prefix "ins_".
         strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") != 0 &&
         strcmp (attr->_ident, "ins_short_branch") != 0) {
       fprintf(fp, " virtual int %s() const { return %s; }\n", attr->_ident, attr->_val);
     }
-    // Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
-    if (!strcmp(attr->_ident, "ins_avoid_back_to_back") != 0 && attr->int_val(*this) != 0)
-      avoid_back_to_back = true;
-    if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0)
-      fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
+    if (strcmp(attr->_ident, "ins_avoid_back_to_back") == 0) {
+      avoid_back_to_back_attr = attr;
+    }

     attr = (Attribute *)attr->_next;
   }

@@ -1799,11 +1798,11 @@ void ArchDesc::declareClasses(FILE *fp) {
   }

   // flag: if this instruction should not be generated back to back.
-  if ( avoid_back_to_back ) {
-    if ( node_flags_set ) {
-      fprintf(fp," | Flag_avoid_back_to_back");
+  if (avoid_back_to_back_attr != NULL) {
+    if (node_flags_set) {
+      fprintf(fp," | (%s)", avoid_back_to_back_attr->_val);
     } else {
-      fprintf(fp,"init_flags(Flag_avoid_back_to_back");
+      fprintf(fp,"init_flags((%s)", avoid_back_to_back_attr->_val);
       node_flags_set = true;
     }
   }
@@ -968,6 +968,7 @@ void CodeBuffer::verify_section_allocation() {

 void CodeBuffer::log_section_sizes(const char* name) {
   if (xtty != NULL) {
+    ttyLocker ttyl;
     // log info about buffer usage
     xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
     for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
@@ -237,3 +237,9 @@ void ciKlass::print_impl(outputStream* st) {
 void ciKlass::print_name_on(outputStream* st) {
   name()->print_symbol_on(st);
 }

+const char* ciKlass::external_name() const {
+  GUARDED_VM_ENTRY(
+    return get_Klass()->external_name();
+  )
+}

@@ -125,6 +125,8 @@ public:
   virtual ciKlass* exact_klass() = 0;

   void print_name_on(outputStream* st);

+  const char* external_name() const;
 };

 #endif // SHARE_VM_CI_CIKLASS_HPP

@@ -123,6 +123,10 @@ void ciSymbol::print_symbol_on(outputStream *st) {
   GUARDED_VM_ENTRY(get_symbol()->print_symbol_on(st);)
 }

+const char* ciSymbol::as_klass_external_name() const {
+  GUARDED_VM_ENTRY(return get_symbol()->as_klass_external_name(););
+}

 // ------------------------------------------------------------------
 // ciSymbol::make_impl
 //

@@ -90,6 +90,7 @@ public:
   void print_symbol() {
     print_symbol_on(tty);
   }
+  const char* as_klass_external_name() const;

   // Make a ciSymbol from a C string.
   // Consider adding to vmSymbols.hpp instead of using this constructor.
@@ -4180,8 +4180,12 @@ ClassFileParser::~ClassFileParser() {

   clear_class_metadata();

-  // deallocate the klass if already created.
-  MetadataFactory::free_metadata(_loader_data, _klass);
+  // deallocate the klass if already created. Don't directly deallocate, but add
+  // to the deallocate list so that the klass is removed from the CLD::_klasses list
+  // at a safepoint.
+  if (_klass != NULL) {
+    _loader_data->add_to_deallocate_list(_klass);
+  }
   _klass = NULL;
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -464,25 +464,26 @@ bool java_lang_String::equals(oop str1, oop str2) {
 void java_lang_String::print(oop java_string, outputStream* st) {
   assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
   typeArrayOop value = java_lang_String::value(java_string);
-  int offset = java_lang_String::offset(java_string);
-  int length = java_lang_String::length(java_string);
-
-  int end = MIN2(length, 100);
   if (value == NULL) {
     // This can happen if, e.g., printing a String
     // object before its initializer has been called
-    st->print_cr("NULL");
-  } else {
+    st->print("NULL");
+    return;
+  }
+
+  int offset = java_lang_String::offset(java_string);
+  int length = java_lang_String::length(java_string);
+
   st->print("\"");
   for (int index = 0; index < length; index++) {
     st->print("%c", value->char_at(index + offset));
   }
   st->print("\"");
-  }
 }

-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
-  Handle mirror (THREAD, fd->field_holder()->java_mirror());
+static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
   assert(mirror.not_null() && fd->is_static(), "just checking");
   if (fd->has_initial_value()) {
     BasicType t = fd->field_type();

@@ -549,21 +550,45 @@ void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
   create_mirror(k, Handle(NULL), CHECK);
 }

-oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
+void java_lang_Class::initialize_mirror_fields(KlassHandle k,
+                                               Handle mirror,
+                                               Handle protection_domain,
+                                               TRAPS) {
+  // Allocate a simple java object for a lock.
+  // This needs to be a java object because during class initialization
+  // it can be held across a java call.
+  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+  set_init_lock(mirror(), r);
+
+  // Set protection domain also
+  set_protection_domain(mirror(), protection_domain());
+
+  // Initialize static fields
+  InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
+}
+
+void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers(). Instance classes recalculate
   // the cached flags after the class file is parsed, but before the
   // class is put into the system dictionary.
-  int computed_modifiers = k->compute_modifier_flags(CHECK_0);
+  int computed_modifiers = k->compute_modifier_flags(CHECK);
   k->set_modifier_flags(computed_modifiers);
   // Class_klass has to be loaded because it is used to allocate
   // the mirror.
   if (SystemDictionary::Class_klass_loaded()) {
     // Allocate mirror (java.lang.Class instance)
-    Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
+    Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK);
+
+    // Setup indirection from mirror->klass
+    if (!k.is_null()) {
+      java_lang_Class::set_klass(mirror(), k());
+    }

     InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
+    assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
+
     java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));

     // It might also have a component mirror. This mirror must already exist.

@@ -586,19 +611,22 @@ oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
     } else {
       assert(k->oop_is_instance(), "Must be");

-      // Allocate a simple java object for a lock.
-      // This needs to be a java object because during class initialization
-      // it can be held across a java call.
-      typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
-      set_init_lock(mirror(), r);
-
-      // Set protection domain also
-      set_protection_domain(mirror(), protection_domain());
-
-      // Initialize static fields
-      InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
+      initialize_mirror_fields(k, mirror, protection_domain, THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        // If any of the fields throws an exception like OOM remove the klass field
+        // from the mirror so GC doesn't follow it after the klass has been deallocated.
+        // This mirror looks like a primitive type, which logically it is because it
+        // it represents no class.
+        java_lang_Class::set_klass(mirror(), NULL);
+        return;
+      }
     }
-    return mirror();
+
+    // Setup indirection from klass->mirror last
+    // after any exceptions can happen during allocations.
+    if (!k.is_null()) {
+      k->set_java_mirror(mirror());
+    }
   } else {
     if (fixup_mirror_list() == NULL) {
       GrowableArray<Klass*>* list =

@@ -606,12 +634,10 @@ oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
       set_fixup_mirror_list(list);
     }
     fixup_mirror_list()->push(k());
-    return NULL;
   }
 }


 int java_lang_Class::oop_size(oop java_class) {
   assert(_oop_size_offset != 0, "must be set");
   return java_class->int_field(_oop_size_offset);
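
The restructured create_mirror() above follows a publish-last shape: initialize the dependent object completely, undo the forward link if initialization throws, and only then publish the back link. A compact Java analogy, with all names invented:

    // Toy analogy of the create_mirror() error path: the mirror's klass
    // pointer is cleared on failure, and klass->mirror is set only after
    // every step that can throw has succeeded.
    final class MirrorExample {
        static final class Mirror {
            Object klass;              // forward link, set early
            Mirror(Object klass) { this.klass = klass; }
            void initializeFields() {  // stands in for the allocations above
                // may throw, e.g. OutOfMemoryError
            }
        }

        static final class Klass {
            Mirror mirror;             // back link, published last

            void createMirror() {
                Mirror m = new Mirror(this);
                try {
                    m.initializeFields();
                } catch (Throwable t) {
                    m.klass = null;    // half-built mirror no longer points at us
                    throw t;
                }
                this.mirror = m;       // publish only after success
            }
        }
    }
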
@@ -246,11 +246,12 @@ class java_lang_Class : AllStatic {

   static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
+  static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
 public:
   static void compute_offsets();

   // Instance creation
-  static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
+  static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -810,11 +810,11 @@ void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
   const int limit = the_table()->table_size();

   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+         err_msg("start_idx (%d) is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+         err_msg("end_idx (%d) is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=%d, end_idx=%d",
                  start_idx, end_idx));

   for (int i = start_idx; i < end_idx; i += 1) {

@@ -833,11 +833,11 @@ void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
   const int limit = the_table()->table_size();

   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+         err_msg("start_idx (%d) is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+         err_msg("end_idx (%d) is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=%d, end_idx=%d",
                  start_idx, end_idx));

   for (int i = start_idx; i < end_idx; ++i) {
@@ -826,47 +826,6 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
       }
     } // load_instance_class loop

-    if (HAS_PENDING_EXCEPTION) {
-      // An exception, such as OOM could have happened at various places inside
-      // load_instance_class. We might have partially initialized a shared class
-      // and need to clean it up.
-      if (class_loader.is_null()) {
-        // In some cases k may be null. Let's find the shared class again.
-        instanceKlassHandle ik(THREAD, find_shared_class(name));
-        if (ik.not_null()) {
-          if (ik->class_loader_data() == NULL) {
-            // We didn't go as far as Klass::restore_unshareable_info(),
-            // so nothing to clean up.
-          } else {
-            Klass *kk;
-            {
-              MutexLocker mu(SystemDictionary_lock, THREAD);
-              kk = find_class(d_index, d_hash, name, ik->class_loader_data());
-            }
-            if (kk != NULL) {
-              // No clean up is needed if the shared class has been entered
-              // into system dictionary, as load_shared_class() won't be called
-              // again.
-            } else {
-              // This must be done outside of the SystemDictionary_lock to
-              // avoid deadlock.
-              //
-              // Note that Klass::restore_unshareable_info (called via
-              // load_instance_class above) is also called outside
-              // of SystemDictionary_lock. Other threads are blocked from
-              // loading this class because they are waiting on the
-              // SystemDictionary_lock until this thread removes
-              // the placeholder below.
-              //
-              // This need to be re-thought when parallel-capable non-boot
-              // classloaders are supported by CDS (today they're not).
-              clean_up_shared_class(ik, class_loader, THREAD);
-            }
-          }
-        }
-      }
-    }
-
     if (load_instance_added == true) {
       // clean up placeholder entries for LOAD_INSTANCE success or error
       // This brackets the SystemDictionary updates for both defining

@@ -1272,19 +1231,6 @@ instanceKlassHandle SystemDictionary::load_shared_class(
   return ik;
 }

-void SystemDictionary::clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS) {
-  // Updating methods must be done under a lock so multiple
-  // threads don't update these in parallel
-  // Shared classes are all currently loaded by the bootstrap
-  // classloader, so this will never cause a deadlock on
-  // a custom class loader lock.
-  {
-    Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
-    check_loader_lock_contention(lockObject, THREAD);
-    ObjectLocker ol(lockObject, THREAD, true);
-    ik->remove_unshareable_info();
-  }
-}
-
 instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle

@@ -617,7 +617,6 @@ private:
                                                Handle class_loader, TRAPS);
   static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
                                                Handle class_loader, TRAPS);
-  static void clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS);
   static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
@@ -106,7 +106,7 @@ int CompileLog::identify(ciBaseObject* obj) {
     if (mobj->is_klass()) {
       ciKlass* klass = mobj->as_klass();
       begin_elem("klass id='%d'", id);
-      name(klass->name());
+      name(klass);
       if (!klass->is_loaded()) {
         print(" unloaded='1'");
       } else {

@@ -171,6 +171,15 @@ void CompileLog::name(ciSymbol* name) {
   print("'");
 }

+void CompileLog::name(ciKlass* k) {
+  print(" name='");
+  if (!k->is_loaded()) {
+    text()->print(k->name()->as_klass_external_name());
+  } else {
+    text()->print(k->external_name());
+  }
+  print("'");
+}
+
 // ------------------------------------------------------------------
 // CompileLog::clear_identities
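Note: routing identify() through the new name(ciKlass*) overload means a loaded klass is logged under its runtime external name, while an unloaded one falls back to the external form of its symbol. The two lines below are only an illustration of that distinction, assuming a loaded java.lang.String and a hypothetical unloaded application class; the exact attributes of a real compilation log element may differ:

    <klass id='32' name='java.lang.String'/>
    <klass id='33' name='com.example.Missing' unloaded='1'/>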
@@ -28,6 +28,7 @@
 #include "utilities/xmlstream.hpp"

 class ciBaseObject;
+class ciKlass;
 class ciObject;
 class ciMetadata;
 class ciSymbol;

@@ -72,6 +73,7 @@ class CompileLog : public xmlStream {

   void name(ciSymbol* s);      // name='s'
   void name(Symbol* s)         { xmlStream::name(s); }
+  void name(ciKlass* k);

   // Output an object description, return obj->ident().
   int identify(ciBaseObject* obj);
@@ -57,10 +57,10 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :

   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);

-  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+  uint worker_id_offset = DirtyCardQueueSet::num_par_ids();

   ConcurrentG1RefineThread *next = NULL;
-  for (int i = _n_threads - 1; i >= 0; i--) {
+  for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
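Note on the rewritten loop bound: once i is a uint, the old condition i >= 0 would be vacuously true, so the loop now terminates when the decrement past zero wraps around to UINT_MAX. A minimal self-contained sketch of the idiom (plain C++, not HotSpot code; n is an arbitrary stand-in for _n_threads):

    #include <limits.h>
    #include <stdio.h>

    int main() {
      unsigned int n = 3;
      // Visits n-1, ..., 1, 0; unsigned wraparound from 0 to UINT_MAX
      // is well defined and ends the loop after the i == 0 iteration.
      for (unsigned int i = n - 1; i != UINT_MAX; i--) {
        printf("%u\n", i);
      }
      return 0;
    }

The same wraparound also makes the loop a safe no-op when n is 0, since the initial value n - 1 is already UINT_MAX.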
@@ -87,7 +87,7 @@ void ConcurrentG1Refine::init() {

 void ConcurrentG1Refine::stop() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->stop();
     }
   }

@@ -96,7 +96,7 @@ void ConcurrentG1Refine::stop() {
 void ConcurrentG1Refine::reinitialize_threads() {
   reset_threshold_step();
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->initialize();
     }
   }

@@ -104,7 +104,7 @@ void ConcurrentG1Refine::reinitialize_threads() {

 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       delete _threads[i];
     }
     FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);

@@ -113,7 +113,7 @@ ConcurrentG1Refine::~ConcurrentG1Refine() {

 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       tc->do_thread(_threads[i]);
     }
   }

@@ -121,20 +121,20 @@ void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {

 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < worker_thread_num(); i++) {
+    for (uint i = 0; i < worker_thread_num(); i++) {
       tc->do_thread(_threads[i]);
     }
   }
 }

-int ConcurrentG1Refine::thread_num() {
-  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+uint ConcurrentG1Refine::thread_num() {
+  uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
                                                  : ParallelGCThreads;
-  return MAX2<int>(n_threads, 1);
+  return MAX2<uint>(n_threads, 1);
 }

 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
-  for (int i = 0; i < _n_threads; ++i) {
+  for (uint i = 0; i < _n_threads; ++i) {
     _threads[i]->print_on(st);
     st->cr();
   }
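Worked example for thread_num(): with G1ConcRefinementThreads left at 0, n_threads falls back to ParallelGCThreads; if that is also 0 (a single-threaded configuration), the MAX2<uint> clamp still returns 1, so G1 always runs at least one refinement thread.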
@@ -39,8 +39,8 @@ class DirtyCardQueue;

 class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
-  int _n_threads;
-  int _n_worker_threads;
+  uint _n_threads;
+  uint _n_worker_threads;
  /*
   * The value of the update buffer queue length falls into one of 3 zones:
   * green, yellow, red. If the value is in [0, green) nothing is

@@ -88,7 +88,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   // The RS sampling thread
   ConcurrentG1RefineThread * sampling_thread() const;

-  static int thread_num();
+  static uint thread_num();

   void print_worker_threads_on(outputStream* st) const;

@@ -100,8 +100,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
   int yellow_zone() const { return _yellow_zone; }
   int red_zone() const { return _red_zone; }

-  int total_thread_num() const { return _n_threads; }
-  int worker_thread_num() const { return _n_worker_threads; }
+  uint total_thread_num() const { return _n_threads; }
+  uint worker_thread_num() const { return _n_worker_threads; }

   int thread_threshold_step() const { return _thread_threshold_step; }
@@ -33,7 +33,7 @@

 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
-                         int worker_id_offset, int worker_id) :
+                         uint worker_id_offset, uint worker_id) :
   ConcurrentGCThread(),
   _worker_id_offset(worker_id_offset),
   _worker_id(worker_id),

@@ -38,8 +38,8 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {

   double _vtime_start;  // Initial virtual time.
   double _vtime_accum;  // Initial virtual time.
-  int _worker_id;
-  int _worker_id_offset;
+  uint _worker_id;
+  uint _worker_id_offset;

   // The refinement threads collection is linked list. A predecessor can activate a successor
   // when the number of the rset update buffer crosses a certain threshold. A successor

@@ -71,7 +71,7 @@ public:
   virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
-                           int worker_id_offset, int worker_id);
+                           uint worker_id_offset, uint worker_id);

   void initialize();
@@ -567,8 +567,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _root_regions.init(_g1h, this);

   if (ConcGCThreads > ParallelGCThreads) {
-    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
-            "than ParallelGCThreads (" UINT32_FORMAT ").",
+    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
+            "than ParallelGCThreads (" UINTX_FORMAT ").",
             ConcGCThreads, ParallelGCThreads);
     return;
   }

@@ -1804,7 +1804,6 @@ class G1ParNoteEndTask;

 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
   G1CollectedHeap* _g1;
-  int _worker_num;
   size_t _max_live_bytes;
   uint _regions_claimed;
   size_t _freed_bytes;

@@ -1817,10 +1816,9 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {

 public:
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             int worker_num,
                              FreeRegionList* local_cleanup_list,
                              HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1), _worker_num(worker_num),
+    _g1(g1),
     _max_live_bytes(0), _regions_claimed(0),
     _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),

@@ -1893,7 +1891,7 @@ public:
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,

@@ -2145,7 +2143,7 @@ void ConcurrentMark::completeCleanup() {

   G1CollectedHeap* g1h = G1CollectedHeap::heap();

-  _cleanup_list.verify_list();
+  _cleanup_list.verify_optional();
   FreeRegionList tmp_free_list("Tmp Free List");

   if (G1ConcRegionFreeingVerbose) {
@@ -34,12 +34,12 @@

 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                    bool consume,
-                                   size_t worker_i) {
+                                   uint worker_i) {
   bool res = true;
   if (_buf != NULL) {
     res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                   consume,
-                                  (int) worker_i);
+                                  worker_i);
     if (res && consume) _index = _sz;
   }
   return res;

@@ -49,7 +49,7 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                              void** buf,
                                              size_t index, size_t sz,
                                              bool consume,
-                                             int worker_i) {
+                                             uint worker_i) {
   if (cl == NULL) return true;
   for (size_t i = index; i < sz; i += oopSize) {
     int ind = byte_index_to_index((int)i);

@@ -79,8 +79,8 @@ DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
 }

 // Determines how many mutator threads can process the buffers in parallel.
-size_t DirtyCardQueueSet::num_par_ids() {
-  return os::processor_count();
+uint DirtyCardQueueSet::num_par_ids() {
+  return (uint)os::processor_count();
 }

 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,

@@ -103,7 +103,7 @@ void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
 }

 void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
-                                                    size_t worker_i) {
+                                                    uint worker_i) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   for(JavaThread* t = Threads::first(); t; t = t->next()) {
     bool b = t->dirty_card_queue().apply_closure(_closure, consume);
@@ -126,11 +126,11 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {

   // We get the the number of any par_id that this thread
   // might have already claimed.
-  int worker_i = thread->get_claimed_par_id();
+  uint worker_i = thread->get_claimed_par_id();

-  // If worker_i is not -1 then the thread has already claimed
+  // If worker_i is not UINT_MAX then the thread has already claimed
   // a par_id. We make note of it using the already_claimed value
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     already_claimed = true;
   } else {

@@ -142,7 +142,7 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
   }

   bool b = false;
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                 _sz, true, worker_i);
     if (b) Atomic::inc(&_processed_buffers_mut);

@@ -154,8 +154,8 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
       // we release the id
       _free_ids->release_par_id(worker_i);

-      // and set the claimed_id in the thread to -1
-      thread->set_claimed_par_id(-1);
+      // and set the claimed_id in the thread to UINT_MAX
+      thread->set_claimed_par_id(UINT_MAX);
     }
   }
   return b;
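Note: with par_ids unsigned, the "nothing claimed yet" sentinel moves from -1 to UINT_MAX (the same bit pattern on two's-complement targets, but now type-correct). A self-contained sketch of the claim/release protocol, assuming hypothetical claim_par_id/release_par_id callbacks rather than HotSpot's FreeIdSet:

    #include <limits.h>

    static const unsigned int NO_PAR_ID = UINT_MAX; // sentinel: no id claimed

    struct WorkerState {
      unsigned int claimed_par_id; // starts out as NO_PAR_ID
    };

    // Returns the worker's existing id, or claims a fresh one from the pool.
    unsigned int ensure_par_id(WorkerState* w, unsigned int (*claim_par_id)()) {
      if (w->claimed_par_id != NO_PAR_ID) {
        return w->claimed_par_id;          // reuse the id claimed earlier
      }
      w->claimed_par_id = claim_par_id();  // claim a fresh id
      return w->claimed_par_id;
    }

    void release(WorkerState* w, void (*release_par_id)(unsigned int)) {
      release_par_id(w->claimed_par_id);
      w->claimed_par_id = NO_PAR_ID;       // back to the sentinel
    }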
@@ -186,7 +186,7 @@ DirtyCardQueueSet::get_completed_buffer(int stop_at) {

 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                         int worker_i,
+                                         uint worker_i,
                                          BufferNode* nd) {
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);

@@ -208,7 +208,7 @@ apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
 }

 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                                          int worker_i,
+                                                          uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");

@@ -218,7 +218,7 @@ bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure*
   return res;
 }

-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
+bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   return apply_closure_to_completed_buffer(_closure, worker_i,
@@ -36,7 +36,7 @@ class CardTableEntryClosure: public CHeapObj<mtGC> {
 public:
   // Process the card whose card table entry is "card_ptr". If returns
   // "false", terminate the iteration early.
-  virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
+  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
 };

 // A ptrQueue whose elements are "oops", pointers to object heads.

@@ -53,7 +53,7 @@ public:
   // deletes processed entries from logs.
   bool apply_closure(CardTableEntryClosure* cl,
                      bool consume = true,
-                     size_t worker_i = 0);
+                     uint worker_i = 0);

   // Apply the closure to all elements of "buf", down to "index"
   // (inclusive.) If returns "false", then a closure application returned

@@ -63,7 +63,7 @@ public:
   static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                       void** buf, size_t index, size_t sz,
                                       bool consume = true,
-                                      int worker_i = 0);
+                                      uint worker_i = 0);
   void **get_buf() { return _buf;}
   void set_buf(void **buf) {_buf = buf;}
   size_t get_index() { return _index;}

@@ -98,7 +98,7 @@ public:

   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
-  static size_t num_par_ids();
+  static uint num_par_ids();

   static void handle_zero_index_for_thread(JavaThread* t);

@@ -115,7 +115,7 @@ public:
   // change in the future.) If "consume" is true, processed entries are
   // discarded.
   void iterate_closure_all_threads(bool consume = true,
-                                   size_t worker_i = 0);
+                                   uint worker_i = 0);

   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, nulling out those elements

@@ -124,7 +124,7 @@ public:
   // but is only partially completed before a "yield" happens, the
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
-  bool apply_closure_to_completed_buffer(int worker_i = 0,
+  bool apply_closure_to_completed_buffer(uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);

@@ -136,13 +136,13 @@ public:
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                         int worker_i = 0,
+                                         uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);

   // Helper routine for the above.
   bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                                int worker_i,
+                                                uint worker_i,
                                                 BufferNode* nd);

   BufferNode* get_completed_buffer(int stop_at);
@@ -304,26 +304,26 @@ void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) con
     if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
       guarantee(entry > N_words,
                 err_msg("Should be in logarithmic region - "
-                        "entry: " UINT32_FORMAT ", "
-                        "_array->offset_array(c): " UINT32_FORMAT ", "
-                        "N_words: " UINT32_FORMAT,
-                        entry, _array->offset_array(c), N_words));
+                        "entry: %u, "
+                        "_array->offset_array(c): %u, "
+                        "N_words: %u",
+                        (uint)entry, (uint)_array->offset_array(c), (uint)N_words));
     }
     size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
     size_t landing_card = c - backskip;
     guarantee(landing_card >= (start_card - 1), "Inv");
     if (landing_card >= start_card) {
       guarantee(_array->offset_array(landing_card) <= entry,
-                err_msg("Monotonicity - landing_card offset: " UINT32_FORMAT ", "
-                        "entry: " UINT32_FORMAT,
-                        _array->offset_array(landing_card), entry));
+                err_msg("Monotonicity - landing_card offset: %u, "
+                        "entry: %u",
+                        (uint)_array->offset_array(landing_card), (uint)entry));
     } else {
       guarantee(landing_card == start_card - 1, "Tautology");
       // Note that N_words is the maximum offset value
       guarantee(_array->offset_array(landing_card) <= N_words,
-                err_msg("landing card offset: " UINT32_FORMAT ", "
-                        "N_words: " UINT32_FORMAT,
-                        _array->offset_array(landing_card), N_words));
+                err_msg("landing card offset: %u, "
+                        "N_words: %u",
+                        (uint)_array->offset_array(landing_card), (uint)N_words));
     }
   }
 }

@@ -554,21 +554,20 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
          (_array->offset_array(orig_index) > 0 &&
           _array->offset_array(orig_index) <= N_words),
          err_msg("offset array should have been set - "
-                 "orig_index offset: " UINT32_FORMAT ", "
+                 "orig_index offset: %u, "
                  "blk_start: " PTR_FORMAT ", "
                  "boundary: " PTR_FORMAT,
-                 _array->offset_array(orig_index),
+                 (uint)_array->offset_array(orig_index),
                  blk_start, boundary));
   for (size_t j = orig_index + 1; j <= end_index; j++) {
     assert(_array->offset_array(j) > 0 &&
            _array->offset_array(j) <=
              (u_char) (N_words+BlockOffsetArray::N_powers-1),
            err_msg("offset array should have been set - "
-                   UINT32_FORMAT " not > 0 OR "
-                   UINT32_FORMAT " not <= " UINT32_FORMAT,
-                   _array->offset_array(j),
-                   _array->offset_array(j),
-                   (u_char) (N_words+BlockOffsetArray::N_powers-1)));
+                   "%u not > 0 OR %u not <= %u",
+                   (uint) _array->offset_array(j),
+                   (uint) _array->offset_array(j),
+                   (uint) (N_words+BlockOffsetArray::N_powers-1)));
   }
 #endif
 }
@@ -146,8 +146,8 @@ private:
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
-                   "offset: " UINT32_FORMAT", N_words: " UINT32_FORMAT,
-                   msg, offset, N_words));
+                   "offset: " SIZE_FORMAT", N_words: %u",
+                   msg, offset, (uint)N_words));
   }

   // Bounds checking accessors:
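These format fixes follow from C's default argument promotions: a size_t must be printed with a size_t-width conversion (SIZE_FORMAT), while narrow values such as the u_char offsets promote to int in varargs calls, so the safe pattern is an explicit cast to the width the format names. A minimal printf illustration of the same rule (ordinary C++, not HotSpot's err_msg machinery):

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t offset = 42;           // size_t needs the %zu conversion
      unsigned char entry = 200;    // promotes to int in varargs calls
      // Cast narrow operands to the width the format names; "%u" then
      // matches an unsigned int exactly on every platform.
      std::printf("offset: %zu, entry: %u\n", offset, (unsigned int)entry);
      return 0;
    }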
@@ -102,7 +102,7 @@ public:
                              ConcurrentG1Refine* cg1r) :
     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references

@@ -131,7 +131,7 @@ public:
   {
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       unsigned char* ujb = (unsigned char*)card_ptr;

@@ -160,7 +160,7 @@ public:
   RedirtyLoggedCardTableEntryClosure() :
     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}

-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       *card_ptr = 0;

@@ -1288,7 +1288,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   print_heap_before_gc();
   trace_heap_before_gc(gc_tracer);

-  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+  size_t metadata_prev_used = MetaspaceAux::used_bytes();

   verify_region_sets_optional();

@@ -2314,7 +2314,7 @@ void G1CollectedHeap::check_gc_time_stamps() {
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                  DirtyCardQueue* into_cset_dcq,
                                                  bool concurrent,
-                                                 int worker_i) {
+                                                 uint worker_i) {
   // Clean cards in the hot card cache
   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

@@ -2843,7 +2843,7 @@ void G1CollectedHeap::clear_cset_start_regions() {

 // Given the id of a worker, obtain or calculate a suitable
 // starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
+HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
   assert(get_gc_time_stamp() > 0, "should have been updated by now");

   HeapRegion* result = NULL;

@@ -5103,7 +5103,7 @@ g1_process_strong_roots(bool is_scavenging,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         G1KlassScanClosure* scan_klasses,
-                        int worker_i) {
+                        uint worker_i) {

   // First scan the strong roots
   double ext_roots_start = os::elapsedTime();

@@ -5207,10 +5207,10 @@ public:

   ~G1StringSymbolTableUnlinkTask() {
     guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+              err_msg("claim value %d after unlink less than initial string table size %d",
                       StringTable::parallel_claimed_index(), _initial_string_table_size));
     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
-              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+              err_msg("claim value %d after unlink less than initial symbol table size %d",
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
   }

@@ -5275,7 +5275,7 @@ void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive

 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 public:
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     *card_ptr = CardTableModRefBS::dirty_card_val();
     return true;
   }
@@ -845,7 +845,7 @@ protected:
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                G1KlassScanClosure* scan_klasses,
-                               int worker_i);
+                               uint worker_i);

   // Notifies all the necessary spaces that the committed space has
   // been updated (either expanded or shrunk). It should be called

@@ -1139,7 +1139,7 @@ public:

   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                   DirtyCardQueue* into_cset_dcq,
-                                  bool concurrent, int worker_i);
+                                  bool concurrent, uint worker_i);

   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

@@ -1370,7 +1370,7 @@ public:

   // Given the id of a worker, obtain or calculate a suitable
   // starting region for iterating over the current collection set.
-  HeapRegion* start_cset_region_for_worker(int worker_i);
+  HeapRegion* start_cset_region_for_worker(uint worker_i);

   // This is a convenience method that is used by the
   // HeapRegionIterator classes to calculate the starting region for
@@ -1204,7 +1204,7 @@ void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
     (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;

   if (full) {
     _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
   }
 }
@@ -147,7 +147,7 @@ template <class T>
 void WorkerDataArray<T>::verify() {
   for (uint i = 0; i < _length; i++) {
     assert(_data[i] != _uninitialized,
-           err_msg("Invalid data for worker " UINT32_FORMAT ", data: %lf, uninitialized: %lf",
+           err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf",
                    i, (double)_data[i], (double)_uninitialized));
   }
 }

@@ -246,8 +246,8 @@ void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 }

-void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int workers) {
-  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
+void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
+  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
 }

 double G1GCPhaseTimes::accounted_time_ms() {

@@ -161,7 +161,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {

   // Helper methods for detailed logging
   void print_stats(int level, const char* str, double value);
-  void print_stats(int level, const char* str, double value, int workers);
+  void print_stats(int level, const char* str, double value, uint workers);

  public:
   G1GCPhaseTimes(uint max_gc_threads);
@@ -44,9 +44,9 @@ void G1HotCardCache::initialize() {
     _hot_cache_idx = 0;

     // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
+    uint n_workers = (ParallelGCThreads > 0 ?
                       _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;

     _card_counts.initialize();
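Worked example for the chunking above: with a hot-card cache of 1024 entries and 8 total workers, each parallel claim covers 1024 / 8 = 128 cards; the MAX2 floor of 1 only matters when the cache holds fewer entries than there are workers.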
@@ -89,7 +89,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
   return res;
 }

-void G1HotCardCache::drain(int worker_i,
+void G1HotCardCache::drain(uint worker_i,
                            G1RemSet* g1rs,
                            DirtyCardQueue* into_cset_dcq) {
   if (!default_use_cache()) {

@@ -122,8 +122,8 @@ void G1HotCardCache::drain(int worker_i,
       // RSet updating while within an evacuation pause.
       // In this case worker_i should be the id of a GC worker thread
       assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-      assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
-             err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+      assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+             err_msg("incorrect worker id: %u", worker_i));

       into_cset_dcq->enqueue(card_ptr);
     }

@@ -99,7 +99,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {

   // Refine the cards that have delayed as a result of
   // being in the cache.
-  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);

   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
@@ -234,14 +234,14 @@ class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
   HeapRegion* _from;
   OopsInHeapRegionClosure* _push_ref_cl;
   bool _record_refs_into_cset;
-  int _worker_i;
+  uint _worker_i;

 public:
   G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                 G1RemSet* rs,
                                 OopsInHeapRegionClosure* push_ref_cl,
                                 bool record_refs_into_cset,
-                                int worker_i = 0);
+                                uint worker_i = 0);

   void set_from(HeapRegion* from) {
     assert(from != NULL, "from region must be non-NULL");

@@ -113,14 +113,14 @@ class ScanRSClosure : public HeapRegionClosure {
   G1SATBCardTableModRefBS *_ct_bs;

   double _strong_code_root_scan_time_sec;
-  int _worker_i;
+  uint _worker_i;
   int _block_size;
   bool _try_claimed;

 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc,
                 CodeBlobToOopClosure* code_root_cl,
-                int worker_i) :
+                uint worker_i) :
     _oc(oc),
     _code_root_cl(code_root_cl),
     _strong_code_root_scan_time_sec(0.0),

@@ -162,7 +162,7 @@ public:

   void printCard(HeapRegion* card_region, size_t card_index,
                  HeapWord* card_start) {
-    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
+    gclog_or_tty->print_cr("T %u Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                            "RS names card %p: "
                            "[" PTR_FORMAT ", " PTR_FORMAT ")",
                            _worker_i,

@@ -241,7 +241,7 @@ public:

 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
                       CodeBlobToOopClosure* code_root_cl,
-                      int worker_i) {
+                      uint worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

@@ -274,13 +274,13 @@ public:
                                               DirtyCardQueue* into_cset_dcq) :
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
     // is during RSet updating within an evacuation pause.
     // In this case worker_i should be the id of a GC worker thread.
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
+    assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");

     if (_g1rs->refine_card(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection

@@ -295,7 +295,7 @@ public:
   }
 };

-void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

@@ -320,14 +320,14 @@ void G1RemSet::cleanupHRRS() {

 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            CodeBlobToOopClosure* code_root_cl,
-                                           int worker_i) {
+                                           uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif

   // We cache the value of 'oc' closure into the appropriate slot in the
   // _cset_rs_update_cl for this worker
-  assert(worker_i < (int)n_workers(), "sanity");
+  assert(worker_i < n_workers(), "sanity");
   _cset_rs_update_cl[worker_i] = oc;

   // A DirtyCardQueue that is used to hold cards containing references

@@ -399,7 +399,7 @@ public:
     _g1(g1), _ct_bs(bs)
   { }

-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // Construct the region representing the card.
     HeapWord* start = _ct_bs->addr_for(card_ptr);
     // And find the region containing it.

@@ -543,7 +543,7 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                               G1RemSet* rs,
                               OopsInHeapRegionClosure* push_ref_cl,
                               bool record_refs_into_cset,
-                              int worker_i) :
+                              uint worker_i) :
   _g1(g1h), _g1_rem_set(rs), _from(NULL),
   _record_refs_into_cset(record_refs_into_cset),
   _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }

@@ -552,7 +552,7 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
 // into the collection set, if we're checking for such references;
 // false otherwise.

-bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {

   // If the card is no longer dirty, nothing to do.
@@ -97,7 +97,7 @@ public:
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    CodeBlobToOopClosure* code_root_cl,
-                                   int worker_i);
+                                   uint worker_i);

   // Prepare for and cleanup after an oops_into_collection_set_do
   // call. Must call each of these once before and after (in sequential

@@ -109,9 +109,9 @@ public:

   void scanRS(OopsInHeapRegionClosure* oc,
               CodeBlobToOopClosure* code_root_cl,
-              int worker_i);
+              uint worker_i);

-  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+  void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);

   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }

@@ -138,7 +138,7 @@ public:
   // if the given card contains oops that have references into the
   // current collection set.
   virtual bool refine_card(jbyte* card_ptr,
-                           int worker_i,
+                           uint worker_i,
                            bool check_for_refs_into_cset);

   // Print accumulated summary info from the start of the VM.

@@ -171,12 +171,12 @@ public:
 class UpdateRSOopClosure: public ExtendedOopClosure {
   HeapRegion* _from;
   G1RemSet* _rs;
-  int _worker_i;
+  uint _worker_i;

   template <class T> void do_oop_work(T* p);

 public:
-  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, uint worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i)
   {}
@ -390,7 +390,7 @@ void FromCardCache::shrink(uint new_num_regions) {
|
||||||
void FromCardCache::print(outputStream* out) {
|
void FromCardCache::print(outputStream* out) {
|
||||||
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
|
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
|
||||||
for (uint j = 0; j < _max_regions; j++) {
|
for (uint j = 0; j < _max_regions; j++) {
|
||||||
out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
|
out->print_cr("_from_card_cache[%u][%u] = %d.",
|
||||||
i, j, at(i, j));
|
i, j, at(i, j));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -430,7 +430,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
|
||||||
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
|
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
|
||||||
|
|
||||||
if (G1TraceHeapRegionRememberedSet) {
|
if (G1TraceHeapRegionRememberedSet) {
|
||||||
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
|
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
|
||||||
hr()->bottom(), from_card,
|
hr()->bottom(), from_card,
|
||||||
FromCardCache::at((uint)tid, cur_hrs_ind));
|
FromCardCache::at((uint)tid, cur_hrs_ind));
|
||||||
}
|
}
|
||||||
|
@ -853,13 +853,13 @@ OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
|
||||||
// This can be done by either mutator threads together with the
|
// This can be done by either mutator threads together with the
|
||||||
// concurrent refinement threads or GC threads.
|
// concurrent refinement threads or GC threads.
|
||||||
uint HeapRegionRemSet::num_par_rem_sets() {
|
uint HeapRegionRemSet::num_par_rem_sets() {
|
||||||
return (uint)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
|
return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
|
||||||
}
|
}
|
||||||
|
|
||||||
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
|
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
|
||||||
HeapRegion* hr)
|
HeapRegion* hr)
|
||||||
: _bosa(bosa),
|
: _bosa(bosa),
|
||||||
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
|
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
|
||||||
_code_roots(), _other_regions(hr, &_m) {
|
_code_roots(), _other_regions(hr, &_m) {
|
||||||
reset_for_par_iteration();
|
reset_for_par_iteration();
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,7 +30,7 @@
|
||||||
inline void HeapRegionSetBase::add(HeapRegion* hr) {
|
inline void HeapRegionSetBase::add(HeapRegion* hr) {
|
||||||
check_mt_safety();
|
check_mt_safety();
|
||||||
assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
|
assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
|
||||||
assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
|
assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
|
||||||
|
|
||||||
_count.increment(1u, hr->capacity());
|
_count.increment(1u, hr->capacity());
|
||||||
hr->set_containing_set(this);
|
hr->set_containing_set(this);
|
||||||
|
@ -40,7 +40,7 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) {
|
||||||
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
|
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
|
||||||
check_mt_safety();
|
check_mt_safety();
|
||||||
verify_region(hr);
|
verify_region(hr);
|
||||||
assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
|
assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
|
||||||
|
|
||||||
hr->set_containing_set(NULL);
|
hr->set_containing_set(NULL);
|
||||||
assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
|
assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
|
||||||
|
|
|
@ -290,7 +290,7 @@ void SATBMarkQueueSet::iterate_closure_all_threads() {
|
||||||
shared_satb_queue()->apply_closure_and_empty(_closure);
|
shared_satb_queue()->apply_closure_and_empty(_closure);
|
||||||
}
|
}
|
||||||
|
|
||||||
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
|
void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
|
||||||
SharedHeap* sh = SharedHeap::heap();
|
SharedHeap* sh = SharedHeap::heap();
|
||||||
int parity = sh->strong_roots_parity();
|
int parity = sh->strong_roots_parity();
|
||||||
|
|
||||||
|
@ -315,7 +315,7 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
|
||||||
}
|
}
|
||||||
|
|
||||||
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
|
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
|
||||||
int worker) {
|
uint worker) {
|
||||||
BufferNode* nd = NULL;
|
BufferNode* nd = NULL;
|
||||||
{
|
{
|
||||||
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
|
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
|
||||||
|
|
|
@ -84,7 +84,7 @@ class SATBMarkQueueSet: public PtrQueueSet {
|
||||||
// Utility function to support sequential and parallel versions. If
|
// Utility function to support sequential and parallel versions. If
|
||||||
// "par" is true, then "worker" is the par thread id; if "false", worker
|
// "par" is true, then "worker" is the par thread id; if "false", worker
|
||||||
// is ignored.
|
// is ignored.
|
||||||
bool apply_closure_to_completed_buffer_work(bool par, int worker);
|
bool apply_closure_to_completed_buffer_work(bool par, uint worker);
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
void dump_active_states(bool expected_active);
|
void dump_active_states(bool expected_active);
|
||||||
|
@ -124,7 +124,7 @@ public:
|
||||||
// be called serially and at a safepoint.
|
// be called serially and at a safepoint.
|
||||||
void iterate_closure_all_threads();
|
void iterate_closure_all_threads();
|
||||||
// Parallel version of the above.
|
// Parallel version of the above.
|
||||||
void par_iterate_closure_all_threads(int worker);
|
void par_iterate_closure_all_threads(uint worker);
|
||||||
|
|
||||||
// If there exists some completed buffer, pop it, then apply the
|
// If there exists some completed buffer, pop it, then apply the
|
||||||
// registered closure to all its elements, and return true. If no
|
// registered closure to all its elements, and return true. If no
|
||||||
|
@ -133,7 +133,7 @@ public:
|
||||||
return apply_closure_to_completed_buffer_work(false, 0);
|
return apply_closure_to_completed_buffer_work(false, 0);
|
||||||
}
|
}
|
||||||
// Parallel version of the above.
|
// Parallel version of the above.
|
||||||
bool par_apply_closure_to_completed_buffer(int worker) {
|
bool par_apply_closure_to_completed_buffer(uint worker) {
|
||||||
return apply_closure_to_completed_buffer_work(true, worker);
|
return apply_closure_to_completed_buffer_work(true, worker);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -184,7 +184,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||||
size_t prev_used = heap->used();
|
size_t prev_used = heap->used();
|
||||||
|
|
||||||
// Capture metadata size before collection for sizing.
|
// Capture metadata size before collection for sizing.
|
||||||
size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
|
size_t metadata_prev_used = MetaspaceAux::used_bytes();
|
||||||
|
|
||||||
// For PrintGCDetails
|
// For PrintGCDetails
|
||||||
size_t old_gen_prev_used = old_gen->used_in_bytes();
|
size_t old_gen_prev_used = old_gen->used_in_bytes();
|
||||||
|
|
|
@ -928,7 +928,7 @@ public:
|
||||||
_heap_used = heap->used();
|
_heap_used = heap->used();
|
||||||
_young_gen_used = heap->young_gen()->used_in_bytes();
|
_young_gen_used = heap->young_gen()->used_in_bytes();
|
||||||
_old_gen_used = heap->old_gen()->used_in_bytes();
|
_old_gen_used = heap->old_gen()->used_in_bytes();
|
||||||
_metadata_used = MetaspaceAux::allocated_used_bytes();
|
_metadata_used = MetaspaceAux::used_bytes();
|
||||||
};
|
};
|
||||||
|
|
||||||
size_t heap_used() const { return _heap_used; }
|
size_t heap_used() const { return _heap_used; }
|
||||||
|
|
|
@ -62,16 +62,16 @@ public:
|
||||||
};
|
};
|
||||||
|
|
||||||
class MetaspaceSizes : public StackObj {
|
class MetaspaceSizes : public StackObj {
|
||||||
size_t _capacity;
|
size_t _committed;
|
||||||
size_t _used;
|
size_t _used;
|
||||||
size_t _reserved;
|
size_t _reserved;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
|
MetaspaceSizes() : _committed(0), _used(0), _reserved(0) {}
|
||||||
MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
|
MetaspaceSizes(size_t committed, size_t used, size_t reserved) :
|
||||||
_capacity(capacity), _used(used), _reserved(reserved) {}
|
_committed(committed), _used(used), _reserved(reserved) {}
|
||||||
|
|
||||||
size_t capacity() const { return _capacity; }
|
size_t committed() const { return _committed; }
|
||||||
size_t used() const { return _used; }
|
size_t used() const { return _used; }
|
||||||
size_t reserved() const { return _reserved; }
|
size_t reserved() const { return _reserved; }
|
||||||
};
|
};
|
||||||
|
|
|
@ -258,7 +258,7 @@ void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary
|
||||||
static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
|
static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
|
||||||
TraceStructMetaspaceSizes meta_sizes;
|
TraceStructMetaspaceSizes meta_sizes;
|
||||||
|
|
||||||
meta_sizes.set_capacity(sizes.capacity());
|
meta_sizes.set_committed(sizes.committed());
|
||||||
meta_sizes.set_used(sizes.used());
|
meta_sizes.set_used(sizes.used());
|
||||||
meta_sizes.set_reserved(sizes.reserved());
|
meta_sizes.set_reserved(sizes.reserved());
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -85,16 +85,16 @@ GCHeapSummary CollectedHeap::create_heap_summary() {
|
||||||
|
|
||||||
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
|
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
|
||||||
const MetaspaceSizes meta_space(
|
const MetaspaceSizes meta_space(
|
||||||
MetaspaceAux::allocated_capacity_bytes(),
|
MetaspaceAux::committed_bytes(),
|
||||||
MetaspaceAux::allocated_used_bytes(),
|
MetaspaceAux::used_bytes(),
|
||||||
MetaspaceAux::reserved_bytes());
|
MetaspaceAux::reserved_bytes());
|
||||||
const MetaspaceSizes data_space(
|
const MetaspaceSizes data_space(
|
||||||
MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
|
MetaspaceAux::committed_bytes(Metaspace::NonClassType),
|
||||||
MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
|
MetaspaceAux::used_bytes(Metaspace::NonClassType),
|
||||||
MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
|
MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
|
||||||
const MetaspaceSizes class_space(
|
const MetaspaceSizes class_space(
|
||||||
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
|
MetaspaceAux::committed_bytes(Metaspace::ClassType),
|
||||||
MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
|
MetaspaceAux::used_bytes(Metaspace::ClassType),
|
||||||
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
|
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
|
||||||
|
|
||||||
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
|
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
|
||||||
|
@ -582,36 +582,6 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
|
|
||||||
debug_only(check_for_valid_allocation_state());
|
|
||||||
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
|
|
||||||
assert(size >= 0, "int won't convert to size_t");
|
|
||||||
HeapWord* obj;
|
|
||||||
assert(ScavengeRootsInCode > 0, "must be");
|
|
||||||
obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
|
|
||||||
post_allocation_setup_common(klass, obj);
|
|
||||||
assert(Universe::is_bootstrapping() ||
|
|
||||||
!((oop)obj)->is_array(), "must not be an array");
|
|
||||||
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
|
|
||||||
oop mirror = (oop)obj;
|
|
||||||
|
|
||||||
java_lang_Class::set_oop_size(mirror, size);
|
|
||||||
|
|
||||||
// Setup indirections
|
|
||||||
if (!real_klass.is_null()) {
|
|
||||||
java_lang_Class::set_klass(mirror, real_klass());
|
|
||||||
real_klass->set_java_mirror(mirror);
|
|
||||||
}
|
|
||||||
|
|
||||||
InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
|
|
||||||
assert(size == mk->instance_size(real_klass), "should have been set");
|
|
||||||
|
|
||||||
// notify jvmti and dtrace
|
|
||||||
post_allocation_notify(klass, (oop)obj);
|
|
||||||
|
|
||||||
return mirror;
|
|
||||||
}
|
|
||||||
|
|
||||||
/////////////// Unit tests ///////////////
|
/////////////// Unit tests ///////////////
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -312,9 +312,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
|
||||||
// May be overridden to set additional parallelism.
|
// May be overridden to set additional parallelism.
|
||||||
virtual void set_par_threads(uint t) { _n_par_threads = t; };
|
virtual void set_par_threads(uint t) { _n_par_threads = t; };
|
||||||
|
|
||||||
// Allocate and initialize instances of Class
|
|
||||||
static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);
|
|
||||||
|
|
||||||
// General obj/array allocation facilities.
|
// General obj/array allocation facilities.
|
||||||
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
|
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
|
||||||
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
|
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
|
||||||
|
|
|
@ -257,6 +257,12 @@ void GenCollectorPolicy::assert_size_info() {
|
||||||
assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
|
assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
|
||||||
assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
|
assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
|
||||||
assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
|
assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
|
||||||
|
assert(_min_gen0_size <= bound_minus_alignment(_min_gen0_size, _min_heap_byte_size),
|
||||||
|
"Ergonomics made minimum young generation larger than minimum heap");
|
||||||
|
assert(_initial_gen0_size <= bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size),
|
||||||
|
"Ergonomics made initial young generation larger than initial heap");
|
||||||
|
assert(_max_gen0_size <= bound_minus_alignment(_max_gen0_size, _max_heap_byte_size),
|
||||||
|
"Ergonomics made maximum young generation lager than maximum heap");
|
||||||
}
|
}
|
||||||
|
|
||||||
void TwoGenerationCollectorPolicy::assert_size_info() {
|
void TwoGenerationCollectorPolicy::assert_size_info() {
|
||||||
|
@ -267,6 +273,9 @@ void TwoGenerationCollectorPolicy::assert_size_info() {
|
||||||
assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
|
assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
|
||||||
assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
|
assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
|
||||||
assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
|
assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
|
||||||
|
assert(_min_gen0_size + _min_gen1_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size");
|
||||||
|
assert(_initial_gen0_size + _initial_gen1_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size");
|
||||||
|
assert(_max_gen0_size + _max_gen1_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size");
|
||||||
}
|
}
|
||||||
#endif // ASSERT
|
#endif // ASSERT
|
||||||
|
|
||||||
|
@ -303,20 +312,26 @@ void GenCollectorPolicy::initialize_flags() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Make sure NewSize allows an old generation to fit even if set on the command line
|
||||||
|
if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
|
||||||
|
warning("NewSize was set larger than initial heap size, will use initial heap size.");
|
||||||
|
NewSize = bound_minus_alignment(NewSize, _initial_heap_byte_size);
|
||||||
|
}
|
||||||
|
|
||||||
// Now take the actual NewSize into account. We will silently increase NewSize
|
// Now take the actual NewSize into account. We will silently increase NewSize
|
||||||
// if the user specified a smaller or unaligned value.
|
// if the user specified a smaller or unaligned value.
|
||||||
smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
|
uintx bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
|
||||||
if (smallest_new_size != NewSize) {
|
bounded_new_size = MAX2(smallest_new_size, (uintx)align_size_down(bounded_new_size, _gen_alignment));
|
||||||
|
if (bounded_new_size != NewSize) {
|
||||||
// Do not use FLAG_SET_ERGO to update NewSize here, since this will override
|
// Do not use FLAG_SET_ERGO to update NewSize here, since this will override
|
||||||
// if NewSize was set on the command line or not. This information is needed
|
// if NewSize was set on the command line or not. This information is needed
|
||||||
// later when setting the initial and minimum young generation size.
|
// later when setting the initial and minimum young generation size.
|
||||||
NewSize = smallest_new_size;
|
NewSize = bounded_new_size;
|
||||||
}
|
}
|
||||||
|
_min_gen0_size = smallest_new_size;
|
||||||
_initial_gen0_size = NewSize;
|
_initial_gen0_size = NewSize;
|
||||||
|
|
||||||
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
|
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
|
||||||
uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);
|
|
||||||
|
|
||||||
if (MaxNewSize >= MaxHeapSize) {
|
if (MaxNewSize >= MaxHeapSize) {
|
||||||
// Make sure there is room for an old generation
|
// Make sure there is room for an old generation
|
||||||
uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
|
uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
|
||||||
|
@ -330,8 +345,8 @@ void GenCollectorPolicy::initialize_flags() {
|
||||||
FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
|
FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
|
||||||
_initial_gen0_size = NewSize;
|
_initial_gen0_size = NewSize;
|
||||||
}
|
}
|
||||||
} else if (MaxNewSize < min_new_size) {
|
} else if (MaxNewSize < _initial_gen0_size) {
|
||||||
FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
|
FLAG_SET_ERGO(uintx, MaxNewSize, _initial_gen0_size);
|
||||||
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
|
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
|
||||||
FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
|
FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
|
||||||
}
|
}
|
||||||
|
@ -361,7 +376,9 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||||
GenCollectorPolicy::initialize_flags();
|
GenCollectorPolicy::initialize_flags();
|
||||||
|
|
||||||
if (!is_size_aligned(OldSize, _gen_alignment)) {
|
if (!is_size_aligned(OldSize, _gen_alignment)) {
|
||||||
FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
|
// Setting OldSize directly to preserve information about the possible
|
||||||
|
// setting of OldSize on the command line.
|
||||||
|
OldSize = align_size_down(OldSize, _gen_alignment);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
|
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
|
||||||
|
@ -400,6 +417,20 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update NewSize, if possible, to avoid sizing gen0 to small when only
|
||||||
|
// OldSize is set on the command line.
|
||||||
|
if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
|
||||||
|
if (OldSize < _initial_heap_byte_size) {
|
||||||
|
size_t new_size = _initial_heap_byte_size - OldSize;
|
||||||
|
// Need to compare against the flag value for max since _max_gen0_size
|
||||||
|
// might not have been set yet.
|
||||||
|
if (new_size >= _min_gen0_size && new_size <= MaxNewSize) {
|
||||||
|
FLAG_SET_ERGO(uintx, NewSize, new_size);
|
||||||
|
_initial_gen0_size = NewSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
always_do_update_barrier = UseConcMarkSweepGC;
|
always_do_update_barrier = UseConcMarkSweepGC;
|
||||||
|
|
||||||
DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
|
DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
|
||||||
|
@ -441,57 +472,37 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||||
// Given the maximum gen0 size, determine the initial and
|
// Given the maximum gen0 size, determine the initial and
|
||||||
// minimum gen0 sizes.
|
// minimum gen0 sizes.
|
||||||
|
|
||||||
|
if (_max_heap_byte_size == _initial_heap_byte_size) {
|
||||||
|
// The maxium and initial heap sizes are the same so the generation's
|
||||||
|
// initial size must be the same as it maximum size. Use NewSize as the
|
||||||
|
// size if set on command line.
|
||||||
|
size_t fixed_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : max_new_size;
|
||||||
|
|
||||||
|
_initial_gen0_size = fixed_young_size;
|
||||||
|
_max_gen0_size = fixed_young_size;
|
||||||
|
|
||||||
|
// Also update the minimum size if min == initial == max.
|
||||||
if (_max_heap_byte_size == _min_heap_byte_size) {
|
if (_max_heap_byte_size == _min_heap_byte_size) {
|
||||||
// The maximum and minimum heap sizes are the same so the generations
|
_min_gen0_size = fixed_young_size;
|
||||||
// minimum and initial must be the same as its maximum.
|
}
|
||||||
_min_gen0_size = max_new_size;
|
|
||||||
_initial_gen0_size = max_new_size;
|
|
||||||
_max_gen0_size = max_new_size;
|
|
||||||
} else {
|
} else {
|
||||||
size_t desired_new_size = 0;
|
size_t desired_new_size = 0;
|
||||||
if (FLAG_IS_CMDLINE(NewSize)) {
|
if (FLAG_IS_CMDLINE(NewSize)) {
|
||||||
// If NewSize is set on the command line, we must use it as
|
// If NewSize is set on the command line, we should use it as
|
||||||
// the initial size and it also makes sense to use it as the
|
// the initial size, but make sure it is within the heap bounds.
|
||||||
// lower limit.
|
|
||||||
_min_gen0_size = NewSize;
|
|
||||||
desired_new_size = NewSize;
|
|
||||||
max_new_size = MAX2(max_new_size, NewSize);
|
|
||||||
} else if (FLAG_IS_ERGO(NewSize)) {
|
|
||||||
// If NewSize is set ergonomically, we should use it as a lower
|
|
||||||
// limit, but use NewRatio to calculate the initial size.
|
|
||||||
_min_gen0_size = NewSize;
|
|
||||||
desired_new_size =
|
desired_new_size =
|
||||||
MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
|
MIN2(max_new_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
|
||||||
max_new_size = MAX2(max_new_size, NewSize);
|
_min_gen0_size = bound_minus_alignment(desired_new_size, _min_heap_byte_size);
|
||||||
} else {
|
} else {
|
||||||
// For the case where NewSize is the default, use NewRatio
|
// For the case where NewSize is not set on the command line, use
|
||||||
// to size the minimum and initial generation sizes.
|
// NewRatio to size the initial generation size. Use the current
|
||||||
// Use the default NewSize as the floor for these values. If
|
// NewSize as the floor, because if NewRatio is overly large, the resulting
|
||||||
// NewRatio is overly large, the resulting sizes can be too small.
|
// size can be too small.
|
||||||
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
|
|
||||||
desired_new_size =
|
desired_new_size =
|
||||||
MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
|
MIN2(max_new_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(_min_gen0_size > 0, "Sanity check");
|
|
||||||
_initial_gen0_size = desired_new_size;
|
_initial_gen0_size = desired_new_size;
|
||||||
_max_gen0_size = max_new_size;
|
_max_gen0_size = max_new_size;
|
||||||
|
|
||||||
// At this point the desirable initial and minimum sizes have been
|
|
||||||
// determined without regard to the maximum sizes.
|
|
||||||
|
|
||||||
// Bound the sizes by the corresponding overall heap sizes.
|
|
||||||
_min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
|
|
||||||
_initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
|
|
||||||
_max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
|
|
||||||
|
|
||||||
// At this point all three sizes have been checked against the
|
|
||||||
// maximum sizes but have not been checked for consistency among the three.
|
|
||||||
|
|
||||||
// Final check min <= initial <= max
|
|
||||||
_min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
|
|
||||||
_initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
|
|
||||||
_min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write back to flags if necessary.
|
// Write back to flags if necessary.
|
||||||
|
@ -512,33 +523,6 @@ void GenCollectorPolicy::initialize_size_info() {
|
||||||
DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
|
DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Call this method during the sizing of the gen1 to make
|
|
||||||
// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
|
|
||||||
// the most freedom in sizing because it is done before the
|
|
||||||
// policy for gen1 is applied. Once gen1 policies have been applied,
|
|
||||||
// there may be conflicts in the shape of the heap and this method
|
|
||||||
// is used to make the needed adjustments. The application of the
|
|
||||||
// policies could be more sophisticated (iterative for example) but
|
|
||||||
// keeping it simple also seems a worthwhile goal.
|
|
||||||
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
|
|
||||||
size_t* gen1_size_ptr,
|
|
||||||
const size_t heap_size) {
|
|
||||||
bool result = false;
|
|
||||||
|
|
||||||
if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
|
|
||||||
uintx smallest_new_size = young_gen_size_lower_bound();
|
|
||||||
if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
|
|
||||||
(heap_size >= _min_gen1_size + smallest_new_size)) {
|
|
||||||
// Adjust gen0 down to accommodate _min_gen1_size
|
|
||||||
*gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
|
|
||||||
result = true;
|
|
||||||
} else {
|
|
||||||
*gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum sizes of the generations may be different than
|
// Minimum sizes of the generations may be different than
|
||||||
// the initial sizes. An inconsistency is permitted here
|
// the initial sizes. An inconsistency is permitted here
|
||||||
// in the total size that can be specified explicitly by
|
// in the total size that can be specified explicitly by
|
||||||
|
@ -564,56 +548,63 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
|
||||||
// with the overall heap size). In either case make
|
// with the overall heap size). In either case make
|
||||||
// the minimum, maximum and initial sizes consistent
|
// the minimum, maximum and initial sizes consistent
|
||||||
// with the gen0 sizes and the overall heap sizes.
|
// with the gen0 sizes and the overall heap sizes.
|
||||||
_min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
|
_min_gen1_size = _gen_alignment;
|
||||||
_initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
|
_initial_gen1_size = MIN2(_max_gen1_size, MAX2(_initial_heap_byte_size - _initial_gen0_size, _min_gen1_size));
|
||||||
// _max_gen1_size has already been made consistent above
|
// _max_gen1_size has already been made consistent above
|
||||||
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
|
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
|
||||||
} else {
|
} else {
|
||||||
// OldSize has been explicitly set on the command line. Use the
|
// OldSize has been explicitly set on the command line. Use it
|
||||||
// OldSize and then determine the consequences.
|
// for the initial size but make sure the minimum allow a young
|
||||||
_min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
|
// generation to fit as well.
|
||||||
_initial_gen1_size = OldSize;
|
|
||||||
|
|
||||||
// If the user has explicitly set an OldSize that is inconsistent
|
// If the user has explicitly set an OldSize that is inconsistent
|
||||||
// with other command line flags, issue a warning.
|
// with other command line flags, issue a warning.
|
||||||
// The generation minimums and the overall heap minimum should
|
// The generation minimums and the overall heap minimum should
|
||||||
// be within one generation alignment.
|
// be within one generation alignment.
|
||||||
if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
|
|
||||||
warning("Inconsistency between minimum heap size and minimum "
|
|
||||||
"generation sizes: using minimum heap = " SIZE_FORMAT,
|
|
||||||
_min_heap_byte_size);
|
|
||||||
}
|
|
||||||
if (OldSize > _max_gen1_size) {
|
if (OldSize > _max_gen1_size) {
|
||||||
warning("Inconsistency between maximum heap size and maximum "
|
warning("Inconsistency between maximum heap size and maximum "
|
||||||
"generation sizes: using maximum heap = " SIZE_FORMAT
|
"generation sizes: using maximum heap = " SIZE_FORMAT
|
||||||
" -XX:OldSize flag is being ignored",
|
" -XX:OldSize flag is being ignored",
|
||||||
_max_heap_byte_size);
|
_max_heap_byte_size);
|
||||||
|
FLAG_SET_ERGO(uintx, OldSize, _max_gen1_size);
|
||||||
}
|
}
|
||||||
// If there is an inconsistency between the OldSize and the minimum and/or
|
|
||||||
// initial size of gen0, since OldSize was explicitly set, OldSize wins.
|
_min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
|
||||||
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
|
_initial_gen1_size = OldSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The initial generation sizes should match the initial heap size,
|
||||||
|
// if not issue a warning and resize the generations. This behavior
|
||||||
|
// differs from JDK8 where the generation sizes have higher priority
|
||||||
|
// than the initial heap size.
|
||||||
|
if ((_initial_gen1_size + _initial_gen0_size) != _initial_heap_byte_size) {
|
||||||
|
warning("Inconsistency between generation sizes and heap size, resizing "
|
||||||
|
"the generations to fit the heap.");
|
||||||
|
|
||||||
|
size_t desired_gen0_size = _initial_heap_byte_size - _initial_gen1_size;
|
||||||
|
if (_initial_heap_byte_size < _initial_gen1_size) {
|
||||||
|
// Old want all memory, use minimum for young and rest for old
|
||||||
|
_initial_gen0_size = _min_gen0_size;
|
||||||
|
_initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
|
||||||
|
} else if (desired_gen0_size > _max_gen0_size) {
|
||||||
|
// Need to increase both young and old generation
|
||||||
|
_initial_gen0_size = _max_gen0_size;
|
||||||
|
_initial_gen1_size = _initial_heap_byte_size - _max_gen0_size;
|
||||||
|
} else if (desired_gen0_size < _min_gen0_size) {
|
||||||
|
// Need to decrease both young and old generation
|
||||||
|
_initial_gen0_size = _min_gen0_size;
|
||||||
|
_initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
|
||||||
|
} else {
|
||||||
|
// The young generation boundaries allow us to only update the
|
||||||
|
// young generation.
|
||||||
|
_initial_gen0_size = desired_gen0_size;
|
||||||
|
}
|
||||||
|
|
||||||
if (PrintGCDetails && Verbose) {
|
if (PrintGCDetails && Verbose) {
|
||||||
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
||||||
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
||||||
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// The same as above for the old gen initial size.
|
|
||||||
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
|
|
||||||
_initial_heap_byte_size)) {
|
|
||||||
if (PrintGCDetails && Verbose) {
|
|
||||||
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
|
|
||||||
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
|
|
||||||
_min_gen0_size, _initial_gen0_size, _max_gen0_size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
|
|
||||||
|
|
||||||
// Make sure that min gen1 <= initial gen1 <= max gen1.
|
|
||||||
_initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
|
|
||||||
_initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
|
|
||||||
|
|
||||||
// Write back to flags if necessary
|
// Write back to flags if necessary
|
||||||
if (NewSize != _initial_gen0_size) {
|
if (NewSize != _initial_gen0_size) {
|
||||||
|
@ -994,56 +985,88 @@ void MarkSweepPolicy::initialize_gc_policy_counters() {
|
||||||
// verify that there are some basic rules for NewSize honored by the policies.
|
// verify that there are some basic rules for NewSize honored by the policies.
|
||||||
class TestGenCollectorPolicy {
|
class TestGenCollectorPolicy {
|
||||||
public:
|
public:
|
||||||
static void test() {
|
static void test_new_size() {
|
||||||
size_t flag_value;
|
size_t flag_value;
|
||||||
|
|
||||||
save_flags();
|
save_flags();
|
||||||
|
|
||||||
// Set some limits that makes the math simple.
|
|
||||||
FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
|
|
||||||
FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
|
|
||||||
Arguments::set_min_heap_size(40 * M);
|
|
||||||
|
|
||||||
// If NewSize is set on the command line, it should be used
|
// If NewSize is set on the command line, it should be used
|
||||||
// for both min and initial young size if less than min heap.
|
// for both min and initial young size if less than min heap.
|
||||||
flag_value = 20 * M;
|
flag_value = 20 * M;
|
||||||
|
set_basic_flag_values();
|
||||||
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
|
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
|
||||||
verify_min(flag_value);
|
verify_gen0_min(flag_value);
|
||||||
verify_initial(flag_value);
|
|
||||||
|
set_basic_flag_values();
|
||||||
|
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
|
||||||
|
verify_gen0_initial(flag_value);
|
||||||
|
|
||||||
// If NewSize is set on command line, but is larger than the min
|
// If NewSize is set on command line, but is larger than the min
|
||||||
// heap size, it should only be used for initial young size.
|
// heap size, it should only be used for initial young size.
|
||||||
flag_value = 80 * M;
|
flag_value = 80 * M;
|
||||||
|
set_basic_flag_values();
|
||||||
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
|
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
|
||||||
verify_initial(flag_value);
|
verify_gen0_initial(flag_value);
|
||||||
|
|
||||||
// If NewSize has been ergonomically set, the collector policy
|
// If NewSize has been ergonomically set, the collector policy
|
||||||
// should use it for min but calculate the initial young size
|
// should use it for min but calculate the initial young size
|
||||||
// using NewRatio.
|
// using NewRatio.
|
||||||
flag_value = 20 * M;
|
flag_value = 20 * M;
|
||||||
|
set_basic_flag_values();
|
||||||
FLAG_SET_ERGO(uintx, NewSize, flag_value);
|
FLAG_SET_ERGO(uintx, NewSize, flag_value);
|
||||||
verify_min(flag_value);
|
verify_gen0_min(flag_value);
|
||||||
verify_scaled_initial(InitialHeapSize);
|
|
||||||
|
set_basic_flag_values();
|
||||||
|
FLAG_SET_ERGO(uintx, NewSize, flag_value);
|
||||||
|
verify_scaled_gen0_initial(InitialHeapSize);
|
||||||
|
|
||||||
restore_flags();
|
restore_flags();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void test_old_size() {
|
||||||
|
size_t flag_value;
|
||||||
|
|
||||||
|
save_flags();
|
||||||
|
|
||||||
|
// If OldSize is set on the command line, it should be used
|
||||||
|
// for both min and initial old size if less than min heap.
|
||||||
|
flag_value = 20 * M;
|
||||||
|
set_basic_flag_values();
|
||||||
|
FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
|
||||||
|
verify_gen1_min(flag_value);
|
||||||
|
|
||||||
|
set_basic_flag_values();
|
||||||
|
FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
|
||||||
|
verify_gen1_initial(flag_value);
|
||||||
|
|
||||||
|
// If MaxNewSize is large, the maximum OldSize will be less than
|
||||||
|
// what's requested on the command line and it should be reset
|
||||||
|
// ergonomically.
|
||||||
|
flag_value = 30 * M;
|
||||||
|
set_basic_flag_values();
|
||||||
|
FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
|
||||||
|
FLAG_SET_CMDLINE(uintx, MaxNewSize, 170*M);
|
||||||
|
// Calculate what we expect the flag to be.
|
||||||
|
flag_value = MaxHeapSize - MaxNewSize;
|
||||||
|
verify_gen1_initial(flag_value);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void verify_min(size_t expected) {
|
static void verify_gen0_min(size_t expected) {
|
||||||
MarkSweepPolicy msp;
|
MarkSweepPolicy msp;
|
||||||
msp.initialize_all();
|
msp.initialize_all();
|
||||||
|
|
||||||
assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
|
assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void verify_initial(size_t expected) {
|
static void verify_gen0_initial(size_t expected) {
|
||||||
MarkSweepPolicy msp;
|
MarkSweepPolicy msp;
|
||||||
msp.initialize_all();
|
msp.initialize_all();
|
||||||
|
|
||||||
assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
|
assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void verify_scaled_initial(size_t initial_heap_size) {
|
static void verify_scaled_gen0_initial(size_t initial_heap_size) {
|
||||||
MarkSweepPolicy msp;
|
MarkSweepPolicy msp;
|
||||||
msp.initialize_all();
|
msp.initialize_all();
|
||||||
|
|
||||||
|
@ -1053,6 +1076,21 @@ public:
|
||||||
err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
|
err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void verify_gen1_min(size_t expected) {
|
||||||
|
MarkSweepPolicy msp;
|
||||||
|
msp.initialize_all();
|
||||||
|
|
||||||
|
assert(msp.min_gen1_size() <= expected, err_msg("%zu > %zu", msp.min_gen1_size(), expected));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void verify_gen1_initial(size_t expected) {
|
||||||
|
MarkSweepPolicy msp;
|
||||||
|
msp.initialize_all();
|
||||||
|
|
||||||
|
assert(msp.initial_gen1_size() == expected, err_msg("%zu != %zu", msp.initial_gen1_size(), expected));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static size_t original_InitialHeapSize;
|
static size_t original_InitialHeapSize;
|
||||||
static size_t original_MaxHeapSize;
|
static size_t original_MaxHeapSize;
|
||||||
|
@ -1061,6 +1099,15 @@ private:
|
||||||
static size_t original_NewSize;
|
static size_t original_NewSize;
|
||||||
static size_t original_OldSize;
|
static size_t original_OldSize;
|
||||||
|
|
||||||
|
static void set_basic_flag_values() {
|
||||||
|
FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
|
||||||
|
FLAG_SET_ERGO(uintx, InitialHeapSize, 100 * M);
|
||||||
|
FLAG_SET_ERGO(uintx, OldSize, 4 * M);
|
||||||
|
FLAG_SET_ERGO(uintx, NewSize, 1 * M);
|
||||||
|
FLAG_SET_ERGO(uintx, MaxNewSize, 80 * M);
|
||||||
|
Arguments::set_min_heap_size(40 * M);
|
||||||
|
}
|
||||||
|
|
||||||
static void save_flags() {
|
static void save_flags() {
|
||||||
original_InitialHeapSize = InitialHeapSize;
|
original_InitialHeapSize = InitialHeapSize;
|
||||||
original_MaxHeapSize = MaxHeapSize;
|
original_MaxHeapSize = MaxHeapSize;
|
||||||
|
@ -1088,7 +1135,11 @@ size_t TestGenCollectorPolicy::original_NewSize = 0;
|
||||||
size_t TestGenCollectorPolicy::original_OldSize = 0;
|
size_t TestGenCollectorPolicy::original_OldSize = 0;
|
||||||
|
|
||||||
void TestNewSize_test() {
|
void TestNewSize_test() {
|
||||||
TestGenCollectorPolicy::test();
|
TestGenCollectorPolicy::test_new_size();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TestOldSize_test() {
|
||||||
|
TestGenCollectorPolicy::test_old_size();
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -335,10 +335,6 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
|
||||||
virtual CollectorPolicy::Name kind() {
|
virtual CollectorPolicy::Name kind() {
|
||||||
return CollectorPolicy::TwoGenerationCollectorPolicyKind;
|
return CollectorPolicy::TwoGenerationCollectorPolicyKind;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns true if gen0 sizes were adjusted
|
|
||||||
bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
|
|
||||||
const size_t heap_size);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
|
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
|
||||||
|
|
|
@ -374,7 +374,7 @@ void GenCollectedHeap::do_collection(bool full,
|
||||||
|
|
||||||
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
|
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
|
||||||
|
|
||||||
const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
|
const size_t metadata_prev_used = MetaspaceAux::used_bytes();
|
||||||
|
|
||||||
print_heap_before_gc();
|
print_heap_before_gc();
|
||||||
|
|
||||||
|
|
|
@ -1447,7 +1447,7 @@ void MetaspaceGC::compute_new_size() {
|
||||||
uint current_shrink_factor = _shrink_factor;
|
uint current_shrink_factor = _shrink_factor;
|
||||||
_shrink_factor = 0;
|
_shrink_factor = 0;
|
||||||
|
|
||||||
const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
|
const size_t used_after_gc = MetaspaceAux::capacity_bytes();
|
||||||
const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
|
const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
|
||||||
|
|
||||||
const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
|
const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
|
||||||
|
@ -2538,8 +2538,8 @@ void SpaceManager::mangle_freed_chunks() {
|
||||||
// MetaspaceAux
|
// MetaspaceAux
|
||||||
|
|
||||||
|
|
||||||
size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
|
size_t MetaspaceAux::_capacity_words[] = {0, 0};
|
||||||
size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
|
size_t MetaspaceAux::_used_words[] = {0, 0};
|
||||||
|
|
||||||
size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
|
size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
|
||||||
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
|
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
|
||||||
|
@ -2552,38 +2552,38 @@ size_t MetaspaceAux::free_bytes() {
|
||||||
|
|
||||||
void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
|
void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
|
||||||
assert_lock_strong(SpaceManager::expand_lock());
|
assert_lock_strong(SpaceManager::expand_lock());
|
||||||
assert(words <= allocated_capacity_words(mdtype),
|
assert(words <= capacity_words(mdtype),
|
||||||
err_msg("About to decrement below 0: words " SIZE_FORMAT
|
err_msg("About to decrement below 0: words " SIZE_FORMAT
|
||||||
" is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
|
" is greater than _capacity_words[%u] " SIZE_FORMAT,
|
||||||
words, mdtype, allocated_capacity_words(mdtype)));
|
words, mdtype, capacity_words(mdtype)));
|
||||||
_allocated_capacity_words[mdtype] -= words;
|
_capacity_words[mdtype] -= words;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
|
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
|
||||||
assert_lock_strong(SpaceManager::expand_lock());
|
assert_lock_strong(SpaceManager::expand_lock());
|
||||||
// Needs to be atomic
|
// Needs to be atomic
|
||||||
_allocated_capacity_words[mdtype] += words;
|
_capacity_words[mdtype] += words;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
|
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
|
||||||
assert(words <= allocated_used_words(mdtype),
|
assert(words <= used_words(mdtype),
|
||||||
err_msg("About to decrement below 0: words " SIZE_FORMAT
|
err_msg("About to decrement below 0: words " SIZE_FORMAT
|
||||||
" is greater than _allocated_used_words[%u] " SIZE_FORMAT,
|
" is greater than _used_words[%u] " SIZE_FORMAT,
|
||||||
words, mdtype, allocated_used_words(mdtype)));
|
words, mdtype, used_words(mdtype)));
|
||||||
// For CMS deallocation of the Metaspaces occurs during the
|
// For CMS deallocation of the Metaspaces occurs during the
|
||||||
// sweep which is a concurrent phase. Protection by the expand_lock()
|
// sweep which is a concurrent phase. Protection by the expand_lock()
|
||||||
// is not enough since allocation is on a per Metaspace basis
|
// is not enough since allocation is on a per Metaspace basis
|
||||||
// and protected by the Metaspace lock.
|
// and protected by the Metaspace lock.
|
||||||
jlong minus_words = (jlong) - (jlong) words;
|
jlong minus_words = (jlong) - (jlong) words;
|
||||||
Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
|
Atomic::add_ptr(minus_words, &_used_words[mdtype]);
|
||||||
}
|
}
|
||||||
|
|
||||||
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
|
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
|
||||||
// _allocated_used_words tracks allocations for
|
// _used_words tracks allocations for
|
||||||
// each piece of metadata. Those allocations are
|
// each piece of metadata. Those allocations are
|
||||||
// generally done concurrently by different application
|
// generally done concurrently by different application
|
||||||
// threads so must be done atomically.
|
// threads so must be done atomically.
|
||||||
Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
|
Atomic::add_ptr(words, &_used_words[mdtype]);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
|
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
|
||||||
|
@ -2630,16 +2630,16 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
|
||||||
|
|
||||||
size_t MetaspaceAux::capacity_bytes_slow() {
|
size_t MetaspaceAux::capacity_bytes_slow() {
|
||||||
#ifdef PRODUCT
|
#ifdef PRODUCT
|
||||||
// Use allocated_capacity_bytes() in PRODUCT instead of this function.
|
// Use capacity_bytes() in PRODUCT instead of this function.
|
||||||
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
|
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
|
||||||
#endif
|
#endif
|
||||||
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
|
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
|
||||||
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
|
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
|
||||||
assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
|
assert(capacity_bytes() == class_capacity + non_class_capacity,
|
||||||
err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
|
err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
|
||||||
" class_capacity + non_class_capacity " SIZE_FORMAT
|
" class_capacity + non_class_capacity " SIZE_FORMAT
|
||||||
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
|
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
|
||||||
allocated_capacity_bytes(), class_capacity + non_class_capacity,
|
capacity_bytes(), class_capacity + non_class_capacity,
|
||||||
class_capacity, non_class_capacity));
|
class_capacity, non_class_capacity));
|
||||||
|
|
||||||
return class_capacity + non_class_capacity;
|
return class_capacity + non_class_capacity;
|
||||||
|
@ -2699,14 +2699,14 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
|
||||||
"->" SIZE_FORMAT
|
"->" SIZE_FORMAT
|
||||||
"(" SIZE_FORMAT ")",
|
"(" SIZE_FORMAT ")",
|
||||||
prev_metadata_used,
|
prev_metadata_used,
|
||||||
allocated_used_bytes(),
|
used_bytes(),
|
||||||
reserved_bytes());
|
reserved_bytes());
|
||||||
} else {
|
} else {
|
||||||
gclog_or_tty->print(" " SIZE_FORMAT "K"
|
gclog_or_tty->print(" " SIZE_FORMAT "K"
|
||||||
"->" SIZE_FORMAT "K"
|
"->" SIZE_FORMAT "K"
|
||||||
"(" SIZE_FORMAT "K)",
|
"(" SIZE_FORMAT "K)",
|
||||||
prev_metadata_used/K,
|
prev_metadata_used/K,
|
||||||
allocated_used_bytes()/K,
|
used_bytes()/K,
|
||||||
reserved_bytes()/K);
|
reserved_bytes()/K);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2722,8 +2722,8 @@ void MetaspaceAux::print_on(outputStream* out) {
|
||||||
"capacity " SIZE_FORMAT "K, "
|
"capacity " SIZE_FORMAT "K, "
|
||||||
"committed " SIZE_FORMAT "K, "
|
"committed " SIZE_FORMAT "K, "
|
||||||
"reserved " SIZE_FORMAT "K",
|
"reserved " SIZE_FORMAT "K",
|
||||||
allocated_used_bytes()/K,
|
used_bytes()/K,
|
||||||
allocated_capacity_bytes()/K,
|
capacity_bytes()/K,
|
||||||
committed_bytes()/K,
|
committed_bytes()/K,
|
||||||
reserved_bytes()/K);
 reserved_bytes()/K);

@@ -2734,8 +2734,8 @@ void MetaspaceAux::print_on(outputStream* out) {
               "capacity " SIZE_FORMAT "K, "
               "committed " SIZE_FORMAT "K, "
               "reserved " SIZE_FORMAT "K",
-              allocated_used_bytes(ct)/K,
-              allocated_capacity_bytes(ct)/K,
+              used_bytes(ct)/K,
+              capacity_bytes(ct)/K,
               committed_bytes(ct)/K,
               reserved_bytes(ct)/K);
 }

@@ -2837,42 +2837,42 @@ void MetaspaceAux::verify_free_chunks() {

 void MetaspaceAux::verify_capacity() {
 #ifdef ASSERT
-  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+  size_t running_sum_capacity_bytes = capacity_bytes();
   // For purposes of the running sum of capacity, verify against capacity
   size_t capacity_in_use_bytes = capacity_bytes_slow();
   assert(running_sum_capacity_bytes == capacity_in_use_bytes,
-    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+    err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
             " capacity_bytes_slow()" SIZE_FORMAT,
             running_sum_capacity_bytes, capacity_in_use_bytes));
   for (Metaspace::MetadataType i = Metaspace::ClassType;
        i < Metaspace::MetadataTypeCount;
        i = (Metaspace::MetadataType)(i + 1)) {
     size_t capacity_in_use_bytes = capacity_bytes_slow(i);
-    assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
-      err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+    assert(capacity_bytes(i) == capacity_in_use_bytes,
+      err_msg("capacity_bytes(%u) " SIZE_FORMAT
               " capacity_bytes_slow(%u)" SIZE_FORMAT,
-              i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+              i, capacity_bytes(i), i, capacity_in_use_bytes));
   }
 #endif
 }

 void MetaspaceAux::verify_used() {
 #ifdef ASSERT
-  size_t running_sum_used_bytes = allocated_used_bytes();
+  size_t running_sum_used_bytes = used_bytes();
   // For purposes of the running sum of used, verify against used
   size_t used_in_use_bytes = used_bytes_slow();
-  assert(allocated_used_bytes() == used_in_use_bytes,
-    err_msg("allocated_used_bytes() " SIZE_FORMAT
+  assert(used_bytes() == used_in_use_bytes,
+    err_msg("used_bytes() " SIZE_FORMAT
             " used_bytes_slow()" SIZE_FORMAT,
-            allocated_used_bytes(), used_in_use_bytes));
+            used_bytes(), used_in_use_bytes));
   for (Metaspace::MetadataType i = Metaspace::ClassType;
        i < Metaspace::MetadataTypeCount;
        i = (Metaspace::MetadataType)(i + 1)) {
     size_t used_in_use_bytes = used_bytes_slow(i);
-    assert(allocated_used_bytes(i) == used_in_use_bytes,
-      err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+    assert(used_bytes(i) == used_in_use_bytes,
+      err_msg("used_bytes(%u) " SIZE_FORMAT
               " used_bytes_slow(%u)" SIZE_FORMAT,
-              i, allocated_used_bytes(i), i, used_in_use_bytes));
+              i, used_bytes(i), i, used_in_use_bytes));
   }
 #endif
 }
@@ -280,11 +280,11 @@ class MetaspaceAux : AllStatic {
   // allocated to a Metaspace.  This is used instead of
   // iterating over all the classloaders.  One for each
   // type of Metadata
-  static size_t _allocated_capacity_words[Metaspace::MetadataTypeCount];
-  // Running sum of space in all Metachunks that have
+  static size_t _capacity_words[Metaspace::MetadataTypeCount];
+  // Running sum of space in all Metachunks that
   // are being used for metadata. One for each
   // type of Metadata.
-  static size_t _allocated_used_words[Metaspace::MetadataTypeCount];
+  static size_t _used_words[Metaspace::MetadataTypeCount];

  public:
   // Decrement and increment _allocated_capacity_words

@@ -308,32 +308,32 @@ class MetaspaceAux : AllStatic {
   static size_t free_chunks_total_bytes();
   static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);

-  static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
-    return _allocated_capacity_words[mdtype];
+  static size_t capacity_words(Metaspace::MetadataType mdtype) {
+    return _capacity_words[mdtype];
   }
-  static size_t allocated_capacity_words() {
-    return allocated_capacity_words(Metaspace::NonClassType) +
-           allocated_capacity_words(Metaspace::ClassType);
+  static size_t capacity_words() {
+    return capacity_words(Metaspace::NonClassType) +
+           capacity_words(Metaspace::ClassType);
   }
-  static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
-    return allocated_capacity_words(mdtype) * BytesPerWord;
+  static size_t capacity_bytes(Metaspace::MetadataType mdtype) {
+    return capacity_words(mdtype) * BytesPerWord;
   }
-  static size_t allocated_capacity_bytes() {
-    return allocated_capacity_words() * BytesPerWord;
+  static size_t capacity_bytes() {
+    return capacity_words() * BytesPerWord;
   }

-  static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
-    return _allocated_used_words[mdtype];
+  static size_t used_words(Metaspace::MetadataType mdtype) {
+    return _used_words[mdtype];
   }
-  static size_t allocated_used_words() {
-    return allocated_used_words(Metaspace::NonClassType) +
-           allocated_used_words(Metaspace::ClassType);
+  static size_t used_words() {
+    return used_words(Metaspace::NonClassType) +
+           used_words(Metaspace::ClassType);
   }
-  static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
-    return allocated_used_words(mdtype) * BytesPerWord;
+  static size_t used_bytes(Metaspace::MetadataType mdtype) {
+    return used_words(mdtype) * BytesPerWord;
   }
-  static size_t allocated_used_bytes() {
-    return allocated_used_words() * BytesPerWord;
+  static size_t used_bytes() {
+    return used_words() * BytesPerWord;
   }

   static size_t free_bytes();
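The rename above is mechanical (it drops the redundant `allocated_` prefix), but the accessors make the accounting pattern visible: one running counter per metadata type, kept in words, with totals and byte values derived on demand. A minimal standalone sketch of that pattern; the enum and class names here are illustrative, not HotSpot's actual declarations:

    #include <cstddef>

    // Hypothetical stand-in for HotSpot's word size; 8 bytes on LP64.
    static const size_t BytesPerWord = 8;

    enum MetadataType { NonClassType = 0, ClassType = 1, MetadataTypeCount = 2 };

    class MetaspaceStats {
      // One running sum per metadata type, maintained in words.
      static size_t _capacity_words[MetadataTypeCount];
    public:
      static void inc_capacity(MetadataType t, size_t words) { _capacity_words[t] += words; }
      static size_t capacity_words(MetadataType t) { return _capacity_words[t]; }
      // Totals are computed by summing the per-type counters...
      static size_t capacity_words() { return capacity_words(NonClassType) + capacity_words(ClassType); }
      // ...and bytes are always derived, never stored separately.
      static size_t capacity_bytes() { return capacity_words() * BytesPerWord; }
    };

    size_t MetaspaceStats::_capacity_words[MetadataTypeCount] = {0, 0};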
@@ -66,7 +66,7 @@ class MetaspacePerfCounters: public CHeapObj<mtInternal> {
 MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;

 size_t MetaspaceCounters::used() {
-  return MetaspaceAux::allocated_used_bytes();
+  return MetaspaceAux::used_bytes();
 }

 size_t MetaspaceCounters::capacity() {

@@ -98,7 +98,7 @@ void MetaspaceCounters::update_performance_counters() {
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;

 size_t CompressedClassSpaceCounters::used() {
-  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+  return MetaspaceAux::used_bytes(Metaspace::ClassType);
 }

 size_t CompressedClassSpaceCounters::capacity() {
@@ -144,6 +144,10 @@ void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
 // CDS support. Create a new resolved_references array.
 void ConstantPool::restore_unshareable_info(TRAPS) {

+  // Only create the new resolved references array and lock if it hasn't been
+  // attempted before
+  if (resolved_references() != NULL) return;
+
   // restore the C++ vtable from the shared archive
   restore_vtable();
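The early return makes restore_unshareable_info safe to re-run after a failed first attempt (for example an OOM part-way through CDS restore): the work is keyed off a field that is only set once it has succeeded. A rough sketch of that idempotency guard, with hypothetical names:

    struct Archive { /* ... */ };

    class SharedEntry {
      void* _resolved;   // NULL until restore has succeeded once
    public:
      SharedEntry() : _resolved(nullptr) {}
      void restore(Archive* a) {
        // Only do the work if it hasn't completed before; a prior
        // attempt may have thrown part-way through.
        if (_resolved != nullptr) return;
        void* r = allocate_from(a);   // may fail or throw
        _resolved = r;                // set last, so the guard is reliable
      }
    private:
      void* allocate_from(Archive*) { return new char[16]; }
    };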
@@ -1289,17 +1289,18 @@ void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
 }


-void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle mirror, TRAPS) {
   instanceKlassHandle h_this(THREAD, this);
-  do_local_static_fields_impl(h_this, f, CHECK);
+  do_local_static_fields_impl(h_this, f, mirror, CHECK);
 }


-void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k,
+                             void f(fieldDescriptor* fd, Handle, TRAPS), Handle mirror, TRAPS) {
   for (JavaFieldStream fs(this_k()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
       fieldDescriptor& fd = fs.field_descriptor();
-      f(&fd, CHECK);
+      f(&fd, mirror, CHECK);
     }
   }
 }

@@ -2240,9 +2241,7 @@ void InstanceKlass::restore_unshareable_info(TRAPS) {
   int num_methods = methods->length();
   for (int index2 = 0; index2 < num_methods; ++index2) {
     methodHandle m(THREAD, methods->at(index2));
-    m()->link_method(m, CHECK);
-    // restore method's vtable by calling a virtual function
-    m->restore_vtable();
+    m->restore_unshareable_info(CHECK);
   }
   if (JvmtiExport::has_redefined_a_class()) {
     // Reinitialize vtable because RedefineClasses may have changed some

@@ -3409,6 +3408,10 @@ static void purge_previous_versions_internal(InstanceKlass* ik, int emcp_method_
           ("purge: %s(%s): prev method @%d in version @%d is alive",
           method->name()->as_C_string(),
           method->signature()->as_C_string(), j, i));
+        if (method->method_data() != NULL) {
+          // Clean out any weak method links
+          method->method_data()->clean_weak_method_links();
+        }
       }
     }
   }

@@ -3418,6 +3421,14 @@ static void purge_previous_versions_internal(InstanceKlass* ik, int emcp_method_
       ("purge: previous version stats: live=%d, deleted=%d", live_count,
       deleted_count));
   }

+  Array<Method*>* methods = ik->methods();
+  int num_methods = methods->length();
+  for (int index2 = 0; index2 < num_methods; ++index2) {
+    if (methods->at(index2)->method_data() != NULL) {
+      methods->at(index2)->method_data()->clean_weak_method_links();
+    }
+  }
 }

 // External interface for use during class unloading.
@@ -802,7 +802,7 @@ class InstanceKlass: public Klass {
   // Iterators
   void do_local_static_fields(FieldClosure* cl);
   void do_nonstatic_fields(FieldClosure* cl); // including inherited fields
-  void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS);
+  void do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle, TRAPS);

   void methods_do(void f(Method* method));
   void array_klasses_do(void f(Klass* k));

@@ -1010,7 +1010,7 @@ private:
   static void set_initialization_state_and_notify_impl (instanceKlassHandle this_k, ClassState state, TRAPS);
   static void call_class_initializer_impl (instanceKlassHandle this_k, TRAPS);
   static Klass* array_klass_impl (instanceKlassHandle this_k, bool or_null, int n, TRAPS);
-  static void do_local_static_fields_impl (instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS);
+  static void do_local_static_fields_impl (instanceKlassHandle this_k, void f(fieldDescriptor* fd, Handle, TRAPS), Handle, TRAPS);
   /* jni_id_for_impl for jfieldID only */
   static JNIid* jni_id_for_impl (instanceKlassHandle this_k, int offset);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -367,7 +367,12 @@ instanceOop InstanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
   // Query before forming handle.
   int size = instance_size(k);
   KlassHandle h_k(THREAD, this);
-  instanceOop i = (instanceOop) CollectedHeap::Class_obj_allocate(h_k, size, k, CHECK_NULL);
+  instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+  // Since mirrors can be variable sized because of the static fields, store
+  // the size in the mirror itself.
+  java_lang_Class::set_oop_size(i, size);
+
   return i;
 }
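Storing the size in the mirror itself means later heap walkers can step over a variable-sized mirror without consulting its class. A small sketch of the same self-describing-size idea, independent of HotSpot's oop layout:

    #include <cstdlib>
    #include <cstring>

    // A variable-sized record that stores its own size in a header slot,
    // so a walker does not need external metadata to step over it.
    struct VarRec {
      size_t size_in_bytes;   // written once, at allocation time
      // payload follows
    };

    VarRec* alloc_var_rec(size_t payload) {
      size_t total = sizeof(VarRec) + payload;
      VarRec* r = static_cast<VarRec*>(std::malloc(total));
      std::memset(r, 0, total);
      r->size_in_bytes = total;   // analogous to java_lang_Class::set_oop_size
      return r;
    }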
@@ -475,12 +475,8 @@ void Klass::oops_do(OopClosure* cl) {
 }

 void Klass::remove_unshareable_info() {
-  if (!DumpSharedSpaces) {
-    // Clean up after OOM during class loading
-    if (class_loader_data() != NULL) {
-      class_loader_data()->remove_class(this);
-    }
-  }
+  assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
+
   set_subklass(NULL);
   set_next_sibling(NULL);
   // Clear the java mirror

@@ -492,6 +488,10 @@ void Klass::remove_unshareable_info() {
 }

 void Klass::restore_unshareable_info(TRAPS) {
+  // If an exception happened during CDS restore, some of these fields may already be
+  // set.  We leave the class on the CLD list, even if incomplete so that we don't
+  // modify the CLD list outside a safepoint.
+  if (class_loader_data() == NULL) {
   ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
   // Restore class_loader_data to the null class loader data
   set_class_loader_data(loader_data);

@@ -499,10 +499,15 @@ void Klass::restore_unshareable_info(TRAPS) {
   // Add to null class loader list first before creating the mirror
   // (same order as class file parsing)
   loader_data->add_class(this);
+  }

   // Recreate the class mirror.  The protection_domain is always null for
   // boot loader, for now.
+  // Only recreate it if not present.  A previous attempt to restore may have
+  // gotten an OOM later but keep the mirror if it was created.
+  if (java_mirror() == NULL) {
   java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+  }
 }

 Klass* Klass::array_klass_or_null(int rank) {
@@ -903,6 +903,19 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
   return adapter->get_c2i_entry();
 }

+void Method::restore_unshareable_info(TRAPS) {
+  // Since restore_unshareable_info can be called more than once for a method, don't
+  // redo any work.   If this field is restored, there is nothing to do.
+  if (_from_compiled_entry == NULL) {
+    // restore method's vtable by calling a virtual function
+    restore_vtable();
+
+    methodHandle mh(THREAD, this);
+    link_method(mh, CHECK);
+  }
+}
+
 // The verified_code_entry() must be called when a invoke is resolved
 // on this method.
@@ -123,6 +123,8 @@ class Method : public Metadata {
   void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
   bool is_method() const volatile { return true; }

+  void restore_unshareable_info(TRAPS);
+
   // accessors for instance variables

   ConstMethod* constMethod() const { return _constMethod; }
@@ -1531,9 +1531,35 @@ void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset)
   }
 }

-// Remove SpeculativeTrapData entries that reference an unloaded
-// method
-void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
+class CleanExtraDataClosure : public StackObj {
+public:
+  virtual bool is_live(Method* m) = 0;
+};
+
+// Check for entries that reference an unloaded method
+class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
+private:
+  BoolObjectClosure* _is_alive;
+public:
+  CleanExtraDataKlassClosure(BoolObjectClosure* is_alive) : _is_alive(is_alive) {}
+  bool is_live(Method* m) {
+    return m->method_holder()->is_loader_alive(_is_alive);
+  }
+};
+
+// Check for entries that reference a redefined method
+class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
+public:
+  CleanExtraDataMethodClosure() {}
+  bool is_live(Method* m) {
+    return m->on_stack();
+  }
+};
+
+
+// Remove SpeculativeTrapData entries that reference an unloaded or
+// redefined method
+void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
   DataLayout* dp  = extra_data_base();
   DataLayout* end = extra_data_limit();

@@ -1544,7 +1570,7 @@ void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
       Method* m = data->method();
       assert(m != NULL, "should have a method");
-      if (!m->method_holder()->is_loader_alive(is_alive)) {
+      if (!cl->is_live(m)) {
         // "shift" accumulates the number of cells for dead
         // SpeculativeTrapData entries that have been seen so
         // far. Following entries must be shifted left by that many

@@ -1575,9 +1601,9 @@ void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
   }
 }

-// Verify there's no unloaded method referenced by a
+// Verify there's no unloaded or redefined method referenced by a
 // SpeculativeTrapData entry
-void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
+void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
 #ifdef ASSERT
   DataLayout* dp  = extra_data_base();
   DataLayout* end = extra_data_limit();

@@ -1587,7 +1613,7 @@ void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
     case DataLayout::speculative_trap_data_tag: {
       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
       Method* m = data->method();
-      assert(m != NULL && m->method_holder()->is_loader_alive(is_alive), "Method should exist");
+      assert(m != NULL && cl->is_live(m), "Method should exist");
       break;
     }
     case DataLayout::bit_data_tag:

@@ -1613,6 +1639,19 @@ void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
     parameters->clean_weak_klass_links(is_alive);
   }

-  clean_extra_data(is_alive);
-  verify_extra_data_clean(is_alive);
+  CleanExtraDataKlassClosure cl(is_alive);
+  clean_extra_data(&cl);
+  verify_extra_data_clean(&cl);
+}
+
+void MethodData::clean_weak_method_links() {
+  for (ProfileData* data = first_data();
+       is_valid(data);
+       data = next_data(data)) {
+    data->clean_weak_method_links();
+  }
+
+  CleanExtraDataMethodClosure cl;
+  clean_extra_data(&cl);
+  verify_extra_data_clean(&cl);
 }
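The closure hierarchy lets a single sweep over the extra-data section serve two different liveness questions: class unloading (is the holder's loader still alive?) and redefinition (is the method still on stack?). A compact sketch of the same strategy-object idea using plain standard C++ in place of HotSpot's types:

    #include <vector>

    struct Method { bool holder_alive; bool on_stack; };

    // Abstract liveness test, mirroring CleanExtraDataClosure.
    struct LivenessClosure {
      virtual ~LivenessClosure() {}
      virtual bool is_live(Method* m) = 0;
    };

    // GC variant: keep entries whose holder is still loaded.
    struct KlassLiveness : LivenessClosure {
      bool is_live(Method* m) { return m->holder_alive; }
    };

    // Redefinition variant: keep entries whose method is still on stack.
    struct MethodLiveness : LivenessClosure {
      bool is_live(Method* m) { return m->on_stack; }
    };

    // One sweep, parameterized by the liveness policy.
    void clean(std::vector<Method*>& entries, LivenessClosure* cl) {
      size_t keep = 0;
      for (size_t i = 0; i < entries.size(); i++) {
        if (cl->is_live(entries[i])) entries[keep++] = entries[i];
      }
      entries.resize(keep);
    }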
@@ -251,6 +251,9 @@ public:

   // GC support
   void clean_weak_klass_links(BoolObjectClosure* cl);
+
+  // Redefinition support
+  void clean_weak_method_links();
 };


@@ -506,6 +509,9 @@ public:
   // GC support
   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {}

+  // Redefinition support
+  virtual void clean_weak_method_links() {}
+
   // CI translation: ProfileData can represent both MethodDataOop data
   // as well as CIMethodData data. This function is provided for translating
   // an oop in a ProfileData to the ci equivalent. Generally speaking,

@@ -1989,6 +1995,7 @@ public:
 //

 CC_INTERP_ONLY(class BytecodeInterpreter;)
+class CleanExtraDataClosure;

 class MethodData : public Metadata {
   friend class VMStructs;

@@ -2146,9 +2153,9 @@ private:
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();

-  void clean_extra_data(BoolObjectClosure* is_alive);
+  void clean_extra_data(CleanExtraDataClosure* cl);
   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
-  void verify_extra_data_clean(BoolObjectClosure* is_alive);
+  void verify_extra_data_clean(CleanExtraDataClosure* cl);

 public:
   static int header_size() {

@@ -2440,6 +2447,8 @@ public:
   static bool profile_return_jsr292_only();

   void clean_method_data(BoolObjectClosure* is_alive);
+
+  void clean_weak_method_links();
 };

 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
@@ -70,6 +70,7 @@ public:

 JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
+  C->print_inlining_update(this);

   if (is_osr()) {
     // The JVMS for a OSR has a single argument (see its TypeFunc).

@@ -126,6 +127,7 @@ class DirectCallGenerator : public CallGenerator {

 JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   bool is_static = method()->is_static();
   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                              : SharedRuntime::get_resolve_opt_virtual_call_stub();

@@ -178,6 +180,8 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);

+  kit.C->print_inlining_update(this);
+
   if (kit.C->log() != NULL) {
     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
   }

@@ -262,14 +266,17 @@ CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {

 // Allow inlining decisions to be delayed
 class LateInlineCallGenerator : public DirectCallGenerator {
+ private:
+  // unique id for log compilation
+  jlong _unique_id;

  protected:
   CallGenerator* _inline_cg;

   virtual bool do_late_inline_check(JVMState* jvms) { return true; }

  public:
   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
-    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
+    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

   virtual bool is_late_inline() const { return true; }

@@ -278,7 +285,8 @@ class LateInlineCallGenerator : public DirectCallGenerator {

   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
+
+    C->log_inline_id(this);

     // Record that this call site should be revisited once the main
     // parse is finished.

@@ -296,10 +304,19 @@ class LateInlineCallGenerator : public DirectCallGenerator {
   virtual void print_inlining_late(const char* msg) {
     CallNode* call = call_node();
     Compile* C = Compile::current();
-    C->print_inlining_insert(this);
+    C->print_inlining_assert_ready();
     C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
+    C->print_inlining_move_to(this);
+    C->print_inlining_update_delayed(this);
   }

+  virtual void set_unique_id(jlong id) {
+    _unique_id = id;
+  }
+
+  virtual jlong unique_id() const {
+    return _unique_id;
+  }
 };

 void LateInlineCallGenerator::do_late_inline() {

@@ -360,6 +377,12 @@ void LateInlineCallGenerator::do_late_inline() {
     map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
   }

+  C->print_inlining_assert_ready();
+
+  C->print_inlining_move_to(this);
+
+  C->log_late_inline(this);
+
   // This check is done here because for_method_handle_inline() method
   // needs jvms for inlined state.
   if (!do_late_inline_check(jvms)) {

@@ -367,19 +390,6 @@ void LateInlineCallGenerator::do_late_inline() {
     return;
   }

-  C->print_inlining_insert(this);
-
-  CompileLog* log = C->log();
-  if (log != NULL) {
-    log->head("late_inline method='%d'", log->identify(method()));
-    JVMState* p = jvms;
-    while (p != NULL) {
-      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
-      p = p->caller();
-    }
-    log->tail("late_inline");
-  }
-
   // Setup default node notes to be picked up by the inlining
   Node_Notes* old_nn = C->default_node_notes();
   if (old_nn != NULL) {

@@ -388,7 +398,7 @@ void LateInlineCallGenerator::do_late_inline() {
     C->set_default_node_notes(entry_nn);
   }

-  // Now perform the inling using the synthesized JVMState
+  // Now perform the inlining using the synthesized JVMState
   JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
   if (new_jvms == NULL)  return;  // no change
   if (C->failing())      return;

@@ -431,25 +441,24 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {

   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
+
+    Compile* C = Compile::current();
     if (_input_not_const) {
       // inlining won't be possible so no need to enqueue right now.
       call_node()->set_generator(this);
     } else {
-      Compile::current()->add_late_inline(this);
+      C->add_late_inline(this);
     }
     return new_jvms;
   }
-
-  virtual void print_inlining_late(const char* msg) {
-    if (!_input_not_const) return;
-    LateInlineCallGenerator::print_inlining_late(msg);
-  }
 };

 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

+  Compile::current()->print_inlining_update_delayed(this);
+
   if (!_input_not_const) {
     _attempt++;
   }

@@ -479,7 +488,8 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {

   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
+
+    C->log_inline_id(this);

     C->add_string_late_inline(this);

@@ -502,7 +512,8 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

   virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
-    C->print_inlining_skip(this);
+
+    C->log_inline_id(this);

     C->add_boxing_late_inline(this);

@@ -554,6 +565,8 @@ CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,

 JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
+  C->print_inlining_update(this);
+
   if (C->log() != NULL) {
     C->log()->elem("warm_call bci='%d'", jvms->bci());
   }

@@ -632,6 +645,7 @@ CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,

 JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   PhaseGVN& gvn = kit.gvn();
   // We need an explicit receiver null_check before checking its type.
   // We share a map with the caller, so his JVMS gets adjusted.

@@ -779,6 +793,10 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
+      } else {
+        const char* msg = "receiver not constant";
+        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
+        C->log_inline_failure(msg);
       }
     }
     break;

@@ -844,11 +862,14 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
           // provide us with a type
           speculative_receiver_type = receiver_type->speculative_type();
         }

         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
         assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
+      } else {
+        const char* msg = "member_name not constant";
+        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
+        C->log_inline_failure(msg);
       }
     }
     break;

@@ -904,6 +925,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_pa
   if (kit.failing())
     return NULL;  // might happen because of NodeCountInliningCutoff

+  kit.C->print_inlining_update(this);
   SafePointNode* slow_map = NULL;
   JVMState* slow_jvms;
   if (slow_ctl != NULL) {

@@ -1017,6 +1039,7 @@ CallGenerator::for_uncommon_trap(ciMethod* m,

 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
+  kit.C->print_inlining_update(this);
   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   int nargs = method()->arg_size();
   kit.inc_sp(nargs);
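The new set_unique_id/unique_id hooks carry an id that Compile::log_inline_id packs from several fields so LogCompilation can correlate late-inline events. A sketch of that packing, with the field layout inferred from the shifts in the patch rather than taken from any header:

    #include <cstdint>

    // Inferred id layout:
    //   bits  0..31  node count at the call site (unique())
    //   bit   32     OSR-compilation marker (only meaningful with CICountOSR)
    //   bits 33..    compilation id
    int64_t make_inline_id(uint32_t node_unique, uint32_t compile_id, bool is_osr) {
      return (int64_t)node_unique
           + ((int64_t)compile_id << 33)
           + (is_osr ? ((int64_t)1 << 32) : 0);
    }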
@@ -84,6 +84,9 @@ class CallGenerator : public ResourceObj {

   virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

+  virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); };
+  virtual jlong unique_id() const { fatal("unique id only for late inlines"); return 0; };
+
   // Note:  It is possible for a CG to be both inline and virtual.
   // (The hashCode intrinsic does a vtable check and an inlined fast path.)
@@ -662,6 +662,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
+    _print_inlining_stream(NULL),
     _print_inlining_idx(0),
     _preserve_jvm_state(0) {
   C = this;

@@ -723,9 +724,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);

-  if (print_inlining() || print_intrinsics()) {
-    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
-  }
+  print_inlining_init();
   { // Scope for timing the parser
     TracePhase t3("parse", &_t_parser, true);

@@ -967,6 +966,7 @@ Compile::Compile( ciEnv* ci_env,
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
+    _print_inlining_stream(NULL),
     _print_inlining_idx(0),
     _preserve_jvm_state(0),
     _allowed_reasons(0) {

@@ -2023,6 +2023,8 @@ void Compile::Optimize() {
   ResourceMark rm;
   int loop_opts_cnt;

+  print_inlining_reinit();
+
   NOT_PRODUCT( verify_graph_edges(); )

   print_method(PHASE_AFTER_PARSING);

@@ -3755,36 +3757,163 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
   }
 }

-void Compile::dump_inlining() {
+// The message about the current inlining is accumulated in
+// _print_inlining_stream and transfered into the _print_inlining_list
+// once we know whether inlining succeeds or not. For regular
+// inlining, messages are appended to the buffer pointed by
+// _print_inlining_idx in the _print_inlining_list. For late inlining,
+// a new buffer is added after _print_inlining_idx in the list. This
+// way we can update the inlining message for late inlining call site
+// when the inlining is attempted again.
+void Compile::print_inlining_init() {
   if (print_inlining() || print_intrinsics()) {
+    _print_inlining_stream = new stringStream();
+    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
+  }
+}
+
+void Compile::print_inlining_reinit() {
+  if (print_inlining() || print_intrinsics()) {
+    // Re allocate buffer when we change ResourceMark
+    _print_inlining_stream = new stringStream();
+  }
+}
+
+void Compile::print_inlining_reset() {
+  _print_inlining_stream->reset();
+}
+
+void Compile::print_inlining_commit() {
+  assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
+  // Transfer the message from _print_inlining_stream to the current
+  // _print_inlining_list buffer and clear _print_inlining_stream.
+  _print_inlining_list->at(_print_inlining_idx).ss()->write(_print_inlining_stream->as_string(), _print_inlining_stream->size());
+  print_inlining_reset();
+}
+
+void Compile::print_inlining_push() {
+  // Add new buffer to the _print_inlining_list at current position
+  _print_inlining_idx++;
+  _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
+}
+
+Compile::PrintInliningBuffer& Compile::print_inlining_current() {
+  return _print_inlining_list->at(_print_inlining_idx);
+}
+
+void Compile::print_inlining_update(CallGenerator* cg) {
+  if (print_inlining() || print_intrinsics()) {
+    if (!cg->is_late_inline()) {
+      if (print_inlining_current().cg() != NULL) {
+        print_inlining_push();
+      }
+      print_inlining_commit();
+    } else {
+      if (print_inlining_current().cg() != cg &&
+          (print_inlining_current().cg() != NULL ||
+           print_inlining_current().ss()->size() != 0)) {
+        print_inlining_push();
+      }
+      print_inlining_commit();
+      print_inlining_current().set_cg(cg);
+    }
+  }
+}
+
+void Compile::print_inlining_move_to(CallGenerator* cg) {
+  // We resume inlining at a late inlining call site. Locate the
+  // corresponding inlining buffer so that we can update it.
+  if (print_inlining()) {
+    for (int i = 0; i < _print_inlining_list->length(); i++) {
+      if (_print_inlining_list->adr_at(i)->cg() == cg) {
+        _print_inlining_idx = i;
+        return;
+      }
+    }
+    ShouldNotReachHere();
+  }
+}
+
+void Compile::print_inlining_update_delayed(CallGenerator* cg) {
+  if (print_inlining()) {
+    assert(_print_inlining_stream->size() > 0, "missing inlining msg");
+    assert(print_inlining_current().cg() == cg, "wrong entry");
+    // replace message with new message
+    _print_inlining_list->at_put(_print_inlining_idx, PrintInliningBuffer());
+    print_inlining_commit();
+    print_inlining_current().set_cg(cg);
+  }
+}
+
+void Compile::print_inlining_assert_ready() {
+  assert(!_print_inlining || _print_inlining_stream->size() == 0, "loosing data");
+}
+
+void Compile::dump_inlining() {
+  bool do_print_inlining = print_inlining() || print_intrinsics();
+  if (do_print_inlining || log() != NULL) {
     // Print inlining message for candidates that we couldn't inline
-    // for lack of space or non constant receiver
+    // for lack of space
     for (int i = 0; i < _late_inlines.length(); i++) {
       CallGenerator* cg = _late_inlines.at(i);
-      cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff");
+      if (!cg->is_mh_late_inline()) {
+        const char* msg = "live nodes > LiveNodeCountInliningCutoff";
+        if (do_print_inlining) {
+          cg->print_inlining_late(msg);
+        }
+        log_late_inline_failure(cg, msg);
+      }
     }
-    Unique_Node_List useful;
-    useful.push(root());
-    for (uint next = 0; next < useful.size(); ++next) {
-      Node* n  = useful.at(next);
-      if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
-        CallNode* call = n->as_Call();
-        CallGenerator* cg = call->generator();
-        cg->print_inlining_late("receiver not constant");
-      }
-      uint max = n->len();
-      for ( uint i = 0; i < max; ++i ) {
-        Node *m = n->in(i);
-        if ( m == NULL ) continue;
-        useful.push(m);
-      }
-    }
+  }
+  if (do_print_inlining) {
     for (int i = 0; i < _print_inlining_list->length(); i++) {
       tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }

+void Compile::log_late_inline(CallGenerator* cg) {
+  if (log() != NULL) {
+    log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()),
+                cg->unique_id());
+    JVMState* p = cg->call_node()->jvms();
+    while (p != NULL) {
+      log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method()));
+      p = p->caller();
+    }
+    log()->tail("late_inline");
+  }
+}
+
+void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) {
+  log_late_inline(cg);
+  if (log() != NULL) {
+    log()->inline_fail(msg);
+  }
+}
+
+void Compile::log_inline_id(CallGenerator* cg) {
+  if (log() != NULL) {
+    // The LogCompilation tool needs a unique way to identify late
+    // inline call sites. This id must be unique for this call site in
+    // this compilation. Try to have it unique across compilations as
+    // well because it can be convenient when grepping through the log
+    // file.
+    // Distinguish OSR compilations from others in case CICountOSR is
+    // on.
+    jlong id = ((jlong)unique()) + (((jlong)compile_id()) << 33) + (CICountOSR && is_osr_compilation() ? ((jlong)1) << 32 : 0);
+    cg->set_unique_id(id);
+    log()->elem("inline_id id='" JLONG_FORMAT "'", id);
+  }
+}
+
+void Compile::log_inline_failure(const char* msg) {
+  if (C->log() != NULL) {
+    C->log()->inline_fail(msg);
+  }
+}
+
+
 // Dump inlining replay data to the stream.
 // Don't change thread state and acquire any locks.
 void Compile::dump_inline_data(outputStream* out) {

@@ -3962,8 +4091,8 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
     worklist.push(root());
     for (uint next = 0; next < worklist.size(); ++next) {
       Node *n  = worklist.at(next);
-      const Type* t = igvn.type(n);
-      assert(t == t->remove_speculative(), "no more speculative types");
+      const Type* t = igvn.type_or_null(n);
+      assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
       if (n->is_Type()) {
         t = n->as_Type()->type();
         assert(t == t->remove_speculative(), "no more speculative types");
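The comment block added above describes an accumulate-then-commit protocol: inlining messages build up in a scratch stream and are copied into a per-call-site buffer only once the outcome is known, so a late-inline retry can replace its earlier message. A toy model of that protocol in standard C++ (deliberately not HotSpot's stringStream/GrowableArray API):

    #include <sstream>
    #include <string>
    #include <vector>

    class InliningLog {
      std::ostringstream scratch;          // stands in for _print_inlining_stream
      std::vector<std::string> buffers;    // stands in for _print_inlining_list
      size_t idx;                          // stands in for _print_inlining_idx
    public:
      InliningLog() : buffers(1), idx(0) {}
      std::ostream& stream() { return scratch; }
      // print_inlining_commit: move the accumulated message into the
      // current buffer, then clear the scratch stream (print_inlining_reset).
      void commit() {
        buffers[idx] += scratch.str();
        scratch.str("");
      }
      // print_inlining_push: open a fresh buffer after the current slot,
      // used when a late-inline call site gets its own entry.
      void push() {
        buffers.insert(buffers.begin() + ++idx, std::string());
      }
    };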
@@ -416,6 +416,7 @@ class Compile : public Phase {
     void set_cg(CallGenerator* cg)  { _cg = cg; }
   };

+  stringStream* _print_inlining_stream;
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   int _print_inlining_idx;

@@ -433,33 +434,26 @@ class Compile : public Phase {

   void* _replay_inline_data; // Pointer to data loaded from file

+  void print_inlining_init();
+  void print_inlining_reinit();
+  void print_inlining_commit();
+  void print_inlining_push();
+  PrintInliningBuffer& print_inlining_current();
+
+  void log_late_inline_failure(CallGenerator* cg, const char* msg);
+
  public:

   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
+    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
+    return _print_inlining_stream;
   }

-  void print_inlining_skip(CallGenerator* cg) {
-    if (_print_inlining) {
-      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
-      _print_inlining_idx++;
-      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
-    }
-  }
-
-  void print_inlining_insert(CallGenerator* cg) {
-    if (_print_inlining) {
-      for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->adr_at(i)->cg() == cg) {
-          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining_idx = i+1;
-          _print_inlining_list->adr_at(i)->set_cg(NULL);
-          return;
-        }
-      }
-      ShouldNotReachHere();
-    }
-  }
+  void print_inlining_update(CallGenerator* cg);
+  void print_inlining_update_delayed(CallGenerator* cg);
+  void print_inlining_move_to(CallGenerator* cg);
+  void print_inlining_assert_ready();
+  void print_inlining_reset();

   void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
     stringStream ss;

@@ -467,6 +461,10 @@ class Compile : public Phase {
     print_inlining_stream()->print(ss.as_string());
   }

+  void log_late_inline(CallGenerator* cg);
+  void log_inline_id(CallGenerator* cg);
+  void log_inline_failure(const char* msg);
+
   void* replay_inline_data() const { return _replay_inline_data; }

   // Dump inlining replay data to the stream.
@@ -104,6 +104,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
       log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
     }
   }
+  if (callee->is_method_handle_intrinsic()) {
+    log->print(" method_handle_intrinsic='1'");
+  }
   log->end_elem();
 }

@@ -294,6 +297,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_does_dispatch) {
+    const char* msg = "virtual call";
+    if (PrintInlining) print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
+    C->log_inline_failure(msg);
     return CallGenerator::for_virtual_call(callee, vtable_index);
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,

@@ -396,6 +402,8 @@ void Parse::do_call() {
   // our contribution to it is cleaned up right here.
   kill_dead_locals();

+  C->print_inlining_assert_ready();
+
   // Set frequently used booleans
   const bool is_virtual = bc() == Bytecodes::_invokevirtual;
   const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;

@@ -531,7 +539,8 @@ void Parse::do_call() {
       // intrinsic was expecting to optimize. Should always be possible to
       // get a normal java call that may inline in that case
       cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
-      if ((new_jvms = cg->generate(jvms, this)) == NULL) {
+      new_jvms = cg->generate(jvms, this);
+      if (new_jvms == NULL) {
         guarantee(failing(), "call failed to generate: calls should work");
         return;
       }
|
@@ -620,6 +620,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
     }
     // Push the result from the inlined method onto the stack.
     kit.push_result();
+    C->print_inlining_update(this);
     return kit.transfer_exceptions_into_jvms();
   }
 
@@ -637,6 +638,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
     }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
+  C->print_inlining_update(this);
   return NULL;
 }
 
@@ -210,7 +210,14 @@ public:
   bool may_be_short_branch() const { return (flags() & Flag_may_be_short_branch) != 0; }
 
   // Avoid back to back some instructions on some CPUs.
-  bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
+  enum AvoidBackToBackFlag { AVOID_NONE = 0,
+                             AVOID_BEFORE = Flag_avoid_back_to_back_before,
+                             AVOID_AFTER = Flag_avoid_back_to_back_after,
+                             AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER };
+
+  bool avoid_back_to_back(AvoidBackToBackFlag flag_value) const {
+    return (flags() & flag_value) == flag_value;
+  }
 
   // instruction implemented with a call
   bool has_call() const { return (flags() & Flag_has_call) != 0; }
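The hunk above replaces the single avoid_back_to_back query with a before/after pair, and the membership test requires every requested bit to be set. A minimal standalone sketch of that test (the enum values here are illustrative stand-ins, not the real Flag_* constants):

#include <cstdio>

// Illustrative stand-ins for the Node flag bits; the real constants are
// derived from Flag_may_be_short_branch in node.hpp.
enum AvoidBackToBackFlag {
  AVOID_NONE             = 0,
  AVOID_BEFORE           = 1 << 0,  // models Flag_avoid_back_to_back_before
  AVOID_AFTER            = 1 << 1,  // models Flag_avoid_back_to_back_after
  AVOID_BEFORE_AND_AFTER = AVOID_BEFORE | AVOID_AFTER
};

// Mirrors MachNode::avoid_back_to_back(): true only when *all* requested
// bits are present, so a node marked AVOID_BEFORE does not answer true
// for AVOID_BEFORE_AND_AFTER.
static bool avoid_back_to_back(unsigned flags, AvoidBackToBackFlag f) {
  return (flags & f) == static_cast<unsigned>(f);
}

int main() {
  unsigned flags = AVOID_BEFORE;
  printf("%d %d %d\n",
         avoid_back_to_back(flags, AVOID_BEFORE),             // 1
         avoid_back_to_back(flags, AVOID_AFTER),              // 0
         avoid_back_to_back(flags, AVOID_BEFORE_AND_AFTER));  // 0
  return 0;
}

Note that AVOID_NONE trivially matches any node, since (flags & 0) == 0 always holds.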
@@ -653,8 +653,9 @@ public:
     Flag_is_cisc_alternate         = Flag_is_Con << 1,
     Flag_is_dead_loop_safe         = Flag_is_cisc_alternate << 1,
     Flag_may_be_short_branch       = Flag_is_dead_loop_safe << 1,
-    Flag_avoid_back_to_back        = Flag_may_be_short_branch << 1,
-    Flag_has_call                  = Flag_avoid_back_to_back << 1,
+    Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1,
+    Flag_avoid_back_to_back_after  = Flag_avoid_back_to_back_before << 1,
+    Flag_has_call                  = Flag_avoid_back_to_back_after << 1,
     Flag_is_expensive              = Flag_has_call << 1,
     _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
   };
@@ -411,7 +411,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
           blk_size += nop_size;
         }
       }
-      if (mach->avoid_back_to_back()) {
+      if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
         // Nop is inserted between "avoid back to back" instructions.
         // ScheduleAndBundle() can rearrange nodes in a block,
         // check for all offsets inside this block.
@@ -439,7 +439,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
        last_call_adr = blk_starts[i]+blk_size;
       }
       // Remember end of avoid_back_to_back offset
-      if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
+      if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
        last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
       }
     }
@@ -525,11 +525,11 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
           int new_size = replacement->size(_regalloc);
           int diff = br_size - new_size;
           assert(diff >= (int)nop_size, "short_branch size should be smaller");
-          // Conservatively take into accound padding between
+          // Conservatively take into account padding between
           // avoid_back_to_back branches. Previous branch could be
           // converted into avoid_back_to_back branch during next
           // rounds.
-          if (needs_padding && replacement->avoid_back_to_back()) {
+          if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
            jmp_offset[i] += nop_size;
            diff -= nop_size;
           }
@@ -548,7 +548,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
         }
       } // (mach->may_be_short_branch())
       if (mach != NULL && (mach->may_be_short_branch() ||
-                           mach->avoid_back_to_back())) {
+                           mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
         last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
       }
       blk_starts[i+1] -= adjust_block_start;
@@ -1313,7 +1313,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
           padding = nop_size;
         }
-        if (padding == 0 && mach->avoid_back_to_back() &&
+        if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
             current_offset == last_avoid_back_to_back_offset) {
           // Avoid back to back some instructions.
           padding = nop_size;
@@ -1407,7 +1407,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
             int new_size = replacement->size(_regalloc);
             assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
             // Insert padding between avoid_back_to_back branches.
-            if (needs_padding && replacement->avoid_back_to_back()) {
+            if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
              MachNode *nop = new (this) MachNopNode();
              block->insert_node(nop, j++);
              _cfg->map_node_to_block(nop, block);
@@ -1515,7 +1515,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
         last_call_offset = current_offset;
       }
 
-      if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
+      if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
         // Avoid back to back some instructions.
         last_avoid_back_to_back_offset = current_offset;
       }
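Taken together, the output.cpp hunks above pad only when an instruction marked AVOID_AFTER ends exactly where one marked AVOID_BEFORE would begin. A toy emission loop sketching that bookkeeping (the Instr type and all sizes are invented for illustration):

// Toy model of the fill_buffer() bookkeeping: remember where the last
// AVOID_AFTER instruction ended, and insert a nop only when an
// AVOID_BEFORE instruction would start at exactly that offset.
struct Instr { int size; bool avoid_before; bool avoid_after; };

int emit_size(const Instr* code, int n, int nop_size) {
  int offset = 0;
  int last_avoid_back_to_back_offset = -1;
  for (int i = 0; i < n; i++) {
    if (code[i].avoid_before && offset == last_avoid_back_to_back_offset) {
      offset += nop_size;  // pad between back-to-back instructions
    }
    offset += code[i].size;
    if (code[i].avoid_after) {
      last_avoid_back_to_back_offset = offset;
    }
  }
  return offset;  // total code size, including padding
}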
@@ -1529,6 +1529,21 @@ void PhaseCCP::do_transform() {
   C->set_root( transform(C->root())->as_Root() );
   assert( C->top(), "missing TOP node" );
   assert( C->root(), "missing root" );
+
+  // Eagerly remove castPP nodes here. CastPP nodes might not be
+  // removed in the subsequent IGVN phase if a node that changes
+  // in(1) of a castPP is processed prior to the castPP node.
+  for (uint i = 0; i < _worklist.size(); i++) {
+    Node* n = _worklist.at(i);
+
+    if (n->is_ConstraintCast()) {
+      Node* nn = n->Identity(this);
+      if (nn != n) {
+        replace_node(n, nn);
+        --i;
+      }
+    }
+  }
 }
 
 //------------------------------transform--------------------------------------
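The --i in the added loop compensates for the worklist compacting when a node is removed, so the same index has to be examined again on the next pass. A generic, self-contained analogue of that pattern (std::vector stands in for the worklist; the swap-with-last removal is an assumption of the sketch, not a claim about the HotSpot container):

#include <cstddef>
#include <utility>
#include <vector>

// Scan a compacting worklist: when an element is removed, the last
// element is swapped into the vacated slot, so the index is not
// advanced (the counterpart of "--i" cancelling the loop's "i++").
template <typename T, typename Pred>
void remove_matching(std::vector<T>& worklist, Pred should_remove) {
  std::size_t i = 0;
  while (i < worklist.size()) {
    if (should_remove(worklist[i])) {
      std::swap(worklist[i], worklist.back());
      worklist.pop_back();
      // stay at i: re-check the element just swapped in
    } else {
      i++;
    }
  }
}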
@@ -3877,6 +3877,7 @@ void TestMetaspaceAux_test();
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
 void TestNewSize_test();
+void TestOldSize_test();
 void TestKlass_test();
 void TestBitMap_test();
 #if INCLUDE_ALL_GCS
@@ -3903,6 +3904,7 @@ void execute_internal_vm_tests() {
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
     run_unit_test(TestNewSize_test());
+    run_unit_test(TestOldSize_test());
     run_unit_test(TestKlass_test());
     run_unit_test(TestBitMap_test());
 #if INCLUDE_VM_STRUCTS
@@ -438,6 +438,30 @@ WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobjec
   return (mh->queued_for_compilation() || nm != NULL);
 WB_END
 
+class VM_WhiteBoxOperation : public VM_Operation {
+ public:
+  VM_WhiteBoxOperation() { }
+  VMOp_Type type() const { return VMOp_WhiteBoxOperation; }
+  bool allow_nested_vm_operations() const { return true; }
+};
+
+class AlwaysFalseClosure : public BoolObjectClosure {
+ public:
+  bool do_object_b(oop p) { return false; }
+};
+
+static AlwaysFalseClosure always_false;
+
+class VM_WhiteBoxCleanMethodData : public VM_WhiteBoxOperation {
+ public:
+  VM_WhiteBoxCleanMethodData(MethodData* mdo) : _mdo(mdo) { }
+  void doit() {
+    _mdo->clean_method_data(&always_false);
+  }
+ private:
+  MethodData* _mdo;
+};
+
 WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   CHECK_JNI_EXCEPTION(env);
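The three classes added above follow HotSpot's usual safepoint-operation pattern: put the unsafe mutation in doit() and hand the operation to the VM thread, which runs it while Java threads are stopped; the next hunk wires exactly this up. A hypothetical in-tree usage sketch (HotSpot-internal code, not a standalone program; VM_WhiteBoxExample and clean_at_safepoint are invented names):

// Hypothetical follow-on usage of the pattern above.
class VM_WhiteBoxExample : public VM_WhiteBoxOperation {
 public:
  VM_WhiteBoxExample(MethodData* mdo) : _mdo(mdo) { }
  void doit() {
    // Safe to rewrite profiling data here: we are at a safepoint.
    _mdo->clean_method_data(&always_false);
  }
 private:
  MethodData* _mdo;
};

void clean_at_safepoint(MethodData* mdo) {
  VM_WhiteBoxExample op(mdo);
  VMThread::execute(&op);  // blocks until the VM thread has run doit()
}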
@@ -453,6 +477,8 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
     for (int i = 0; i < arg_count; i++) {
       mdo->set_arg_modified(i, 0);
     }
+    VM_WhiteBoxCleanMethodData op(mdo);
+    VMThread::execute(&op);
   }
 
   mh->clear_not_c1_compilable();
@@ -53,7 +53,8 @@ void AdvancedThresholdPolicy::initialize() {
   }
 
   set_c1_count(MAX2(count / 3, 1));
-  set_c2_count(MAX2(count - count / 3, 1));
+  set_c2_count(MAX2(count - c1_count(), 1));
+  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());
 
   // Some inlining tuning
 #ifdef X86
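The two formulas differ only when count / 3 clamps to 1, i.e. for count < 3: with count == 2 the old code budgeted three compiler threads (1 + 2) out of two. A small standalone check (std::max stands in for HotSpot's MAX2):

#include <algorithm>
#include <cstdio>

int main() {
  for (int count = 1; count <= 6; count++) {
    int c1     = std::max(count / 3, 1);          // set_c1_count(...)
    int c2_old = std::max(count - count / 3, 1);  // before the fix
    int c2_new = std::max(count - c1, 1);         // after the fix
    printf("count=%d  c1=%d  old total=%d  new total=%d\n",
           count, c1, c1 + c2_old, c1 + c2_new);
  }
  return 0;
}

For count == 2 this prints old total=3 versus new total=2; for count >= 3 the two formulas agree.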
@@ -307,6 +307,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
 #endif // PRODUCT
   { "UseVMInterruptibleIO",  JDK_Version::jdk(8), JDK_Version::jdk(9) },
+  { "UseBoundThreads",       JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "DefaultThreadPriority", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NoYieldsInMicrolock",   JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -2078,17 +2081,6 @@ bool Arguments::check_vm_args_consistency() {
   // Note: Needs platform-dependent factoring.
   bool status = true;
 
-  // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
-  // builds so the cost of stack banging can be measured.
-#if (defined(PRODUCT) && defined(SOLARIS))
-  if (!UseBoundThreads && !UseStackBanging) {
-    jio_fprintf(defaultStream::error_stream(),
-                "-UseStackBanging conflicts with -UseBoundThreads\n");
-
-    status = false;
-  }
-#endif
-
   if (TLABRefillWasteFraction == 0) {
     jio_fprintf(defaultStream::error_stream(),
                 "TLABRefillWasteFraction should be a denominator, "
@@ -2410,6 +2402,10 @@ bool Arguments::check_vm_args_consistency() {
   const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : 1;
   status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");
 
+  if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
+    warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
+  }
+
   return status;
 }
 
@@ -182,6 +182,7 @@ void NonTieredCompPolicy::initialize() {
     // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
     // May help big-app startup time.
     _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
+    FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
   } else {
     _compiler_count = CICompilerCount;
   }
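The heuristic sizes the compiler-thread pool logarithmically in the processor count, matching the in-code comment for the 8-way case. A standalone sketch of the arithmetic (log2_floor approximates HotSpot's log2_intptr):

#include <algorithm>
#include <cstdio>
#include <initializer_list>

static int log2_floor(int x) {  // floor(log2(x)), x >= 1
  int r = 0;
  while (x >>= 1) r++;
  return r;
}

int main() {
  for (int ncpus : {1, 2, 4, 8, 16, 32, 64}) {
    int compiler_count = std::max(log2_floor(ncpus) - 1, 1);
    printf("%2d CPUs -> %d compiler thread(s)\n", ncpus, compiler_count);
  }
  return 0;
}

An 8-way machine yields max(3 - 1, 1) = 2 threads, as the comment says.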
@@ -929,6 +929,10 @@ void os::print_cpu_info(outputStream* st) {
 }
 
 void os::print_date_and_time(outputStream *st) {
+  const int secs_per_day  = 86400;
+  const int secs_per_hour = 3600;
+  const int secs_per_min  = 60;
+
   time_t tloc;
   (void)time(&tloc);
   st->print("time: %s", ctime(&tloc)); // ctime adds newline.
@@ -937,7 +941,17 @@ void os::print_date_and_time(outputStream *st) {
   // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
   // Linux. Must be a bug in glibc ? Workaround is to round "t" to int
   // before printf. We lost some precision, but who cares?
-  st->print_cr("elapsed time: %d seconds", (int)t);
+  int eltime = (int)t;  // elapsed time in seconds
+
+  // print elapsed time in a human-readable format:
+  int eldays = eltime / secs_per_day;
+  int day_secs = eldays * secs_per_day;
+  int elhours = (eltime - day_secs) / secs_per_hour;
+  int hour_secs = elhours * secs_per_hour;
+  int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
+  int minute_secs = elmins * secs_per_min;
+  int elsecs = (eltime - day_secs - hour_secs - minute_secs);
+  st->print_cr("elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs);
 }
 
 // moved from debug.cpp (used to be find()) but still called from there
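The added code is plain integer arithmetic; an equivalent standalone version using the modulo operator in place of the running subtractions (the sample value is arbitrary):

#include <cstdio>

int main() {
  const int secs_per_day  = 86400;
  const int secs_per_hour = 3600;
  const int secs_per_min  = 60;

  int eltime  = 93784;  // sample elapsed time: 1d 2h 3m 4s
  int eldays  = eltime / secs_per_day;
  int elhours = (eltime % secs_per_day) / secs_per_hour;
  int elmins  = (eltime % secs_per_hour) / secs_per_min;
  int elsecs  = eltime % secs_per_min;
  printf("elapsed time: %d seconds (%dd %dh %dm %ds)\n",
         eltime, eldays, elhours, elmins, elsecs);
  return 0;
}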
@@ -450,8 +450,8 @@ class os: AllStatic {
     // yield that can be used in lieu of blocking.
   } ;
   static YieldResult NakedYield () ;
-  static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
-  static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
+  static void yield_all(); // Yields to all other threads including lower priority
+                           //   (for the default scheduling policy)
   static OSReturn set_priority(Thread* thread, ThreadPriority priority);
   static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
 
Some files were not shown because too many files have changed in this diff.