Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-22 20:14:43 +02:00
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv.
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
This commit is contained in:
parent 680ecf1611
commit 4a831d45f0
273 changed files with 6585 additions and 2993 deletions
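The core trick behind this change: with heap objects aligned to at least 8 bytes, a 64-bit oop can be stored as a 32-bit offset from a fixed heap base, shifted right by the alignment shift; 2^32 such 8-byte granules give the 32 GB ceiling in the title. Below is a minimal C++ sketch of that encode/decode arithmetic. It only models what the new MacroAssembler::encode_heap_oop / decode_heap_oop SPARC routines in this patch compute; heap_base and the 3-bit shift are illustrative assumptions here, not HotSpot declarations.

#include <cstdint>

// Sketch only: compressed oop = (oop - heap_base) >> shift, and 0 stays 0.
static const unsigned LogMinObjAlignmentInBytes = 3;  // assumed 8-byte object alignment
static uintptr_t heap_base = 0;                       // stands in for the G6_heapbase register

inline uint32_t encode_heap_oop(uintptr_t oop) {
  if (oop == 0) return 0;                             // nulls encode to 0
  return (uint32_t)((oop - heap_base) >> LogMinObjAlignmentInBytes);
}

inline uintptr_t decode_heap_oop(uint32_t narrow_oop) {
  if (narrow_oop == 0) return 0;                      // 0 decodes back to null
  return ((uintptr_t)narrow_oop << LogMinObjAlignmentInBytes) + heap_base;
}
// 4G distinct narrow values * 8-byte granules = 32 GB of addressable heap.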

@@ -1779,7 +1779,7 @@ void MacroAssembler::verify_oop_subroutine() {
// Check the klassOop of this object for being in the right area of memory.
// Cannot do the load in the delay above slot in case O0 is null
ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
load_klass(O0_obj, O0_obj);
// assert((klass & klass_mask) == klass_bits);
if( Universe::verify_klass_mask() != Universe::verify_oop_mask() )
set(Universe::verify_klass_mask(), O2_mask);

@@ -1788,8 +1788,9 @@ void MacroAssembler::verify_oop_subroutine() {
and3(O0_obj, O2_mask, O4_temp);
cmp(O4_temp, O3_bits);
brx(notEqual, false, pn, fail);
delayed()->nop();
// Check the klass's klass
delayed()->ld_ptr(Address(O0_obj, 0, oopDesc::klass_offset_in_bytes()), O0_obj);
load_klass(O0_obj, O0_obj);
and3(O0_obj, O2_mask, O4_temp);
cmp(O4_temp, O3_bits);
brx(notEqual, false, pn, fail);

@@ -2588,8 +2589,9 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
cmp(temp_reg, markOopDesc::biased_lock_pattern);
brx(Assembler::notEqual, false, Assembler::pn, cas_label);
delayed()->nop();

delayed()->ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);

@@ -2668,7 +2670,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,

@@ -2700,7 +2702,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
ld_ptr(Address(obj_reg, 0, oopDesc::klass_offset_in_bytes()), temp_reg);
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

@@ -3406,7 +3408,7 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
// set klass to intArrayKlass
set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
ld_ptr(t2, 0, t2);
st_ptr(t2, top, oopDesc::klass_offset_in_bytes());
store_klass(t2, top);
sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);

@@ -3534,3 +3536,139 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
st(G0, Rtsp, Rscratch);
}
}

void MacroAssembler::load_klass(Register s, Register d) {
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
// if this changes, change that.
if (UseCompressedOops) {
lduw(s, oopDesc::klass_offset_in_bytes(), d);
decode_heap_oop_not_null(d);
} else {
ld_ptr(s, oopDesc::klass_offset_in_bytes(), d);
}
}

// ??? figure out src vs. dst!
void MacroAssembler::store_klass(Register d, Register s1) {
if (UseCompressedOops) {
assert(s1 != d, "not enough registers");
encode_heap_oop_not_null(d);
// Zero out entire klass field first.
st_ptr(G0, s1, oopDesc::klass_offset_in_bytes());
st(d, s1, oopDesc::klass_offset_in_bytes());
} else {
st_ptr(d, s1, oopDesc::klass_offset_in_bytes());
}
}

void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
if (UseCompressedOops) {
lduw(s, d, offset);
decode_heap_oop(d);
} else {
ld_ptr(s, d, offset);
}
}

void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
if (UseCompressedOops) {
lduw(s1, s2, d);
decode_heap_oop(d, d);
} else {
ld_ptr(s1, s2, d);
}
}

void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
if (UseCompressedOops) {
lduw(s1, simm13a, d);
decode_heap_oop(d, d);
} else {
ld_ptr(s1, simm13a, d);
}
}

void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
if (UseCompressedOops) {
assert(s1 != d && s2 != d, "not enough registers");
encode_heap_oop(d);
st(d, s1, s2);
} else {
st_ptr(d, s1, s2);
}
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
if (UseCompressedOops) {
assert(s1 != d, "not enough registers");
encode_heap_oop(d);
st(d, s1, simm13a);
} else {
st_ptr(d, s1, simm13a);
}
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
if (UseCompressedOops) {
assert(a.base() != d, "not enough registers");
encode_heap_oop(d);
st(d, a, offset);
} else {
st_ptr(d, a, offset);
}
}

void MacroAssembler::encode_heap_oop(Register src, Register dst) {
assert (UseCompressedOops, "must be compressed");
Label done;
if (src == dst) {
// optimize for frequent case src == dst
bpr(rc_nz, true, Assembler::pt, src, done);
delayed() -> sub(src, G6_heapbase, dst); // annuled if not taken
bind(done);
srlx(src, LogMinObjAlignmentInBytes, dst);
} else {
bpr(rc_z, false, Assembler::pn, src, done);
delayed() -> mov(G0, dst);
// could be moved before branch, and annulate delay,
// but may add some unneeded work decoding null
sub(src, G6_heapbase, dst);
srlx(dst, LogMinObjAlignmentInBytes, dst);
bind(done);
}
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
assert (UseCompressedOops, "must be compressed");
sub(r, G6_heapbase, r);
srlx(r, LogMinObjAlignmentInBytes, r);
}

// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
assert (UseCompressedOops, "must be compressed");
Label done;
sllx(src, LogMinObjAlignmentInBytes, dst);
bpr(rc_nz, true, Assembler::pt, dst, done);
delayed() -> add(dst, G6_heapbase, dst); // annuled if not taken
bind(done);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
// pd_code_size_limit.
assert (UseCompressedOops, "must be compressed");
sllx(r, LogMinObjAlignmentInBytes, r);
add(r, G6_heapbase, r);
}

void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
// call indirectly to solve generation ordering problem
Address base(G6_heapbase, (address)Universe::heap_base_addr());
load_ptr_contents(base, G6_heapbase);
}
}
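Several later hunks (MachCallDynamicJavaNode::ret_addr_offset, the vtable-stub size limit) budget for the fact that load_klass is no longer a single ld_ptr: with compressed oops it is an lduw followed by the sllx/add of decode_heap_oop_not_null. A hedged C++ sketch of that bookkeeping, assuming the usual 4 bytes per SPARC instruction:

// Sketch of the instruction-size accounting this patch threads through the code generators.
const int BytesPerInstWord = 4;               // one SPARC instruction

int klass_load_size(bool use_compressed_oops) {
  // lduw + sllx + add  versus  a single ld_ptr
  return use_compressed_oops ? 3 * BytesPerInstWord : 1 * BytesPerInstWord;
}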

@@ -59,6 +59,7 @@ class BiasedLockingCounters;
// This global always holds the current JavaThread pointer:

REGISTER_DECLARATION(Register, G2_thread , G2);
REGISTER_DECLARATION(Register, G6_heapbase , G6);

// The following globals are part of the Java calling convention:

@@ -1975,6 +1976,29 @@ class MacroAssembler: public Assembler {
inline void tstbool( Register s ) { tst(s); }
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }

// klass oop manipulations if compressed
void load_klass(Register src_oop, Register dst);
void store_klass(Register dst_oop, Register s1);

// oop manipulations
void load_heap_oop(const Address& s, Register d, int offset = 0);
void load_heap_oop(Register s1, Register s2, Register d);
void load_heap_oop(Register s1, int simm13a, Register d);
void store_heap_oop(Register d, Register s1, Register s2);
void store_heap_oop(Register d, Register s1, int simm13a);
void store_heap_oop(Register d, const Address& a, int offset = 0);

void encode_heap_oop(Register src, Register dst);
void encode_heap_oop(Register r) {
encode_heap_oop(r, r);
}
void decode_heap_oop(Register src, Register dst);
void decode_heap_oop(Register r) {
decode_heap_oop(r, r);
}
void encode_heap_oop_not_null(Register r);
void decode_heap_oop_not_null(Register r);

// Support for managing the JavaThread pointer (i.e.; the reference to
// thread-local information).
void get_thread(); // load G2_thread

@@ -2050,6 +2074,9 @@ class MacroAssembler: public Assembler {
void push_CPU_state();
void pop_CPU_state();

// if heap base register is used - reinit it with the correct value
void reinit_heapbase();

// Debugging
void _verify_oop(Register reg, const char * msg, const char * file, int line);
void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);

@@ -236,7 +236,7 @@ void C1_MacroAssembler::initialize_object(
Register t1, // temp register
Register t2 // temp register
) {
const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();

initialize_header(obj, klass, noreg, t1, t2);

@@ -137,24 +137,20 @@ static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count)
}

static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#if 0
if (HeapWordsPerLong == 1 ||
(HeapWordsPerLong == 2 &&
mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0 &&
((count & 1) ? false : count >>= 1))) {
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
while (count-- > 0) {
*to++ = v;
}
} else {
#endif
juint* to = (juint*)tohw;
count *= HeapWordSize / BytesPerInt;
while (count-- > 0) {
*to++ = value;
}
// }
#ifdef _LP64
guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
"unaligned fill words");
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
while (count-- > 0) {
*to++ = v;
}
#else // _LP64
juint* to = (juint*)tohw;
while (count-- > 0) {
*to++ = value;
}
#endif // _LP64
}

static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {

@@ -859,7 +859,7 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Register Rsuper_klass,
Register Rtmp1,

@@ -891,6 +891,9 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// Now do a linear scan of the secondary super-klass chain.
delayed()->ld_ptr( Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), Rtmp2 );

// compress superclass
if (UseCompressedOops) encode_heap_oop(Rsuper_klass);

// Rtmp2 holds the objArrayOop of secondary supers.
ld( Rtmp2, arrayOopDesc::length_offset_in_bytes(), Rtmp1 );// Load the array length
// Check for empty secondary super list

@@ -900,20 +903,28 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
bind( loop );
br( Assembler::equal, false, Assembler::pn, not_subtype );
delayed()->nop();
// load next super to check
ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3 );

// Bump array pointer forward one oop
add( Rtmp2, wordSize, Rtmp2 );
// load next super to check
if (UseCompressedOops) {
ld( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
// Bump array pointer forward one oop
add( Rtmp2, 4, Rtmp2 );
} else {
ld_ptr( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
// Bump array pointer forward one oop
add( Rtmp2, wordSize, Rtmp2);
}
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
cmp( Rtmp3, Rsuper_klass );
// A miss means we are NOT a subtype and need to keep looping
brx( Assembler::notEqual, false, Assembler::pt, loop );
delayed()->deccc( Rtmp1 ); // dec trip counter in delay slot
// Falling out the bottom means we found a hit; we ARE a subtype
if (UseCompressedOops) decode_heap_oop(Rsuper_klass);
br( Assembler::always, false, Assembler::pt, ok_is_subtype );
// Update the cache
delayed()->st_ptr( Rsuper_klass, Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
delayed()->st_ptr( Rsuper_klass, Rsub_klass,
sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );

bind(not_subtype);
profile_typecheck_failed(Rtmp1);

@@ -131,6 +131,7 @@ REGISTER_DEFINITION(FloatRegister, Ftos_d2);

REGISTER_DEFINITION(Register, G2_thread);
REGISTER_DEFINITION(Register, G6_heapbase);
REGISTER_DEFINITION(Register, G5_method);
REGISTER_DEFINITION(Register, G5_megamorphic_method);
REGISTER_DEFINITION(Register, G5_inline_cache_reg);

@@ -160,18 +160,24 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */

#ifdef _LP64
int debug_offset = 0;
#else
int debug_offset = 4;
#endif
// Save the G's
__ stx(G1, SP, g1_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + 4)>>2), G1->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

__ stx(G3, SP, g3_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + 4)>>2), G3->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

__ stx(G4, SP, g4_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + 4)>>2), G4->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

__ stx(G5, SP, g5_offset+STACK_BIAS);
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + 4)>>2), G5->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

// This is really a waste but we'll keep things as they were for now
if (true) {

@@ -182,11 +188,11 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
#endif /* _LP64 */
map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
}

@@ -1217,7 +1223,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ verify_oop(O0);
__ verify_oop(G5_method);
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
__ load_klass(O0, G3_scratch);
__ verify_oop(G3_scratch);

#if !defined(_LP64) && defined(COMPILER2)

@@ -1820,7 +1826,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register temp_reg = G3_scratch;
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
__ verify_oop(O0);
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
__ load_klass(O0, temp_reg);
__ cmp(temp_reg, G5_inline_cache_reg);
__ brx(Assembler::equal, true, Assembler::pt, L);
__ delayed()->nop();

@@ -544,11 +544,19 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
int klass_load_size;
if (UseCompressedOops) {
klass_load_size = 3*BytesPerInstWord; // see MacroAssembler::load_klass()
} else {
klass_load_size = 1*BytesPerInstWord;
}
if( Assembler::is_simm13(v_off) ) {
return (3*BytesPerInstWord + // ld_ptr, ld_ptr, ld_ptr
return klass_load_size +
(2*BytesPerInstWord + // ld_ptr, ld_ptr
NativeCall::instruction_size); // call; delay slot
} else {
return (5*BytesPerInstWord + // ld_ptr, set_hi, set, ld_ptr, ld_ptr
return klass_load_size +
(4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr
NativeCall::instruction_size); // call; delay slot
}
}

@@ -1591,7 +1599,13 @@ uint reloc_java_to_interp() {
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
#ifdef _LP64
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
if (UseCompressedOops) {
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
st->print_cr("\tSLL R_G5,3,R_G5");
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
} else {
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
}
st->print_cr("\tCMP R_G5,R_G3" );
st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64

@@ -1610,7 +1624,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
assert( G5_ic_reg != temp_reg, "conflicting registers" );

// Load klass from reciever
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
__ load_klass(O0, temp_reg);
// Compare against expected klass
__ cmp(temp_reg, G5_ic_reg);
// Branch to miss code, checks xcc or icc depending

@@ -1811,6 +1825,11 @@ bool Matcher::can_be_java_arg( int reg ) {
reg == R_I3H_num ||
reg == R_I4H_num ||
reg == R_I5H_num ) return true;

if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
return true;
}

#else
// 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
// Longs cannot be passed in O regs, because O regs become I regs

@@ -2474,7 +2493,13 @@ encode %{
// get receiver klass (receiver already checked for non-null)
// If we end up going thru a c2i adapter interpreter expects method in G5
int off = __ offset();
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
__ load_klass(O0, G3_scratch);
int klass_load_size;
if (UseCompressedOops) {
klass_load_size = 3*BytesPerInstWord;
} else {
klass_load_size = 1*BytesPerInstWord;
}
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
if( __ is_simm13(v_off) ) {

@@ -2484,7 +2509,8 @@ encode %{
__ Assembler::sethi(v_off & ~0x3ff, G5_method);
__ or3(G5_method, v_off & 0x3ff, G5_method);
// ld_ptr, set_hi, set
assert(__ offset() - off == 3*BytesPerInstWord, "Unexpected instruction size(s)");
assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
"Unexpected instruction size(s)");
__ ld_ptr(G3, G5_method, G5_method);
}
// NOTE: for vtable dispatches, the vtable entry will never be null.

@@ -2860,12 +2886,12 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
int count_offset = java_lang_String:: count_offset_in_bytes();

// load str1 (jchar*) base address into tmp1_reg
__ ld_ptr(Address(str1_reg, 0, value_offset), tmp1_reg);
__ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
__ ld(Address(str1_reg, 0, offset_offset), result_reg);
__ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
__ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
__ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
__ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
__ add(result_reg, tmp1_reg, tmp1_reg);

// load str2 (jchar*) base address into tmp2_reg

@@ -3016,6 +3042,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
MacroAssembler _masm(&cbuf);
__ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
%}

enc_class enc_repl8b( iRegI src, iRegL dst ) %{
MacroAssembler _masm(&cbuf);
Register src_reg = reg_to_register_object($src$$reg);

@@ -3189,15 +3216,15 @@ frame %{
c_return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );

@@ -3207,15 +3234,15 @@ frame %{
return_value %{
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
(is_outgoing?lo_out:lo_in)[ideal_reg] );

@@ -3408,6 +3435,27 @@ operand immP_poll() %{
interface(CONST_INTER);
%}

// Pointer Immediate
operand immN()
%{
match(ConN);

op_cost(10);
format %{ %}
interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0()
%{
predicate(n->get_narrowcon() == 0);
match(ConN);

op_cost(0);
format %{ %}
interface(CONST_INTER);
%}

operand immL() %{
match(ConL);
op_cost(40);

@@ -3672,6 +3720,14 @@ operand o7RegI() %{
interface(REG_INTER);
%}

operand iRegN() %{
constraint(ALLOC_IN_RC(int_reg));
match(RegN);

format %{ %}
interface(REG_INTER);
%}

// Long Register
operand iRegL() %{
constraint(ALLOC_IN_RC(long_reg));

@@ -5392,9 +5448,30 @@ instruct loadP(iRegP dst, memory mem) %{
ins_pipe(iload_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegN dst, memory mem) %{
match(Set dst (LoadN mem));
ins_cost(MEMORY_REF_COST);
size(4);

format %{ "LDUW $mem,$dst\t! compressed ptr" %}
ins_encode %{
Register base = as_Register($mem$$base);
Register index = as_Register($mem$$index);
Register dst = $dst$$Register;
if (index != G0) {
__ lduw(base, index, dst);
} else {
__ lduw(base, $mem$$disp, dst);
}
%}
ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
match(Set dst (LoadKlass mem));
predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
ins_cost(MEMORY_REF_COST);
size(4);

@@ -5409,6 +5486,30 @@ instruct loadKlass(iRegP dst, memory mem) %{
ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlassComp(iRegP dst, memory mem) %{
match(Set dst (LoadKlass mem));
predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
ins_cost(MEMORY_REF_COST);

format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}

ins_encode %{
Register base = as_Register($mem$$base);
Register index = as_Register($mem$$index);
Register dst = $dst$$Register;
if (index != G0) {
__ lduw(base, index, dst);
} else {
__ lduw(base, $mem$$disp, dst);
}
// klass oop never null but this is generated for nonheader klass loads
// too which can be null.
__ decode_heap_oop(dst);
%}
ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
match(Set dst (LoadS mem));

@@ -5508,6 +5609,24 @@ instruct loadConP_poll(iRegP dst, immP_poll src) %{
ins_pipe(loadConP_poll);
%}

instruct loadConN(iRegN dst, immN src) %{
match(Set dst src);
ins_cost(DEFAULT_COST * 2);
format %{ "SET $src,$dst\t!ptr" %}
ins_encode %{
address con = (address)$src$$constant;
Register dst = $dst$$Register;
if (con == NULL) {
__ mov(G0, dst);
} else {
__ set_oop((jobject)$src$$constant, dst);
__ encode_heap_oop(dst);
}
%}
ins_pipe(loadConP);

%}

instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{
// %%% maybe this should work like loadConD
match(Set dst src);

@@ -5741,6 +5860,44 @@ instruct storeP0(memory dst, immP0 src) %{
ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer
instruct storeN(memory dst, iRegN src) %{
match(Set dst (StoreN dst src));
ins_cost(MEMORY_REF_COST);
size(4);

format %{ "STW $src,$dst\t! compressed ptr" %}
ins_encode %{
Register base = as_Register($dst$$base);
Register index = as_Register($dst$$index);
Register src = $src$$Register;
if (index != G0) {
__ stw(src, base, index);
} else {
__ stw(src, base, $dst$$disp);
}
%}
ins_pipe(istore_mem_spORreg);
%}

instruct storeN0(memory dst, immN0 src) %{
match(Set dst (StoreN dst src));
ins_cost(MEMORY_REF_COST);
size(4);

format %{ "STW $src,$dst\t! compressed ptr" %}
ins_encode %{
Register base = as_Register($dst$$base);
Register index = as_Register($dst$$index);
if (index != G0) {
__ stw(0, base, index);
} else {
__ stw(0, base, $dst$$disp);
}
%}
ins_pipe(istore_mem_zero);
%}

// Store Double
instruct storeD( memory mem, regD src) %{
match(Set mem (StoreD mem src));

@@ -5798,6 +5955,26 @@ instruct storeA8B(memory mem, regD src) %{
ins_pipe(fstoreD_mem_reg);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
match(Set dst (EncodeP src));
format %{ "SRL $src,3,$dst\t encodeHeapOop" %}
ins_encode %{
__ encode_heap_oop($src$$Register, $dst$$Register);
%}
ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
match(Set dst (DecodeN src));
format %{ "decode_heap_oop $src, $dst" %}
ins_encode %{
__ decode_heap_oop($src$$Register, $dst$$Register);
%}
ins_pipe(ialu_reg);
%}

// Store Zero into Aligned Packed Bytes
instruct storeA8B0(memory mem, immI0 zero) %{
match(Set mem (Store8B mem zero));

@@ -6434,17 +6611,27 @@ instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI r
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
#ifdef _LP64
format %{
"MOV $newval,O7\n\t"
"CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
"CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
"CMP $oldval,O7\t\t! See if we made progress\n\t"
"MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res"
%}
#ifdef _LP64
ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) );
#else
ins_encode( enc_casi(mem_ptr, oldval, newval),
enc_iflags_ne_to_boolean(res) );
#endif
ins_pipe( long_memory_op );
%}

instruct compareAndSwapN_bool_comp(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp, flagsReg ccr ) %{
match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp);

format %{
"MOV $newval,O7\n\t"
"CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"

@@ -6452,9 +6639,18 @@ instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI r
"MOV 1,$res\n\t"
"MOVne icc,R_G0,$res"
%}
ins_encode( enc_casi(mem_ptr, oldval, newval),
enc_iflags_ne_to_boolean(res) );
#endif
ins_encode %{
Register Rmem = reg_to_register_object($mem_ptr$$reg);
Register Rold = reg_to_register_object($oldval$$reg);
Register Rnew = reg_to_register_object($newval$$reg);
Register Rres = reg_to_register_object($res$$reg);

__ cas(Rmem, Rold, Rnew);
__ cmp( Rold, Rnew );
__ mov(1, Rres);
__ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
%}

ins_pipe( long_memory_op );
%}

@@ -8607,6 +8803,17 @@ instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, i
ins_pipe(partial_subtype_check_pipe);
%}

instruct compP_iRegN_immN0(flagsRegP pcc, iRegN op1, immN0 op2 ) %{
match(Set pcc (CmpN op1 op2));

size(4);
format %{ "CMP $op1,$op2\t! ptr" %}
opcode(Assembler::subcc_op3, Assembler::arith_op);
ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
ins_pipe(ialu_cconly_reg_imm);
%}

// ============================================================================
// inlined locking and unlocking

@@ -8648,9 +8855,10 @@ instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg
ins_pipe(long_memory_op);
%}

instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result, flagsReg ccr) %{
instruct string_compare(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2, notemp_iRegI result,
o7RegI tmp3, flagsReg ccr) %{
match(Set result (StrComp str1 str2));
effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr);
effect(USE_KILL str1, USE_KILL str2, KILL tmp1, KILL tmp2, KILL ccr, KILL tmp3);
ins_cost(300);
format %{ "String Compare $str1,$str2 -> $result" %}
ins_encode( enc_String_Compare(str1, str2, tmp1, tmp2, result) );

@@ -127,6 +127,7 @@ class StubGenerator: public StubCodeGenerator {
// setup thread register
__ ld_ptr(thread.as_address(), G2_thread);
__ reinit_heapbase();

#ifdef ASSERT
// make sure we have no pending exceptions

@@ -896,6 +897,7 @@ class StubGenerator: public StubCodeGenerator {
// super: O2, argument, not changed
// raddr: O7, blown by call
address generate_partial_subtype_check() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
address start = __ pc();
Label loop, miss;

@@ -914,7 +916,7 @@ class StubGenerator: public StubCodeGenerator {
#if defined(COMPILER2) && !defined(_LP64)
// Do not use a 'save' because it blows the 64-bit O registers.
__ add(SP,-4*wordSize,SP); // Make space for 4 temps
__ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
__ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
__ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
__ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);

@@ -934,6 +936,17 @@ class StubGenerator: public StubCodeGenerator {
Register L2_super = L2;
Register L3_index = L3;

#ifdef _LP64
Register L4_ooptmp = L4;

if (UseCompressedOops) {
// this must be under UseCompressedOops check, as we rely upon fact
// that L4 not clobbered in C2 on 32-bit platforms, where we do explicit save
// on stack, see several lines above
__ encode_heap_oop(Rsuper, L4_ooptmp);
}
#endif

inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1);

__ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );

@@ -942,18 +955,33 @@ class StubGenerator: public StubCodeGenerator {
__ clr(L3_index); // zero index
// Load a little early; will load 1 off the end of the array.
// Ok for now; revisit if we have other uses of this routine.
__ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
__ align(CodeEntryAlignment);
if (UseCompressedOops) {
__ ld(L1_ary_ptr,0,L2_super);// Will load a little early
} else {
__ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
}

assert(heapOopSize != 0, "heapOopSize should be initialized");
// The scan loop
__ BIND(loop);
__ add(L1_ary_ptr,wordSize,L1_ary_ptr); // Bump by OOP size
__ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size
__ cmp(L3_index,L0_ary_len);
__ br(Assembler::equal,false,Assembler::pn,miss);
__ delayed()->inc(L3_index); // Bump index
__ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
__ brx( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld_ptr(L1_ary_ptr,0,L2_super); // Will load a little early

if (UseCompressedOops) {
#ifdef _LP64
__ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit
__ br( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld(L1_ary_ptr,0,L2_super);// Will load a little early
#else
ShouldNotReachHere();
#endif
} else {
__ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
__ brx( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
}

// Got a hit; report success; set cache. Cache load doesn't
// happen here; for speed it is directly emitted by the compiler.

@@ -1107,7 +1135,6 @@ class StubGenerator: public StubCodeGenerator {
}
#endif // 0
}

//
// Generate post-write barrier for array.
//

@@ -1148,8 +1175,8 @@ class StubGenerator: public StubCodeGenerator {
Label L_loop;

__ sll_ptr(count, LogBytesPerOop, count);
__ sub(count, BytesPerOop, count);
__ sll_ptr(count, LogBytesPerHeapOop, count);
__ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
__ srl_ptr(addr, CardTableModRefBS::card_shift, addr);

@@ -1171,7 +1198,6 @@ class StubGenerator: public StubCodeGenerator {
ShouldNotReachHere();

}

}

@@ -2226,7 +2252,12 @@ class StubGenerator: public StubCodeGenerator {
__ mov(count, G5);
gen_write_ref_array_pre_barrier(G1, G5);
#ifdef _LP64
generate_disjoint_long_copy_core(aligned);
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
generate_disjoint_long_copy_core(aligned);
}
#else
generate_disjoint_int_copy_core(aligned);
#endif

@@ -2274,10 +2305,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::arrayof_oop_disjoint_arraycopy() :
disjoint_oop_copy_entry;

array_overlap_test(nooverlap_target, LogBytesPerWord);
array_overlap_test(nooverlap_target, LogBytesPerHeapOop);

#ifdef _LP64
generate_conjoint_long_copy_core(aligned);
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
} else {
generate_conjoint_long_copy_core(aligned);
}
#else
generate_conjoint_int_copy_core(aligned);
#endif

@@ -2377,8 +2412,6 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();

int klass_off = oopDesc::klass_offset_in_bytes();

gen_write_ref_array_pre_barrier(G1, G5);

@@ -2395,7 +2428,7 @@ class StubGenerator: public StubCodeGenerator {
{ Label L;
__ mov(O3, G1); // spill: overlap test smashes O3
__ mov(O4, G4); // spill: overlap test smashes O4
array_overlap_test(L, LogBytesPerWord);
array_overlap_test(L, LogBytesPerHeapOop);
__ stop("checkcast_copy within a single array");
__ bind(L);
__ mov(G1, O3);

@@ -2429,18 +2462,18 @@ class StubGenerator: public StubCodeGenerator {
__ bind(store_element);
// deccc(G1_remain); // decrement the count (hoisted)
__ st_ptr(G3_oop, O1_to, O5_offset); // store the oop
__ inc(O5_offset, wordSize); // step to next offset
__ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
__ inc(O5_offset, heapOopSize); // step to next offset
__ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
__ delayed()->set(0, O0); // return -1 on success

// ======== loop entry is here ========
__ bind(load_element);
__ ld_ptr(O0_from, O5_offset, G3_oop); // load the oop
__ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
__ br_null(G3_oop, true, Assembler::pt, store_element);
__ delayed()->deccc(G1_remain); // decrement the count

__ ld_ptr(G3_oop, klass_off, G4_klass); // query the object klass
__ load_klass(G3_oop, G4_klass); // query the object klass

generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
// branch to this on success:

@@ -2642,17 +2675,23 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("arraycopy argument klass checks");
// get src->klass()
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
if (UseCompressedOops) {
__ delayed()->nop(); // ??? not good
__ load_klass(src, G3_src_klass);
} else {
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
}

#ifdef ASSERT
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
{ Label L_a, L_b;
__ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
__ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
__ delayed()->nop();
__ bind(L_a);
__ stop("broken null klass");
__ bind(L_b);
__ load_klass(dst, G4_dst_klass);
__ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
__ delayed()->mov(G0, G4_dst_klass); // scribble the temp
BLOCK_COMMENT("assert done");

@@ -2673,12 +2712,19 @@ class StubGenerator: public StubCodeGenerator {
// Load 32-bits signed value. Use br() instruction with it to check icc.
__ lduw(G3_src_klass, lh_offset, G5_lh);

if (UseCompressedOops) {
__ load_klass(dst, G4_dst_klass);
}
// Handle objArrays completely differently...
juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ set(objArray_lh, O5_temp);
__ cmp(G5_lh, O5_temp);
__ br(Assembler::equal, false, Assembler::pt, L_objArray);
__ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
if (UseCompressedOops) {
__ delayed()->nop();
} else {
__ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
}

// if (src->klass() != dst->klass()) return -1;
__ cmp(G3_src_klass, G4_dst_klass);

@@ -2777,8 +2823,8 @@ class StubGenerator: public StubCodeGenerator {
__ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
__ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
__ sll_ptr(src_pos, LogBytesPerOop, src_pos);
__ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
__ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
__ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
__ add(src, src_pos, from); // src_addr
__ add(dst, dst_pos, to); // dst_addr
__ BIND(L_plain_copy);

@@ -2801,8 +2847,8 @@ class StubGenerator: public StubCodeGenerator {
// Marshal the base address arguments now, freeing registers.
__ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
__ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
__ sll_ptr(src_pos, LogBytesPerOop, src_pos);
__ sll_ptr(dst_pos, LogBytesPerOop, dst_pos);
__ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
__ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
__ add(src, src_pos, from); // src_addr
__ add(dst, dst_pos, to); // dst_addr
__ signx(length, count); // length (reloaded)

@@ -591,7 +591,10 @@ address InterpreterGenerator::generate_accessor_entry(void) {
address entry = __ pc();
Label slow_path;

if ( UseFastAccessorMethods) {

// XXX: for compressed oops pointer loading and decoding doesn't fit in
// delay slot and damages G1
if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());

@@ -953,6 +956,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

__ restore_thread(L7_thread_cache); // restore G2_thread
__ reinit_heapbase();

// must we block?

@@ -462,8 +462,8 @@ void TemplateTable::aaload() {
transition(itos, atos);
// Otos_i: index
// tos: array
__ index_check(O2, Otos_i, LogBytesPerWord, G3_scratch, O3);
__ ld_ptr(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
__ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
__ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
__ verify_oop(Otos_i);
}

@@ -736,15 +736,16 @@ void TemplateTable::aastore() {
// O2: index
// O3: array
__ verify_oop(Otos_i);
__ index_check_without_pop(O3, O2, LogBytesPerWord, G3_scratch, O1);
__ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

// do array store check - check for NULL value first
__ br_null( Otos_i, false, Assembler::pn, is_null );
__ delayed()->
ld_ptr(O3, oopDesc::klass_offset_in_bytes(), O4); // get array klass
__ delayed()->nop();

__ load_klass(O3, O4); // get array klass
__ load_klass(Otos_i, O5); // get value klass

// do fast instanceof cache test
__ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O5); // get value klass

__ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);

@@ -766,7 +767,7 @@ void TemplateTable::aastore() {
// Store is OK.
__ bind(store_ok);
__ st_ptr(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Quote from rememberedSet.hpp: For objArrays, the precise card
// corresponding to the pointer store is dirtied so we don't need to
// scavenge the entire array.

@@ -777,7 +778,7 @@ void TemplateTable::aastore() {
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)

__ bind(is_null);
__ st_ptr(Otos_i, element);
__ store_heap_oop(Otos_i, element);
__ profile_null_seen(G3_scratch);
__ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ bind(done);

@@ -1833,7 +1834,7 @@ void TemplateTable::_return(TosState state) {
assert(state == vtos, "only valid state");
__ mov(G0, G3_scratch);
__ access_local_ptr(G3_scratch, Otos_i);
__ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O2);
__ load_klass(Otos_i, O2);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
__ andcc(G3, O2, G0);

@@ -2078,7 +2079,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ delayed() ->cmp(Rflags, itos);

// atos
__ ld_ptr(Rclass, Roffset, Otos_i);
__ load_heap_oop(Rclass, Roffset, Otos_i);
__ verify_oop(Otos_i);
__ push(atos);
if (!is_static) {

@@ -2259,7 +2260,7 @@ void TemplateTable::fast_accessfield(TosState state) {
__ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
break;
case Bytecodes::_fast_agetfield:
__ ld_ptr(Otos_i, Roffset, Otos_i);
__ load_heap_oop(Otos_i, Roffset, Otos_i);
break;
default:
ShouldNotReachHere();

@@ -2448,7 +2449,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// atos
__ pop_ptr();
__ verify_oop(Otos_i);
__ st_ptr(Otos_i, Rclass, Roffset);
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
__ ba(false, checkVolatile);
__ delayed()->tst(Lscratch);

@@ -2490,7 +2491,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_ptr();
pop_and_check_object(Rclass);
__ verify_oop(Otos_i);
__ st_ptr(Otos_i, Rclass, Roffset);
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
__ ba(false, checkVolatile);

@@ -2645,7 +2646,7 @@ void TemplateTable::fast_storefield(TosState state) {
__ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
break;
case Bytecodes::_fast_aputfield:
__ st_ptr(Otos_i, Rclass, Roffset);
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
break;
default:

@@ -2688,7 +2689,7 @@ void TemplateTable::fast_xaccess(TosState state) {
__ verify_oop(Rreceiver);
__ null_check(Rreceiver);
if (state == atos) {
__ ld_ptr(Rreceiver, Roffset, Otos_i);
__ load_heap_oop(Rreceiver, Roffset, Otos_i);
} else if (state == itos) {
__ ld (Rreceiver, Roffset, Otos_i) ;
} else if (state == ftos) {

@@ -2790,7 +2791,7 @@ void TemplateTable::invokevirtual(int byte_no) {
// get receiver klass
__ null_check(O0, oopDesc::klass_offset_in_bytes());
__ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), Rrecv);
__ load_klass(O0, Rrecv);
__ verify_oop(Rrecv);

__ profile_virtual_call(Rrecv, O4);

@@ -2958,7 +2959,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// get receiver klass
__ null_check(O0, oopDesc::klass_offset_in_bytes());
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), RklassOop);
__ load_klass(O0, RklassOop);
__ verify_oop(RklassOop);

// Special case of invokeinterface called for virtual method of

@@ -3221,7 +3222,7 @@ void TemplateTable::_new() {
__ set((intptr_t)markOopDesc::prototype(), G4_scratch);
}
__ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
__ st_ptr(RinstanceKlass, RallocatedObject, oopDesc::klass_offset_in_bytes()); // klass
__ store_klass(RinstanceKlass, RallocatedObject); // klass

{
SkipIfEqual skip_if(

@@ -3277,7 +3278,7 @@ void TemplateTable::checkcast() {
__ delayed()->nop();

// Get value klass in RobjKlass
__ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
__ load_klass(Otos_i, RobjKlass); // get value klass

// Get constant pool tag
__ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

@@ -3295,13 +3296,14 @@ void TemplateTable::checkcast() {
__ pop_ptr(Otos_i, G3_scratch); // restore receiver

__ br(Assembler::always, false, Assembler::pt, resolved);
__ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
__ delayed()->nop();

// Extract target class from constant pool
__ bind(quicked);
__ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass

// Generate a fast subtype check. Branch to cast_ok if no
// failure. Throw exception if failure.

@@ -3334,7 +3336,7 @@ void TemplateTable::instanceof() {
__ delayed()->nop();

// Get value klass in RobjKlass
__ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
__ load_klass(Otos_i, RobjKlass); // get value klass

// Get constant pool tag
__ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);

@@ -3352,7 +3354,7 @@ void TemplateTable::instanceof() {
__ pop_ptr(Otos_i, G3_scratch); // restore receiver

__ br(Assembler::always, false, Assembler::pt, resolved);
__ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
__ delayed()->nop();

// Extract target class from constant pool

@@ -3361,6 +3363,7 @@ void TemplateTable::instanceof() {
__ get_constant_pool(Lscratch);
__ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
__ bind(resolved);
__ load_klass(Otos_i, RobjKlass); // get value klass

// Generate a fast subtype check. Branch to cast_ok if no
// failure. Return 0 if failure.

@ -64,6 +64,15 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseInlineCaches)) {
UseInlineCaches = false;
}
#ifdef _LP64
// Single issue niagara1 is slower for CompressedOops
// but niagaras after that it's fine.
if (!is_niagara1_plus()) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
FLAG_SET_ERGO(bool, UseCompressedOops, false);
}
}
#endif // _LP64
#ifdef COMPILER2
// Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseJumpTables)) {
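Note on the hunk above: it only turns UseCompressedOops off ergonomically on single-issue Niagara 1 parts, and only when the user has not set the flag on the command line; FLAG_SET_ERGO records the new value as an ergonomic default rather than a user override. A rough stand-alone sketch of that guard (not HotSpot's actual flag machinery; the flag struct and the CPU-probe boolean are invented for the example):

#include <iostream>

// Toy stand-in for a HotSpot flag: tracks whether the user set it.
struct BoolFlag {
  bool value;
  bool user_set;
  bool is_default() const { return !user_set; }
  void set_ergo(bool v) { value = v; }               // ergonomic override
  void set_from_user(bool v) { value = v; user_set = true; }
};

int main() {
  BoolFlag UseCompressedOops{true, false};           // built-in default: on
  bool is_niagara1_plus = false;                     // assumed CPU probe result
  // Mirror of the guard in VM_Version::initialize(): only downgrade the
  // default on Niagara 1, never a value the user asked for explicitly.
  if (!is_niagara1_plus && UseCompressedOops.is_default()) {
    UseCompressedOops.set_ergo(false);
  }
  std::cout << "UseCompressedOops = " << UseCompressedOops.value << "\n";
}
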
@ -60,7 +60,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {

// get receiver klass
address npe_addr = __ pc();
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
__ load_klass(O0, G3_scratch);

// set methodOop (in case of interpreted method), and destination address
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();

@ -131,7 +131,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {

// get receiver klass (also an implicit null-check)
address npe_addr = __ pc();
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_klassOop);
__ load_klass(O0, G3_klassOop);
__ verify_oop(G3_klassOop);

// Push a new window to get some temp registers. This chops the head of all

@ -237,11 +237,16 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
else {
const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
if (is_vtable_stub) {
const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop
// ld;ld;ld,jmp,nop
const int basic = 5*BytesPerInstWord +
// shift;add for load_klass
(UseCompressedOops ? 2*BytesPerInstWord : 0);
return basic + slop;
} else {
// save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore
const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord;
const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord +
// shift;add for load_klass
(UseCompressedOops ? 2*BytesPerInstWord : 0);
return (basic + slop);
}
}
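The size-limit hunk above budgets two extra SPARC instructions per klass load: with compressed oops, load_klass is a 32-bit load followed by a shift and an add to decode the narrow value. A back-of-the-envelope check of the vtable-stub branch, using the constants from the hunk and assuming BytesPerInstWord is 4 on SPARC - a sketch, not the HotSpot code itself:

#include <cassert>

int vtable_stub_limit(bool use_compressed_oops) {
  const int BytesPerInstWord = 4;                        // SPARC instruction size
  const int slop  = 2 * BytesPerInstWord;                // sethi;add for long offsets
  const int basic = 5 * BytesPerInstWord +               // ld;ld;ld,jmp,nop
      (use_compressed_oops ? 2 * BytesPerInstWord : 0);  // shift;add for load_klass
  return basic + slop;
}

int main() {
  assert(vtable_stub_limit(false) == 28);  // 5 insns + 2 slop words
  assert(vtable_stub_limit(true)  == 36);  // 7 insns + 2 slop words
}
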
@ -127,6 +127,7 @@ int AbstractAssembler::code_fill_byte() {

bool Assembler::reachable(AddressLiteral adr) {
int64_t disp;

// None will force a 64bit literal to the code stream. Likely a placeholder
// for something that will be patched later and we need to certain it will
// always be reachable.

@ -636,7 +637,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x8A: // movb r, a
case 0x8B: // movl r, a
case 0x8F: // popl a
debug_only(has_disp32 = true);
debug_only(has_disp32 = true;)
break;

case 0x68: // pushq #32

@ -2891,7 +2892,7 @@ void Assembler::rep_set() {
}

// scans rcx double words (m64) at [rdi] for occurance of rax
void Assembler::repne_scan() {
void Assembler::repne_scanq() {
// REPNE/REPNZ
emit_byte(0xF2);
// SCASQ

@ -2899,6 +2900,14 @@ void Assembler::repne_scan() {
emit_byte(0xAF);
}

void Assembler::repne_scanl() {
// REPNE/REPNZ
emit_byte(0xF2);
// SCASL
emit_byte(0xAF);
}

void Assembler::setb(Condition cc, Register dst) {
assert(0 <= cc && cc < 16, "illegal cc");
int encode = prefix_and_encode(dst->encoding(), true);
@ -4597,7 +4606,6 @@ void MacroAssembler::verify_oop(Register reg, const char* s) {
|
|||
|
||||
// pass args on stack, only touch rax
|
||||
pushq(reg);
|
||||
|
||||
// avoid using pushptr, as it modifies scratch registers
|
||||
// and our contract is not to modify anything
|
||||
ExternalAddress buffer((address)b);
|
||||
|
@ -4664,9 +4672,9 @@ void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) {
|
|||
JavaThread* thread = JavaThread::current();
|
||||
JavaThreadState saved_state = thread->thread_state();
|
||||
thread->set_thread_state(_thread_in_vm);
|
||||
ttyLocker ttyl;
|
||||
#ifndef PRODUCT
|
||||
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
|
||||
ttyLocker ttyl;
|
||||
BytecodeCounter::print();
|
||||
}
|
||||
#endif
|
||||
|
@ -4674,6 +4682,7 @@ void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) {
|
|||
// XXX correct this offset for amd64
|
||||
// This is the value of eip which points to where verify_oop will return.
|
||||
if (os::message_box(msg, "Execution stopped, print registers?")) {
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr("rip = 0x%016lx", pc);
|
||||
tty->print_cr("rax = 0x%016lx", regs[15]);
|
||||
tty->print_cr("rbx = 0x%016lx", regs[12]);
|
||||
|
@ -4695,6 +4704,7 @@ void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) {
|
|||
}
|
||||
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
|
||||
} else {
|
||||
ttyLocker ttyl;
|
||||
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
|
||||
msg);
|
||||
}
|
||||
|
@ -4891,7 +4901,7 @@ void MacroAssembler::tlab_refill(Label& retry,
|
|||
movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
|
||||
// set klass to intArrayKlass
|
||||
movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
|
||||
movq(Address(top, oopDesc::klass_offset_in_bytes()), t1);
|
||||
store_klass(top, t1);
|
||||
|
||||
// refill the tlab with an eden allocation
|
||||
bind(do_refill);
|
||||
|
@ -4938,7 +4948,6 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Re
|
|||
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
|
||||
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
|
||||
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
|
||||
Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
|
||||
Address saved_mark_addr(lock_reg, 0);
|
||||
|
||||
if (PrintBiasedLockingStatistics && counters == NULL)
|
||||
|
@ -4962,7 +4971,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Re
|
|||
jcc(Assembler::notEqual, cas_label);
|
||||
// The bias pattern is present in the object's header. Need to check
|
||||
// whether the bias owner and the epoch are both still current.
|
||||
movq(tmp_reg, klass_addr);
|
||||
load_klass(tmp_reg, obj_reg);
|
||||
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
|
||||
orq(tmp_reg, r15_thread);
|
||||
xorq(tmp_reg, swap_reg);
|
||||
|
@ -5037,7 +5046,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Re
|
|||
//
|
||||
// FIXME: due to a lack of registers we currently blow away the age
|
||||
// bits in this situation. Should attempt to preserve them.
|
||||
movq(tmp_reg, klass_addr);
|
||||
load_klass(tmp_reg, obj_reg);
|
||||
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
|
||||
orq(tmp_reg, r15_thread);
|
||||
if (os::is_MP()) {
|
||||
|
@ -5068,7 +5077,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Re
|
|||
//
|
||||
// FIXME: due to a lack of registers we currently blow away the age
|
||||
// bits in this situation. Should attempt to preserve them.
|
||||
movq(tmp_reg, klass_addr);
|
||||
load_klass(tmp_reg, obj_reg);
|
||||
movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
|
||||
if (os::is_MP()) {
|
||||
lock();
|
||||
|
@ -5104,6 +5113,113 @@ void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, La
|
|||
}

void MacroAssembler::load_klass(Register dst, Register src) {
if (UseCompressedOops) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_heap_oop_not_null(dst);
} else {
movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
}

void MacroAssembler::store_klass(Register dst, Register src) {
if (UseCompressedOops) {
encode_heap_oop_not_null(src);
// zero the entire klass field first as the gap needs to be zeroed too.
movptr(Address(dst, oopDesc::klass_offset_in_bytes()), NULL_WORD);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
} else {
movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
}

void MacroAssembler::load_heap_oop(Register dst, Address src) {
if (UseCompressedOops) {
movl(dst, src);
decode_heap_oop(dst);
} else {
movq(dst, src);
}
}

void MacroAssembler::store_heap_oop(Address dst, Register src) {
if (UseCompressedOops) {
assert(!dst.uses(src), "not enough registers");
encode_heap_oop(src);
movl(dst, src);
} else {
movq(dst, src);
}
}

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register r) {
assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
Label ok;
pushq(rscratch1); // cmpptr trashes rscratch1
cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
jcc(Assembler::equal, ok);
stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
bind(ok);
popq(rscratch1);
#endif
verify_oop(r);
testq(r, r);
cmovq(Assembler::equal, r, r12_heapbase);
subq(r, r12_heapbase);
shrq(r, LogMinObjAlignmentInBytes);
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
Label ok;
testq(r, r);
jcc(Assembler::notEqual, ok);
stop("null oop passed to encode_heap_oop_not_null");
bind(ok);
#endif
verify_oop(r);
subq(r, r12_heapbase);
shrq(r, LogMinObjAlignmentInBytes);
}

void MacroAssembler::decode_heap_oop(Register r) {
assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
Label ok;
pushq(rscratch1);
cmpptr(r12_heapbase,
ExternalAddress((address)Universe::heap_base_addr()));
jcc(Assembler::equal, ok);
stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
bind(ok);
popq(rscratch1);
#endif

Label done;
shlq(r, LogMinObjAlignmentInBytes);
jccb(Assembler::equal, done);
addq(r, r12_heapbase);
#if 0
// alternate decoding probably a wash.
testq(r, r);
jccb(Assembler::equal, done);
leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
bind(done);
verify_oop(r);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
assert (UseCompressedOops, "should only be used for compressed headers");
// Cannot assert, unverified entry point counts instructions (see .ad file)
// vtableStubs also counts instructions in pd_code_size_limit.
assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
}
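Taken together, the routines above compress a 64-bit oop into 32 bits by subtracting the heap base kept in r12 and shifting right by LogMinObjAlignmentInBytes (3 for 8-byte alignment), with NULL mapping to offset 0; decoding reverses this with a shift and an add (the leaq form folds both into one instruction). A minimal stand-alone sketch of the same arithmetic, with a heap base value invented purely for the example:

#include <cassert>
#include <cstdint>

const uint64_t heap_base = 0x0000000700000000ULL; // assumed base (r12_heapbase)
const unsigned log_min_obj_alignment = 3;         // 8-byte aligned objects

// encode_heap_oop: NULL -> 0, otherwise (oop - base) >> 3, which fits in
// 32 bits for heaps up to 32 GB (2^32 * 8 bytes).
uint32_t encode_heap_oop(uint64_t oop) {
  if (oop == 0) return 0;
  return (uint32_t)((oop - heap_base) >> log_min_obj_alignment);
}

// decode_heap_oop: 0 -> NULL, otherwise base + (narrow << 3).
uint64_t decode_heap_oop(uint32_t narrow) {
  if (narrow == 0) return 0;
  return heap_base + ((uint64_t)narrow << log_min_obj_alignment);
}

int main() {
  uint64_t oop = heap_base + 0x10;  // some object 16 bytes into the heap
  assert(decode_heap_oop(encode_heap_oop(oop)) == oop);
  assert(encode_heap_oop(0) == 0 && decode_heap_oop(0) == 0);
}
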

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
switch (cond) {
// Note some conditions are synonyms for others

@ -5173,3 +5289,9 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
movq(Address(tmp, (-i*os::vm_page_size())), size );
}
}

void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
}
}
@ -37,7 +37,7 @@ class Argument VALUE_OBJ_CLASS_SPEC {
|
|||
#else
|
||||
n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
|
||||
n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
|
||||
#endif
|
||||
#endif // _WIN64
|
||||
n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
|
||||
n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
|
||||
};
|
||||
|
@ -77,7 +77,7 @@ REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
|
|||
REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
|
||||
REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
|
||||
|
||||
#endif
|
||||
#endif // _WIN64
|
||||
|
||||
// Symbolically name the register arguments used by the Java calling convention.
|
||||
// We have control over the convention for java so we can do what we please.
|
||||
|
@ -105,7 +105,7 @@ REGISTER_DECLARATION(Register, j_rarg4, rsi);
|
|||
#else
|
||||
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
|
||||
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
|
||||
#endif /* _WIN64 */
|
||||
#endif // _WIN64
|
||||
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
|
||||
|
||||
REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
|
||||
|
@ -120,7 +120,8 @@ REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
|
|||
REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
|
||||
REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
|
||||
|
||||
REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
|
||||
REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
|
||||
REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
|
||||
|
||||
#endif // _LP64
|
||||
|
||||
|
@ -785,7 +786,8 @@ class Assembler : public AbstractAssembler {
void rep_movl();
void rep_movq();
void rep_set();
void repne_scan();
void repne_scanl();
void repne_scanq();
void setb(Condition cc, Register dst);

void clflush(Address adr);

@ -1099,6 +1101,17 @@ class MacroAssembler : public Assembler {
void movbool(Address dst, Register src);
void testbool(Register dst);

// oop manipulations
void load_klass(Register dst, Register src);
void store_klass(Register dst, Register src);

void load_heap_oop(Register dst, Address src);
void store_heap_oop(Address dst, Register src);
void encode_heap_oop(Register r);
void decode_heap_oop(Register r);
void encode_heap_oop_not_null(Register r);
void decode_heap_oop_not_null(Register r);

// Stack frame creation/removal
void enter();
void leave();

@ -1250,6 +1263,9 @@ class MacroAssembler : public Assembler {
void verify_oop(Register reg, const char* s = "broken oop");
void verify_oop_addr(Address addr, const char * s = "broken oop addr");

// if heap base register is used - reinit it with the correct value
void reinit_heapbase();

// only if +VerifyFPU
void verify_FPU(int stack_depth, const char* s = "illegal FPU state") {}
@ -218,7 +218,7 @@ void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2,
|
|||
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
|
||||
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
|
||||
"con_size_in_bytes is not multiple of alignment");
|
||||
const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();
|
||||
const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
|
||||
|
||||
initialize_header(obj, klass, noreg, t1, t2);
|
||||
|
||||

@ -267,15 +267,29 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for occurance of rax
// Set NZ/Z based on last compare
repne_scan();
// Not equal?
jcc(Assembler::notEqual, not_subtype);

// this part is kind tricky, as values in supers array could be 32 or 64 bit wide
// and we store values in objArrays always encoded, thus we need to encode value
// before repne
if (UseCompressedOops) {
encode_heap_oop(rax);
repne_scanl();
// Not equal?
jcc(Assembler::notEqual, not_subtype);
// decode heap oop here for movq
decode_heap_oop(rax);
} else {
repne_scanq();
jcc(Assembler::notEqual, not_subtype);
}
// Must be equal but missed in cache. Update cache.
movq(Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()), rax);
jmp(ok_is_subtype);

bind(not_subtype);
// decode heap oop here for miss
if (UseCompressedOops) decode_heap_oop(rax);
profile_typecheck_failed(rcx); // blows rcx
}
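The change above exists because the secondary-supers array is an objArray: with compressed oops its elements are 32-bit encoded values, so the candidate super klass in rax has to be encoded before the scan and the scan itself must compare 4-byte units (repne_scanl) rather than 8-byte ones (repne_scanq). A rough C++ model of the compressed-oops branch, reusing the encode arithmetic sketched earlier and with invented array contents:

#include <cstdint>
#include <cstdio>

// See the earlier encode_heap_oop sketch; repeated here so this compiles alone.
const uint64_t heap_base = 0x0000000700000000ULL;
uint32_t encode_heap_oop(uint64_t oop) {
  return oop ? (uint32_t)((oop - heap_base) >> 3) : 0;
}

// Stand-in for "repne scasl over the supers array": linear scan of
// 32-bit encoded klass pointers for the encoded candidate.
bool scan_secondary_supers(const uint32_t* supers, int len, uint64_t super_klass) {
  uint32_t needle = encode_heap_oop(super_klass);
  for (int i = 0; i < len; i++) {
    if (supers[i] == needle) return true;   // hit: would update the cache
  }
  return false;                             // miss: not a subtype
}

int main() {
  uint64_t k = heap_base + 0x1000;          // invented klass oop
  uint32_t supers[] = { encode_heap_oop(heap_base + 0x200), encode_heap_oop(k) };
  std::printf("%d\n", scan_secondary_supers(supers, 2, k));  // prints 1
}
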
@ -375,7 +375,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
|
|||
__ cmpl(rdx, atos);
|
||||
__ jcc(Assembler::notEqual, notObj);
|
||||
// atos
|
||||
__ movq(rax, field_address);
|
||||
__ load_heap_oop(rax, field_address);
|
||||
__ jmp(xreturn_path);
|
||||
|
||||
__ bind(notObj);
|
||||
|
|
|
@ -106,6 +106,7 @@ REGISTER_DEFINITION(XMMRegister, j_farg7);
|
|||
REGISTER_DEFINITION(Register, rscratch1);
|
||||
REGISTER_DEFINITION(Register, rscratch2);
|
||||
|
||||
REGISTER_DEFINITION(Register, r12_heapbase);
|
||||
REGISTER_DEFINITION(Register, r15_thread);
|
||||
#endif // AMD64
|
||||
|
||||
|
|
|
@ -789,7 +789,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
|||
|
||||
{
|
||||
__ verify_oop(holder);
|
||||
__ movq(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(temp, receiver);
|
||||
__ verify_oop(temp);
|
||||
|
||||
__ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
|
||||
|
@ -1297,21 +1297,26 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||
|
||||
const Register ic_reg = rax;
|
||||
const Register receiver = j_rarg0;
|
||||
const Register tmp = rdx;
|
||||
|
||||
Label ok;
|
||||
Label exception_pending;
|
||||
|
||||
__ verify_oop(receiver);
|
||||
__ cmpq(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
|
||||
__ pushq(tmp); // spill (any other registers free here???)
|
||||
__ load_klass(tmp, receiver);
|
||||
__ cmpq(ic_reg, tmp);
|
||||
__ jcc(Assembler::equal, ok);
|
||||
|
||||
__ popq(tmp);
|
||||
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
|
||||
|
||||
__ bind(ok);
|
||||
__ popq(tmp);
|
||||
|
||||
// Verified entry point must be aligned
|
||||
__ align(8);
|
||||
|
||||
__ bind(ok);
|
||||
|
||||
int vep_offset = ((intptr_t)__ pc()) - start;
|
||||
|
||||
// The instruction at the verified entry point must be 5 bytes or longer
|
||||
|
@ -1663,6 +1668,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||
__ andq(rsp, -16); // align stack as required by ABI
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
// Restore any method result value
|
||||
restore_native_result(masm, ret_type, stack_slots);
|
||||
__ bind(Continue);
|
||||
|
@ -1725,7 +1731,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||
__ bind(done);
|
||||
|
||||
}
|
||||
|
||||
{
|
||||
SkipIfEqual skip(masm, &DTraceMethodProbes, false);
|
||||
save_native_result(masm, ret_type, stack_slots);
|
||||
|
@ -1829,6 +1834,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label L;
|
||||
|
@ -1859,6 +1865,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
|||
__ andq(rsp, -16); // align stack as required by ABI
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
restore_native_result(masm, ret_type, stack_slots);
|
||||
// and continue
|
||||
__ jmp(reguard_done);
|
||||
|
@ -1941,9 +1948,8 @@ void SharedRuntime::generate_deopt_blob() {
|
|||
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
|
||||
|
||||
// Normal deoptimization. Save exec mode for unpack_frames.
|
||||
__ movl(r12, Deoptimization::Unpack_deopt); // callee-saved
|
||||
__ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
|
||||
__ jmp(cont);
|
||||
|
||||
int exception_offset = __ pc() - start;
|
||||
|
||||
// Prolog for exception case
|
||||
|
@ -1955,7 +1961,7 @@ void SharedRuntime::generate_deopt_blob() {
|
|||
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
|
||||
|
||||
// Deopt during an exception. Save exec mode for unpack_frames.
|
||||
__ movl(r12, Deoptimization::Unpack_exception); // callee-saved
|
||||
__ movl(r14, Deoptimization::Unpack_exception); // callee-saved
|
||||
|
||||
__ bind(cont);
|
||||
|
||||
|
@ -2088,7 +2094,7 @@ void SharedRuntime::generate_deopt_blob() {
|
|||
__ set_last_Java_frame(noreg, rbp, NULL);
|
||||
|
||||
__ movq(c_rarg0, r15_thread);
|
||||
__ movl(c_rarg1, r12); // second arg: exec_mode
|
||||
__ movl(c_rarg1, r14); // second arg: exec_mode
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
|
||||
|
||||
// Set an oopmap for the call site
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
// see the comment in stubRoutines.hpp
|
||||
|
||||
#define __ _masm->
|
||||
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
|
@ -252,6 +253,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
|
||||
// Load up thread register
|
||||
__ movq(r15_thread, thread);
|
||||
__ reinit_heapbase();
|
||||
|
||||
#ifdef ASSERT
|
||||
// make sure we have no pending exceptions
|
||||
|
@ -945,7 +947,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ jcc(Assembler::notZero, error);
|
||||
|
||||
// make sure klass is 'reasonable'
|
||||
__ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
|
||||
__ load_klass(rax, rax); // get klass
|
||||
__ testq(rax, rax);
|
||||
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
|
||||
// Check if the klass is in the right area of memory
|
||||
|
@ -957,7 +959,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ jcc(Assembler::notZero, error);
|
||||
|
||||
// make sure klass' klass is 'reasonable'
|
||||
__ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, rax);
|
||||
__ testq(rax, rax);
|
||||
__ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
|
||||
// Check if the klass' klass is in the right area of memory
|
||||
|
@ -1001,6 +1003,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
BLOCK_COMMENT("call MacroAssembler::debug");
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
|
||||
__ movq(rsp, r12); // restore rsp
|
||||
__ reinit_heapbase(); // r12 is heapbase
|
||||
__ popaq(); // pop registers
|
||||
__ ret(3 * wordSize); // pop caller saved stuff
|
||||
|
||||
|
@ -1652,6 +1655,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// Arguments:
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
|
@ -1665,9 +1669,9 @@ class StubGenerator: public StubCodeGenerator {
|
|||
//
|
||||
// Side Effects:
|
||||
// disjoint_int_copy_entry is set to the no-overlap entry point
|
||||
// used by generate_conjoint_int_copy().
|
||||
// used by generate_conjoint_int_oop_copy().
|
||||
//
|
||||
address generate_disjoint_int_copy(bool aligned, const char *name) {
|
||||
address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
@ -1680,19 +1684,30 @@ class StubGenerator: public StubCodeGenerator {
|
|||
const Register qword_count = count;
|
||||
const Register end_from = from; // source array end address
|
||||
const Register end_to = to; // destination array end address
|
||||
const Register saved_to = r11; // saved destination array address
|
||||
// End pointers are inclusive, and if count is not zero they point
|
||||
// to the last unit copied: end_to[0] := end_from[0]
|
||||
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
|
||||
|
||||
disjoint_int_copy_entry = __ pc();
|
||||
(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
|
||||
|
||||
if (is_oop) {
|
||||
// no registers are destroyed by this call
|
||||
gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
|
||||
}
|
||||
|
||||
BLOCK_COMMENT("Entry:");
|
||||
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
|
||||
|
||||
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
|
||||
// r9 and r10 may be used to save non-volatile registers
|
||||
|
||||
if (is_oop) {
|
||||
__ movq(saved_to, to);
|
||||
}
|
||||
|
||||
// 'from', 'to' and 'count' are now valid
|
||||
__ movq(dword_count, count);
|
||||
__ shrq(count, 1); // count => qword_count
|
||||
|
@ -1718,6 +1733,10 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ movl(Address(end_to, 8), rax);
|
||||
|
||||
__ BIND(L_exit);
|
||||
if (is_oop) {
|
||||
__ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
|
||||
gen_write_ref_array_post_barrier(saved_to, end_to, rax);
|
||||
}
|
||||
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
|
||||
restore_arg_regs();
|
||||
__ xorq(rax, rax); // return 0
|
||||
|
@ -1734,6 +1753,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// Arguments:
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
|
@ -1745,12 +1765,12 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// the hardware handle it. The two dwords within qwords that span
|
||||
// cache line boundaries will still be loaded and stored atomicly.
|
||||
//
|
||||
address generate_conjoint_int_copy(bool aligned, const char *name) {
|
||||
address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes;
|
||||
Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
|
||||
const Register from = rdi; // source array address
|
||||
const Register to = rsi; // destination array address
|
||||
const Register count = rdx; // elements count
|
||||
|
@ -1760,14 +1780,21 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
|
||||
|
||||
int_copy_entry = __ pc();
|
||||
if (is_oop) {
|
||||
// no registers are destroyed by this call
|
||||
gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
|
||||
}
|
||||
|
||||
(is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
|
||||
BLOCK_COMMENT("Entry:");
|
||||
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
|
||||
|
||||
array_overlap_test(disjoint_int_copy_entry, Address::times_4);
|
||||
array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
|
||||
Address::times_4);
|
||||
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
|
||||
// r9 and r10 may be used to save non-volatile registers
|
||||
|
||||
assert_clean_int(count, rax); // Make sure 'count' is clean int.
|
||||
// 'from', 'to' and 'count' are now valid
|
||||
__ movq(dword_count, count);
|
||||
__ shrq(count, 1); // count => qword_count
|
||||
|
@ -1789,6 +1816,9 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ jcc(Assembler::notZero, L_copy_8_bytes);
|
||||
|
||||
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
|
||||
if (is_oop) {
|
||||
__ jmp(L_exit);
|
||||
}
|
||||
restore_arg_regs();
|
||||
__ xorq(rax, rax); // return 0
|
||||
__ leave(); // required for proper stackwalking of RuntimeStub frame
|
||||
|
@ -1797,7 +1827,13 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// Copy in 32-bytes chunks
|
||||
copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
|
||||
|
||||
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
|
||||
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
|
||||
__ bind(L_exit);
|
||||
if (is_oop) {
|
||||
Register end_to = rdx;
|
||||
__ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
|
||||
gen_write_ref_array_post_barrier(to, end_to, rax);
|
||||
}
|
||||
restore_arg_regs();
|
||||
__ xorq(rax, rax); // return 0
|
||||
__ leave(); // required for proper stackwalking of RuntimeStub frame
|
||||
|
@ -1817,7 +1853,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// c_rarg1 - destination array address
|
||||
// c_rarg2 - element count, treated as ssize_t, can be zero
|
||||
//
|
||||
// Side Effects:
|
||||
// Side Effects:
|
||||
// disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
|
||||
// no-overlap entry point used by generate_conjoint_long_oop_copy().
|
||||
//
|
||||
|
@ -1857,7 +1893,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
|
||||
// Copy from low to high addresses. Use 'to' as scratch.
|
||||
__ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
|
||||
__ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
|
||||
__ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
|
||||
__ negq(qword_count);
|
||||
__ jmp(L_copy_32_bytes);
|
||||
|
||||
|
@ -1923,11 +1959,14 @@ class StubGenerator: public StubCodeGenerator {
|
|||
|
||||
address disjoint_copy_entry = NULL;
|
||||
if (is_oop) {
|
||||
assert(!UseCompressedOops, "shouldn't be called for compressed oops");
|
||||
disjoint_copy_entry = disjoint_oop_copy_entry;
|
||||
oop_copy_entry = __ pc();
|
||||
array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
|
||||
} else {
|
||||
disjoint_copy_entry = disjoint_long_copy_entry;
|
||||
long_copy_entry = __ pc();
|
||||
array_overlap_test(disjoint_long_copy_entry, Address::times_8);
|
||||
}
|
||||
BLOCK_COMMENT("Entry:");
|
||||
// caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
|
||||
|
@ -1945,8 +1984,6 @@ class StubGenerator: public StubCodeGenerator {
|
|||
gen_write_ref_array_pre_barrier(to, saved_count);
|
||||
}
|
||||
|
||||
// Copy from high to low addresses. Use rcx as scratch.
|
||||
|
||||
__ jmp(L_copy_32_bytes);
|
||||
|
||||
// Copy trailing qwords
|
||||
|
@ -2038,7 +2075,14 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// Scan rcx words at [rdi] for occurance of rax
|
||||
// Set NZ/Z based on last compare
|
||||
__ movq(rax, super_klass);
|
||||
__ repne_scan();
|
||||
if (UseCompressedOops) {
|
||||
// Compare against compressed form. Don't need to uncompress because
|
||||
// looks like orig rax is restored in popq below.
|
||||
__ encode_heap_oop(rax);
|
||||
__ repne_scanl();
|
||||
} else {
|
||||
__ repne_scanq();
|
||||
}
|
||||
|
||||
// Unspill the temp. registers:
|
||||
__ popq(rdi);
|
||||
|
@ -2115,7 +2159,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// caller guarantees that the arrays really are different
|
||||
// otherwise, we would have to make conjoint checks
|
||||
{ Label L;
|
||||
array_overlap_test(L, Address::times_8);
|
||||
array_overlap_test(L, TIMES_OOP);
|
||||
__ stop("checkcast_copy within a single array");
|
||||
__ bind(L);
|
||||
}
|
||||
|
@ -2160,12 +2204,11 @@ class StubGenerator: public StubCodeGenerator {
|
|||
#endif //ASSERT
|
||||
|
||||
// Loop-invariant addresses. They are exclusive end pointers.
|
||||
Address end_from_addr(from, length, Address::times_8, 0);
|
||||
Address end_to_addr(to, length, Address::times_8, 0);
|
||||
Address end_from_addr(from, length, TIMES_OOP, 0);
|
||||
Address end_to_addr(to, length, TIMES_OOP, 0);
|
||||
// Loop-variant addresses. They assume post-incremented count < 0.
|
||||
Address from_element_addr(end_from, count, Address::times_8, 0);
|
||||
Address to_element_addr(end_to, count, Address::times_8, 0);
|
||||
Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
|
||||
Address from_element_addr(end_from, count, TIMES_OOP, 0);
|
||||
Address to_element_addr(end_to, count, TIMES_OOP, 0);
|
||||
|
||||
gen_write_ref_array_pre_barrier(to, count);
|
||||
|
||||
|
@ -2189,17 +2232,17 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ align(16);
|
||||
|
||||
__ BIND(L_store_element);
|
||||
__ movq(to_element_addr, rax_oop); // store the oop
|
||||
__ store_heap_oop(to_element_addr, rax_oop); // store the oop
|
||||
__ incrementq(count); // increment the count toward zero
|
||||
__ jcc(Assembler::zero, L_do_card_marks);
|
||||
|
||||
// ======== loop entry is here ========
|
||||
__ BIND(L_load_element);
|
||||
__ movq(rax_oop, from_element_addr); // load the oop
|
||||
__ load_heap_oop(rax_oop, from_element_addr); // load the oop
|
||||
__ testq(rax_oop, rax_oop);
|
||||
__ jcc(Assembler::zero, L_store_element);
|
||||
|
||||
__ movq(r11_klass, oop_klass_addr); // query the object klass
|
||||
__ load_klass(r11_klass, rax_oop);// query the object klass
|
||||
generate_type_check(r11_klass, ckoff, ckval, L_store_element);
|
||||
// ======== end loop ========
|
||||
|
||||
|
@ -2425,15 +2468,14 @@ class StubGenerator: public StubCodeGenerator {
|
|||
// registers used as temp
|
||||
const Register r11_length = r11; // elements count to copy
|
||||
const Register r10_src_klass = r10; // array klass
|
||||
const Register r9_dst_klass = r9; // dest array klass
|
||||
|
||||
// if (length < 0) return -1;
|
||||
__ movl(r11_length, C_RARG4); // length (elements count, 32-bits value)
|
||||
__ testl(r11_length, r11_length);
|
||||
__ jccb(Assembler::negative, L_failed_0);
|
||||
|
||||
Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
|
||||
Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
|
||||
__ movq(r10_src_klass, src_klass_addr);
|
||||
__ load_klass(r10_src_klass, src);
|
||||
#ifdef ASSERT
|
||||
// assert(src->klass() != NULL);
|
||||
BLOCK_COMMENT("assert klasses not null");
|
||||
|
@ -2443,7 +2485,8 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ bind(L1);
|
||||
__ stop("broken null klass");
|
||||
__ bind(L2);
|
||||
__ cmpq(dst_klass_addr, 0);
|
||||
__ load_klass(r9_dst_klass, dst);
|
||||
__ cmpq(r9_dst_klass, 0);
|
||||
__ jcc(Assembler::equal, L1); // this would be broken also
|
||||
BLOCK_COMMENT("assert done");
|
||||
}
|
||||
|
@ -2470,7 +2513,8 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ jcc(Assembler::equal, L_objArray);
|
||||
|
||||
// if (src->klass() != dst->klass()) return -1;
|
||||
__ cmpq(r10_src_klass, dst_klass_addr);
|
||||
__ load_klass(r9_dst_klass, dst);
|
||||
__ cmpq(r10_src_klass, r9_dst_klass);
|
||||
__ jcc(Assembler::notEqual, L_failed);
|
||||
|
||||
// if (!src->is_Array()) return -1;
|
||||
|
@ -2559,17 +2603,18 @@ class StubGenerator: public StubCodeGenerator {
|
|||
|
||||
Label L_plain_copy, L_checkcast_copy;
|
||||
// test array classes for subtyping
|
||||
__ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality
|
||||
__ load_klass(r9_dst_klass, dst);
|
||||
__ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
|
||||
__ jcc(Assembler::notEqual, L_checkcast_copy);
|
||||
|
||||
// Identically typed arrays can be copied without element-wise checks.
|
||||
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
|
||||
r10, L_failed);
|
||||
|
||||
__ leaq(from, Address(src, src_pos, Address::times_8,
|
||||
__ leaq(from, Address(src, src_pos, TIMES_OOP,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
|
||||
__ leaq(to, Address(dst, dst_pos, Address::times_8,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
|
||||
__ leaq(to, Address(dst, dst_pos, TIMES_OOP,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
|
||||
__ movslq(count, r11_length); // length
|
||||
__ BIND(L_plain_copy);
|
||||
__ jump(RuntimeAddress(oop_copy_entry));
|
||||
|
@ -2579,7 +2624,7 @@ class StubGenerator: public StubCodeGenerator {
|
|||
{
|
||||
// assert(r11_length == C_RARG4); // will reload from here
|
||||
Register r11_dst_klass = r11;
|
||||
__ movq(r11_dst_klass, dst_klass_addr);
|
||||
__ load_klass(r11_dst_klass, dst);
|
||||
|
||||
// Before looking at dst.length, make sure dst is also an objArray.
|
||||
__ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
|
||||
|
@ -2593,13 +2638,13 @@ class StubGenerator: public StubCodeGenerator {
|
|||
__ movl(r11_length, C_RARG4); // reload
|
||||
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
|
||||
rax, L_failed);
|
||||
__ movl(r11_dst_klass, dst_klass_addr); // reload
|
||||
__ load_klass(r11_dst_klass, dst); // reload
|
||||
#endif
|
||||
|
||||
// Marshal the base address arguments now, freeing registers.
|
||||
__ leaq(from, Address(src, src_pos, Address::times_8,
|
||||
__ leaq(from, Address(src, src_pos, TIMES_OOP,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ leaq(to, Address(dst, dst_pos, Address::times_8,
|
||||
__ leaq(to, Address(dst, dst_pos, TIMES_OOP,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
__ movl(count, C_RARG4); // length (reloaded)
|
||||
Register sco_temp = c_rarg3; // this register is free now
|
||||
|
@ -2648,14 +2693,20 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");

StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");

StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");

StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");

if (UseCompressedOops) {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
} else {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
}

StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
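The registration above follows directly from element size: with compressed oops every oop element in an array is 4 bytes, so the generic oop arraycopy stubs reuse the int-sized copy loops (plus GC pre/post barriers), while uncompressed oops keep the long-sized loops. A small sketch of that selection, with made-up stub names standing in for the generated entry points:

#include <cstdio>

typedef void (*copy_stub)(void* from, void* to, long count);

// Invented stand-ins for the generated stubs registered in the hunk above.
void int_oop_copy(void*, void*, long)  { std::puts("4-byte elements + barriers"); }
void long_oop_copy(void*, void*, long) { std::puts("8-byte elements + barriers"); }

copy_stub select_oop_arraycopy(bool use_compressed_oops) {
  // Mirrors the if (UseCompressedOops) branch at stub registration time.
  return use_compressed_oops ? int_oop_copy : long_oop_copy;
}

int main() {
  select_oop_arraycopy(true)(nullptr, nullptr, 0);
  select_oop_arraycopy(false)(nullptr, nullptr, 0);
}
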
@ -664,7 +664,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||
|
||||
// work registers
|
||||
const Register method = rbx;
|
||||
const Register t = r12;
|
||||
const Register t = r11;
|
||||
|
||||
// allocate space for parameters
|
||||
__ get_method(method);
|
||||
|
@ -844,6 +844,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||
__ andq(rsp, -16); // align stack as required by ABI
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
__ bind(Continue);
|
||||
}
|
||||
|
||||
|
@ -891,6 +892,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
|
|||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ popaq(); // XXX only restore smashed registers
|
||||
__ reinit_heapbase();
|
||||
|
||||
__ bind(no_reguard);
|
||||
}
|
||||
|
@ -1360,6 +1362,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
|||
// rdx: return address/pc that threw exception
|
||||
__ restore_bcp(); // r13 points to call/send
|
||||
__ restore_locals();
|
||||
__ reinit_heapbase(); // restore r12 as heapbase.
|
||||
// Entry point for exceptions thrown within interpreter code
|
||||
Interpreter::_throw_exception_entry = __ pc();
|
||||
// expression stack is undefined here
|
||||
|
@ -1658,6 +1661,7 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
|
|||
__ andq(rsp, -16); // align stack as required by ABI
|
||||
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
|
||||
__ movq(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -557,8 +557,8 @@ void TemplateTable::aaload() {
|
|||
// eax: index
|
||||
// rdx: array
|
||||
index_check(rdx, rax); // kills rbx
|
||||
__ movq(rax, Address(rdx, rax,
|
||||
Address::times_8,
|
||||
__ load_heap_oop(rax, Address(rdx, rax,
|
||||
UseCompressedOops ? Address::times_4 : Address::times_8,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
}
|
||||
|
||||
|
@ -870,15 +870,15 @@ void TemplateTable::aastore() {
|
|||
__ jcc(Assembler::zero, is_null);
|
||||
|
||||
// Move subklass into rbx
|
||||
__ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rbx, rax);
|
||||
// Move superklass into rax
|
||||
__ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, rdx);
|
||||
__ movq(rax, Address(rax,
|
||||
sizeof(oopDesc) +
|
||||
objArrayKlass::element_klass_offset_in_bytes()));
|
||||
// Compress array + index*8 + 12 into a single register. Frees rcx.
|
||||
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
|
||||
__ leaq(rdx, Address(rdx, rcx,
|
||||
Address::times_8,
|
||||
UseCompressedOops ? Address::times_4 : Address::times_8,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
|
||||
|
||||
// Generate subtype check. Blows rcx, rdi
|
||||
|
@ -892,17 +892,17 @@ void TemplateTable::aastore() {
|
|||
// Come here on success
|
||||
__ bind(ok_is_subtype);
|
||||
__ movq(rax, at_tos()); // Value
|
||||
__ movq(Address(rdx, 0), rax);
|
||||
__ store_heap_oop(Address(rdx, 0), rax);
|
||||
__ store_check(rdx);
|
||||
__ jmp(done);
|
||||
|
||||
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
|
||||
__ bind(is_null);
|
||||
__ profile_null_seen(rbx);
|
||||
__ movq(Address(rdx, rcx,
|
||||
Address::times_8,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
|
||||
rax);
|
||||
__ store_heap_oop(Address(rdx, rcx,
|
||||
UseCompressedOops ? Address::times_4 : Address::times_8,
|
||||
arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
|
||||
rax);
|
||||
|
||||
// Pop stack arguments
|
||||
__ bind(done);
|
||||
|
@ -1934,7 +1934,7 @@ void TemplateTable::_return(TosState state) {
|
|||
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
|
||||
assert(state == vtos, "only valid state");
|
||||
__ movq(c_rarg1, aaddress(0));
|
||||
__ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdi, c_rarg1);
|
||||
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
|
||||
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
|
||||
Label skip_register_finalizer;
|
||||
|
@ -2184,7 +2184,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
|
|||
__ cmpl(flags, atos);
|
||||
__ jcc(Assembler::notEqual, notObj);
|
||||
// atos
|
||||
__ movq(rax, field);
|
||||
__ load_heap_oop(rax, field);
|
||||
__ push(atos);
|
||||
if (!is_static) {
|
||||
patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
|
||||
|
@ -2394,7 +2394,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
|
|||
// atos
|
||||
__ pop(atos);
|
||||
if (!is_static) pop_and_check_object(obj);
|
||||
__ movq(field, rax);
|
||||
__ store_heap_oop(field, rax);
|
||||
__ store_check(obj, field); // Need to mark card
|
||||
if (!is_static) {
|
||||
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
|
||||
|
@ -2515,7 +2515,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
|
|||
const Address field(c_rarg3, 0);
|
||||
|
||||
switch (bytecode()) { // load values into the jvalue object
|
||||
case Bytecodes::_fast_aputfield: // fall through
|
||||
case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
|
||||
case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
|
||||
case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
|
||||
case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
|
||||
|
@ -2582,7 +2582,7 @@ void TemplateTable::fast_storefield(TosState state) {
|
|||
// access field
|
||||
switch (bytecode()) {
|
||||
case Bytecodes::_fast_aputfield:
|
||||
__ movq(field, rax);
|
||||
__ store_heap_oop(field, rax);
|
||||
__ store_check(rcx, field);
|
||||
break;
|
||||
case Bytecodes::_fast_lputfield:
|
||||
|
@ -2631,8 +2631,8 @@ void TemplateTable::fast_accessfield(TosState state) {
|
|||
__ jcc(Assembler::zero, L1);
|
||||
// access constant pool cache entry
|
||||
__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
|
||||
__ movq(r12, rax); // save object pointer before call_VM() clobbers it
|
||||
__ verify_oop(rax);
|
||||
__ movq(r12, rax); // save object pointer before call_VM() clobbers it
|
||||
__ movq(c_rarg1, rax);
|
||||
// c_rarg1: object pointer copied above
|
||||
// c_rarg2: cache entry pointer
|
||||
|
@ -2641,6 +2641,7 @@ void TemplateTable::fast_accessfield(TosState state) {
|
|||
InterpreterRuntime::post_field_access),
|
||||
c_rarg1, c_rarg2);
|
||||
__ movq(rax, r12); // restore object pointer
|
||||
__ reinit_heapbase();
|
||||
__ bind(L1);
|
||||
}
|
||||
|
||||
|
@ -2667,7 +2668,7 @@ void TemplateTable::fast_accessfield(TosState state) {
|
|||
// access field
|
||||
switch (bytecode()) {
|
||||
case Bytecodes::_fast_agetfield:
|
||||
__ movq(rax, field);
|
||||
__ load_heap_oop(rax, field);
|
||||
__ verify_oop(rax);
|
||||
break;
|
||||
case Bytecodes::_fast_lgetfield:
|
||||
|
@ -2725,7 +2726,7 @@ void TemplateTable::fast_xaccess(TosState state) {
|
|||
__ movl(rax, Address(rax, rbx, Address::times_1));
|
||||
break;
|
||||
case atos:
|
||||
__ movq(rax, Address(rax, rbx, Address::times_1));
|
||||
__ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
|
||||
__ verify_oop(rax);
|
||||
break;
|
||||
case ftos:
|
||||
|
@ -2787,7 +2788,8 @@ void TemplateTable::prepare_invoke(Register method,
|
|||
__ movl(recv, flags);
|
||||
__ andl(recv, 0xFF);
|
||||
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
|
||||
__ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)));
|
||||
__ movq(recv, Address(rsp, recv, Address::times_8,
|
||||
-Interpreter::expr_offset_in_bytes(1)));
|
||||
__ verify_oop(recv);
|
||||
}
|
||||
|
||||
|
@ -2854,7 +2856,7 @@ void TemplateTable::invokevirtual_helper(Register index,
|
|||
|
||||
// get receiver klass
|
||||
__ null_check(recv, oopDesc::klass_offset_in_bytes());
|
||||
__ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, recv);
|
||||
|
||||
__ verify_oop(rax);
|
||||
|
||||
|
@ -2866,8 +2868,8 @@ void TemplateTable::invokevirtual_helper(Register index,
|
|||
assert(vtableEntry::size() * wordSize == 8,
|
||||
"adjust the scaling in the code below");
|
||||
__ movq(method, Address(rax, index,
|
||||
Address::times_8,
|
||||
base + vtableEntry::method_offset_in_bytes()));
|
||||
Address::times_8,
|
||||
base + vtableEntry::method_offset_in_bytes()));
|
||||
__ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
|
||||
__ jump_from_interpreted(method, rdx);
|
||||
}
|
||||
|
@ -2932,7 +2934,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
|||
|
||||
// Get receiver klass into rdx - also a null check
|
||||
__ restore_locals(); // restore r14
|
||||
__ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdx, rcx);
|
||||
__ verify_oop(rdx);
|
||||
|
||||
// profile this call
|
||||
|
@ -3161,7 +3163,7 @@ void TemplateTable::_new() {
|
|||
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
|
||||
(intptr_t) markOopDesc::prototype()); // header (address 0x1)
|
||||
}
|
||||
__ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi); // klass
|
||||
__ store_klass(rax, rsi); // klass
|
||||
__ jmp(done);
|
||||
}
|
||||
|
||||
|
@ -3223,12 +3225,12 @@ void TemplateTable::checkcast() {
|
|||
typeArrayOopDesc::header_size(T_BYTE) * wordSize),
|
||||
JVM_CONSTANT_Class);
|
||||
__ jcc(Assembler::equal, quicked);
|
||||
|
||||
__ movq(r12, rcx); // save rcx XXX
|
||||
__ push(atos); // save receiver for result, and for GC
|
||||
__ movq(r12, rcx); // save rcx XXX
|
||||
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
|
||||
__ pop_ptr(rdx); // restore receiver
|
||||
__ movq(rcx, r12); // restore rcx XXX
|
||||
__ reinit_heapbase();
|
||||
__ pop_ptr(rdx); // restore receiver
|
||||
__ jmpb(resolved);
|
||||
|
||||
// Get superklass in rax and subklass in rbx
|
||||
|
@ -3238,7 +3240,7 @@ void TemplateTable::checkcast() {
|
|||
Address::times_8, sizeof(constantPoolOopDesc)));
|
||||
|
||||
__ bind(resolved);
|
||||
__ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rbx, rdx);
|
||||
|
||||
// Generate subtype check. Blows rcx, rdi. Object in rdx.
|
||||
// Superklass in rax. Subklass in rbx.
|
||||
|
@ -3280,19 +3282,20 @@ void TemplateTable::instanceof() {
|
|||
JVM_CONSTANT_Class);
|
||||
__ jcc(Assembler::equal, quicked);
|
||||
|
||||
__ movq(r12, rcx); // save rcx
|
||||
__ push(atos); // save receiver for result, and for GC
|
||||
__ movq(r12, rcx); // save rcx
|
||||
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
|
||||
__ pop_ptr(rdx); // restore receiver
|
||||
__ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
|
||||
__ movq(rcx, r12); // restore rcx
|
||||
__ reinit_heapbase();
|
||||
__ pop_ptr(rdx); // restore receiver
|
||||
__ load_klass(rdx, rdx);
|
||||
__ jmpb(resolved);
|
||||
|
||||
// Get superklass in rax and subklass in rdx
|
||||
__ bind(quicked);
|
||||
__ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rdx, rax);
|
||||
__ movq(rax, Address(rcx, rbx,
|
||||
Address::times_8, sizeof(constantPoolOopDesc)));
|
||||
Address::times_8, sizeof(constantPoolOopDesc)));
|
||||
|
||||
__ bind(resolved);
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
|
|||
|
||||
// get receiver klass
|
||||
address npe_addr = __ pc();
|
||||
__ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, j_rarg0);
|
||||
|
||||
// compute entry offset (in words)
|
||||
int entry_offset =
|
||||
|
@ -131,7 +131,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
|
|||
// get receiver klass (also an implicit null-check)
|
||||
address npe_addr = __ pc();
|
||||
|
||||
__ movq(rbx, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rbx, j_rarg0);
|
||||
|
||||
// If we take a trap while this arg is on the stack we will not
|
||||
// be able to walk the stack properly. This is not an issue except
|
||||
|
@ -181,7 +181,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
|
|||
// Get methodOop and entrypoint for compiler
|
||||
|
||||
// Get klass pointer again
|
||||
__ movq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(rax, j_rarg0);
|
||||
|
||||
const Register method = rbx;
|
||||
__ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset));
|
||||
|
@ -226,10 +226,12 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
|
|||
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
|
||||
if (is_vtable_stub) {
|
||||
// Vtable stub size
|
||||
return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0);
|
||||
return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
|
||||
(UseCompressedOops ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
|
||||
} else {
|
||||
// Itable stub size
|
||||
return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0);
|
||||
return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0) +
|
||||
(UseCompressedOops ? 32 : 0); // 2 leaqs
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -4538,8 +4538,8 @@ frame %{
|
|||
// Location of C & interpreter return values
|
||||
c_return_value %{
|
||||
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
|
||||
static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
|
||||
static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
|
||||
static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
|
||||
static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
|
||||
|
||||
// in SSE2+ mode we want to keep the FPU stack clean so pretend
|
||||
// that C functions return float and double results in XMM0.
|
||||
|
@ -4554,8 +4554,8 @@ frame %{
|
|||
// Location of return values
|
||||
return_value %{
|
||||
assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
|
||||
static int lo[Op_RegL+1] = { 0, 0, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
|
||||
static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
|
||||
static int lo[Op_RegL+1] = { 0, 0, OptoReg::Bad, EAX_num, EAX_num, FPR1L_num, FPR1L_num, EAX_num };
|
||||
static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, FPR1H_num, EDX_num };
|
||||
if( ideal_reg == Op_RegD && UseSSE>=2 )
|
||||
return OptoRegPair(XMM0b_num,XMM0a_num);
|
||||
if( ideal_reg == Op_RegF && UseSSE>=1 )
|
||||
|
|
|
@ -312,7 +312,6 @@ reg_class ptr_reg(RAX, RAX_H,
|
|||
R9, R9_H,
|
||||
R10, R10_H,
|
||||
R11, R11_H,
|
||||
R12, R12_H,
|
||||
R13, R13_H,
|
||||
R14, R14_H);
|
||||
|
||||
|
@ -392,7 +391,6 @@ reg_class long_reg(RAX, RAX_H,
|
|||
R9, R9_H,
|
||||
R10, R10_H,
|
||||
R11, R11_H,
|
||||
R12, R12_H,
|
||||
R13, R13_H,
|
||||
R14, R14_H);
|
||||
|
||||
|
@ -406,7 +404,6 @@ reg_class long_no_rax_rdx_reg(RBP, RBP_H,
|
|||
R9, R9_H,
|
||||
R10, R10_H,
|
||||
R11, R11_H,
|
||||
R12, R12_H,
|
||||
R13, R13_H,
|
||||
R14, R14_H);
|
||||
|
||||
|
@ -421,7 +418,6 @@ reg_class long_no_rcx_reg(RBP, RBP_H,
|
|||
R9, R9_H,
|
||||
R10, R10_H,
|
||||
R11, R11_H,
|
||||
R12, R12_H,
|
||||
R13, R13_H,
|
||||
R14, R14_H);
|
||||
|
||||
|
@ -436,7 +432,6 @@ reg_class long_no_rax_reg(RBP, RBP_H,
|
|||
R9, R9_H,
|
||||
R10, R10_H,
|
||||
R11, R11_H,
|
||||
R12, R12_H,
|
||||
R13, R13_H,
|
||||
R14, R14_H);
|
||||
|
||||
|
@ -449,6 +444,9 @@ reg_class long_rcx_reg(RCX, RCX_H);
|
|||
// Singleton class for RDX long register
|
||||
reg_class long_rdx_reg(RDX, RDX_H);
|
||||
|
||||
// Singleton class for R12 long register
|
||||
reg_class long_r12_reg(R12, R12_H);
|
||||
|
||||
// Class for all int registers (except RSP)
|
||||
reg_class int_reg(RAX,
|
||||
RDX,
|
||||
|
@ -461,7 +459,6 @@ reg_class int_reg(RAX,
|
|||
R9,
|
||||
R10,
|
||||
R11,
|
||||
R12,
|
||||
R13,
|
||||
R14);
|
||||
|
||||
|
@ -476,7 +473,6 @@ reg_class int_no_rcx_reg(RAX,
|
|||
R9,
|
||||
R10,
|
||||
R11,
|
||||
R12,
|
||||
R13,
|
||||
R14);
|
||||
|
||||
|
@ -490,7 +486,6 @@ reg_class int_no_rax_rdx_reg(RBP,
|
|||
R9,
|
||||
R10,
|
||||
R11,
|
||||
R12,
|
||||
R13,
|
||||
R14);
|
||||
|
||||
|
@@ -1844,8 +1839,14 @@ uint reloc_java_to_interp()

#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
               "# Inline cache check", oopDesc::klass_offset_in_bytes());
  if (UseCompressedOops) {
    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
    st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
    st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
                 "# Inline cache check", oopDesc::klass_offset_in_bytes());
  }
  st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop");
  if (!OptoBreakpoint) {

@@ -1860,7 +1861,12 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
#ifdef ASSERT
  uint code_size = cbuf.code_size();
#endif
  masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  if (UseCompressedOops) {
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpq(rax, rscratch1);
  } else {
    masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

@@ -1871,6 +1877,10 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
    // Leave space for int3
    nops_cnt += 1;
  }
  if (UseCompressedOops) {
    // ??? divisible by 4 is aligned?
    nops_cnt += 1;
  }
  masm.nop(nops_cnt);

  assert(cbuf.code_size() - code_size == size(ra_),

@@ -1879,7 +1889,11 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return OptoBreakpoint ? 11 : 12;
  if (UseCompressedOops) {
    return OptoBreakpoint ? 19 : 20;
  } else {
    return OptoBreakpoint ? 11 : 12;
  }
}
|
||||
|
||||
|
||||
|
@ -2052,6 +2066,7 @@ bool Matcher::can_be_java_arg(int reg)
|
|||
reg == RCX_num || reg == RCX_H_num ||
|
||||
reg == R8_num || reg == R8_H_num ||
|
||||
reg == R9_num || reg == R9_H_num ||
|
||||
reg == R12_num || reg == R12_H_num ||
|
||||
reg == XMM0_num || reg == XMM0_H_num ||
|
||||
reg == XMM1_num || reg == XMM1_H_num ||
|
||||
reg == XMM2_num || reg == XMM2_H_num ||
|
||||
|
@@ -2087,6 +2102,17 @@ RegMask Matcher::modL_proj_mask() {
  return LONG_RDX_REG_mask;
}

static Address build_address(int b, int i, int s, int d) {
  Register index = as_Register(i);
  Address::ScaleFactor scale = (Address::ScaleFactor)s;
  if (index == rsp) {
    index = noreg;
    scale = Address::no_scale;
  }
  Address addr(as_Register(b), index, scale, d);
  return addr;
}

%}

//----------ENCODING BLOCK-----------------------------------------------------

@@ -2545,7 +2571,7 @@ encode %{
    Register Rrax = as_Register(RAX_enc); // super class
    Register Rrcx = as_Register(RCX_enc); // killed
    Register Rrsi = as_Register(RSI_enc); // sub class
    Label hit, miss;
    Label hit, miss, cmiss;

    MacroAssembler _masm(&cbuf);
    // Compare super with sub directly, since super is not in its own SSA.

@@ -2562,12 +2588,27 @@ encode %{
                            Klass::secondary_supers_offset_in_bytes()));
    __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
    __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
    __ repne_scan();
    __ jcc(Assembler::notEqual, miss);
    __ movq(Address(Rrsi,
                    sizeof(oopDesc) +
                    Klass::secondary_super_cache_offset_in_bytes()),
            Rrax);
    if (UseCompressedOops) {
      __ encode_heap_oop(Rrax);
      __ repne_scanl();
      __ jcc(Assembler::notEqual, cmiss);
      __ decode_heap_oop(Rrax);
      __ movq(Address(Rrsi,
                      sizeof(oopDesc) +
                      Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
      __ jmp(hit);
      __ bind(cmiss);
      __ decode_heap_oop(Rrax);
      __ jmp(miss);
    } else {
      __ repne_scanq();
      __ jcc(Assembler::notEqual, miss);
      __ movq(Address(Rrsi,
                      sizeof(oopDesc) +
                      Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
    }
    __ bind(hit);
    if ($primary) {
      __ xorq(Rrdi, Rrdi);
|
||||
|
@ -3693,10 +3734,10 @@ encode %{
|
|||
int count_offset = java_lang_String::count_offset_in_bytes();
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
|
||||
|
||||
masm.movq(rax, Address(rsi, value_offset));
|
||||
masm.load_heap_oop(rax, Address(rsi, value_offset));
|
||||
masm.movl(rcx, Address(rsi, offset_offset));
|
||||
masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
|
||||
masm.movq(rbx, Address(rdi, value_offset));
|
||||
masm.load_heap_oop(rbx, Address(rdi, value_offset));
|
||||
masm.movl(rcx, Address(rdi, offset_offset));
|
||||
masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
|
||||
|
||||
|
@ -4120,6 +4161,7 @@ encode %{
|
|||
%}
|
||||
|
||||
|
||||
|
||||
//----------FRAME--------------------------------------------------------------
|
||||
// Definition of frame structure and management information.
|
||||
//
|
||||
|
@ -4255,6 +4297,7 @@ frame
|
|||
static const int lo[Op_RegL + 1] = {
|
||||
0,
|
||||
0,
|
||||
RAX_num, // Op_RegN
|
||||
RAX_num, // Op_RegI
|
||||
RAX_num, // Op_RegP
|
||||
XMM0_num, // Op_RegF
|
||||
|
@ -4264,13 +4307,14 @@ frame
|
|||
static const int hi[Op_RegL + 1] = {
|
||||
0,
|
||||
0,
|
||||
OptoReg::Bad, // Op_RegN
|
||||
OptoReg::Bad, // Op_RegI
|
||||
RAX_H_num, // Op_RegP
|
||||
OptoReg::Bad, // Op_RegF
|
||||
XMM0_H_num, // Op_RegD
|
||||
RAX_H_num // Op_RegL
|
||||
};
|
||||
|
||||
assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
|
||||
return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
|
||||
%}
|
||||
%}
|
||||
|
@@ -4417,9 +4461,25 @@ operand immP0()
  interface(CONST_INTER);
%}

// Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 31-bit Pointer Immediate
// Can be used in both 32-bit signed and 32-bit unsigned insns.
// Works for nulls and markOops; not for relocatable (oop) pointers.
operand immP31()
%{
  predicate(!n->as_Type()->type()->isa_oopptr()
|
||||
|
@ -4431,6 +4491,7 @@ operand immP31()
|
|||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
|
||||
// Long Immediate
|
||||
operand immL()
|
||||
%{
|
||||
|
@ -4767,6 +4828,23 @@ operand rRegP()
|
|||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
|
||||
operand r12RegL() %{
|
||||
constraint(ALLOC_IN_RC(long_r12_reg));
|
||||
match(RegL);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand rRegN() %{
|
||||
constraint(ALLOC_IN_RC(int_reg));
|
||||
match(RegN);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
|
||||
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
|
||||
// It's fine for an instruction input which expects rRegP to match a r15_RegP.
|
||||
|
@ -4822,6 +4900,18 @@ operand rax_RegP()
|
|||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
// Special Registers
|
||||
// Return a compressed pointer value
|
||||
operand rax_RegN()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(int_rax_reg));
|
||||
match(RegN);
|
||||
match(rRegN);
|
||||
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
// Used in AtomicAdd
|
||||
operand rbx_RegP()
|
||||
%{
|
||||
|
@ -5112,6 +5202,21 @@ operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
|
|||
%}
|
||||
%}
|
||||
|
||||
// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
|
||||
operand indIndexScaleOffsetComp(rRegN src, immL32 off, r12RegL base) %{
|
||||
constraint(ALLOC_IN_RC(ptr_reg));
|
||||
match(AddP (DecodeN src base) off);
|
||||
|
||||
op_cost(10);
|
||||
format %{"[$base + $src << 3 + $off] (compressed)" %}
|
||||
interface(MEMORY_INTER) %{
|
||||
base($base);
|
||||
index($src);
|
||||
scale(0x3);
|
||||
disp($off);
|
||||
%}
|
||||
%}
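
The operand above lets the matcher fold a DecodeN straight into an addressing mode: with the base register pinned to R12, which the register-class changes earlier reserve for the heap base under compressed oops, heap_base + (narrow << 3) + offset is exactly what an x86-64 scaled-index address computes, so no separate decode instruction is needed. A sketch of the equivalence, with assumed register names:

    // What the matched address computes, written out (illustrative only):
    //   field_addr = r12_heapbase + ((uint64_t)narrow_ref << 3) + field_offset
    // i.e. a single   movl eax, [r12 + rsi*8 + 0x10]
    // instead of      leaq rsi, [r12 + rsi*8]
    //                 movl eax, [rsi + 0x10]
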
|
||||
|
||||
// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
|
||||
operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
|
||||
%{
|
||||
|
@ -5259,7 +5364,8 @@ operand cmpOpU()
|
|||
// case of this is memory operands.
|
||||
|
||||
opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
|
||||
indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset);
|
||||
indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
|
||||
indIndexScaleOffsetComp);
|
||||
|
||||
//----------PIPELINE-----------------------------------------------------------
|
||||
// Rules which define the behavior of the target architectures pipeline.
|
||||
|
@ -5937,10 +6043,28 @@ instruct loadP(rRegP dst, memory mem)
|
|||
ins_pipe(ialu_reg_mem); // XXX
|
||||
%}
|
||||
|
||||
// Load Compressed Pointer
|
||||
instruct loadN(rRegN dst, memory mem, rFlagsReg cr)
|
||||
%{
|
||||
match(Set dst (LoadN mem));
|
||||
effect(KILL cr);
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movl $dst, $mem\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
|
||||
Register dst = as_Register($dst$$reg);
|
||||
__ movl(dst, addr);
|
||||
%}
|
||||
ins_pipe(ialu_reg_mem); // XXX
|
||||
%}
|
||||
|
||||
|
||||
// Load Klass Pointer
|
||||
instruct loadKlass(rRegP dst, memory mem)
|
||||
%{
|
||||
match(Set dst (LoadKlass mem));
|
||||
predicate(!n->in(MemNode::Address)->bottom_type()->is_narrow());
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movq $dst, $mem\t# class" %}
|
||||
|
@ -5949,6 +6073,25 @@ instruct loadKlass(rRegP dst, memory mem)
|
|||
ins_pipe(ialu_reg_mem); // XXX
|
||||
%}
|
||||
|
||||
// Load Klass Pointer
|
||||
instruct loadKlassComp(rRegP dst, memory mem)
|
||||
%{
|
||||
match(Set dst (LoadKlass mem));
|
||||
predicate(n->in(MemNode::Address)->bottom_type()->is_narrow());
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movl $dst, $mem\t# compressed class" %}
|
||||
ins_encode %{
|
||||
Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
|
||||
Register dst = as_Register($dst$$reg);
|
||||
__ movl(dst, addr);
|
||||
// klass is never null in the header but this is generated for all
|
||||
// klass loads not just the _klass field in the header.
|
||||
__ decode_heap_oop(dst);
|
||||
%}
|
||||
ins_pipe(ialu_reg_mem); // XXX
|
||||
%}
|
||||
|
||||
// Load Float
|
||||
instruct loadF(regF dst, memory mem)
|
||||
%{
|
||||
|
@ -6203,6 +6346,35 @@ instruct loadConF(regF dst, immF src)
|
|||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct loadConN0(rRegN dst, immN0 src, rFlagsReg cr) %{
|
||||
match(Set dst src);
|
||||
effect(KILL cr);
|
||||
format %{ "xorq $dst, $src\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
Register dst = $dst$$Register;
|
||||
__ xorq(dst, dst);
|
||||
%}
|
||||
ins_pipe(ialu_reg);
|
||||
%}
|
||||
|
||||
instruct loadConN(rRegN dst, immN src) %{
|
||||
match(Set dst src);
|
||||
|
||||
ins_cost(125);
|
||||
format %{ "movl $dst, $src\t# compressed ptr" %}
|
||||
ins_encode %{
|
||||
address con = (address)$src$$constant;
|
||||
Register dst = $dst$$Register;
|
||||
if (con == NULL) {
|
||||
ShouldNotReachHere();
|
||||
} else {
|
||||
__ movoop(dst, (jobject)$src$$constant);
|
||||
__ encode_heap_oop_not_null(dst);
|
||||
}
|
||||
%}
|
||||
ins_pipe(ialu_reg_fat); // XXX
|
||||
%}
|
||||
|
||||
instruct loadConF0(regF dst, immF0 src)
|
||||
%{
|
||||
match(Set dst src);
|
||||
|
@ -6458,6 +6630,22 @@ instruct storeImmP(memory mem, immP31 src)
|
|||
ins_pipe(ialu_mem_imm);
|
||||
%}
|
||||
|
||||
// Store Compressed Pointer
|
||||
instruct storeN(memory mem, rRegN src, rFlagsReg cr)
|
||||
%{
|
||||
match(Set mem (StoreN mem src));
|
||||
effect(KILL cr);
|
||||
|
||||
ins_cost(125); // XXX
|
||||
format %{ "movl $mem, $src\t# ptr" %}
|
||||
ins_encode %{
|
||||
Address addr = build_address($mem$$base, $mem$$index, $mem$$scale, $mem$$disp);
|
||||
Register src = as_Register($src$$reg);
|
||||
__ movl(addr, src);
|
||||
%}
|
||||
ins_pipe(ialu_mem_reg);
|
||||
%}
|
||||
|
||||
// Store Integer Immediate
|
||||
instruct storeImmI(memory mem, immI src)
|
||||
%{
|
||||
|
@@ -6805,6 +6993,39 @@ instruct castP2X(rRegL dst, rRegP src)
  ins_pipe(ialu_reg_reg); // XXX
%}


// Convert oop pointer into compressed form
instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
  match(Set dst (EncodeP src));
  effect(KILL cr);
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ movq(d, s);
    }
    __ encode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}

instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
  match(Set dst (DecodeN src));
  effect(KILL cr);
  format %{ "decode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ movq(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}
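
Both nodes defer to the assembler's encode_heap_oop and decode_heap_oop; the underlying arithmetic is a base-relative shift, with NULL mapping to the narrow value 0. A self-contained sketch follows; the names are hypothetical and the 3-bit shift is an assumption that matches the << 3 used by the DTrace and serviceability-agent changes later in this commit.

    #include <stdint.h>

    // Hypothetical stand-ins, for illustration only.
    static uintptr_t heap_base;          // what r12_heapbase caches in compiled code
    typedef uint32_t narrow_oop_sketch;  // 32-bit compressed reference

    // oop -> narrow: subtract the heap base and drop the three alignment bits.
    static narrow_oop_sketch encode_sketch(uintptr_t oop) {
      if (oop == 0) return 0;                          // NULL stays NULL
      return (narrow_oop_sketch)((oop - heap_base) >> 3);
    }

    // narrow -> oop: scale back up and add the heap base.
    static uintptr_t decode_sketch(narrow_oop_sketch v) {
      if (v == 0) return 0;                            // NULL stays NULL
      return heap_base + ((uintptr_t)v << 3);
    }

A 32-bit narrow value scaled by 8 spans 2^35 bytes, which is where the 32 GB heap-size limit in the commit summary comes from.
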
|
||||
|
||||
|
||||
//----------Conditional Move---------------------------------------------------
|
||||
// Jump
|
||||
// dummy instruction for generating temp registers
|
||||
|
@ -7521,6 +7742,28 @@ instruct compareAndSwapI(rRegI res,
|
|||
%}
|
||||
|
||||
|
||||
instruct compareAndSwapN(rRegI res,
|
||||
memory mem_ptr,
|
||||
rax_RegN oldval, rRegN newval,
|
||||
rFlagsReg cr) %{
|
||||
match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
|
||||
effect(KILL cr, KILL oldval);
|
||||
|
||||
format %{ "cmpxchgl $mem_ptr,$newval\t# "
|
||||
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
|
||||
"sete $res\n\t"
|
||||
"movzbl $res, $res" %}
|
||||
opcode(0x0F, 0xB1);
|
||||
ins_encode(lock_prefix,
|
||||
REX_reg_mem(newval, mem_ptr),
|
||||
OpcP, OpcS,
|
||||
reg_mem(newval, mem_ptr),
|
||||
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
|
||||
REX_reg_breg(res, res), // movzbl
|
||||
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
|
||||
ins_pipe( pipe_cmpxchg );
|
||||
%}
|
||||
|
||||
//----------Subtraction Instructions-------------------------------------------
|
||||
|
||||
// Integer Subtraction Instructions
|
||||
|
@ -10771,6 +11014,14 @@ instruct testP_reg_mem(rFlagsReg cr, memory op, immP0 zero)
|
|||
ins_pipe(ialu_cr_reg_imm);
|
||||
%}
|
||||
|
||||
instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
|
||||
match(Set cr (CmpN src zero));
|
||||
|
||||
format %{ "testl $src, $src" %}
|
||||
ins_encode %{ __ testl($src$$Register, $src$$Register); %}
|
||||
ins_pipe(ialu_cr_reg_imm);
|
||||
%}
|
||||
|
||||
// Yanked all unsigned pointer compare operations.
|
||||
// Pointer compares are done with CmpP which is already unsigned.
|
||||
|
||||
|
@ -11018,6 +11269,7 @@ instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
|
|||
rdi_RegP result)
|
||||
%{
|
||||
match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
|
||||
predicate(!UseCompressedOops); // decoding oop kills condition codes
|
||||
effect(KILL rcx, KILL result);
|
||||
|
||||
ins_cost(1000);
|
||||
|
|
|
@ -196,7 +196,7 @@ int generateJvmOffsets(GEN_variant gen_variant) {
|
|||
printf("\n");
|
||||
|
||||
GEN_VALUE(OFFSET_HeapBlockHeader_used, offset_of(HeapBlock::Header, _used));
|
||||
GEN_OFFS(oopDesc, _klass);
|
||||
GEN_OFFS(oopDesc, _metadata);
|
||||
printf("\n");
|
||||
|
||||
GEN_VALUE(AccessFlags_NATIVE, JVM_ACC_NATIVE);
|
||||
|
|
|
@ -46,6 +46,7 @@ extern pointer __JvmOffsets;
|
|||
extern pointer __1cJCodeCacheF_heap_;
|
||||
extern pointer __1cIUniverseP_methodKlassObj_;
|
||||
extern pointer __1cIUniverseO_collectedHeap_;
|
||||
extern pointer __1cIUniverseK_heap_base_;
|
||||
|
||||
extern pointer __1cHnmethodG__vtbl_;
|
||||
extern pointer __1cKBufferBlobG__vtbl_;
|
||||
|
@ -107,7 +108,7 @@ dtrace:helper:ustack:
|
|||
copyin_offset(OFFSET_constantPoolOopDesc_pool_holder);
|
||||
|
||||
copyin_offset(OFFSET_HeapBlockHeader_used);
|
||||
copyin_offset(OFFSET_oopDesc_klass);
|
||||
copyin_offset(OFFSET_oopDesc_metadata);
|
||||
|
||||
copyin_offset(OFFSET_symbolOopDesc_length);
|
||||
copyin_offset(OFFSET_symbolOopDesc_body);
|
||||
|
@ -150,6 +151,7 @@ dtrace:helper:ustack:
|
|||
|
||||
this->Universe_methodKlassOop = copyin_ptr(&``__1cIUniverseP_methodKlassObj_);
|
||||
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
|
||||
this->Universe_heap_base = copyin_ptr(&``__1cIUniverseK_heap_base_);
|
||||
|
||||
/* Reading volatile values */
|
||||
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
|
||||
|
@@ -293,10 +295,27 @@ dtrace:helper:ustack:

dtrace:helper:ustack:
/!this->done && this->vtbl == this->BufferBlob_vtbl &&
this->Universe_heap_base == NULL &&
this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
{
  MARK_LINE;
  this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_klass);
  this->klass = copyin_ptr(this->methodOopPtr + OFFSET_oopDesc_metadata);
  this->methodOop = this->klass == this->Universe_methodKlassOop;
  this->done = !this->methodOop;
}

dtrace:helper:ustack:
/!this->done && this->vtbl == this->BufferBlob_vtbl &&
this->Universe_heap_base != NULL &&
this->methodOopPtr > this->heap_start && this->methodOopPtr < this->heap_end/
{
  MARK_LINE;
  /*
   * Read compressed pointer and decode heap oop, same as oop.inline.hpp
   */
  this->cklass = copyin_uint32(this->methodOopPtr + OFFSET_oopDesc_metadata);
  this->klass = (uint64_t)((uintptr_t)this->Universe_heap_base +
                ((uintptr_t)this->cklass << 3));
  this->methodOop = this->klass == this->Universe_methodKlassOop;
  this->done = !this->methodOop;
}
|
||||
|
|
|
@ -148,9 +148,11 @@ struct jvm_agent {
|
|||
|
||||
uint64_t Universe_methodKlassObj_address;
|
||||
uint64_t CodeCache_heap_address;
|
||||
uint64_t Universe_heap_base_address;
|
||||
|
||||
/* Volatiles */
|
||||
uint64_t Universe_methodKlassObj;
|
||||
uint64_t Universe_heap_base;
|
||||
uint64_t CodeCache_low;
|
||||
uint64_t CodeCache_high;
|
||||
uint64_t CodeCache_segmap_low;
|
||||
|
@ -166,7 +168,6 @@ struct jvm_agent {
|
|||
Frame_t curr_fr;
|
||||
};
|
||||
|
||||
|
||||
static int
|
||||
read_string(struct ps_prochandle *P,
|
||||
char *buf, /* caller's buffer */
|
||||
|
@ -185,6 +186,14 @@ read_string(struct ps_prochandle *P,
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int read_compressed_pointer(jvm_agent_t* J, uint64_t base, uint32_t *ptr) {
|
||||
int err = -1;
|
||||
uint32_t ptr32;
|
||||
err = ps_pread(J->P, base, &ptr32, sizeof(uint32_t));
|
||||
*ptr = ptr32;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int read_pointer(jvm_agent_t* J, uint64_t base, uint64_t* ptr) {
|
||||
int err = -1;
|
||||
uint32_t ptr32;
|
||||
|
@ -270,6 +279,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
|
|||
if (strcmp("_methodKlassObj", vmp->fieldName) == 0) {
|
||||
J->Universe_methodKlassObj_address = vmp->address;
|
||||
}
|
||||
if (strcmp("_heap_base", vmp->fieldName) == 0) {
|
||||
J->Universe_heap_base_address = vmp->address;
|
||||
}
|
||||
}
|
||||
CHECK_FAIL(err);
|
||||
|
||||
|
@ -292,6 +304,8 @@ static int read_volatiles(jvm_agent_t* J) {
|
|||
|
||||
err = read_pointer(J, J->Universe_methodKlassObj_address, &J->Universe_methodKlassObj);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->Universe_heap_base_address, &J->Universe_heap_base);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_low, &J->CodeCache_low);
|
||||
CHECK_FAIL(err);
|
||||
|
@@ -444,7 +458,17 @@ void Jagent_destroy(jvm_agent_t *J) {
static int is_methodOop(jvm_agent_t* J, uint64_t methodOopPtr) {
  uint64_t klass;
  int err;
  err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_klass, &klass);
  // If heap_base is nonnull, this was a compressed oop.
  if (J->Universe_heap_base != NULL) {
    uint32_t cklass;
    err = read_compressed_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata,
                                  &cklass);
    // decode heap oop, same as oop.inline.hpp
    klass = (uint64_t)((uintptr_t)J->Universe_heap_base +
                       ((uintptr_t)cklass << 3));
  } else {
    err = read_pointer(J, methodOopPtr + OFFSET_oopDesc_metadata, &klass);
  }
  if (err != PS_OK) goto fail;
  return klass == J->Universe_methodKlassObj;
|
||||
|
||||
|
|
|
@ -3116,7 +3116,7 @@ jint os::init_2(void) {
|
|||
// as reserve size, since on a 64-bit platform we'll run into that more
|
||||
// often than running out of virtual memory space. We can use the
|
||||
// lower value of the two calculations as the os_thread_limit.
|
||||
size_t max_address_space = ((size_t)1 << (BitsPerOop - 1)) - (200 * K * K);
|
||||
size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
|
||||
win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
|
||||
|
||||
// at exit methods are called in the reverse order of their registration.
|
||||
|
|
|
@ -33,7 +33,9 @@
|
|||
!! by the .il "call", in some cases optimizing the code, completely eliding it,
|
||||
!! or by moving the code from the "call site".
|
||||
|
||||
|
||||
!! ASM better know we may use G6 for our own purposes
|
||||
.register %g6, #ignore
|
||||
|
||||
.globl SafeFetch32
|
||||
.align 32
|
||||
.global Fetch32PFI, Fetch32Resume
|
||||
|
@ -106,6 +108,7 @@ SpinPause:
|
|||
.globl _raw_thread_id
|
||||
.align 32
|
||||
_raw_thread_id:
|
||||
.register %g7, #scratch
|
||||
retl
|
||||
mov %g7, %o0
|
||||
|
||||
|
|
|
@ -867,6 +867,7 @@ const char *ArchDesc::reg_mask(InstructForm &inForm) {
|
|||
Form *form = (Form*)_globalNames[result];
|
||||
assert( form, "Result operand must be defined");
|
||||
OperandForm *oper = form->is_operand();
|
||||
if (oper == NULL) form->dump();
|
||||
assert( oper, "Result must be an OperandForm");
|
||||
return reg_mask( *oper );
|
||||
}
|
||||
|
@ -908,6 +909,7 @@ const char *ArchDesc::getIdealType(const char *idealOp) {
|
|||
switch( last_char ) {
|
||||
case 'I': return "TypeInt::INT";
|
||||
case 'P': return "TypePtr::BOTTOM";
|
||||
case 'N': return "TypeNarrowOop::BOTTOM";
|
||||
case 'F': return "Type::FLOAT";
|
||||
case 'D': return "Type::DOUBLE";
|
||||
case 'L': return "TypeLong::LONG";
|
||||
|
@ -944,7 +946,7 @@ void ArchDesc::initBaseOpTypes() {
|
|||
// Create InstructForm and assign type for each ideal instruction.
|
||||
for ( int j = _last_machine_leaf+1; j < _last_opcode; ++j) {
|
||||
char *ident = (char *)NodeClassNames[j];
|
||||
if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") ||
|
||||
if(!strcmp(ident, "ConI") || !strcmp(ident, "ConP") || !strcmp(ident, "ConN") ||
|
||||
!strcmp(ident, "ConF") || !strcmp(ident, "ConD") ||
|
||||
!strcmp(ident, "ConL") || !strcmp(ident, "Con" ) ||
|
||||
!strcmp(ident, "Bool") ) {
|
||||
|
@ -1109,6 +1111,7 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
|
|||
if ( strcmp(idealName,"CmpI") == 0
|
||||
|| strcmp(idealName,"CmpU") == 0
|
||||
|| strcmp(idealName,"CmpP") == 0
|
||||
|| strcmp(idealName,"CmpN") == 0
|
||||
|| strcmp(idealName,"CmpL") == 0
|
||||
|| strcmp(idealName,"CmpD") == 0
|
||||
|| strcmp(idealName,"CmpF") == 0
|
||||
|
|
|
@ -211,6 +211,7 @@ Form::DataType Form::ideal_to_const_type(const char *name) const {
|
|||
|
||||
if (strcmp(name,"ConI")==0) return Form::idealI;
|
||||
if (strcmp(name,"ConP")==0) return Form::idealP;
|
||||
if (strcmp(name,"ConN")==0) return Form::idealN;
|
||||
if (strcmp(name,"ConL")==0) return Form::idealL;
|
||||
if (strcmp(name,"ConF")==0) return Form::idealF;
|
||||
if (strcmp(name,"ConD")==0) return Form::idealD;
|
||||
|
@ -256,6 +257,7 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
|
|||
if( strcmp(opType,"LoadPLocked")==0 ) return Form::idealP;
|
||||
if( strcmp(opType,"LoadLLocked")==0 ) return Form::idealL;
|
||||
if( strcmp(opType,"LoadP")==0 ) return Form::idealP;
|
||||
if( strcmp(opType,"LoadN")==0 ) return Form::idealN;
|
||||
if( strcmp(opType,"LoadRange")==0 ) return Form::idealI;
|
||||
if( strcmp(opType,"LoadS")==0 ) return Form::idealS;
|
||||
if( strcmp(opType,"Load16B")==0 ) return Form::idealB;
|
||||
|
@ -286,6 +288,7 @@ Form::DataType Form::is_store_to_memory(const char *opType) const {
|
|||
if( strcmp(opType,"StoreI")==0) return Form::idealI;
|
||||
if( strcmp(opType,"StoreL")==0) return Form::idealL;
|
||||
if( strcmp(opType,"StoreP")==0) return Form::idealP;
|
||||
if( strcmp(opType,"StoreN")==0) return Form::idealN;
|
||||
if( strcmp(opType,"Store16B")==0) return Form::idealB;
|
||||
if( strcmp(opType,"Store8B")==0) return Form::idealB;
|
||||
if( strcmp(opType,"Store4B")==0) return Form::idealB;
|
||||
|
|
|
@ -168,7 +168,8 @@ public:
|
|||
idealD = 5, // Double type
|
||||
idealB = 6, // Byte type
|
||||
idealC = 7, // Char type
|
||||
idealS = 8 // String type
|
||||
idealS = 8, // String type
|
||||
idealN = 9 // Narrow oop types
|
||||
};
|
||||
// Convert ideal name to a DataType, return DataType::none if not a 'ConX'
|
||||
Form::DataType ideal_to_const_type(const char *ideal_type_name) const;
|
||||
|
|
|
@ -726,6 +726,9 @@ bool InstructForm::captures_bottom_type() const {
|
|||
if( _matrule && _matrule->_rChild &&
|
||||
(!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
|
||||
!strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
|
||||
!strcmp(_matrule->_rChild->_opType,"DecodeN") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"EncodeP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"LoadN") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
|
||||
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
|
||||
else if ( is_ideal_load() == Form::idealP ) return true;
|
||||
|
@ -2101,6 +2104,7 @@ bool OperandForm::is_bound_register() const {
|
|||
if (strcmp(name,"RegF")==0) size = 1;
|
||||
if (strcmp(name,"RegD")==0) size = 2;
|
||||
if (strcmp(name,"RegL")==0) size = 2;
|
||||
if (strcmp(name,"RegN")==0) size = 1;
|
||||
if (strcmp(name,"RegP")==0) size = globalAD->get_preproc_def("_LP64") ? 2 : 1;
|
||||
if (size == 0) return false;
|
||||
return size == reg_class->size();
|
||||
|
@ -2365,11 +2369,12 @@ void OperandForm::ext_format(FILE *fp, FormDict &globals, uint index) {
|
|||
|
||||
void OperandForm::format_constant(FILE *fp, uint const_index, uint const_type) {
|
||||
switch(const_type) {
|
||||
case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
|
||||
case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
|
||||
case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
|
||||
case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
|
||||
case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
|
||||
case Form::idealI: fprintf(fp,"st->print(\"#%%d\", _c%d);\n", const_index); break;
|
||||
case Form::idealP: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
|
||||
case Form::idealN: fprintf(fp,"_c%d->dump_on(st);\n", const_index); break;
|
||||
case Form::idealL: fprintf(fp,"st->print(\"#%%lld\", _c%d);\n", const_index); break;
|
||||
case Form::idealF: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
|
||||
case Form::idealD: fprintf(fp,"st->print(\"#%%f\", _c%d);\n", const_index); break;
|
||||
default:
|
||||
assert( false, "ShouldNotReachHere()");
|
||||
}
|
||||
|
@ -3300,9 +3305,9 @@ void MatchNode::output(FILE *fp) {
|
|||
|
||||
int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
|
||||
static const char *needs_ideal_memory_list[] = {
|
||||
"StoreI","StoreL","StoreP","StoreD","StoreF" ,
|
||||
"StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
|
||||
"StoreB","StoreC","Store" ,"StoreFP",
|
||||
"LoadI" ,"LoadL", "LoadP" ,"LoadD" ,"LoadF" ,
|
||||
"LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF" ,
|
||||
"LoadB" ,"LoadC" ,"LoadS" ,"Load" ,
|
||||
"Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
|
||||
"Store8B","Store4B","Store8C","Store4C","Store2C",
|
||||
|
@ -3311,7 +3316,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
|
|||
"LoadRange", "LoadKlass", "LoadL_unaligned", "LoadD_unaligned",
|
||||
"LoadPLocked", "LoadLLocked",
|
||||
"StorePConditional", "StoreLConditional",
|
||||
"CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP",
|
||||
"CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
|
||||
"StoreCM",
|
||||
"ClearArray"
|
||||
};
|
||||
|
@ -3712,6 +3717,7 @@ bool MatchRule::is_base_register(FormDict &globals) const {
|
|||
if( base_operand(position, globals, result, name, opType) &&
|
||||
(strcmp(opType,"RegI")==0 ||
|
||||
strcmp(opType,"RegP")==0 ||
|
||||
strcmp(opType,"RegN")==0 ||
|
||||
strcmp(opType,"RegL")==0 ||
|
||||
strcmp(opType,"RegF")==0 ||
|
||||
strcmp(opType,"RegD")==0 ||
|
||||
|
|
|
@@ -1546,6 +1546,18 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {

  // Build a mapping from operand index to input edges
  fprintf(fp," unsigned idx0 = oper_input_base();\n");

  // The order in which inputs are added to a node is very
  // strange.  Store nodes get a memory input before Expand is
  // called and all other nodes get it afterwards, so
  // oper_input_base is wrong during expansion.  This code adjusts
  // it so that expansion will work correctly.
  bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
                             node->is_ideal_store() == Form::none;
  if (missing_memory_edge) {
    fprintf(fp," idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
  }

  for( i = 0; i < node->num_opnds(); i++ ) {
    fprintf(fp," unsigned idx%d = idx%d + num%d;\n",
            i+1,i,i);
|
||||
|
@ -1600,8 +1612,10 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
|||
int node_mem_op = node->memory_operand(_globalNames);
|
||||
assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
|
||||
"expand rule member needs memory but top-level inst doesn't have any" );
|
||||
// Copy memory edge
|
||||
fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
|
||||
if (!missing_memory_edge) {
|
||||
// Copy memory edge
|
||||
fprintf(fp," n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over the new instruction's operands
|
||||
|
@ -2363,6 +2377,8 @@ void ArchDesc::defineSize(FILE *fp, InstructForm &inst) {
|
|||
fprintf(fp,"uint %sNode::size(PhaseRegAlloc *ra_) const {\n",
|
||||
inst._ident);
|
||||
|
||||
fprintf(fp, " assert(VerifyOops || MachNode::size(ra_) <= %s, \"bad fixed size\");\n", inst._size);
|
||||
|
||||
//(2)
|
||||
// Print the size
|
||||
fprintf(fp, " return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
|
||||
|
@ -3426,6 +3442,8 @@ static void path_to_constant(FILE *fp, FormDict &globals,
|
|||
fprintf(fp, "_leaf->get_int()");
|
||||
} else if ( (strcmp(optype,"ConP") == 0) ) {
|
||||
fprintf(fp, "_leaf->bottom_type()->is_ptr()");
|
||||
} else if ( (strcmp(optype,"ConN") == 0) ) {
|
||||
fprintf(fp, "_leaf->bottom_type()->is_narrowoop()");
|
||||
} else if ( (strcmp(optype,"ConF") == 0) ) {
|
||||
fprintf(fp, "_leaf->getf()");
|
||||
} else if ( (strcmp(optype,"ConD") == 0) ) {
|
||||
|
|
|
@ -203,6 +203,10 @@ static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper)
|
|||
if (i > 0) fprintf(fp,", ");
|
||||
fprintf(fp," const TypePtr *_c%d;\n", i);
|
||||
}
|
||||
else if (!strcmp(type, "ConN")) {
|
||||
if (i > 0) fprintf(fp,", ");
|
||||
fprintf(fp," const TypeNarrowOop *_c%d;\n", i);
|
||||
}
|
||||
else if (!strcmp(type, "ConL")) {
|
||||
if (i > 0) fprintf(fp,", ");
|
||||
fprintf(fp," jlong _c%d;\n", i);
|
||||
|
@ -235,6 +239,10 @@ static void declareConstStorage(FILE *fp, FormDict &globals, OperandForm *oper)
|
|||
fprintf(fp," const TypePtr *_c%d;\n", i);
|
||||
i++;
|
||||
}
|
||||
else if (!strcmp(comp->base_type(globals), "ConN")) {
|
||||
fprintf(fp," const TypePtr *_c%d;\n", i);
|
||||
i++;
|
||||
}
|
||||
else if (!strcmp(comp->base_type(globals), "ConL")) {
|
||||
fprintf(fp," jlong _c%d;\n", i);
|
||||
i++;
|
||||
|
@ -280,6 +288,7 @@ static void defineConstructor(FILE *fp, const char *name, uint num_consts,
|
|||
fprintf(fp,is_ideal_bool ? "BoolTest::mask c%d" : "int32 c%d", i);
|
||||
break;
|
||||
}
|
||||
case Form::idealN : { fprintf(fp,"const TypeNarrowOop *c%d", i); break; }
|
||||
case Form::idealP : { fprintf(fp,"const TypePtr *c%d", i); break; }
|
||||
case Form::idealL : { fprintf(fp,"jlong c%d", i); break; }
|
||||
case Form::idealF : { fprintf(fp,"jfloat c%d", i); break; }
|
||||
|
@ -302,6 +311,11 @@ static void defineConstructor(FILE *fp, const char *name, uint num_consts,
|
|||
fprintf(fp,"const TypePtr *c%d", i);
|
||||
i++;
|
||||
}
|
||||
else if (!strcmp(comp->base_type(globals), "ConN")) {
|
||||
if (i > 0) fprintf(fp,", ");
|
||||
fprintf(fp,"const TypePtr *c%d", i);
|
||||
i++;
|
||||
}
|
||||
else if (!strcmp(comp->base_type(globals), "ConL")) {
|
||||
if (i > 0) fprintf(fp,", ");
|
||||
fprintf(fp,"jlong c%d", i);
|
||||
|
@ -360,6 +374,10 @@ static uint dump_spec_constant(FILE *fp, const char *ideal_type, uint i) {
|
|||
fprintf(fp," _c%d->dump_on(st);\n", i);
|
||||
++i;
|
||||
}
|
||||
else if (!strcmp(ideal_type, "ConN")) {
|
||||
fprintf(fp," _c%d->dump();\n", i);
|
||||
++i;
|
||||
}
|
||||
else if (!strcmp(ideal_type, "ConL")) {
|
||||
fprintf(fp," st->print(\"#\" INT64_FORMAT, _c%d);\n", i);
|
||||
++i;
|
||||
|
@ -417,8 +435,13 @@ void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_
|
|||
// Replacement variable
|
||||
const char *rep_var = oper._format->_rep_vars.iter();
|
||||
// Check that it is a local name, and an operand
|
||||
OperandForm *op = oper._localNames[rep_var]->is_operand();
|
||||
assert( op, "replacement variable was not found in local names");
|
||||
const Form* form = oper._localNames[rep_var];
|
||||
if (form == NULL) {
|
||||
globalAD->syntax_err(oper._linenum,
|
||||
"\'%s\' not found in format for %s\n", rep_var, oper._ident);
|
||||
assert(form, "replacement variable was not found in local names");
|
||||
}
|
||||
OperandForm *op = form->is_operand();
|
||||
// Get index if register or constant
|
||||
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
|
||||
idx = oper.register_position( globals, rep_var);
|
||||
|
@ -483,9 +506,14 @@ void gen_oper_format(FILE *fp, FormDict &globals, OperandForm &oper, bool for_c_
|
|||
} else {
|
||||
// Replacement variable
|
||||
const char *rep_var = oper._format->_rep_vars.iter();
|
||||
// Check that it is a local name, and an operand
|
||||
OperandForm *op = oper._localNames[rep_var]->is_operand();
|
||||
assert( op, "replacement variable was not found in local names");
|
||||
// Check that it is a local name, and an operand
|
||||
const Form* form = oper._localNames[rep_var];
|
||||
if (form == NULL) {
|
||||
globalAD->syntax_err(oper._linenum,
|
||||
"\'%s\' not found in format for %s\n", rep_var, oper._ident);
|
||||
assert(form, "replacement variable was not found in local names");
|
||||
}
|
||||
OperandForm *op = form->is_operand();
|
||||
// Get index if register or constant
|
||||
if ( op->_matrule && op->_matrule->is_base_register(globals) ) {
|
||||
idx = oper.register_position( globals, rep_var);
|
||||
|
@ -1163,7 +1191,7 @@ void ArchDesc::declareClasses(FILE *fp) {
|
|||
if( type != NULL ) {
|
||||
Form::DataType data_type = oper->is_base_constant(_globalNames);
|
||||
// Check if we are an ideal pointer type
|
||||
if( data_type == Form::idealP ) {
|
||||
if( data_type == Form::idealP || data_type == Form::idealN ) {
|
||||
// Return the ideal type we already have: <TypePtr *>
|
||||
fprintf(fp," return _c0;");
|
||||
} else {
|
||||
|
@ -1291,6 +1319,16 @@ void ArchDesc::declareClasses(FILE *fp) {
|
|||
fprintf(fp, " return _c0->isa_oop_ptr();");
|
||||
fprintf(fp, " }\n");
|
||||
}
|
||||
else if (!strcmp(oper->ideal_type(_globalNames), "ConN")) {
|
||||
// Access the locally stored constant
|
||||
fprintf(fp," virtual intptr_t constant() const {");
|
||||
fprintf(fp, " return _c0->make_oopptr()->get_con();");
|
||||
fprintf(fp, " }\n");
|
||||
// Generate query to determine if this pointer is an oop
|
||||
fprintf(fp," virtual bool constant_is_oop() const {");
|
||||
fprintf(fp, " return _c0->make_oopptr()->isa_oop_ptr();");
|
||||
fprintf(fp, " }\n");
|
||||
}
|
||||
else if (!strcmp(oper->ideal_type(_globalNames), "ConL")) {
|
||||
fprintf(fp," virtual intptr_t constant() const {");
|
||||
// We don't support addressing modes with > 4Gig offsets.
|
||||
|
@ -1748,6 +1786,7 @@ void ArchDesc::declareClasses(FILE *fp) {
|
|||
fprintf(fp," return TypeInt::make(opnd_array(1)->constant());\n");
|
||||
break;
|
||||
case Form::idealP:
|
||||
case Form::idealN:
|
||||
fprintf(fp," return opnd_array(1)->type();\n",result);
|
||||
break;
|
||||
case Form::idealD:
|
||||
|
|
|
@@ -281,8 +281,10 @@ address CodeSection::target(Label& L, address branch_pc) {

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // (Don't return NULL or badAddress, since branches shouldn't overflow.)
    return base;
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}
|
||||
|
||||
|
|
|
@ -1074,6 +1074,43 @@ JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
|
|||
JRT_END
|
||||
|
||||
|
||||
// Array copy return codes.
|
||||
enum {
|
||||
ac_failed = -1, // arraycopy failed
|
||||
ac_ok = 0 // arraycopy succeeded
|
||||
};
|
||||
|
||||
|
||||
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
|
||||
oopDesc* dst, T* dst_addr,
|
||||
int length) {
|
||||
|
||||
// For performance reasons, we assume we are using a card marking write
|
||||
// barrier. The assert will fail if this is not the case.
|
||||
// Note that we use the non-virtual inlineable variant of write_ref_array.
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(),
|
||||
"Barrier set must have ref array opt");
|
||||
if (src == dst) {
|
||||
// same object, no check
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
return ac_ok;
|
||||
} else {
|
||||
klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
|
||||
klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
|
||||
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
|
||||
// Elements are guaranteed to be subtypes, so no check necessary
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
return ac_ok;
|
||||
}
|
||||
}
|
||||
return ac_failed;
|
||||
}
|
||||
|
||||
// fast and direct copy of arrays; returning -1, means that an exception may be thrown
|
||||
// and we did not copy anything
|
||||
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
|
||||
|
@ -1081,11 +1118,6 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
|
|||
_generic_arraycopy_cnt++; // Slow-path oop array copy
|
||||
#endif
|
||||
|
||||
enum {
|
||||
ac_failed = -1, // arraycopy failed
|
||||
ac_ok = 0 // arraycopy succeeded
|
||||
};
|
||||
|
||||
if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
|
||||
if (!dst->is_array() || !src->is_array()) return ac_failed;
|
||||
if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
|
||||
|
@ -1105,30 +1137,14 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
|
|||
memmove(dst_addr, src_addr, length << l2es);
|
||||
return ac_ok;
|
||||
} else if (src->is_objArray() && dst->is_objArray()) {
|
||||
oop* src_addr = objArrayOop(src)->obj_at_addr(src_pos);
|
||||
oop* dst_addr = objArrayOop(dst)->obj_at_addr(dst_pos);
|
||||
// For performance reasons, we assume we are using a card marking write
|
||||
// barrier. The assert will fail if this is not the case.
|
||||
// Note that we use the non-virtual inlineable variant of write_ref_array.
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(),
|
||||
"Barrier set must have ref array opt");
|
||||
if (src == dst) {
|
||||
// same object, no check
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
return ac_ok;
|
||||
if (UseCompressedOops) { // will need for tiered
|
||||
narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
|
||||
narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
|
||||
return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
|
||||
} else {
|
||||
klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
|
||||
klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
|
||||
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
|
||||
// Elements are guaranteed to be subtypes, so no check necessary
|
||||
Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
|
||||
bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
|
||||
(HeapWord*)(dst_addr + length)));
|
||||
return ac_ok;
|
||||
}
|
||||
oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
|
||||
oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
|
||||
return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
|
||||
}
|
||||
}
|
||||
return ac_failed;
|
||||
|
|
|
@ -48,6 +48,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
|
|||
// Next line must follow and use the result of the previous line:
|
||||
_is_linked = _is_initialized || ik->is_linked();
|
||||
_nonstatic_field_size = ik->nonstatic_field_size();
|
||||
_has_nonstatic_fields = ik->has_nonstatic_fields();
|
||||
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
|
||||
|
||||
_nof_implementors = ik->nof_implementors();
|
||||
|
@ -93,6 +94,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
|
|||
_is_initialized = false;
|
||||
_is_linked = false;
|
||||
_nonstatic_field_size = -1;
|
||||
_has_nonstatic_fields = false;
|
||||
_nonstatic_fields = NULL;
|
||||
_nof_implementors = -1;
|
||||
_loader = loader;
|
||||
|
@ -201,7 +203,7 @@ ciInstanceKlass* ciInstanceKlass::get_canonical_holder(int offset) {
|
|||
assert(offset >= 0 && offset < layout_helper(), "offset must be tame");
|
||||
#endif
|
||||
|
||||
if (offset < (instanceOopDesc::header_size() * wordSize)) {
|
||||
if (offset < instanceOopDesc::base_offset_in_bytes()) {
|
||||
// All header offsets belong properly to java/lang/Object.
|
||||
return CURRENT_ENV->Object_klass();
|
||||
}
|
||||
|
@ -210,7 +212,8 @@ ciInstanceKlass* ciInstanceKlass::get_canonical_holder(int offset) {
|
|||
for (;;) {
|
||||
assert(self->is_loaded(), "must be loaded to have size");
|
||||
ciInstanceKlass* super = self->super();
|
||||
if (super == NULL || !super->contains_field_offset(offset)) {
|
||||
if (super == NULL || super->nof_nonstatic_fields() == 0 ||
|
||||
!super->contains_field_offset(offset)) {
|
||||
return self;
|
||||
} else {
|
||||
self = super; // return super->get_canonical_holder(offset)
|
||||
|
@ -381,31 +384,28 @@ int ciInstanceKlass::compute_nonstatic_fields() {
|
|||
if (_nonstatic_fields != NULL)
|
||||
return _nonstatic_fields->length();
|
||||
|
||||
// Size in bytes of my fields, including inherited fields.
|
||||
// About equal to size_helper() - sizeof(oopDesc).
|
||||
int fsize = nonstatic_field_size() * wordSize;
|
||||
if (fsize == 0) { // easy shortcut
|
||||
if (!has_nonstatic_fields()) {
|
||||
Arena* arena = CURRENT_ENV->arena();
|
||||
_nonstatic_fields = new (arena) GrowableArray<ciField*>(arena, 0, 0, NULL);
|
||||
return 0;
|
||||
}
|
||||
assert(!is_java_lang_Object(), "bootstrap OK");
|
||||
|
||||
// Size in bytes of my fields, including inherited fields.
|
||||
int fsize = nonstatic_field_size() * wordSize;
|
||||
|
||||
ciInstanceKlass* super = this->super();
|
||||
int super_fsize = 0;
|
||||
int super_flen = 0;
|
||||
GrowableArray<ciField*>* super_fields = NULL;
|
||||
if (super != NULL) {
|
||||
super_fsize = super->nonstatic_field_size() * wordSize;
|
||||
super_flen = super->nof_nonstatic_fields();
|
||||
if (super != NULL && super->has_nonstatic_fields()) {
|
||||
int super_fsize = super->nonstatic_field_size() * wordSize;
|
||||
int super_flen = super->nof_nonstatic_fields();
|
||||
super_fields = super->_nonstatic_fields;
|
||||
assert(super_flen == 0 || super_fields != NULL, "first get nof_fields");
|
||||
}
|
||||
|
||||
// See if I am no larger than my super; if so, I can use his fields.
|
||||
if (fsize == super_fsize) {
|
||||
_nonstatic_fields = super_fields;
|
||||
return super_fields->length();
|
||||
// See if I am no larger than my super; if so, I can use his fields.
|
||||
if (fsize == super_fsize) {
|
||||
_nonstatic_fields = super_fields;
|
||||
return super_fields->length();
|
||||
}
|
||||
}
|
||||
|
||||
GrowableArray<ciField*>* fields = NULL;
|
||||
|
@ -425,11 +425,11 @@ int ciInstanceKlass::compute_nonstatic_fields() {
|
|||
// (In principle, they could mix with superclass fields.)
|
||||
fields->sort(sort_field_by_offset);
|
||||
#ifdef ASSERT
|
||||
int last_offset = sizeof(oopDesc);
|
||||
int last_offset = instanceOopDesc::base_offset_in_bytes();
|
||||
for (int i = 0; i < fields->length(); i++) {
|
||||
ciField* field = fields->at(i);
|
||||
int offset = field->offset_in_bytes();
|
||||
int size = (field->_type == NULL) ? oopSize : field->size_in_bytes();
|
||||
int size = (field->_type == NULL) ? heapOopSize : field->size_in_bytes();
|
||||
assert(last_offset <= offset, "no field overlap");
|
||||
if (last_offset > (int)sizeof(oopDesc))
|
||||
assert((offset - last_offset) < BytesPerLong, "no big holes");
|
||||
|
|
|
@ -35,15 +35,16 @@ class ciInstanceKlass : public ciKlass {
|
|||
friend class ciBytecodeStream;
|
||||
|
||||
private:
|
||||
bool _is_shared;
|
||||
|
||||
jobject _loader;
|
||||
jobject _protection_domain;
|
||||
|
||||
bool _is_shared;
|
||||
bool _is_initialized;
|
||||
bool _is_linked;
|
||||
bool _has_finalizer;
|
||||
bool _has_subklass;
|
||||
bool _has_nonstatic_fields;
|
||||
|
||||
ciFlags _flags;
|
||||
jint _nonstatic_field_size;
|
||||
jint _nonstatic_oop_map_size;
|
||||
|
@ -132,6 +133,9 @@ public:
|
|||
jint nonstatic_field_size() {
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _nonstatic_field_size; }
|
||||
jint has_nonstatic_fields() {
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _has_nonstatic_fields; }
|
||||
jint nonstatic_oop_map_size() {
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _nonstatic_oop_map_size; }
|
||||
|
@ -164,8 +168,7 @@ public:
|
|||
bool has_finalizable_subclass();
|
||||
|
||||
bool contains_field_offset(int offset) {
|
||||
return (offset/wordSize) >= instanceOopDesc::header_size()
|
||||
&& (offset/wordSize)-instanceOopDesc::header_size() < nonstatic_field_size();
|
||||
return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
|
||||
}
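
The one-line replacement above delegates the bounds check to instanceOopDesc, which knows where instance fields actually begin now that the header may end with a 4-byte compressed klass. Presumably the helper has roughly the following shape; this is an assumption about it, not a quote of instanceOop.hpp.

    // Assumed shape of instanceOopDesc::contains_field_offset (illustrative):
    // base_offset_in_bytes() is the first byte past the object header and
    // nonstatic_field_size is counted in heap-oop-sized slots.
    static bool contains_field_offset_sketch(int offset, int nonstatic_field_size) {
      int base = instanceOopDesc::base_offset_in_bytes();
      return offset >= base &&
             (offset - base) < nonstatic_field_size * heapOopSize;
    }
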
|
||||
|
||||
// Get the instance of java.lang.Class corresponding to
|
||||
|
|
|
@ -121,7 +121,7 @@ void ciObjectFactory::init_shared_objects() {
|
|||
|
||||
for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) {
|
||||
BasicType t = (BasicType)i;
|
||||
if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY) {
|
||||
if (type2name(t) != NULL && t != T_OBJECT && t != T_ARRAY && t != T_NARROWOOP) {
|
||||
ciType::_basic_types[t] = new (_arena) ciType(t);
|
||||
init_ident_of(ciType::_basic_types[t]);
|
||||
}
|
||||
|
|
|
@ -2341,7 +2341,7 @@ void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_pt
|
|||
// Incrementing next_nonstatic_oop_offset here advances the
|
||||
// location where the real java fields are placed.
|
||||
const int extra = java_lang_Class::number_of_fake_oop_fields;
|
||||
(*next_nonstatic_oop_offset_ptr) += (extra * wordSize);
|
||||
(*next_nonstatic_oop_offset_ptr) += (extra * heapOopSize);
|
||||
}
|
||||
|
||||
|
||||
|
@ -2647,7 +2647,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
align_object_offset(vtable_size) +
|
||||
align_object_offset(itable_size)) * wordSize;
|
||||
next_static_double_offset = next_static_oop_offset +
|
||||
(fac.static_oop_count * oopSize);
|
||||
(fac.static_oop_count * heapOopSize);
|
||||
if ( fac.static_double_count &&
|
||||
(Universe::field_type_should_be_aligned(T_DOUBLE) ||
|
||||
Universe::field_type_should_be_aligned(T_LONG)) ) {
|
||||
|
@ -2687,6 +2687,14 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
int nonstatic_byte_count = fac.nonstatic_byte_count;
|
||||
int nonstatic_oop_count = fac.nonstatic_oop_count;
|
||||
|
||||
bool super_has_nonstatic_fields =
|
||||
(super_klass() != NULL && super_klass->has_nonstatic_fields());
|
||||
bool has_nonstatic_fields = super_has_nonstatic_fields ||
|
||||
((nonstatic_double_count + nonstatic_word_count +
|
||||
nonstatic_short_count + nonstatic_byte_count +
|
||||
nonstatic_oop_count) != 0);
|
||||
|
||||
|
||||
// Prepare list of oops for oop maps generation.
|
||||
u2* nonstatic_oop_offsets;
|
||||
u2* nonstatic_oop_length;
|
||||
|
@ -2703,7 +2711,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
java_lang_Class_fix_post(&next_nonstatic_field_offset);
|
||||
nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
|
||||
int fake_oop_count = (( next_nonstatic_field_offset -
|
||||
first_nonstatic_field_offset ) / oopSize);
|
||||
first_nonstatic_field_offset ) / heapOopSize);
|
||||
nonstatic_oop_length [0] = (u2)fake_oop_count;
|
||||
nonstatic_oop_map_count = 1;
|
||||
nonstatic_oop_count -= fake_oop_count;
|
||||
|
@ -2715,7 +2723,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
#ifndef PRODUCT
|
||||
if( PrintCompactFieldsSavings ) {
|
||||
next_nonstatic_double_offset = next_nonstatic_field_offset +
|
||||
(nonstatic_oop_count * oopSize);
|
||||
(nonstatic_oop_count * heapOopSize);
|
||||
if ( nonstatic_double_count > 0 ) {
|
||||
next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
|
||||
}
|
||||
|
@ -2749,7 +2757,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
class_name() == vmSymbols::java_lang_ref_SoftReference() ||
|
||||
class_name() == vmSymbols::java_lang_StackTraceElement() ||
|
||||
class_name() == vmSymbols::java_lang_String() ||
|
||||
class_name() == vmSymbols::java_lang_Throwable()) ) {
|
||||
class_name() == vmSymbols::java_lang_Throwable() ||
|
||||
class_name() == vmSymbols::java_lang_Boolean() ||
|
||||
class_name() == vmSymbols::java_lang_Character() ||
|
||||
class_name() == vmSymbols::java_lang_Float() ||
|
||||
class_name() == vmSymbols::java_lang_Double() ||
|
||||
class_name() == vmSymbols::java_lang_Byte() ||
|
||||
class_name() == vmSymbols::java_lang_Short() ||
|
||||
class_name() == vmSymbols::java_lang_Integer() ||
|
||||
class_name() == vmSymbols::java_lang_Long())) {
|
||||
allocation_style = 0; // Allocate oops first
|
||||
compact_fields = false; // Don't compact fields
|
||||
}
|
||||
|
@ -2758,7 +2774,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
// Fields order: oops, longs/doubles, ints, shorts/chars, bytes
|
||||
next_nonstatic_oop_offset = next_nonstatic_field_offset;
|
||||
next_nonstatic_double_offset = next_nonstatic_oop_offset +
|
||||
(nonstatic_oop_count * oopSize);
|
||||
(nonstatic_oop_count * heapOopSize);
|
||||
} else if( allocation_style == 1 ) {
|
||||
// Fields order: longs/doubles, ints, shorts/chars, bytes, oops
|
||||
next_nonstatic_double_offset = next_nonstatic_field_offset;
|
||||
|
@ -2775,8 +2791,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
int nonstatic_short_space_offset;
|
||||
int nonstatic_byte_space_offset;
|
||||
|
||||
if( nonstatic_double_count > 0 ) {
|
||||
int offset = next_nonstatic_double_offset;
|
||||
bool compact_into_header = (UseCompressedOops &&
|
||||
allocation_style == 1 && compact_fields &&
|
||||
!super_has_nonstatic_fields);
|
||||
|
||||
if( compact_into_header || nonstatic_double_count > 0 ) {
|
||||
int offset;
|
||||
// Pack something in with the header if no super klass has done so.
|
||||
if (compact_into_header) {
|
||||
offset = oopDesc::klass_gap_offset_in_bytes();
|
||||
} else {
|
||||
offset = next_nonstatic_double_offset;
|
||||
}
|
||||
next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
|
||||
if( compact_fields && offset != next_nonstatic_double_offset ) {
|
||||
// Allocate available fields into the gap before double field.
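
The compact_into_header case above exploits the 4-byte hole that a compressed klass leaves in the object header: when no superclass has laid down fields, packing starts at oopDesc::klass_gap_offset_in_bytes(), doubles are then aligned up to the next 8-byte boundary, and the gap itself is handed to a smaller field or a single oop by the gap-filling code. The layout this assumes, sketched from the usual 64-bit conventions rather than quoted from this diff:

    // Assumed 64-bit instanceOop layout with UseCompressedOops (illustrative):
    //   bytes  0..7    mark word
    //   bytes  8..11   compressed klass
    //   bytes 12..15   klass gap   <-- oopDesc::klass_gap_offset_in_bytes()
    //   bytes 16..     instance fields would otherwise start here
    // Filling the gap with an int/short/byte or one narrow oop can shrink a
    // small object by a whole 8-byte alignment unit.
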
|
||||
|
@ -2804,12 +2830,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
}
|
||||
// Allocate oop field in the gap if there are no other fields for that.
|
||||
nonstatic_oop_space_offset = offset;
|
||||
if( length >= oopSize && nonstatic_oop_count > 0 &&
|
||||
if(!compact_into_header && length >= heapOopSize &&
|
||||
nonstatic_oop_count > 0 &&
|
||||
allocation_style != 0 ) { // when oop fields not first
|
||||
nonstatic_oop_count -= 1;
|
||||
nonstatic_oop_space_count = 1; // Only one will fit
|
||||
length -= oopSize;
|
||||
offset += oopSize;
|
||||
length -= heapOopSize;
|
||||
offset += heapOopSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
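The two layout hunks above are the core of the instance-field change: with compressed oops the header is 12 bytes (8-byte mark word plus a 4-byte narrow klass pointer), and the 4-byte gap before the next 8-byte boundary can hold the first small field when allocation_style is 1, compact_fields is set, and no superclass has laid out nonstatic fields. A minimal standalone sketch of that offset arithmetic follows; the constants and names are illustrative assumptions, not the VM's real code.

#include <cstdio>

// Illustrative layout model only.
const int kMarkWordBytes    = 8;
const int kNarrowKlassBytes = 4;   // compressed klass pointer
const int kBytesPerLong     = 8;

int main() {
  bool use_compressed_oops  = true;
  bool super_has_fields     = false;
  bool allocation_style_gap = true;   // longs/doubles first, oops last

  // With compressed oops the header is 12 bytes, leaving a 4-byte gap
  // before the first 8-byte-aligned slot.
  int header_bytes = kMarkWordBytes + (use_compressed_oops ? kNarrowKlassBytes : 8);
  int klass_gap_offset = kMarkWordBytes + kNarrowKlassBytes;   // == 12

  bool compact_into_header =
      use_compressed_oops && allocation_style_gap && !super_has_fields;

  // First candidate offset: either the gap in the header, or the first
  // byte after the header.
  int offset = compact_into_header ? klass_gap_offset : header_bytes;

  // Longs/doubles must be 8-byte aligned; whatever fits before that
  // boundary (an int, or one narrow oop) can be packed at 'offset'.
  int aligned_double_offset = (offset + kBytesPerLong - 1) & ~(kBytesPerLong - 1);
  int gap_bytes = aligned_double_offset - offset;

  printf("header=%d gap_at=%d gap_bytes=%d first_double_at=%d\n",
         header_bytes, offset, gap_bytes, aligned_double_offset);
  return 0;
}
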
@ -2828,9 +2855,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
|
||||
if( nonstatic_oop_count > 0 ) {
|
||||
notaligned_offset = next_nonstatic_oop_offset;
|
||||
next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, oopSize);
|
||||
next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
|
||||
}
|
||||
notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * oopSize);
|
||||
notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
|
||||
}
|
||||
next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
|
||||
nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
|
||||
|
@ -2846,7 +2873,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
switch (atype) {
|
||||
case STATIC_OOP:
|
||||
real_offset = next_static_oop_offset;
|
||||
next_static_oop_offset += oopSize;
|
||||
next_static_oop_offset += heapOopSize;
|
||||
break;
|
||||
case STATIC_BYTE:
|
||||
real_offset = next_static_byte_offset;
|
||||
|
@ -2868,16 +2895,16 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
case NONSTATIC_OOP:
|
||||
if( nonstatic_oop_space_count > 0 ) {
|
||||
real_offset = nonstatic_oop_space_offset;
|
||||
nonstatic_oop_space_offset += oopSize;
|
||||
nonstatic_oop_space_offset += heapOopSize;
|
||||
nonstatic_oop_space_count -= 1;
|
||||
} else {
|
||||
real_offset = next_nonstatic_oop_offset;
|
||||
next_nonstatic_oop_offset += oopSize;
|
||||
next_nonstatic_oop_offset += heapOopSize;
|
||||
}
|
||||
// Update oop maps
|
||||
if( nonstatic_oop_map_count > 0 &&
|
||||
nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
|
||||
(u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * oopSize) ) {
|
||||
(u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
|
||||
// Extend current oop map
|
||||
nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
|
||||
} else {
|
||||
|
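The NONSTATIC_OOP case above keeps the oop maps dense by measuring adjacency in heapOopSize units: if the newly assigned offset lands exactly one heap oop past the end of the last map block, the block is extended instead of a new one being started. A toy sketch of that merge rule follows, with invented names and 4-byte (compressed) heap oops assumed.

#include <vector>
#include <cstdio>

// Toy oop-map bookkeeping: each block is (start offset, count of
// consecutive heap oops). Names are illustrative only.
struct OopBlock { int offset; int length; };

const int kHeapOopBytes = 4;   // assume compressed oops

void record_oop_field(std::vector<OopBlock>& maps, int real_offset) {
  if (!maps.empty() &&
      real_offset == maps.back().offset + maps.back().length * kHeapOopBytes) {
    // New oop field is adjacent to the previous run: extend the block.
    maps.back().length += 1;
  } else {
    // Gap (or first oop field): start a new block.
    maps.push_back(OopBlock{real_offset, 1});
  }
}

int main() {
  std::vector<OopBlock> maps;
  int offsets[] = {16, 20, 24, 40, 44};   // 24 is adjacent, 40 starts a gap
  for (int off : offsets) record_oop_field(maps, off);
  for (const OopBlock& b : maps)
    printf("block at %d, %d oops\n", b.offset, b.length);
  return 0;   // prints two blocks: (16,3) and (40,2)
}
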
@ -2970,6 +2997,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
|||
//this_klass->set_super(super_klass());
|
||||
this_klass->set_class_loader(class_loader());
|
||||
this_klass->set_nonstatic_field_size(nonstatic_field_size);
|
||||
this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
|
||||
this_klass->set_static_oop_field_size(fac.static_oop_count);
|
||||
cp->set_pool_holder(this_klass());
|
||||
this_klass->set_constants(cp());
|
||||
|
@ -3128,7 +3156,7 @@ int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstat
|
|||
OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
|
||||
OopMapBlock* last_map = first_map + map_size - 1;
|
||||
|
||||
int next_offset = last_map->offset() + (last_map->length() * oopSize);
|
||||
int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
|
||||
if (next_offset == first_nonstatic_oop_offset) {
|
||||
// There is no gap between superklass's last oop field and first
|
||||
// local oop field, merge maps.
|
||||
@ -520,16 +520,12 @@ void java_lang_Thread::compute_offsets() {
|
|||
|
||||
|
||||
JavaThread* java_lang_Thread::thread(oop java_thread) {
|
||||
return (JavaThread*) java_thread->obj_field(_eetop_offset);
|
||||
return (JavaThread*)java_thread->address_field(_eetop_offset);
|
||||
}
|
||||
|
||||
|
||||
void java_lang_Thread::set_thread(oop java_thread, JavaThread* thread) {
|
||||
// We are storing a JavaThread* (malloc'ed data) into a long field in the thread
|
||||
// object. The store has to be 64-bit wide so we use a pointer store, but we
|
||||
// cannot call oopDesc::obj_field_put since it includes a write barrier!
|
||||
oop* addr = java_thread->obj_field_addr(_eetop_offset);
|
||||
*addr = (oop) thread;
|
||||
java_thread->address_field_put(_eetop_offset, (address)thread);
|
||||
}
|
||||
|
||||
|
||||
|
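The Thread.eetop change above stores a raw JavaThread* with address_field_put instead of going through the oop path: the value is malloc'ed C++ data, so the store must be full-width, must skip the GC write barrier, and must not be compressed-oop encoded. The sketch below models only that distinction; FakeThreadObj and the two accessors are stand-ins, not the actual oopDesc API.

#include <cstdint>
#include <cstdio>

// Toy model of the eetop slot: a Java long field that actually carries a
// native pointer, stored and read back full-width.
struct FakeThreadObj {
  uint64_t eetop;   // backed by a Java long field
};

// Stand-in for address_field_put: plain 64-bit store of a raw pointer.
void address_field_put(FakeThreadObj* obj, const void* value) {
  obj->eetop = reinterpret_cast<uint64_t>(value);
}

// Stand-in for address_field: read the raw pointer back.
void* address_field(const FakeThreadObj* obj) {
  return reinterpret_cast<void*>(obj->eetop);
}

int main() {
  FakeThreadObj t = {0};
  int native_thread_stub = 0;                 // pretend this is the JavaThread
  address_field_put(&t, &native_thread_stub);
  printf("stored %p, read back %p\n",
         (void*)&native_thread_stub, address_field(&t));
  return 0;
}
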
@ -1038,8 +1034,8 @@ class BacktraceBuilder: public StackObj {
|
|||
if (_dirty && _methods != NULL) {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
|
||||
bs->write_ref_array(MemRegion((HeapWord*)_methods->obj_at_addr(0),
|
||||
_methods->length() * HeapWordsPerOop));
|
||||
bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
|
||||
_methods->array_size()));
|
||||
_dirty = false;
|
||||
}
|
||||
}
|
||||
|
@ -1083,8 +1079,9 @@ class BacktraceBuilder: public StackObj {
|
|||
method = mhandle();
|
||||
}
|
||||
|
||||
// _methods->obj_at_put(_index, method);
|
||||
*_methods->obj_at_addr(_index) = method;
|
||||
_methods->obj_at_put(_index, method);
|
||||
// bad for UseCompressedOops
|
||||
// *_methods->obj_at_addr(_index) = method;
|
||||
_bcis->ushort_at_put(_index, bci);
|
||||
_index++;
|
||||
_dirty = true;
|
||||
|
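The BacktraceBuilder fix above replaces the raw *obj_at_addr(i) = value store with obj_at_put: with compressed oops, object-array elements are 4-byte narrowOops, so writing an 8-byte oop through a raw pointer stores an unencoded value and clobbers the neighbouring element, besides skipping the barrier. A standalone sketch of the width problem, with encoding reduced to a base-and-shift model and no real barrier:

#include <cstdint>
#include <cstdio>

// Toy compressed-oop array: elements are 32-bit encoded references.
typedef uint32_t narrowOop;
const int kShift = 3;   // assume 8-byte object alignment

narrowOop encode(uintptr_t p, uintptr_t base) { return (narrowOop)((p - base) >> kShift); }
uintptr_t decode(narrowOop v, uintptr_t base) { return base + ((uintptr_t)v << kShift); }

int main() {
  uintptr_t heap_base = 0x100000000ULL;   // illustrative
  narrowOop elements[2] = {0, 0};         // a 2-element "objArray"

  uintptr_t obj = heap_base + 0x40;

  // Correct path (what obj_at_put models): encode, store 4 bytes.
  elements[0] = encode(obj, heap_base);

  // Broken path: a raw 8-byte store at element 0 would also overwrite
  // element 1 and store an unencoded value.
  // *(uintptr_t*)&elements[0] = obj;   // do NOT do this

  printf("element 0 decodes to %#lx, element 1 still %u\n",
         (unsigned long)decode(elements[0], heap_base), (unsigned)elements[1]);
  return 0;
}
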
@ -1973,39 +1970,30 @@ BasicType java_lang_boxing_object::set_value(oop box, jvalue* value) {
|
|||
|
||||
|
||||
// Support for java_lang_ref_Reference
|
||||
|
||||
void java_lang_ref_Reference::set_referent(oop ref, oop value) {
|
||||
ref->obj_field_put(referent_offset, value);
|
||||
}
|
||||
|
||||
oop* java_lang_ref_Reference::referent_addr(oop ref) {
|
||||
return ref->obj_field_addr(referent_offset);
|
||||
}
|
||||
|
||||
void java_lang_ref_Reference::set_next(oop ref, oop value) {
|
||||
ref->obj_field_put(next_offset, value);
|
||||
}
|
||||
|
||||
oop* java_lang_ref_Reference::next_addr(oop ref) {
|
||||
return ref->obj_field_addr(next_offset);
|
||||
}
|
||||
|
||||
void java_lang_ref_Reference::set_discovered(oop ref, oop value) {
|
||||
ref->obj_field_put(discovered_offset, value);
|
||||
}
|
||||
|
||||
oop* java_lang_ref_Reference::discovered_addr(oop ref) {
|
||||
return ref->obj_field_addr(discovered_offset);
|
||||
}
|
||||
|
||||
oop* java_lang_ref_Reference::pending_list_lock_addr() {
|
||||
oop java_lang_ref_Reference::pending_list_lock() {
|
||||
instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
|
||||
return (oop*)(((char *)ik->start_of_static_fields()) + static_lock_offset);
|
||||
char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
|
||||
if (UseCompressedOops) {
|
||||
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
|
||||
} else {
|
||||
return oopDesc::load_decode_heap_oop((oop*)addr);
|
||||
}
|
||||
}
|
||||
|
||||
oop* java_lang_ref_Reference::pending_list_addr() {
|
||||
HeapWord *java_lang_ref_Reference::pending_list_addr() {
|
||||
instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
|
||||
return (oop *)(((char *)ik->start_of_static_fields()) + static_pending_offset);
|
||||
char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
|
||||
// XXX This might not be HeapWord aligned; arguably the return type should be char*.
|
||||
return (HeapWord*)addr;
|
||||
}
|
||||
|
||||
oop java_lang_ref_Reference::pending_list() {
|
||||
char *addr = (char *)pending_list_addr();
|
||||
if (UseCompressedOops) {
|
||||
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
|
||||
} else {
|
||||
return oopDesc::load_decode_heap_oop((oop*)addr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
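pending_list_lock() and pending_list() above read static reference fields through a raw address, so the width of the slot depends on whether oops are compressed; the accessors dispatch on UseCompressedOops and decode accordingly. A sketch of that load pattern follows, with compression modeled as a base plus shift; load_decode here is a stand-in, not the real oopDesc helper.

#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOop;
typedef uint64_t oop_t;                    // full-width reference stand-in
const oop_t kHeapBase = 0x200000000ULL;    // illustrative narrow-oop base
const int   kShift    = 3;

// Stand-ins for oopDesc::load_decode_heap_oop on each slot width.
static oop_t load_decode(const narrowOop* p) { return kHeapBase + ((oop_t)*p << kShift); }
static oop_t load_decode(const oop_t* p)     { return *p; }

// Dispatch on the runtime flag, as the accessors above do.
static oop_t load_reference_slot(const void* addr, bool use_compressed_oops) {
  if (use_compressed_oops) {
    return load_decode(static_cast<const narrowOop*>(addr));
  }
  return load_decode(static_cast<const oop_t*>(addr));
}

int main() {
  narrowOop narrow_slot = (narrowOop)(0x540 >> kShift);   // encodes base + 0x540
  oop_t     wide_slot   = kHeapBase + 0x540;

  printf("compressed:   %#llx\n",
         (unsigned long long)load_reference_slot(&narrow_slot, true));
  printf("uncompressed: %#llx\n",
         (unsigned long long)load_reference_slot(&wide_slot, false));
  return 0;
}
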
@ -2291,8 +2279,11 @@ oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(
|
|||
// Invoked before SystemDictionary::initialize, so pre-loaded classes
|
||||
// are not available to determine the offset_of_static_fields.
|
||||
void JavaClasses::compute_hard_coded_offsets() {
|
||||
const int x = wordSize;
|
||||
const int header = instanceOopDesc::header_size_in_bytes();
|
||||
const int x = heapOopSize;
|
||||
// Objects don't get allocated in the gap in the header with compressed oops
|
||||
// for these special classes, because hard-coded offsets can't be conditional,
|
||||
// so base_offset_in_bytes() is wrong here; allocate after the header.
|
||||
const int header = sizeof(instanceOopDesc);
|
||||
|
||||
// Do the String Class
|
||||
java_lang_String::value_offset = java_lang_String::hc_value_offset * x + header;
|
||||
|
|
|
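compute_hard_coded_offsets above scales the hand-written field indices by heapOopSize and starts them after sizeof(instanceOopDesc): because the hard-coded offsets cannot depend on the flag, these pre-loaded classes never pack a field into the klass gap, and their first field always sits right after the full header. A tiny arithmetic sketch, with the header size and field indices as stated assumptions:

#include <cstdio>

int main() {
  // Illustrative indices for a pre-loaded class such as String.
  const int hc_value_index = 0;   // first hard-coded oop-sized slot
  const int hc_count_index = 2;   // a later slot, index chosen for illustration

  for (int compressed = 0; compressed <= 1; ++compressed) {
    int heap_oop_size = compressed ? 4 : 8;
    // Assume a 16-byte instance header (mark word + klass word, including
    // the klass gap when the klass pointer is compressed).
    int header = 16;
    int value_offset = hc_value_index * heap_oop_size + header;
    int count_offset = hc_count_index * heap_oop_size + header;
    printf("compressed=%d value at %d, count at %d\n",
           compressed, value_offset, count_offset);
  }
  return 0;
}
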
@ -691,24 +691,47 @@ class java_lang_ref_Reference: AllStatic {
|
|||
static int number_of_fake_oop_fields;
|
||||
|
||||
// Accessors
|
||||
static oop referent(oop ref) { return *referent_addr(ref); }
|
||||
static void set_referent(oop ref, oop value);
|
||||
static oop* referent_addr(oop ref);
|
||||
|
||||
static oop next(oop ref) { return *next_addr(ref); }
|
||||
static void set_next(oop ref, oop value);
|
||||
static oop* next_addr(oop ref);
|
||||
|
||||
static oop discovered(oop ref) { return *discovered_addr(ref); }
|
||||
static void set_discovered(oop ref, oop value);
|
||||
static oop* discovered_addr(oop ref);
|
||||
|
||||
static oop referent(oop ref) {
|
||||
return ref->obj_field(referent_offset);
|
||||
}
|
||||
static void set_referent(oop ref, oop value) {
|
||||
ref->obj_field_put(referent_offset, value);
|
||||
}
|
||||
static void set_referent_raw(oop ref, oop value) {
|
||||
ref->obj_field_raw_put(referent_offset, value);
|
||||
}
|
||||
static HeapWord* referent_addr(oop ref) {
|
||||
return ref->obj_field_addr<HeapWord>(referent_offset);
|
||||
}
|
||||
static oop next(oop ref) {
|
||||
return ref->obj_field(next_offset);
|
||||
}
|
||||
static void set_next(oop ref, oop value) {
|
||||
ref->obj_field_put(next_offset, value);
|
||||
}
|
||||
static void set_next_raw(oop ref, oop value) {
|
||||
ref->obj_field_raw_put(next_offset, value);
|
||||
}
|
||||
static HeapWord* next_addr(oop ref) {
|
||||
return ref->obj_field_addr<HeapWord>(next_offset);
|
||||
}
|
||||
static oop discovered(oop ref) {
|
||||
return ref->obj_field(discovered_offset);
|
||||
}
|
||||
static void set_discovered(oop ref, oop value) {
|
||||
ref->obj_field_put(discovered_offset, value);
|
||||
}
|
||||
static void set_discovered_raw(oop ref, oop value) {
|
||||
ref->obj_field_raw_put(discovered_offset, value);
|
||||
}
|
||||
static HeapWord* discovered_addr(oop ref) {
|
||||
return ref->obj_field_addr<HeapWord>(discovered_offset);
|
||||
}
|
||||
// Accessors for statics
|
||||
static oop pending_list_lock() { return *pending_list_lock_addr(); }
|
||||
static oop pending_list() { return *pending_list_addr(); }
|
||||
static oop pending_list_lock();
|
||||
static oop pending_list();
|
||||
|
||||
static oop* pending_list_lock_addr();
|
||||
static oop* pending_list_addr();
|
||||
static HeapWord* pending_list_addr();
|
||||
};
|
||||
|
||||
|
||||
|
|
|
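The java_lang_ref_Reference accessors above now hand back HeapWord* for referent/next/discovered because the slot may hold either an oop or a narrowOop; callers reinterpret the address based on UseCompressedOops. The sketch below models only the obj_field_addr<T>-style typed-address computation over a plain byte buffer; the offset and names are assumptions for illustration.

#include <cstdint>
#include <cstdio>

typedef uint32_t narrowOop;
typedef uint64_t oop_t;   // full-width reference stand-in

// Model of obj_field_addr<T>(offset): a typed pointer into an object at a
// byte offset. Here the "object" is just an aligned byte buffer.
template <class T>
T* field_addr(void* obj, int byte_offset) {
  return reinterpret_cast<T*>(static_cast<char*>(obj) + byte_offset);
}

int main() {
  alignas(8) unsigned char fake_reference_object[64] = {0};
  const int referent_offset = 16;          // assumed field offset
  const bool use_compressed_oops = true;   // flip to view the slot full-width

  if (use_compressed_oops) {
    // The same byte offset is viewed as a 4-byte slot...
    *field_addr<narrowOop>(fake_reference_object, referent_offset) = 0xA8;
    printf("narrow referent slot: 0x%x\n",
           (unsigned)*field_addr<narrowOop>(fake_reference_object, referent_offset));
  } else {
    // ...or as an 8-byte slot, depending on the runtime flag.
    *field_addr<oop_t>(fake_reference_object, referent_offset) = 0x200000540ULL;
    printf("wide referent slot: 0x%llx\n",
           (unsigned long long)*field_addr<oop_t>(fake_reference_object, referent_offset));
  }
  return 0;
}
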
@ -169,11 +169,8 @@ void OopMap::set_value(VMReg reg) {
|
|||
}
|
||||
|
||||
|
||||
void OopMap::set_dead(VMReg reg) {
|
||||
// At this time, we only need dead entries in our OopMap when ZapDeadCompiledLocals is active.
|
||||
if (ZapDeadCompiledLocals) {
|
||||
set_xxx(reg, OopMapValue::dead_value, VMRegImpl::Bad());
|
||||
}
|
||||
void OopMap::set_narrowoop(VMReg reg) {
|
||||
set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
|
||||
}
|
||||
|
||||
|
||||
|
@ -305,7 +302,9 @@ OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
|
|||
}
|
||||
|
||||
class DoNothingClosure: public OopClosure {
|
||||
public: void do_oop(oop* p) {}
|
||||
public:
|
||||
void do_oop(oop* p) {}
|
||||
void do_oop(narrowOop* p) {}
|
||||
};
|
||||
static DoNothingClosure do_nothing;
|
||||
|
||||
|
@ -349,23 +348,21 @@ static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
|
|||
|
||||
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
|
||||
// add derived oops to a table
|
||||
all_do(fr, reg_map, f, add_derived_oop, &do_nothing, &do_nothing);
|
||||
all_do(fr, reg_map, f, add_derived_oop, &do_nothing);
|
||||
}
|
||||
|
||||
|
||||
void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
||||
OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
|
||||
OopClosure* value_fn, OopClosure* dead_fn) {
|
||||
OopClosure* value_fn) {
|
||||
CodeBlob* cb = fr->cb();
|
||||
{
|
||||
assert(cb != NULL, "no codeblob");
|
||||
}
|
||||
assert(cb != NULL, "no codeblob");
|
||||
|
||||
NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
|
||||
|
||||
OopMapSet* maps = cb->oop_maps();
|
||||
OopMap* map = cb->oop_map_for_return_address(fr->pc());
|
||||
assert(map != NULL, " no ptr map found");
|
||||
OopMap* map = cb->oop_map_for_return_address(fr->pc());
|
||||
assert(map != NULL, "no ptr map found");
|
||||
|
||||
// handle derived pointers first (otherwise base pointer may be
|
||||
// changed before derived pointer offset has been collected)
|
||||
|
@ -393,8 +390,8 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
|||
}
|
||||
}
|
||||
|
||||
// We want dead, value and oop oop_types
|
||||
int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::dead_value;
|
||||
// We want coop, value and oop oop_types
|
||||
int mask = OopMapValue::oop_value | OopMapValue::value_value | OopMapValue::narrowoop_value;
|
||||
{
|
||||
for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
|
||||
omv = oms.current();
|
||||
|
@ -402,11 +399,15 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
|||
if ( loc != NULL ) {
|
||||
if ( omv.type() == OopMapValue::oop_value ) {
|
||||
#ifdef ASSERT
|
||||
if (COMPILER2_PRESENT(!DoEscapeAnalysis &&) !Universe::heap()->is_in_or_null(*loc)) {
|
||||
if (COMPILER2_PRESENT(!DoEscapeAnalysis &&)
|
||||
(((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
|
||||
!Universe::heap()->is_in_or_null(*loc)) {
|
||||
tty->print_cr("# Found non oop pointer. Dumping state at failure");
|
||||
// try to dump out some helpful debugging information
|
||||
trace_codeblob_maps(fr, reg_map);
|
||||
omv.print();
|
||||
tty->print_cr("register r");
|
||||
omv.reg()->print();
|
||||
tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
|
||||
// do the real assert.
|
||||
assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
|
||||
|
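The strengthened debug check above also catches a misaligned slot address before dereferencing it, since reading *loc through an unaligned oop* is itself unsafe on some targets; the test is the usual "low bits of the address against the slot size" mask. A one-function sketch of that alignment test:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// True if 'p' is suitably aligned to hold a value of 'slot_bytes' bytes,
// mirroring ((uintptr_t)loc & (sizeof(*loc) - 1)) == 0 above.
static bool is_aligned(const void* p, size_t slot_bytes) {
  return (reinterpret_cast<uintptr_t>(p) & (slot_bytes - 1)) == 0;
}

int main() {
  alignas(8) unsigned char buf[16] = {0};
  printf("buf+0 aligned for 8-byte oop:        %d\n", is_aligned(buf + 0, 8));  // 1
  printf("buf+4 aligned for 8-byte oop:        %d\n", is_aligned(buf + 4, 8));  // 0
  printf("buf+4 aligned for 4-byte narrow oop: %d\n", is_aligned(buf + 4, 4));  // 1
  return 0;
}
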
@ -415,8 +416,17 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
|||
oop_fn->do_oop(loc);
|
||||
} else if ( omv.type() == OopMapValue::value_value ) {
|
||||
value_fn->do_oop(loc);
|
||||
} else if ( omv.type() == OopMapValue::dead_value ) {
|
||||
dead_fn->do_oop(loc);
|
||||
} else if ( omv.type() == OopMapValue::narrowoop_value ) {
|
||||
narrowOop *nl = (narrowOop*)loc;
|
||||
#ifndef VM_LITTLE_ENDIAN
|
||||
if (!omv.reg()->is_stack()) {
|
||||
// compressed oops in registers only take up 4 bytes of an
|
||||
// 8 byte register but they are in the wrong part of the
|
||||
// word so adjust loc to point at the right place.
|
||||
nl = (narrowOop*)((address)nl + 4);
|
||||
}
|
||||
#endif
|
||||
oop_fn->do_oop(nl);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
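The oop-map walk above bumps the slot address by 4 bytes for narrow oops held in registers on big-endian machines: a 32-bit compressed oop fills only half of the spilled 64-bit register slot, and on big-endian that half sits at the higher address. A standalone sketch of the adjustment, with the byte order probed at run time instead of via VM_LITTLE_ENDIAN:

#include <cstdint>
#include <cstring>
#include <cstdio>

typedef uint32_t narrowOop;

// Is this host little-endian?
static bool host_is_little_endian() {
  uint16_t probe = 1;
  unsigned char b;
  std::memcpy(&b, &probe, 1);
  return b == 1;
}

int main() {
  // A spilled 64-bit register slot whose low 32 bits hold a narrow oop.
  uint64_t register_slot = 0x00000000deadbeefULL;

  unsigned char* loc = reinterpret_cast<unsigned char*>(&register_slot);
  // On big-endian the meaningful 4 bytes sit at the higher address,
  // so the narrowOop pointer must be bumped by 4.
  if (!host_is_little_endian()) {
    loc += 4;
  }
  narrowOop value;
  std::memcpy(&value, loc, sizeof(value));
  printf("narrow oop bits: 0x%08x\n", value);   // 0xdeadbeef either way
  return 0;
}
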
@ -519,8 +529,8 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional,
|
|||
case OopMapValue::value_value:
|
||||
st->print("Value" );
|
||||
break;
|
||||
case OopMapValue::dead_value:
|
||||
st->print("Dead" );
|
||||
case OopMapValue::narrowoop_value:
|
||||
st->print("NarrowOop" );
|
||||
break;
|
||||
case OopMapValue::callee_saved_value:
|
||||
st->print("Callers_" );
|
||||
@ -61,7 +61,7 @@ public:
|
|||
unused_value =0, // powers of 2, for masking OopMapStream
|
||||
oop_value = 1,
|
||||
value_value = 2,
|
||||
dead_value = 4,
|
||||
narrowoop_value = 4,
|
||||
callee_saved_value = 8,
|
||||
derived_oop_value= 16,
|
||||
stack_obj = 32 };
|
||||
|
@ -90,14 +90,14 @@ public:
|
|||
// Querying
|
||||
bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; }
|
||||
bool is_value() { return mask_bits(value(), type_mask_in_place) == value_value; }
|
||||
bool is_dead() { return mask_bits(value(), type_mask_in_place) == dead_value; }
|
||||
bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; }
|
||||
bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; }
|
||||
bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; }
|
||||
bool is_stack_obj() { return mask_bits(value(), type_mask_in_place) == stack_obj; }
|
||||
|
||||
void set_oop() { set_value((value() & register_mask_in_place) | oop_value); }
|
||||
void set_value() { set_value((value() & register_mask_in_place) | value_value); }
|
||||
void set_dead() { set_value((value() & register_mask_in_place) | dead_value); }
|
||||
void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); }
|
||||
void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); }
|
||||
void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); }
|
||||
void set_stack_obj() { set_value((value() & register_mask_in_place) | stack_obj); }
|
||||
|
@ -176,6 +176,7 @@ class OopMap: public ResourceObj {
|
|||
// slots to hold 4-byte values like ints and floats in the LP64 build.
|
||||
void set_oop ( VMReg local);
|
||||
void set_value( VMReg local);
|
||||
void set_narrowoop(VMReg local);
|
||||
void set_dead ( VMReg local);
|
||||
void set_callee_saved( VMReg local, VMReg caller_machine_register );
|
||||
void set_derived_oop ( VMReg local, VMReg derived_from_local_register );
|
||||
|
@ -245,7 +246,7 @@ class OopMapSet : public ResourceObj {
|
|||
static void all_do(const frame* fr, const RegisterMap* reg_map,
|
||||
OopClosure* oop_fn,
|
||||
void derived_oop_fn(oop* base, oop* derived),
|
||||
OopClosure* value_fn, OopClosure* dead_fn);
|
||||
OopClosure* value_fn);
|
||||
|
||||
// Printing
|
||||
void print_on(outputStream* st) const;
|
||||
@ -29,22 +29,34 @@ class ConcurrentMarkSweepGeneration;
|
|||
class CMSBitMap;
|
||||
class CMSMarkStack;
|
||||
class CMSCollector;
|
||||
template<class E> class GenericTaskQueue;
|
||||
typedef GenericTaskQueue<oop> OopTaskQueue;
|
||||
template<class E> class GenericTaskQueueSet;
|
||||
typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
|
||||
class MarkFromRootsClosure;
|
||||
class Par_MarkFromRootsClosure;
|
||||
|
||||
// Decode the oop and call do_oop on it.
|
||||
#define DO_OOP_WORK_DEFN \
|
||||
void do_oop(oop obj); \
|
||||
template <class T> inline void do_oop_work(T* p) { \
|
||||
T heap_oop = oopDesc::load_heap_oop(p); \
|
||||
if (!oopDesc::is_null(heap_oop)) { \
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
|
||||
do_oop(obj); \
|
||||
} \
|
||||
}
|
||||
|
||||
class MarkRefsIntoClosure: public OopsInGenClosure {
|
||||
const MemRegion _span;
|
||||
CMSBitMap* _bitMap;
|
||||
const bool _should_do_nmethods;
|
||||
private:
|
||||
const MemRegion _span;
|
||||
CMSBitMap* _bitMap;
|
||||
const bool _should_do_nmethods;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
|
||||
bool should_do_nmethods);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
virtual const bool do_nmethods() const {
|
||||
return _should_do_nmethods;
|
||||
|
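The DO_OOP_WORK_DEFN macro above gives each CMS closure one template body that works for both slot widths: load the slot as T (oop or narrowOop), skip nulls, decode to a full oop, then call the closure's do_oop(oop). A standalone sketch of that load/decode/dispatch shape, with compression modeled as a base plus shift and the closure reduced to a counter; all names here are illustrative.

#include <cstdint>
#include <cstdio>

typedef uint64_t oop_t;        // full-width reference stand-in
typedef uint32_t narrowOop;    // compressed reference

const oop_t kHeapBase = 0x100000000ULL;
const int   kShift    = 3;

// Decode helpers: identity for wide slots, base+shift for narrow ones.
static oop_t decode(oop_t v)     { return v; }
static oop_t decode(narrowOop v) { return kHeapBase + ((oop_t)v << kShift); }

struct CountingClosure {
  int visited = 0;
  void do_oop(oop_t obj) { ++visited; (void)obj; }

  // The DO_OOP_WORK_DEFN shape: one template for both slot types.
  template <class T>
  void do_oop_work(T* p) {
    T heap_oop = *p;                 // load_heap_oop
    if (heap_oop != 0) {             // is_null check
      do_oop(decode(heap_oop));      // decode_heap_oop_not_null + dispatch
    }
  }
};

int main() {
  CountingClosure cl;
  oop_t     wide_slot   = kHeapBase + 0x80;
  narrowOop narrow_slot = (narrowOop)(0x80 >> kShift);
  narrowOop null_slot   = 0;

  cl.do_oop_work(&wide_slot);
  cl.do_oop_work(&narrow_slot);
  cl.do_oop_work(&null_slot);     // skipped: null
  printf("visited %d oops\n", cl.visited);   // 2
  return 0;
}
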
@ -57,15 +69,20 @@ class MarkRefsIntoClosure: public OopsInGenClosure {
|
|||
// A variant of the above used in certain kinds of CMS
|
||||
// marking verification.
|
||||
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
|
||||
const MemRegion _span;
|
||||
CMSBitMap* _verification_bm;
|
||||
CMSBitMap* _cms_bm;
|
||||
const bool _should_do_nmethods;
|
||||
private:
|
||||
const MemRegion _span;
|
||||
CMSBitMap* _verification_bm;
|
||||
CMSBitMap* _cms_bm;
|
||||
const bool _should_do_nmethods;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
|
||||
CMSBitMap* cms_bm, bool should_do_nmethods);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
virtual const bool do_nmethods() const {
|
||||
return _should_do_nmethods;
|
||||
|
@ -75,37 +92,40 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
|
|||
}
|
||||
};
|
||||
|
||||
|
||||
// The non-parallel version (the parallel version appears further below).
|
||||
class PushAndMarkClosure: public OopClosure {
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSBitMap* _mod_union_table;
|
||||
CMSMarkStack* _mark_stack;
|
||||
CMSMarkStack* _revisit_stack;
|
||||
bool _concurrent_precleaning;
|
||||
bool const _should_remember_klasses;
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSBitMap* _mod_union_table;
|
||||
CMSMarkStack* _mark_stack;
|
||||
CMSMarkStack* _revisit_stack;
|
||||
bool _concurrent_precleaning;
|
||||
bool const _should_remember_klasses;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
PushAndMarkClosure(CMSCollector* collector,
|
||||
MemRegion span,
|
||||
ReferenceProcessor* rp,
|
||||
CMSBitMap* bit_map,
|
||||
CMSBitMap* mod_union_table,
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
bool concurrent_precleaning);
|
||||
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); }
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
bool concurrent_precleaning);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
Prefetch::style prefetch_style() {
|
||||
return Prefetch::do_read;
|
||||
}
|
||||
const bool should_remember_klasses() const {
|
||||
virtual const bool should_remember_klasses() const {
|
||||
return _should_remember_klasses;
|
||||
}
|
||||
void remember_klass(Klass* k);
|
||||
virtual void remember_klass(Klass* k);
|
||||
};
|
||||
|
||||
// In the parallel case, the revisit stack, the bit map and the
|
||||
|
@ -115,12 +135,15 @@ class PushAndMarkClosure: public OopClosure {
|
|||
// used in the non-parallel case above is here replaced with
|
||||
// an OopTaskQueue structure to allow efficient work stealing.
|
||||
class Par_PushAndMarkClosure: public OopClosure {
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
OopTaskQueue* _work_queue;
|
||||
CMSMarkStack* _revisit_stack;
|
||||
bool const _should_remember_klasses;
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
OopTaskQueue* _work_queue;
|
||||
CMSMarkStack* _revisit_stack;
|
||||
bool const _should_remember_klasses;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
Par_PushAndMarkClosure(CMSCollector* collector,
|
||||
MemRegion span,
|
||||
|
@ -128,43 +151,48 @@ class Par_PushAndMarkClosure: public OopClosure {
|
|||
CMSBitMap* bit_map,
|
||||
OopTaskQueue* work_queue,
|
||||
CMSMarkStack* revisit_stack);
|
||||
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
Prefetch::style prefetch_style() {
|
||||
return Prefetch::do_read;
|
||||
}
|
||||
const bool should_remember_klasses() const {
|
||||
virtual const bool should_remember_klasses() const {
|
||||
return _should_remember_klasses;
|
||||
}
|
||||
void remember_klass(Klass* k);
|
||||
virtual void remember_klass(Klass* k);
|
||||
};
|
||||
|
||||
|
||||
// The non-parallel version (the parallel version appears further below).
|
||||
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSMarkStack* _mark_stack;
|
||||
PushAndMarkClosure _pushAndMarkClosure;
|
||||
CMSCollector* _collector;
|
||||
bool _yield;
|
||||
private:
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSMarkStack* _mark_stack;
|
||||
PushAndMarkClosure _pushAndMarkClosure;
|
||||
CMSCollector* _collector;
|
||||
Mutex* _freelistLock;
|
||||
bool _yield;
|
||||
// Whether closure is being used for concurrent precleaning
|
||||
bool _concurrent_precleaning;
|
||||
Mutex* _freelistLock;
|
||||
bool _concurrent_precleaning;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
MarkRefsIntoAndScanClosure(MemRegion span,
|
||||
ReferenceProcessor* rp,
|
||||
CMSBitMap* bit_map,
|
||||
CMSBitMap* mod_union_table,
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
CMSCollector* collector,
|
||||
bool should_yield,
|
||||
bool concurrent_precleaning);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
Prefetch::style prefetch_style() {
|
||||
|
@ -185,11 +213,14 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
|
|||
// synchronized. An OopTaskQueue structure, supporting efficient
|
||||
// workstealing, replaces a CMSMarkStack for storing grey objects.
|
||||
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
OopTaskQueue* _work_queue;
|
||||
const uint _low_water_mark;
|
||||
Par_PushAndMarkClosure _par_pushAndMarkClosure;
|
||||
private:
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
OopTaskQueue* _work_queue;
|
||||
const uint _low_water_mark;
|
||||
Par_PushAndMarkClosure _par_pushAndMarkClosure;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
|
||||
MemRegion span,
|
||||
|
@ -197,8 +228,10 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
|
|||
CMSBitMap* bit_map,
|
||||
OopTaskQueue* work_queue,
|
||||
CMSMarkStack* revisit_stack);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
bool do_header() { return true; }
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
Prefetch::style prefetch_style() {
|
||||
|
@ -211,28 +244,34 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
|
|||
// following the first checkpoint. Its use is buried in
|
||||
// the closure MarkFromRootsClosure.
|
||||
class PushOrMarkClosure: public OopClosure {
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bitMap;
|
||||
CMSMarkStack* _markStack;
|
||||
CMSMarkStack* _revisitStack;
|
||||
HeapWord* const _finger;
|
||||
MarkFromRootsClosure* const _parent;
|
||||
bool const _should_remember_klasses;
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bitMap;
|
||||
CMSMarkStack* _markStack;
|
||||
CMSMarkStack* _revisitStack;
|
||||
HeapWord* const _finger;
|
||||
MarkFromRootsClosure* const
|
||||
_parent;
|
||||
bool const _should_remember_klasses;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
PushOrMarkClosure(CMSCollector* cms_collector,
|
||||
MemRegion span,
|
||||
CMSBitMap* bitMap,
|
||||
CMSMarkStack* markStack,
|
||||
CMSMarkStack* revisitStack,
|
||||
HeapWord* finger,
|
||||
CMSMarkStack* markStack,
|
||||
CMSMarkStack* revisitStack,
|
||||
HeapWord* finger,
|
||||
MarkFromRootsClosure* parent);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); }
|
||||
const bool should_remember_klasses() const {
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
|
||||
virtual const bool should_remember_klasses() const {
|
||||
return _should_remember_klasses;
|
||||
}
|
||||
void remember_klass(Klass* k);
|
||||
virtual void remember_klass(Klass* k);
|
||||
// Deal with a stack overflow condition
|
||||
void handle_stack_overflow(HeapWord* lost);
|
||||
private:
|
||||
|
@ -244,6 +283,7 @@ class PushOrMarkClosure: public OopClosure {
|
|||
// following the first checkpoint. Its use is buried in
|
||||
// the closure Par_MarkFromRootsClosure.
|
||||
class Par_PushOrMarkClosure: public OopClosure {
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _whole_span;
|
||||
MemRegion _span; // local chunk
|
||||
|
@ -253,24 +293,29 @@ class Par_PushOrMarkClosure: public OopClosure {
|
|||
CMSMarkStack* _revisit_stack;
|
||||
HeapWord* const _finger;
|
||||
HeapWord** const _global_finger_addr;
|
||||
Par_MarkFromRootsClosure* const _parent;
|
||||
bool const _should_remember_klasses;
|
||||
Par_MarkFromRootsClosure* const
|
||||
_parent;
|
||||
bool const _should_remember_klasses;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
Par_PushOrMarkClosure(CMSCollector* cms_collector,
|
||||
MemRegion span,
|
||||
CMSBitMap* bit_map,
|
||||
OopTaskQueue* work_queue,
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
HeapWord* finger,
|
||||
HeapWord** global_finger_addr,
|
||||
Par_MarkFromRootsClosure* parent);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); }
|
||||
const bool should_remember_klasses() const {
|
||||
MemRegion span,
|
||||
CMSBitMap* bit_map,
|
||||
OopTaskQueue* work_queue,
|
||||
CMSMarkStack* mark_stack,
|
||||
CMSMarkStack* revisit_stack,
|
||||
HeapWord* finger,
|
||||
HeapWord** global_finger_addr,
|
||||
Par_MarkFromRootsClosure* parent);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
|
||||
virtual const bool should_remember_klasses() const {
|
||||
return _should_remember_klasses;
|
||||
}
|
||||
void remember_klass(Klass* k);
|
||||
virtual void remember_klass(Klass* k);
|
||||
// Deal with a stack overflow condition
|
||||
void handle_stack_overflow(HeapWord* lost);
|
||||
private:
|
||||
|
@ -282,10 +327,13 @@ class Par_PushOrMarkClosure: public OopClosure {
|
|||
// This is currently used during the (weak) reference object
|
||||
// processing phase of the CMS final checkpoint step.
|
||||
class CMSKeepAliveClosure: public OopClosure {
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSMarkStack* _mark_stack;
|
||||
CMSBitMap* _bit_map;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
|
||||
CMSBitMap* bit_map, CMSMarkStack* mark_stack):
|
||||
|
@ -293,16 +341,20 @@ class CMSKeepAliveClosure: public OopClosure {
|
|||
_span(span),
|
||||
_bit_map(bit_map),
|
||||
_mark_stack(mark_stack) { }
|
||||
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
|
||||
};
|
||||
|
||||
class CMSInnerParMarkAndPushClosure: public OopClosure {
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
OopTaskQueue* _work_queue;
|
||||
CMSBitMap* _bit_map;
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
CMSInnerParMarkAndPushClosure(CMSCollector* collector,
|
||||
MemRegion span, CMSBitMap* bit_map,
|
||||
|
@ -311,24 +363,32 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
|
|||
_span(span),
|
||||
_bit_map(bit_map),
|
||||
_work_queue(work_queue) { }
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
|
||||
};
|
||||
|
||||
// A parallel (MT) version of the above, used when
|
||||
// reference processing is parallel; the only difference
|
||||
// is in the do_oop method.
|
||||
class CMSParKeepAliveClosure: public OopClosure {
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
OopTaskQueue* _work_queue;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSInnerParMarkAndPushClosure _mark_and_push;
|
||||
CMSInnerParMarkAndPushClosure
|
||||
_mark_and_push;
|
||||
const uint _low_water_mark;
|
||||
void trim_queue(uint max);
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
|
||||
CMSBitMap* bit_map, OopTaskQueue* work_queue);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
|
||||
inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
|
||||
};
|
||||
@ -177,7 +177,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
|
|||
assert(q->forwardee() == NULL, "should be forwarded to NULL");
|
||||
}
|
||||
|
||||
debug_only(MarkSweep::register_live_oop(q, adjusted_size));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
|
||||
compact_top += adjusted_size;
|
||||
|
||||
// we need to update the offset table so that the beginnings of objects can be
|
||||
|
@ -1211,7 +1211,7 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
|
|||
return fc;
|
||||
}
|
||||
|
||||
oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
|
||||
oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
|
||||
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
|
||||
assert_locked();
|
||||
|
||||
|
@ -2116,7 +2116,6 @@ void CompactibleFreeListSpace::split(size_t from, size_t to1) {
|
|||
splitBirth(to2);
|
||||
}
|
||||
|
||||
|
||||
void CompactibleFreeListSpace::print() const {
|
||||
tty->print(" CompactibleFreeListSpace");
|
||||
Space::print();
|
||||
|
@ -2130,6 +2129,7 @@ void CompactibleFreeListSpace::prepare_for_verify() {
|
|||
}
|
||||
|
||||
class VerifyAllBlksClosure: public BlkClosure {
|
||||
private:
|
||||
const CompactibleFreeListSpace* _sp;
|
||||
const MemRegion _span;
|
||||
|
||||
|
@ -2137,7 +2137,7 @@ class VerifyAllBlksClosure: public BlkClosure {
|
|||
VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
|
||||
MemRegion span) : _sp(sp), _span(span) { }
|
||||
|
||||
size_t do_blk(HeapWord* addr) {
|
||||
virtual size_t do_blk(HeapWord* addr) {
|
||||
size_t res;
|
||||
if (_sp->block_is_obj(addr)) {
|
||||
oop p = oop(addr);
|
||||
|
@ -2160,12 +2160,54 @@ class VerifyAllBlksClosure: public BlkClosure {
|
|||
};
|
||||
|
||||
class VerifyAllOopsClosure: public OopClosure {
|
||||
private:
|
||||
const CMSCollector* _collector;
|
||||
const CompactibleFreeListSpace* _sp;
|
||||
const MemRegion _span;
|
||||
const bool _past_remark;
|
||||
const CMSBitMap* _bit_map;
|
||||
|
||||
protected:
|
||||
void do_oop(void* p, oop obj) {
|
||||
if (_span.contains(obj)) { // the interior oop points into CMS heap
|
||||
if (!_span.contains(p)) { // reference from outside CMS heap
|
||||
// Should be a valid object; the first disjunct below allows
|
||||
// us to sidestep an assertion in block_is_obj() that insists
|
||||
// that p be in _sp. Note that several generations (and spaces)
|
||||
// are spanned by _span (CMS heap) above.
|
||||
guarantee(!_sp->is_in_reserved(obj) ||
|
||||
_sp->block_is_obj((HeapWord*)obj),
|
||||
"Should be an object");
|
||||
guarantee(obj->is_oop(), "Should be an oop");
|
||||
obj->verify();
|
||||
if (_past_remark) {
|
||||
// Remark has been completed, the object should be marked
|
||||
_bit_map->isMarked((HeapWord*)obj);
|
||||
}
|
||||
} else { // reference within CMS heap
|
||||
if (_past_remark) {
|
||||
// Remark has been completed -- so the referent should have
|
||||
// been marked, if referring object is.
|
||||
if (_bit_map->isMarked(_collector->block_start(p))) {
|
||||
guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (_sp->is_in_reserved(p)) {
|
||||
// the reference is from FLS, and points out of FLS
|
||||
guarantee(obj->is_oop(), "Should be an oop");
|
||||
obj->verify();
|
||||
}
|
||||
}
|
||||
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
do_oop(p, obj);
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
VerifyAllOopsClosure(const CMSCollector* collector,
|
||||
const CompactibleFreeListSpace* sp, MemRegion span,
|
||||
|
@ -2173,40 +2215,8 @@ class VerifyAllOopsClosure: public OopClosure {
|
|||
OopClosure(), _collector(collector), _sp(sp), _span(span),
|
||||
_past_remark(past_remark), _bit_map(bit_map) { }
|
||||
|
||||
void do_oop(oop* ptr) {
|
||||
oop p = *ptr;
|
||||
if (p != NULL) {
|
||||
if (_span.contains(p)) { // the interior oop points into CMS heap
|
||||
if (!_span.contains(ptr)) { // reference from outside CMS heap
|
||||
// Should be a valid object; the first disjunct below allows
|
||||
// us to sidestep an assertion in block_is_obj() that insists
|
||||
// that p be in _sp. Note that several generations (and spaces)
|
||||
// are spanned by _span (CMS heap) above.
|
||||
guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
|
||||
"Should be an object");
|
||||
guarantee(p->is_oop(), "Should be an oop");
|
||||
p->verify();
|
||||
if (_past_remark) {
|
||||
// Remark has been completed, the object should be marked
|
||||
_bit_map->isMarked((HeapWord*)p);
|
||||
}
|
||||
}
|
||||
else { // reference within CMS heap
|
||||
if (_past_remark) {
|
||||
// Remark has been completed -- so the referent should have
|
||||
// been marked, if referring object is.
|
||||
if (_bit_map->isMarked(_collector->block_start(ptr))) {
|
||||
guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (_sp->is_in_reserved(ptr)) {
|
||||
// the reference is from FLS, and points out of FLS
|
||||
guarantee(p->is_oop(), "Should be an oop");
|
||||
p->verify();
|
||||
}
|
||||
}
|
||||
}
|
||||
virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
|
||||
virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
|
||||
};
|
||||
|
||||
void CompactibleFreeListSpace::verify(bool ignored) const {
|
||||
|
|
|
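VerifyAllOopsClosure above keeps the old checks but moves them into a do_oop(void*, oop) helper fed by the usual load/decode template; the invariant verified after remark is that a reference stored in a marked object must point to a marked object. A compact sketch of just that invariant over a toy mark set; everything here is invented for illustration.

#include <cassert>
#include <cstdio>
#include <set>

typedef unsigned long Addr;

// Toy "mark bitmap": a set of marked object addresses.
struct MarkSet {
  std::set<Addr> marked;
  bool is_marked(Addr a) const { return marked.count(a) != 0; }
};

// The post-remark invariant checked above: if the referring object is
// marked, its referent must be marked as well.
void verify_edge(const MarkSet& bm, Addr referring_obj, Addr referent) {
  if (bm.is_marked(referring_obj)) {
    assert(bm.is_marked(referent) && "Marking error?");
  }
}

int main() {
  MarkSet bm;
  bm.marked = {0x1000, 0x2000};
  verify_edge(bm, 0x1000, 0x2000);   // ok: both marked
  verify_edge(bm, 0x3000, 0x4000);   // ok: referrer not marked, nothing to check
  printf("invariant holds for sample edges\n");
  return 0;
}
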
@ -540,7 +540,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
|||
HeapWord* allocate(size_t size);
|
||||
HeapWord* par_allocate(size_t size);
|
||||
|
||||
oop promote(oop obj, size_t obj_size, oop* ref);
|
||||
oop promote(oop obj, size_t obj_size);
|
||||
void gc_prologue();
|
||||
void gc_epilogue();
|
||||
|
||||
|
|
|
@ -1226,7 +1226,7 @@ CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
|
||||
oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
|
||||
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
|
||||
// allocate, copy and if necessary update promoinfo --
|
||||
// delegate to underlying space.
|
||||
|
@ -1238,7 +1238,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
|
|||
}
|
||||
#endif // #ifndef PRODUCT
|
||||
|
||||
oop res = _cmsSpace->promote(obj, obj_size, ref);
|
||||
oop res = _cmsSpace->promote(obj, obj_size);
|
||||
if (res == NULL) {
|
||||
// expand and retry
|
||||
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
|
||||
|
@ -1249,7 +1249,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
|
|||
assert(next_gen() == NULL, "assumption, based upon which no attempt "
|
||||
"is made to pass on a possibly failing "
|
||||
"promotion to next generation");
|
||||
res = _cmsSpace->promote(obj, obj_size, ref);
|
||||
res = _cmsSpace->promote(obj, obj_size);
|
||||
}
|
||||
if (res != NULL) {
|
||||
// See comment in allocate() about when objects should
|
||||
|
@ -3922,13 +3922,15 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
|
|||
}
|
||||
|
||||
class Par_ConcMarkingClosure: public OopClosure {
|
||||
private:
|
||||
CMSCollector* _collector;
|
||||
MemRegion _span;
|
||||
CMSBitMap* _bit_map;
|
||||
CMSMarkStack* _overflow_stack;
|
||||
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
|
||||
OopTaskQueue* _work_queue;
|
||||
|
||||
protected:
|
||||
DO_OOP_WORK_DEFN
|
||||
public:
|
||||
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
|
||||
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
|
||||
|
@ -3937,8 +3939,8 @@ class Par_ConcMarkingClosure: public OopClosure {
|
|||
_work_queue(work_queue),
|
||||
_bit_map(bit_map),
|
||||
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
|
||||
|
||||
void do_oop(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
void trim_queue(size_t max);
|
||||
void handle_stack_overflow(HeapWord* lost);
|
||||
};
|
||||
|
@ -3947,11 +3949,9 @@ class Par_ConcMarkingClosure: public OopClosure {
|
|||
// the salient assumption here is that stolen oops must
|
||||
// always be initialized, so we do not need to check for
|
||||
// uninitialized objects before scanning here.
|
||||
void Par_ConcMarkingClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
assert(this_oop->is_oop_or_null(),
|
||||
"expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
void Par_ConcMarkingClosure::do_oop(oop obj) {
|
||||
assert(obj->is_oop_or_null(), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
|
||||
|
@ -3970,7 +3970,7 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
|
|||
}
|
||||
)
|
||||
if (simulate_overflow ||
|
||||
!(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
|
||||
!(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
|
||||
// stack overflow
|
||||
if (PrintCMSStatistics != 0) {
|
||||
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
|
||||
|
@ -3987,6 +3987,9 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
|
||||
void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
|
||||
|
||||
void Par_ConcMarkingClosure::trim_queue(size_t max) {
|
||||
while (_work_queue->size() > max) {
|
||||
oop new_oop;
|
||||
|
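The parallel marking closure above first tries its own work queue and only then the shared overflow stack; if both fail it reports a benign marking-stack overflow and lets the caller recover. A toy sketch of that two-level push with fixed-capacity containers; the names are illustrative, not the real GenericTaskQueue/CMSMarkStack.

#include <cstdio>
#include <vector>

// Fixed-capacity stand-ins for the per-worker queue and the shared
// overflow stack.
struct BoundedStack {
  std::vector<unsigned long> items;
  size_t capacity;
  explicit BoundedStack(size_t cap) : capacity(cap) {}
  bool push(unsigned long v) {
    if (items.size() >= capacity) return false;
    items.push_back(v);
    return true;
  }
};

// Push order used by the closure above: local queue, then overflow stack.
bool mark_and_push(unsigned long obj, BoundedStack& work_queue,
                   BoundedStack& overflow_stack) {
  if (work_queue.push(obj) || overflow_stack.push(obj)) return true;
  printf("marking stack overflow (benign) for %#lx\n", obj);
  return false;   // caller falls back to its stack-overflow handling
}

int main() {
  BoundedStack work_queue(2), overflow(1);
  for (unsigned long obj = 0x1000; obj <= 0x4000; obj += 0x1000)
    mark_and_push(obj, work_queue, overflow);
  printf("queue=%zu overflow=%zu\n", work_queue.items.size(), overflow.items.size());
  return 0;
}
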
@ -4086,8 +4089,8 @@ void CMSConcMarkingTask::coordinator_yield() {
|
|||
//
|
||||
// Tony 2006.06.29
|
||||
for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
os::sleep(Thread::current(), 1, false);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
}
|
||||
|
@ -6048,8 +6051,8 @@ void CMSCollector::reset(bool asynch) {
|
|||
|
||||
// See the comment in coordinator_yield()
|
||||
for (unsigned i = 0; i < CMSYieldSleepCount &&
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
os::sleep(Thread::current(), 1, false);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
}
|
||||
|
@ -6362,19 +6365,19 @@ MarkRefsIntoClosure::MarkRefsIntoClosure(
|
|||
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
|
||||
}
|
||||
|
||||
void MarkRefsIntoClosure::do_oop(oop* p) {
|
||||
void MarkRefsIntoClosure::do_oop(oop obj) {
|
||||
// if p points into _span, then mark corresponding bit in _markBitMap
|
||||
oop thisOop = *p;
|
||||
if (thisOop != NULL) {
|
||||
assert(thisOop->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)thisOop;
|
||||
if (_span.contains(addr)) {
|
||||
// this should be made more efficient
|
||||
_bitMap->mark(addr);
|
||||
}
|
||||
assert(obj->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr)) {
|
||||
// this should be made more efficient
|
||||
_bitMap->mark(addr);
|
||||
}
|
||||
}
|
||||
|
||||
void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
|
||||
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
|
||||
|
||||
// A variant of the above, used for CMS marking verification.
|
||||
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
|
||||
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
|
||||
|
@ -6387,23 +6390,23 @@ MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
|
|||
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
|
||||
}
|
||||
|
||||
void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
|
||||
void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
|
||||
// if p points into _span, then mark corresponding bit in _markBitMap
|
||||
oop this_oop = *p;
|
||||
if (this_oop != NULL) {
|
||||
assert(this_oop->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
if (_span.contains(addr)) {
|
||||
_verification_bm->mark(addr);
|
||||
if (!_cms_bm->isMarked(addr)) {
|
||||
oop(addr)->print();
|
||||
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
|
||||
fatal("... aborting");
|
||||
}
|
||||
assert(obj->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr)) {
|
||||
_verification_bm->mark(addr);
|
||||
if (!_cms_bm->isMarked(addr)) {
|
||||
oop(addr)->print();
|
||||
gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
|
||||
fatal("... aborting");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
|
||||
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
// MarkRefsIntoAndScanClosure
|
||||
//////////////////////////////////////////////////
|
||||
|
@ -6438,13 +6441,13 @@ MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
|
|||
// The marks are made in the marking bit map and the marking stack is
|
||||
// used for keeping the (newly) grey objects during the scan.
|
||||
// The parallel version (Par_...) appears further below.
|
||||
void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
if (this_oop != NULL) {
|
||||
assert(this_oop->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
|
||||
assert(_collector->overflow_list_is_empty(), "should be empty");
|
||||
void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
|
||||
if (obj != NULL) {
|
||||
assert(obj->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
|
||||
assert(_collector->overflow_list_is_empty(),
|
||||
"overflow list should be empty");
|
||||
if (_span.contains(addr) &&
|
||||
!_bit_map->isMarked(addr)) {
|
||||
// mark bit map (object is now grey)
|
||||
|
@ -6452,7 +6455,7 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
|||
// push on marking stack (stack should be empty), and drain the
|
||||
// stack by applying this closure to the oops in the oops popped
|
||||
// from the stack (i.e. blacken the grey objects)
|
||||
bool res = _mark_stack->push(this_oop);
|
||||
bool res = _mark_stack->push(obj);
|
||||
assert(res, "Should have space to push on empty stack");
|
||||
do {
|
||||
oop new_oop = _mark_stack->pop();
|
||||
|
@ -6488,6 +6491,9 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
|
||||
void MarkRefsIntoAndScanClosure::do_yield_work() {
|
||||
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
|
||||
"CMS thread should hold CMS token");
|
||||
|
@ -6506,9 +6512,11 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
|
|||
_collector->icms_wait();
|
||||
|
||||
// See the comment in coordinator_yield()
|
||||
for (unsigned i = 0; i < CMSYieldSleepCount &&
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
for (unsigned i = 0;
|
||||
i < CMSYieldSleepCount &&
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive();
|
||||
++i) {
|
||||
os::sleep(Thread::current(), 1, false);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
}
|
||||
|
@ -6545,13 +6553,12 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
|
|||
// the scan phase whence they are also available for stealing by parallel
|
||||
// threads. Since the marking bit map is shared, updates are
|
||||
// synchronized (via CAS).
|
||||
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
if (this_oop != NULL) {
|
||||
void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
|
||||
if (obj != NULL) {
|
||||
// Ignore mark word because this could be an already marked oop
|
||||
// that may be chained at the end of the overflow list.
|
||||
assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
assert(obj->is_oop(), "expected an oop");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) &&
|
||||
!_bit_map->isMarked(addr)) {
|
||||
// mark bit map (object will become grey):
|
||||
|
@ -6565,7 +6572,7 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
|||
// queue to an appropriate length by applying this closure to
|
||||
// the oops in the oops popped from the stack (i.e. blacken the
|
||||
// grey objects)
|
||||
bool res = _work_queue->push(this_oop);
|
||||
bool res = _work_queue->push(obj);
|
||||
assert(res, "Low water mark should be less than capacity?");
|
||||
trim_queue(_low_water_mark);
|
||||
} // Else, another thread claimed the object
|
||||
|
@ -6573,6 +6580,9 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
|
||||
|
||||
// This closure is used to rescan the marked objects on the dirty cards
|
||||
// in the mod union table and the card table proper.
|
||||
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
|
||||
|
@ -6675,8 +6685,8 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
|
|||
|
||||
// See the comment in coordinator_yield()
|
||||
for (unsigned i = 0; i < CMSYieldSleepCount &&
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
ConcurrentMarkSweepThread::should_yield() &&
|
||||
!CMSCollector::foregroundGCIsActive(); ++i) {
|
||||
os::sleep(Thread::current(), 1, false);
|
||||
ConcurrentMarkSweepThread::acknowledge_yield_request();
|
||||
}
|
||||
|
@ -6928,13 +6938,13 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
|
|||
assert(_markStack->isEmpty(),
|
||||
"should drain stack to limit stack usage");
|
||||
// convert ptr to an oop preparatory to scanning
|
||||
oop this_oop = oop(ptr);
|
||||
oop obj = oop(ptr);
|
||||
// Ignore mark word in verification below, since we
|
||||
// may be running concurrent with mutators.
|
||||
assert(this_oop->is_oop(true), "should be an oop");
|
||||
assert(obj->is_oop(true), "should be an oop");
|
||||
assert(_finger <= ptr, "_finger runneth ahead");
|
||||
// advance the finger to right end of this object
|
||||
_finger = ptr + this_oop->size();
|
||||
_finger = ptr + obj->size();
|
||||
assert(_finger > ptr, "we just incremented it above");
|
||||
// On large heaps, it may take us some time to get through
|
||||
// the marking phase (especially if running iCMS). During
|
||||
|
@ -6980,7 +6990,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
|
|||
_span, _bitMap, _markStack,
|
||||
_revisitStack,
|
||||
_finger, this);
|
||||
bool res = _markStack->push(this_oop);
|
||||
bool res = _markStack->push(obj);
|
||||
assert(res, "Empty non-zero size stack should have space for single push");
|
||||
while (!_markStack->isEmpty()) {
|
||||
oop new_oop = _markStack->pop();
|
||||
|
@ -7052,13 +7062,13 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
|
|||
assert(_work_queue->size() == 0,
|
||||
"should drain stack to limit stack usage");
|
||||
// convert ptr to an oop preparatory to scanning
|
||||
oop this_oop = oop(ptr);
|
||||
oop obj = oop(ptr);
|
||||
// Ignore mark word in verification below, since we
|
||||
// may be running concurrent with mutators.
|
||||
assert(this_oop->is_oop(true), "should be an oop");
|
||||
assert(obj->is_oop(true), "should be an oop");
|
||||
assert(_finger <= ptr, "_finger runneth ahead");
|
||||
// advance the finger to right end of this object
|
||||
_finger = ptr + this_oop->size();
|
||||
_finger = ptr + obj->size();
|
||||
assert(_finger > ptr, "we just incremented it above");
|
||||
// On large heaps, it may take us some time to get through
|
||||
// the marking phase (especially if running iCMS). During
|
||||
|
@ -7106,7 +7116,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
|
|||
_revisit_stack,
|
||||
_finger,
|
||||
gfa, this);
|
||||
bool res = _work_queue->push(this_oop); // overflow could occur here
|
||||
bool res = _work_queue->push(obj); // overflow could occur here
|
||||
assert(res, "Will hold once we use workqueues");
|
||||
while (true) {
|
||||
oop new_oop;
|
||||
|
@ -7176,15 +7186,15 @@ void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
|
|||
assert(_mark_stack->isEmpty(),
|
||||
"should drain stack to limit stack usage");
|
||||
// convert addr to an oop preparatory to scanning
|
||||
oop this_oop = oop(addr);
|
||||
assert(this_oop->is_oop(), "should be an oop");
|
||||
oop obj = oop(addr);
|
||||
assert(obj->is_oop(), "should be an oop");
|
||||
assert(_finger <= addr, "_finger runneth ahead");
|
||||
// advance the finger to right end of this object
|
||||
_finger = addr + this_oop->size();
|
||||
_finger = addr + obj->size();
|
||||
assert(_finger > addr, "we just incremented it above");
|
||||
// Note: the finger doesn't advance while we drain
|
||||
// the stack below.
|
||||
bool res = _mark_stack->push(this_oop);
|
||||
bool res = _mark_stack->push(obj);
|
||||
assert(res, "Empty non-zero size stack should have space for single push");
|
||||
while (!_mark_stack->isEmpty()) {
|
||||
oop new_oop = _mark_stack->pop();
|
||||
|
@ -7207,6 +7217,8 @@ PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
|
|||
_mark_stack(mark_stack)
|
||||
{ }
|
||||
|
||||
void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
|
||||
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
|
||||
|
||||
// Upon stack overflow, we discard (part of) the stack,
|
||||
// remembering the least address amongst those discarded
|
||||
|
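The recurring shape in these closure changes: the oop* and narrowOop* entry points are thin wrappers around a templated do_oop_work, which decodes the (possibly 32-bit) slot into a full oop before running the shared body. A minimal sketch of that dispatch, with an illustrative class name and assuming the HotSpot headers that declare OopClosure and the oopDesc helpers used throughout this diff:

// Sketch only; ExampleVerifyClosure is hypothetical, the helpers are the ones
// this change introduces (load_decode_heap_oop_not_null handles both widths).
class ExampleVerifyClosure: public OopClosure {
 protected:
  void do_oop(oop obj);                          // shared body, operates on the oop value
  template <class T> inline void do_oop_work(T* p) {
    // T is oop* or narrowOop*; decode the slot before handing off.
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};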
@ -7219,20 +7231,20 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
|
|||
_mark_stack->expand(); // expand the stack if possible
|
||||
}
|
||||
|
||||
void PushAndMarkVerifyClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
void PushAndMarkVerifyClosure::do_oop(oop obj) {
|
||||
assert(obj->is_oop_or_null(), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
_verification_bm->mark(addr); // now grey
|
||||
if (!_cms_bm->isMarked(addr)) {
|
||||
oop(addr)->print();
|
||||
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
|
||||
gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
|
||||
addr);
|
||||
fatal("... aborting");
|
||||
}
|
||||
|
||||
if (!_mark_stack->push(this_oop)) { // stack overflow
|
||||
if (!_mark_stack->push(obj)) { // stack overflow
|
||||
if (PrintCMSStatistics != 0) {
|
||||
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
|
||||
SIZE_FORMAT, _mark_stack->capacity());
|
||||
|
@ -7285,7 +7297,6 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
|
|||
_should_remember_klasses(collector->should_unload_classes())
|
||||
{ }
|
||||
|
||||
|
||||
void CMSCollector::lower_restart_addr(HeapWord* low) {
|
||||
assert(_span.contains(low), "Out of bounds addr");
|
||||
if (_restart_addr == NULL) {
|
||||
|
@ -7321,12 +7332,10 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
|
|||
_overflow_stack->expand(); // expand the stack if possible
|
||||
}
|
||||
|
||||
|
||||
void PushOrMarkClosure::do_oop(oop* p) {
|
||||
oop thisOop = *p;
|
||||
void PushOrMarkClosure::do_oop(oop obj) {
|
||||
// Ignore mark word because we are running concurrent with mutators.
|
||||
assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)thisOop;
|
||||
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
_bitMap->mark(addr); // now grey
|
||||
|
@ -7342,7 +7351,7 @@ void PushOrMarkClosure::do_oop(oop* p) {
|
|||
simulate_overflow = true;
|
||||
}
|
||||
)
|
||||
if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
|
||||
if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
|
||||
if (PrintCMSStatistics != 0) {
|
||||
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
|
||||
SIZE_FORMAT, _markStack->capacity());
|
||||
|
@ -7358,11 +7367,13 @@ void PushOrMarkClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void Par_PushOrMarkClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
|
||||
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
|
||||
|
||||
void Par_PushOrMarkClosure::do_oop(oop obj) {
|
||||
// Ignore mark word because we are running concurrent with mutators.
|
||||
assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
// We read the global_finger (volatile read) strictly after marking oop
|
||||
|
@ -7391,7 +7402,7 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
|
|||
}
|
||||
)
|
||||
if (simulate_overflow ||
|
||||
!(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
|
||||
!(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
|
||||
// stack overflow
|
||||
if (PrintCMSStatistics != 0) {
|
||||
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
|
||||
|
@ -7408,6 +7419,8 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
|
||||
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
|
||||
|
||||
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
|
||||
MemRegion span,
|
||||
|
@ -7432,16 +7445,11 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
|
|||
|
||||
// Grey object rescan during pre-cleaning and second checkpoint phases --
|
||||
// the non-parallel version (the parallel version appears further below.)
|
||||
void PushAndMarkClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
// Ignore mark word verification. If during concurrent precleaning
|
||||
// the object monitor may be locked. If during the checkpoint
|
||||
// phases, the object may already have been reached by a different
|
||||
// path and may be at the end of the global overflow list (so
|
||||
// the mark word may be NULL).
|
||||
assert(this_oop->is_oop_or_null(true/* ignore mark word */),
|
||||
void PushAndMarkClosure::do_oop(oop obj) {
|
||||
// If _concurrent_precleaning, ignore mark word verification
|
||||
assert(obj->is_oop_or_null(_concurrent_precleaning),
|
||||
"expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
|
||||
|
@ -7456,7 +7464,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
|
|||
simulate_overflow = true;
|
||||
}
|
||||
)
|
||||
if (simulate_overflow || !_mark_stack->push(this_oop)) {
|
||||
if (simulate_overflow || !_mark_stack->push(obj)) {
|
||||
if (_concurrent_precleaning) {
|
||||
// During precleaning we can just dirty the appropriate card
|
||||
// in the mod union table, thus ensuring that the object remains
|
||||
|
@ -7468,7 +7476,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
|
|||
} else {
|
||||
// During the remark phase, we need to remember this oop
|
||||
// in the overflow list.
|
||||
_collector->push_on_overflow_list(this_oop);
|
||||
_collector->push_on_overflow_list(obj);
|
||||
_collector->_ser_pmc_remark_ovflw++;
|
||||
}
|
||||
}
|
||||
|
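When the bounded CMS marking stack rejects a push, these closures fall back differently by phase: during concurrent precleaning the card covering the object is simply re-dirtied in the mod union table, while during remark the object goes onto the collector's global overflow list and a statistics counter is bumped. A rough sketch of that decision, assuming the HotSpot types named in the hunks above and ignoring the friend/access details of the real closures:

// Sketch of the push-with-fallback pattern (not the exact closure method).
void push_grey(oop obj, CMSMarkStack* mark_stack, CMSCollector* collector,
               bool concurrent_precleaning) {
  if (!mark_stack->push(obj)) {              // bounded stack is full
    if (concurrent_precleaning) {
      // Precleaning: re-dirty the card for obj in the mod union table so the
      // object is revisited by a later rescan; details elided here.
    } else {
      // Remark: remember the object on the global overflow list instead.
      collector->push_on_overflow_list(obj);
    }
  }
}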
@ -7492,10 +7500,12 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
|
|||
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
|
||||
}
|
||||
|
||||
void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
|
||||
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
|
||||
|
||||
// Grey object rescan during second checkpoint phase --
|
||||
// the parallel version.
|
||||
void Par_PushAndMarkClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
void Par_PushAndMarkClosure::do_oop(oop obj) {
|
||||
// In the assert below, we ignore the mark word because
|
||||
// this oop may point to an already visited object that is
|
||||
// on the overflow stack (in which case the mark word has
|
||||
|
@ -7507,9 +7517,9 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
|
|||
// value, by the time we get to examined this failing assert in
|
||||
// the debugger, is_oop_or_null(false) may subsequently start
|
||||
// to hold.
|
||||
assert(this_oop->is_oop_or_null(true),
|
||||
assert(obj->is_oop_or_null(true),
|
||||
"expected an oop or NULL");
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
|
||||
|
@ -7527,14 +7537,17 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
|
|||
simulate_overflow = true;
|
||||
}
|
||||
)
|
||||
if (simulate_overflow || !_work_queue->push(this_oop)) {
|
||||
_collector->par_push_on_overflow_list(this_oop);
|
||||
if (simulate_overflow || !_work_queue->push(obj)) {
|
||||
_collector->par_push_on_overflow_list(obj);
|
||||
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
|
||||
}
|
||||
} // Else, some other thread got there first
|
||||
}
|
||||
}
|
||||
|
||||
void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
|
||||
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
|
||||
|
||||
void PushAndMarkClosure::remember_klass(Klass* k) {
|
||||
if (!_revisit_stack->push(oop(k))) {
|
||||
fatal("Revisit stack overflowed in PushAndMarkClosure");
|
||||
|
@ -8228,9 +8241,8 @@ bool CMSIsAliveClosure::do_object_b(oop obj) {
|
|||
}
|
||||
|
||||
// CMSKeepAliveClosure: the serial version
|
||||
void CMSKeepAliveClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
void CMSKeepAliveClosure::do_oop(oop obj) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) &&
|
||||
!_bit_map->isMarked(addr)) {
|
||||
_bit_map->mark(addr);
|
||||
|
@ -8242,26 +8254,28 @@ void CMSKeepAliveClosure::do_oop(oop* p) {
|
|||
simulate_overflow = true;
|
||||
}
|
||||
)
|
||||
if (simulate_overflow || !_mark_stack->push(this_oop)) {
|
||||
_collector->push_on_overflow_list(this_oop);
|
||||
if (simulate_overflow || !_mark_stack->push(obj)) {
|
||||
_collector->push_on_overflow_list(obj);
|
||||
_collector->_ser_kac_ovflw++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
|
||||
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
// CMSParKeepAliveClosure: a parallel version of the above.
|
||||
// The work queues are private to each closure (thread),
|
||||
// but (may be) available for stealing by other threads.
|
||||
void CMSParKeepAliveClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
void CMSParKeepAliveClosure::do_oop(oop obj) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) &&
|
||||
!_bit_map->isMarked(addr)) {
|
||||
// In general, during recursive tracing, several threads
|
||||
// may be concurrently getting here; the first one to
|
||||
// "tag" it, claims it.
|
||||
if (_bit_map->par_mark(addr)) {
|
||||
bool res = _work_queue->push(this_oop);
|
||||
bool res = _work_queue->push(obj);
|
||||
assert(res, "Low water mark should be much less than capacity");
|
||||
// Do a recursive trim in the hope that this will keep
|
||||
// stack usage lower, but leave some oops for potential stealers
|
||||
|
@ -8270,6 +8284,9 @@ void CMSParKeepAliveClosure::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
|
||||
void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
void CMSParKeepAliveClosure::trim_queue(uint max) {
|
||||
while (_work_queue->size() > max) {
|
||||
oop new_oop;
|
||||
|
@ -8285,9 +8302,8 @@ void CMSParKeepAliveClosure::trim_queue(uint max) {
|
|||
}
|
||||
}
|
||||
|
||||
void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
|
||||
oop this_oop = *p;
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) &&
|
||||
!_bit_map->isMarked(addr)) {
|
||||
if (_bit_map->par_mark(addr)) {
|
||||
|
@ -8299,14 +8315,17 @@ void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
|
|||
simulate_overflow = true;
|
||||
}
|
||||
)
|
||||
if (simulate_overflow || !_work_queue->push(this_oop)) {
|
||||
_collector->par_push_on_overflow_list(this_oop);
|
||||
if (simulate_overflow || !_work_queue->push(obj)) {
|
||||
_collector->par_push_on_overflow_list(obj);
|
||||
_collector->_par_kac_ovflw++;
|
||||
}
|
||||
} // Else another thread got there already
|
||||
}
|
||||
}
|
||||
|
||||
void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
|
||||
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
|
||||
|
||||
//////////////////////////////////////////////////////////////////
|
||||
// CMSExpansionCause /////////////////////////////
|
||||
//////////////////////////////////////////////////////////////////
|
||||
|
@ -8337,12 +8356,12 @@ void CMSDrainMarkingStackClosure::do_void() {
|
|||
while (!_mark_stack->isEmpty() ||
|
||||
// if stack is empty, check the overflow list
|
||||
_collector->take_from_overflow_list(num, _mark_stack)) {
|
||||
oop this_oop = _mark_stack->pop();
|
||||
HeapWord* addr = (HeapWord*)this_oop;
|
||||
oop obj = _mark_stack->pop();
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
assert(_span.contains(addr), "Should be within span");
|
||||
assert(_bit_map->isMarked(addr), "Should be marked");
|
||||
assert(this_oop->is_oop(), "Should be an oop");
|
||||
this_oop->oop_iterate(_keep_alive);
|
||||
assert(obj->is_oop(), "Should be an oop");
|
||||
obj->oop_iterate(_keep_alive);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1138,7 +1138,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
|
|||
// Allocation support
|
||||
HeapWord* allocate(size_t size, bool tlab);
|
||||
HeapWord* have_lock_and_allocate(size_t size, bool tlab);
|
||||
oop promote(oop obj, size_t obj_size, oop* ref);
|
||||
oop promote(oop obj, size_t obj_size);
|
||||
HeapWord* par_allocate(size_t size, bool tlab) {
|
||||
return allocate(size, tlab);
|
||||
}
|
||||
|
@ -1301,9 +1301,8 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
|
|||
// This closure is used to check that a certain set of oops is empty.
|
||||
class FalseClosure: public OopClosure {
|
||||
public:
|
||||
void do_oop(oop* p) {
|
||||
guarantee(false, "Should be an empty set");
|
||||
}
|
||||
void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
|
||||
void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
|
||||
};
|
||||
|
||||
// This closure is used to do concurrent marking from the roots
|
||||
|
@ -1380,6 +1379,12 @@ class PushAndMarkVerifyClosure: public OopClosure {
|
|||
CMSBitMap* _verification_bm;
|
||||
CMSBitMap* _cms_bm;
|
||||
CMSMarkStack* _mark_stack;
|
||||
protected:
|
||||
void do_oop(oop p);
|
||||
template <class T> inline void do_oop_work(T *p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
do_oop(obj);
|
||||
}
|
||||
public:
|
||||
PushAndMarkVerifyClosure(CMSCollector* cms_collector,
|
||||
MemRegion span,
|
||||
|
@ -1387,6 +1392,7 @@ class PushAndMarkVerifyClosure: public OopClosure {
|
|||
CMSBitMap* cms_bm,
|
||||
CMSMarkStack* mark_stack);
|
||||
void do_oop(oop* p);
|
||||
void do_oop(narrowOop* p);
|
||||
// Deal with a stack overflow condition
|
||||
void handle_stack_overflow(HeapWord* lost);
|
||||
};
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
// CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
// have any questions.
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
asParNewGeneration.hpp adaptiveSizePolicy.hpp
|
||||
|
@ -66,8 +66,8 @@ parNewGeneration.cpp handles.hpp
|
|||
parNewGeneration.cpp handles.inline.hpp
|
||||
parNewGeneration.cpp java.hpp
|
||||
parNewGeneration.cpp objArrayOop.hpp
|
||||
parNewGeneration.cpp oop.pcgc.inline.hpp
|
||||
parNewGeneration.cpp oop.inline.hpp
|
||||
parNewGeneration.cpp oop.pcgc.inline.hpp
|
||||
parNewGeneration.cpp parGCAllocBuffer.hpp
|
||||
parNewGeneration.cpp parNewGeneration.hpp
|
||||
parNewGeneration.cpp parOopClosures.inline.hpp
|
||||
|
@ -80,3 +80,8 @@ parNewGeneration.cpp workgroup.hpp
|
|||
parNewGeneration.hpp defNewGeneration.hpp
|
||||
parNewGeneration.hpp parGCAllocBuffer.hpp
|
||||
parNewGeneration.hpp taskqueue.hpp
|
||||
|
||||
parOopClosures.hpp genOopClosures.hpp
|
||||
|
||||
parOopClosures.inline.hpp parNewGeneration.hpp
|
||||
parOopClosures.inline.hpp parOopClosures.hpp
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
// CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
// have any questions.
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
|
||||
|
@ -279,6 +279,7 @@ psParallelCompact.hpp mutableSpace.hpp
|
|||
psParallelCompact.hpp objectStartArray.hpp
|
||||
psParallelCompact.hpp oop.hpp
|
||||
psParallelCompact.hpp parMarkBitMap.hpp
|
||||
psParallelCompact.hpp psCompactionManager.hpp
|
||||
psParallelCompact.hpp sharedHeap.hpp
|
||||
|
||||
psOldGen.cpp psAdaptiveSizePolicy.hpp
|
||||
|
|
|
@ -32,18 +32,19 @@ ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
|
|||
_allocated(0), _wasted(0)
|
||||
{
|
||||
assert (min_size() > AlignmentReserve, "Inconsistency!");
|
||||
// arrayOopDesc::header_size depends on command line initialization.
|
||||
FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
|
||||
AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
|
||||
}
|
||||
|
||||
const size_t ParGCAllocBuffer::FillerHeaderSize =
|
||||
align_object_size(arrayOopDesc::header_size(T_INT));
|
||||
size_t ParGCAllocBuffer::FillerHeaderSize;
|
||||
|
||||
// If the minimum object size is greater than MinObjAlignment, we can
|
||||
// end up with a shard at the end of the buffer that's smaller than
|
||||
// the smallest object. We can't allow that because the buffer must
|
||||
// look like it's full of objects when we retire it, so we make
|
||||
// sure we have enough space for a filler int array object.
|
||||
const size_t ParGCAllocBuffer::AlignmentReserve =
|
||||
oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
|
||||
size_t ParGCAllocBuffer::AlignmentReserve;
|
||||
|
||||
void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
|
||||
assert(!retain || end_of_gc, "Can only retain at GC end.");
|
||||
|
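FillerHeaderSize and AlignmentReserve stop being compile-time constants here because arrayOopDesc::header_size(T_INT) now depends on whether compressed oops are in use, which is only known after command-line processing; the values are therefore computed in the constructor, as the hunk shows. Roughly, the derivation is (sketch, illustrative free function, not the actual constructor code):

// Sketch of the runtime derivation performed when a ParGCAllocBuffer is built.
static void compute_plab_filler_constants(size_t* filler_header_size,
                                          size_t* alignment_reserve) {
  // Header of an int[] filler object; with compressed oops the object header
  // shrinks, so this is no longer a link-time constant.
  *filler_header_size = align_object_size(arrayOopDesc::header_size(T_INT));
  // Only reserve room for a filler array if a minimal object could otherwise
  // leave an unfillable shard at the end of the buffer.
  *alignment_reserve  = (oopDesc::header_size() > MinObjAlignment)
                        ? *filler_header_size : 0;
}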
@ -41,8 +41,8 @@ protected:
|
|||
size_t _allocated; // in HeapWord units
|
||||
size_t _wasted; // in HeapWord units
|
||||
char tail[32];
|
||||
static const size_t FillerHeaderSize;
|
||||
static const size_t AlignmentReserve;
|
||||
static size_t FillerHeaderSize;
|
||||
static size_t AlignmentReserve;
|
||||
|
||||
public:
|
||||
// Initializes the buffer to be empty, but with the given "word_sz".
|
||||
|
|
|
@ -104,16 +104,15 @@ void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
|
|||
// must be removed.
|
||||
arrayOop(old)->set_length(end);
|
||||
}
|
||||
|
||||
// process our set of indices (include header in first chunk)
|
||||
oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr(start);
|
||||
oop* end_addr = obj->base() + end; // obj_at_addr(end) asserts end < length
|
||||
MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
|
||||
// should make sure end is even (aligned to HeapWord in case of compressed oops)
|
||||
if ((HeapWord *)obj < young_old_boundary()) {
|
||||
// object is in to_space
|
||||
obj->oop_iterate(&_to_space_closure, mr);
|
||||
obj->oop_iterate_range(&_to_space_closure, start, end);
|
||||
} else {
|
||||
// object is in old generation
|
||||
obj->oop_iterate(&_old_gen_closure, mr);
|
||||
obj->oop_iterate_range(&_old_gen_closure, start, end);
|
||||
}
|
||||
}
|
||||
|
||||
|
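The switch from a MemRegion-bounded oop_iterate to oop_iterate_range(closure, start, end) avoids constructing a HeapWord-granular region over array elements: with compressed oops the elements are 4-byte narrowOops, so an element boundary need not fall on a HeapWord boundary (the "should make sure end is even" comment above alludes to this). Index-based iteration sidesteps the alignment question; a rough sketch of what such a range walk does, with illustrative accessors and casts:

// Sketch: visit the element slots [start, end) of an object array, where the
// slot type T is oop or narrowOop depending on UseCompressedOops.
template <class T>
static void iterate_elements_in_range(objArrayOop a, OopClosure* cl,
                                      int start, int end) {
  T* base = (T*)a->base();            // first element slot (illustrative cast)
  for (int i = start; i < end; i++) {
    cl->do_oop(&base[i]);             // slots are sizeof(T) apart, not one HeapWord
  }
}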
@ -319,7 +318,6 @@ void ParScanThreadStateSet::flush()
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
ParScanClosure::ParScanClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
|
||||
|
@ -328,11 +326,25 @@ ParScanClosure::ParScanClosure(ParNewGeneration* g,
|
|||
_boundary = _g->reserved().end();
|
||||
}
|
||||
|
||||
void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
|
||||
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
|
||||
|
||||
void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
|
||||
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
|
||||
|
||||
void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
|
||||
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
|
||||
|
||||
void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
|
||||
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
|
||||
|
||||
ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state)
|
||||
: ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
|
||||
{
|
||||
}
|
||||
{}
|
||||
|
||||
void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
|
||||
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
|
||||
|
@ -475,51 +487,66 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
|
|||
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
|
||||
DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
|
||||
|
||||
void
|
||||
// ParNewGeneration::
|
||||
ParKeepAliveClosure::do_oop(oop* p) {
|
||||
// We never expect to see a null reference being processed
|
||||
// as a weak reference.
|
||||
assert (*p != NULL, "expected non-null ref");
|
||||
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
|
||||
template <class T>
|
||||
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
|
||||
#ifdef ASSERT
|
||||
{
|
||||
assert(!oopDesc::is_null(*p), "expected non-null ref");
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
// We never expect to see a null reference being processed
|
||||
// as a weak reference.
|
||||
assert(obj->is_oop(), "expected an oop while scanning weak refs");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
_par_cl->do_oop_nv(p);
|
||||
|
||||
if (Universe::heap()->is_in_reserved(p)) {
|
||||
_rs->write_ref_field_gc_par(p, *p);
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
_rs->write_ref_field_gc_par(p, obj);
|
||||
}
|
||||
}
|
||||
|
||||
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); }
|
||||
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
// ParNewGeneration::
|
||||
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
|
||||
DefNewGeneration::KeepAliveClosure(cl) {}
|
||||
|
||||
void
|
||||
// ParNewGeneration::
|
||||
KeepAliveClosure::do_oop(oop* p) {
|
||||
// We never expect to see a null reference being processed
|
||||
// as a weak reference.
|
||||
assert (*p != NULL, "expected non-null ref");
|
||||
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
|
||||
template <class T>
|
||||
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
|
||||
#ifdef ASSERT
|
||||
{
|
||||
assert(!oopDesc::is_null(*p), "expected non-null ref");
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
// We never expect to see a null reference being processed
|
||||
// as a weak reference.
|
||||
assert(obj->is_oop(), "expected an oop while scanning weak refs");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
_cl->do_oop_nv(p);
|
||||
|
||||
if (Universe::heap()->is_in_reserved(p)) {
|
||||
_rs->write_ref_field_gc_par(p, *p);
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
_rs->write_ref_field_gc_par(p, obj);
|
||||
}
|
||||
}
|
||||
|
||||
void ScanClosureWithParBarrier::do_oop(oop* p) {
|
||||
oop obj = *p;
|
||||
// Should we copy the obj?
|
||||
if (obj != NULL) {
|
||||
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); }
|
||||
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if ((HeapWord*)obj < _boundary) {
|
||||
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
|
||||
if (obj->is_forwarded()) {
|
||||
*p = obj->forwardee();
|
||||
} else {
|
||||
*p = _g->DefNewGeneration::copy_to_survivor_space(obj, p);
|
||||
}
|
||||
oop new_obj = obj->is_forwarded()
|
||||
? obj->forwardee()
|
||||
: _g->DefNewGeneration::copy_to_survivor_space(obj);
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
}
|
||||
if (_gc_barrier) {
|
||||
// If p points to a younger generation, mark the card.
|
||||
|
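The load_heap_oop / decode_heap_oop_not_null / encode_store_heap_oop_not_null trio used here is what makes these closures width-agnostic: a narrowOop slot holds a 32-bit, shift-scaled offset from the heap base, and the helpers convert between that and a full machine pointer. A hedged sketch of the arithmetic (the real helpers live on oopDesc and read the base and shift from Universe):

// Illustrative only: decode/encode of a compressed oop, assuming a heap base
// and a shift equal to log2(object alignment), e.g. 3 for 8-byte alignment.
static inline oop decode_sketch(narrowOop v, address heap_base, int shift) {
  return (oop)(heap_base + ((uintptr_t)v << shift));   // scale the 32-bit offset back up
}
static inline narrowOop encode_sketch(oop o, address heap_base, int shift) {
  uintptr_t offset = (address)o - heap_base;
  return (narrowOop)(offset >> shift);                 // fits 32 bits for heaps up to 32 GB
}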
@ -530,6 +557,9 @@ void ScanClosureWithParBarrier::do_oop(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
|
||||
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
|
||||
|
||||
class ParNewRefProcTaskProxy: public AbstractGangTask {
|
||||
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
|
||||
public:
|
||||
|
|
|
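The _gc_barrier / write_ref_field_gc_par calls above are the generational card-marking step: after a field in an older generation is updated to point at a young object, the card covering that field is dirtied so the next minor collection rescans it; the parallel variant only differs in being safe from multiple GC threads. A simplified sketch of the card lookup, with hypothetical parameters standing in for the card table's byte_map_base and shift:

// Sketch of a card-marking write barrier (not the CardTableRS interface).
static void mark_card_for_field(void* field, oop new_val,
                                HeapWord* young_gen_boundary,
                                jbyte* byte_map_base, int card_shift) {
  if ((HeapWord*)new_val < young_gen_boundary) {        // ref now points into young gen
    jbyte* card = byte_map_base + ((uintptr_t)field >> card_shift);
    *card = 0;                                          // 0 == dirty in this sketch
  }
}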
@ -33,7 +33,6 @@ class ParEvacuateFollowersClosure;
|
|||
// but they must be here to allow ParScanClosure::do_oop_work to be defined
|
||||
// in genOopClosures.inline.hpp.
|
||||
|
||||
|
||||
typedef OopTaskQueue ObjToScanQueue;
|
||||
typedef OopTaskQueueSet ObjToScanQueueSet;
|
||||
|
||||
|
@ -41,15 +40,20 @@ typedef OopTaskQueueSet ObjToScanQueueSet;
|
|||
const int PAR_STATS_ENABLED = 0;
|
||||
|
||||
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
|
||||
private:
|
||||
ParScanWeakRefClosure* _par_cl;
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p);
|
||||
public:
|
||||
ParKeepAliveClosure(ParScanWeakRefClosure* cl);
|
||||
void do_oop(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
// The state needed by thread performing parallel young-gen collection.
|
||||
class ParScanThreadState {
|
||||
friend class ParScanThreadStateSet;
|
||||
private:
|
||||
ObjToScanQueue *_work_queue;
|
||||
|
||||
ParGCAllocBuffer _to_space_alloc_buffer;
|
||||
|
@ -111,7 +115,7 @@ class ParScanThreadState {
|
|||
ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
|
||||
ParallelTaskTerminator& term_);
|
||||
|
||||
public:
|
||||
public:
|
||||
ageTable* age_table() {return &_ageTable;}
|
||||
|
||||
ObjToScanQueue* work_queue() { return _work_queue; }
|
||||
|
@ -195,13 +199,13 @@ public:
|
|||
double elapsed() {
|
||||
return os::elapsedTime() - _start;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
class ParNewGenTask: public AbstractGangTask {
|
||||
ParNewGeneration* _gen;
|
||||
Generation* _next_gen;
|
||||
HeapWord* _young_old_boundary;
|
||||
private:
|
||||
ParNewGeneration* _gen;
|
||||
Generation* _next_gen;
|
||||
HeapWord* _young_old_boundary;
|
||||
class ParScanThreadStateSet* _state_set;
|
||||
|
||||
public:
|
||||
|
@ -216,35 +220,44 @@ public:
|
|||
};
|
||||
|
||||
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p);
|
||||
public:
|
||||
KeepAliveClosure(ScanWeakRefClosure* cl);
|
||||
void do_oop(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
class EvacuateFollowersClosureGeneral: public VoidClosure {
|
||||
GenCollectedHeap* _gch;
|
||||
int _level;
|
||||
OopsInGenClosure* _scan_cur_or_nonheap;
|
||||
OopsInGenClosure* _scan_older;
|
||||
public:
|
||||
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
|
||||
OopsInGenClosure* cur,
|
||||
OopsInGenClosure* older);
|
||||
void do_void();
|
||||
private:
|
||||
GenCollectedHeap* _gch;
|
||||
int _level;
|
||||
OopsInGenClosure* _scan_cur_or_nonheap;
|
||||
OopsInGenClosure* _scan_older;
|
||||
public:
|
||||
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
|
||||
OopsInGenClosure* cur,
|
||||
OopsInGenClosure* older);
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
// Closure for scanning ParNewGeneration.
|
||||
// Same as ScanClosure, except does parallel GC barrier.
|
||||
class ScanClosureWithParBarrier: public ScanClosure {
|
||||
public:
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p);
|
||||
public:
|
||||
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
|
||||
void do_oop(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
// Implements AbstractRefProcTaskExecutor for ParNew.
|
||||
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
|
||||
public:
|
||||
|
||||
private:
|
||||
ParNewGeneration& _generation;
|
||||
ParScanThreadStateSet& _state_set;
|
||||
public:
|
||||
ParNewRefProcTaskExecutor(ParNewGeneration& generation,
|
||||
ParScanThreadStateSet& state_set)
|
||||
: _generation(generation), _state_set(state_set)
|
||||
|
@ -255,9 +268,6 @@ public:
|
|||
virtual void execute(EnqueueTask& task);
|
||||
// Switch to single threaded mode.
|
||||
virtual void set_single_threaded_mode();
|
||||
private:
|
||||
ParNewGeneration& _generation;
|
||||
ParScanThreadStateSet& _state_set;
|
||||
};
|
||||
|
||||
|
||||
|
@ -269,6 +279,7 @@ class ParNewGeneration: public DefNewGeneration {
|
|||
friend class ParNewRefProcTaskExecutor;
|
||||
friend class ParScanThreadStateSet;
|
||||
|
||||
private:
|
||||
// XXX use a global constant instead of 64!
|
||||
struct ObjToScanQueuePadded {
|
||||
ObjToScanQueue work_queue;
|
||||
|
@ -314,7 +325,7 @@ class ParNewGeneration: public DefNewGeneration {
|
|||
// the details of the policy.
|
||||
virtual void adjust_desired_tenuring_threshold();
|
||||
|
||||
public:
|
||||
public:
|
||||
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
|
||||
|
||||
~ParNewGeneration() {
|
||||
|
|
|
@ -26,70 +26,77 @@
|
|||
|
||||
class ParScanThreadState;
|
||||
class ParNewGeneration;
|
||||
template<class E> class GenericTaskQueueSet;
|
||||
typedef GenericTaskQueueSet<oop> ObjToScanQueueSet;
|
||||
typedef OopTaskQueueSet ObjToScanQueueSet;
|
||||
class ParallelTaskTerminator;
|
||||
|
||||
class ParScanClosure: public OopsInGenClosure {
|
||||
protected:
|
||||
protected:
|
||||
ParScanThreadState* _par_scan_state;
|
||||
ParNewGeneration* _g;
|
||||
HeapWord* _boundary;
|
||||
void do_oop_work(oop* p,
|
||||
bool gc_barrier,
|
||||
bool root_scan);
|
||||
|
||||
void par_do_barrier(oop* p);
|
||||
|
||||
public:
|
||||
ParNewGeneration* _g;
|
||||
HeapWord* _boundary;
|
||||
template <class T> void inline par_do_barrier(T* p);
|
||||
template <class T> void inline do_oop_work(T* p,
|
||||
bool gc_barrier,
|
||||
bool root_scan);
|
||||
public:
|
||||
ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
|
||||
};
|
||||
|
||||
class ParScanWithBarrierClosure: public ParScanClosure {
|
||||
public:
|
||||
void do_oop(oop* p) { do_oop_work(p, true, false); }
|
||||
void do_oop_nv(oop* p) { do_oop_work(p, true, false); }
|
||||
public:
|
||||
ParScanWithBarrierClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
ParScanClosure(g, par_scan_state) {}
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p);
|
||||
inline void do_oop_nv(narrowOop* p);
|
||||
};
|
||||
|
||||
class ParScanWithoutBarrierClosure: public ParScanClosure {
|
||||
public:
|
||||
public:
|
||||
ParScanWithoutBarrierClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
ParScanClosure(g, par_scan_state) {}
|
||||
void do_oop(oop* p) { do_oop_work(p, false, false); }
|
||||
void do_oop_nv(oop* p) { do_oop_work(p, false, false); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p);
|
||||
inline void do_oop_nv(narrowOop* p);
|
||||
};
|
||||
|
||||
class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
|
||||
public:
|
||||
public:
|
||||
ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
ParScanClosure(g, par_scan_state) {}
|
||||
void do_oop(oop* p) { do_oop_work(p, true, true); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
class ParRootScanWithoutBarrierClosure: public ParScanClosure {
|
||||
public:
|
||||
public:
|
||||
ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state) :
|
||||
ParScanClosure(g, par_scan_state) {}
|
||||
void do_oop(oop* p) { do_oop_work(p, false, true); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
class ParScanWeakRefClosure: public ScanWeakRefClosure {
|
||||
protected:
|
||||
protected:
|
||||
ParScanThreadState* _par_scan_state;
|
||||
public:
|
||||
template <class T> inline void do_oop_work(T* p);
|
||||
public:
|
||||
ParScanWeakRefClosure(ParNewGeneration* g,
|
||||
ParScanThreadState* par_scan_state);
|
||||
void do_oop(oop* p);
|
||||
void do_oop_nv(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
inline void do_oop_nv(oop* p);
|
||||
inline void do_oop_nv(narrowOop* p);
|
||||
};
|
||||
|
||||
class ParEvacuateFollowersClosure: public VoidClosure {
|
||||
private:
|
||||
ParScanThreadState* _par_scan_state;
|
||||
ParScanThreadState* par_scan_state() { return _par_scan_state; }
|
||||
|
||||
|
@ -121,8 +128,7 @@ class ParEvacuateFollowersClosure: public VoidClosure {
|
|||
|
||||
ParallelTaskTerminator* _terminator;
|
||||
ParallelTaskTerminator* terminator() { return _terminator; }
|
||||
|
||||
public:
|
||||
public:
|
||||
ParEvacuateFollowersClosure(
|
||||
ParScanThreadState* par_scan_state_,
|
||||
ParScanWithoutBarrierClosure* to_space_closure_,
|
||||
|
@ -132,5 +138,5 @@ public:
|
|||
ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
|
||||
ObjToScanQueueSet* task_queues_,
|
||||
ParallelTaskTerminator* terminator_);
|
||||
void do_void();
|
||||
virtual void do_void();
|
||||
};
|
||||
|
|
|
@ -22,10 +22,9 @@
|
|||
*
|
||||
*/
|
||||
|
||||
inline void ParScanWeakRefClosure::do_oop(oop* p)
|
||||
{
|
||||
oop obj = *p;
|
||||
assert (obj != NULL, "null weak reference?");
|
||||
template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
|
||||
assert (!oopDesc::is_null(*p), "null weak reference?");
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
// weak references are sometimes scanned twice; must check
|
||||
// that to-space doesn't already contain this object
|
||||
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
|
||||
|
@ -33,41 +32,43 @@ inline void ParScanWeakRefClosure::do_oop(oop* p)
|
|||
// ParScanClosure::do_oop_work).
|
||||
klassOop objK = obj->klass();
|
||||
markOop m = obj->mark();
|
||||
oop new_obj;
|
||||
if (m->is_marked()) { // Contains forwarding pointer.
|
||||
*p = ParNewGeneration::real_forwardee(obj);
|
||||
new_obj = ParNewGeneration::real_forwardee(obj);
|
||||
} else {
|
||||
size_t obj_sz = obj->size_given_klass(objK->klass_part());
|
||||
*p = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
|
||||
obj, obj_sz, m);
|
||||
new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
|
||||
obj, obj_sz, m);
|
||||
}
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
}
|
||||
}
|
||||
|
||||
inline void ParScanWeakRefClosure::do_oop_nv(oop* p)
|
||||
{
|
||||
ParScanWeakRefClosure::do_oop(p);
|
||||
}
|
||||
inline void ParScanWeakRefClosure::do_oop_nv(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
|
||||
inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
|
||||
|
||||
inline void ParScanClosure::par_do_barrier(oop* p) {
|
||||
template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
|
||||
assert(generation()->is_in_reserved(p), "expected ref in generation");
|
||||
oop obj = *p;
|
||||
assert(obj != NULL, "expected non-null object");
|
||||
assert(!oopDesc::is_null(*p), "expected non-null object");
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
// If p points to a younger generation, mark the card.
|
||||
if ((HeapWord*)obj < gen_boundary()) {
|
||||
rs()->write_ref_field_gc_par(p, obj);
|
||||
}
|
||||
}
|
||||
|
||||
inline void ParScanClosure::do_oop_work(oop* p,
|
||||
template <class T>
|
||||
inline void ParScanClosure::do_oop_work(T* p,
|
||||
bool gc_barrier,
|
||||
bool root_scan) {
|
||||
oop obj = *p;
|
||||
assert((!Universe::heap()->is_in_reserved(p) ||
|
||||
generation()->is_in_reserved(p))
|
||||
&& (generation()->level() == 0 || gc_barrier),
|
||||
"The gen must be right, and we must be doing the barrier "
|
||||
"in older generations.");
|
||||
if (obj != NULL) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if ((HeapWord*)obj < _boundary) {
|
||||
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
|
||||
// OK, we need to ensure that it is copied.
|
||||
|
@ -78,11 +79,14 @@ inline void ParScanClosure::do_oop_work(oop* p,
|
|||
// forwarded.
|
||||
klassOop objK = obj->klass();
|
||||
markOop m = obj->mark();
|
||||
oop new_obj;
|
||||
if (m->is_marked()) { // Contains forwarding pointer.
|
||||
*p = ParNewGeneration::real_forwardee(obj);
|
||||
new_obj = ParNewGeneration::real_forwardee(obj);
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
} else {
|
||||
size_t obj_sz = obj->size_given_klass(objK->klass_part());
|
||||
*p = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
|
||||
new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
if (root_scan) {
|
||||
// This may have pushed an object. If we have a root
|
||||
// category with a lot of roots, can't let the queue get too
|
||||
|
@ -97,3 +101,9 @@ inline void ParScanClosure::do_oop_work(oop* p,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void ParScanWithBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
|
||||
inline void ParScanWithBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
|
||||
|
||||
inline void ParScanWithoutBarrierClosure::do_oop_nv(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
|
||||
inline void ParScanWithoutBarrierClosure::do_oop_nv(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
|
||||
|
|
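All of the scan closures in this file make the same copy-or-forward decision: if the object's mark word is already "marked", another thread has copied it and the mark holds the forwarding pointer; otherwise the object is copied to a survivor space now. Either way the slot is rewritten, re-encoding if it is a narrowOop slot. A condensed sketch assuming the surrounding HotSpot declarations and signatures shown in the hunks above:

// Sketch of the shared copy-or-forward step (names per the hunks above).
template <class T>
static void scan_young_slot(T* p, ParNewGeneration* g, ParScanThreadState* pss) {
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
  markOop m = obj->mark();
  oop new_obj;
  if (m->is_marked()) {                       // already copied; mark holds forwardee
    new_obj = ParNewGeneration::real_forwardee(obj);
  } else {
    size_t sz = obj->size_given_klass(obj->klass()->klass_part());
    new_obj = g->copy_to_survivor_space(pss, obj, sz, m);
  }
  oopDesc::encode_store_heap_oop_not_null(p, new_obj);  // store back, possibly compressed
}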
|
@ -28,17 +28,16 @@
|
|||
// Checks an individual oop for missing precise marks. Mark
|
||||
// may be either dirty or newgen.
|
||||
class CheckForUnmarkedOops : public OopClosure {
|
||||
PSYoungGen* _young_gen;
|
||||
private:
|
||||
PSYoungGen* _young_gen;
|
||||
CardTableExtension* _card_table;
|
||||
HeapWord* _unmarked_addr;
|
||||
jbyte* _unmarked_card;
|
||||
HeapWord* _unmarked_addr;
|
||||
jbyte* _unmarked_card;
|
||||
|
||||
public:
|
||||
CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) :
|
||||
_young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
|
||||
|
||||
virtual void do_oop(oop* p) {
|
||||
if (_young_gen->is_in_reserved(*p) &&
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
if (_young_gen->is_in_reserved(obj) &&
|
||||
!_card_table->addr_is_marked_imprecise(p)) {
|
||||
// Don't overwrite the first missing card mark
|
||||
if (_unmarked_addr == NULL) {
|
||||
|
@ -48,6 +47,13 @@ class CheckForUnmarkedOops : public OopClosure {
|
|||
}
|
||||
}
|
||||
|
||||
public:
|
||||
CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
|
||||
_young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
|
||||
|
||||
virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
|
||||
virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
|
||||
|
||||
bool has_unmarked_oop() {
|
||||
return _unmarked_addr != NULL;
|
||||
}
|
||||
|
@ -56,7 +62,8 @@ class CheckForUnmarkedOops : public OopClosure {
|
|||
// Checks all objects for the existence of some type of mark,
|
||||
// precise or imprecise, dirty or newgen.
|
||||
class CheckForUnmarkedObjects : public ObjectClosure {
|
||||
PSYoungGen* _young_gen;
|
||||
private:
|
||||
PSYoungGen* _young_gen;
|
||||
CardTableExtension* _card_table;
|
||||
|
||||
public:
|
||||
|
@ -75,7 +82,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
|
|||
// we test for missing precise marks first. If any are found, we don't
|
||||
// fail unless the object head is also unmarked.
|
||||
virtual void do_object(oop obj) {
|
||||
CheckForUnmarkedOops object_check( _young_gen, _card_table );
|
||||
CheckForUnmarkedOops object_check(_young_gen, _card_table);
|
||||
obj->oop_iterate(&object_check);
|
||||
if (object_check.has_unmarked_oop()) {
|
||||
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
|
||||
|
@ -85,19 +92,25 @@ class CheckForUnmarkedObjects : public ObjectClosure {
|
|||
|
||||
// Checks for precise marking of oops as newgen.
|
||||
class CheckForPreciseMarks : public OopClosure {
|
||||
PSYoungGen* _young_gen;
|
||||
private:
|
||||
PSYoungGen* _young_gen;
|
||||
CardTableExtension* _card_table;
|
||||
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
if (_young_gen->is_in_reserved(obj)) {
|
||||
assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
|
||||
_card_table->set_card_newgen(p);
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
|
||||
_young_gen(young_gen), _card_table(card_table) { }
|
||||
|
||||
virtual void do_oop(oop* p) {
|
||||
if (_young_gen->is_in_reserved(*p)) {
|
||||
assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
|
||||
_card_table->set_card_newgen(p);
|
||||
}
|
||||
}
|
||||
virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
|
||||
virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
|
||||
};
|
||||
|
||||
// We get passed the space_top value to prevent us from traversing into
|
||||
|
|
|
@ -80,7 +80,7 @@ class CardTableExtension : public CardTableModRefBS {
|
|||
static bool card_is_verify(int value) { return value == verify_card; }
|
||||
|
||||
// Card marking
|
||||
void inline_write_ref_field_gc(oop* field, oop new_val) {
|
||||
void inline_write_ref_field_gc(void* field, oop new_val) {
|
||||
jbyte* byte = byte_for(field);
|
||||
*byte = youngergen_card;
|
||||
}
|
||||
|
|
|
@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
|
|||
{
|
||||
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
|
||||
uint parallel_gc_threads = heap->gc_task_manager()->workers();
|
||||
TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
|
||||
ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
|
||||
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
|
||||
GCTaskQueue* q = GCTaskQueue::create();
|
||||
for(uint i=0; i<parallel_gc_threads; i++) {
|
||||
|
|
|
@ -33,8 +33,8 @@ const int PREFETCH_QUEUE_SIZE = 8;
|
|||
|
||||
class PrefetchQueue : public CHeapObj {
|
||||
private:
|
||||
oop* _prefetch_queue[PREFETCH_QUEUE_SIZE];
|
||||
unsigned int _prefetch_index;
|
||||
void* _prefetch_queue[PREFETCH_QUEUE_SIZE];
|
||||
uint _prefetch_index;
|
||||
|
||||
public:
|
||||
int length() { return PREFETCH_QUEUE_SIZE; }
|
||||
|
@ -46,20 +46,21 @@ class PrefetchQueue : public CHeapObj {
|
|||
_prefetch_index = 0;
|
||||
}
|
||||
|
||||
inline oop* push_and_pop(oop* p) {
|
||||
Prefetch::write((*p)->mark_addr(), 0);
|
||||
template <class T> inline void* push_and_pop(T* p) {
|
||||
oop o = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
Prefetch::write(o->mark_addr(), 0);
|
||||
// This prefetch is intended to make sure the size field of array
|
||||
// oops is in cache. It assumes that the object layout is
|
||||
// mark -> klass -> size, and that mark and klass are heapword
|
||||
// sized. If this should change, this prefetch will need updating!
|
||||
Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0);
|
||||
Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
|
||||
_prefetch_queue[_prefetch_index++] = p;
|
||||
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
|
||||
return _prefetch_queue[_prefetch_index];
|
||||
}
|
||||
|
||||
// Stores a NULL pointer in the pop'd location.
|
||||
inline oop* pop() {
|
||||
inline void* pop() {
|
||||
_prefetch_queue[_prefetch_index++] = NULL;
|
||||
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
|
||||
return _prefetch_queue[_prefetch_index];
|
||||
|
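PrefetchQueue is a small power-of-two ring: push_and_pop prefetches the new referent's mark word (and the word two HeapWords past it, where the array length sits), stores the slot, and returns the slot inserted PREFETCH_QUEUE_SIZE-1 pushes earlier, by which time its cache lines should have arrived. Widening to void* lets the same ring hold oop* or narrowOop* slots. The index arithmetic alone, as a self-contained sketch:

// Sketch of the ring arithmetic: with a power-of-two size, masking the
// incremented index wraps it, and the entry read after the store is the one
// pushed SIZE-1 iterations ago (NULL until the ring has filled once).
static const unsigned SIZE = 8;
static void* ring[SIZE] = { 0 };
static unsigned idx = 0;

static void* push_and_pop_sketch(void* p) {
  ring[idx++] = p;
  idx &= (SIZE - 1);        // wrap around
  return ring[idx];         // oldest surviving slot
}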
@ -168,7 +168,7 @@ void PSMarkSweepDecorator::precompact() {
|
|||
start_array->allocate_block(compact_top);
|
||||
}
|
||||
|
||||
debug_only(MarkSweep::register_live_oop(oop(q), size));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
|
||||
compact_top += size;
|
||||
assert(compact_top <= dest->space()->end(),
|
||||
"Exceeding space in destination");
|
||||
|
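The change from debug_only(...) to VALIDATE_MARK_SWEEP_ONLY(...) in these hunks decouples the mark-sweep validation bookkeeping from ordinary debug builds: the calls now compile away unless VALIDATE_MARK_SWEEP is defined. The macro presumably has the usual conditional-expansion shape (sketch, not quoted from the source):

// Likely shape of the guard macro implied by the hunks above.
#ifdef VALIDATE_MARK_SWEEP
  #define VALIDATE_MARK_SWEEP_ONLY(code) code
#else
  #define VALIDATE_MARK_SWEEP_ONLY(code)
#endif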
@ -234,7 +234,7 @@ void PSMarkSweepDecorator::precompact() {
|
|||
start_array->allocate_block(compact_top);
|
||||
}
|
||||
|
||||
debug_only(MarkSweep::register_live_oop(oop(q), sz));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
|
||||
compact_top += sz;
|
||||
assert(compact_top <= dest->space()->end(),
|
||||
"Exceeding space in destination");
|
||||
|
@ -326,15 +326,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
|
|||
HeapWord* end = _first_dead;
|
||||
|
||||
while (q < end) {
|
||||
debug_only(MarkSweep::track_interior_pointers(oop(q)));
|
||||
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
|
||||
// point all the oops to the new location
|
||||
size_t size = oop(q)->adjust_pointers();
|
||||
|
||||
debug_only(MarkSweep::check_interior_pointers());
|
||||
|
||||
debug_only(MarkSweep::validate_live_oop(oop(q), size));
|
||||
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
|
||||
q += size;
|
||||
}
|
||||
|
||||
|
@ -354,11 +350,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
|
|||
Prefetch::write(q, interval);
|
||||
if (oop(q)->is_gc_marked()) {
|
||||
// q is alive
|
||||
debug_only(MarkSweep::track_interior_pointers(oop(q)));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
|
||||
// point all the oops to the new location
|
||||
size_t size = oop(q)->adjust_pointers();
|
||||
debug_only(MarkSweep::check_interior_pointers());
|
||||
debug_only(MarkSweep::validate_live_oop(oop(q), size));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
|
||||
debug_only(prev_q = q);
|
||||
q += size;
|
||||
} else {
|
||||
|
@ -392,7 +388,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
|
|||
while (q < end) {
|
||||
size_t size = oop(q)->size();
|
||||
assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
|
||||
debug_only(MarkSweep::live_oop_moved_to(q, size, q));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
|
||||
debug_only(prev_q = q);
|
||||
q += size;
|
||||
}
|
||||
|
@ -427,7 +423,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
|
|||
Prefetch::write(compaction_top, copy_interval);
|
||||
|
||||
// copy object and reinit its mark
|
||||
debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
|
||||
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
|
||||
assert(q != compaction_top, "everything in this pass should be moving");
|
||||
Copy::aligned_conjoint_words(q, compaction_top, size);
|
||||
oop(compaction_top)->init_mark();
|
||||
|
|
|
@ -81,14 +81,14 @@ bool PSParallelCompact::_dwl_initialized = false;
|
|||
#endif // #ifdef ASSERT
|
||||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
GrowableArray<oop*>* PSParallelCompact::_root_refs_stack = NULL;
|
||||
GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
|
||||
GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
|
||||
GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
|
||||
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
|
||||
size_t PSParallelCompact::_live_oops_index = 0;
|
||||
size_t PSParallelCompact::_live_oops_index_at_perm = 0;
|
||||
GrowableArray<oop*>* PSParallelCompact::_other_refs_stack = NULL;
|
||||
GrowableArray<oop*>* PSParallelCompact::_adjusted_pointers = NULL;
|
||||
GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
|
||||
GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
|
||||
bool PSParallelCompact::_pointer_tracking = false;
|
||||
bool PSParallelCompact::_root_tracking = true;
|
||||
|
||||
|
@ -811,46 +811,23 @@ ParMarkBitMap PSParallelCompact::_mark_bitmap;
|
|||
ParallelCompactData PSParallelCompact::_summary_data;
|
||||
|
||||
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
|
||||
|
||||
void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
|
||||
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
|
||||
|
||||
void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
|
||||
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
|
||||
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
|
||||
|
||||
void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
if (ValidateMarkSweep) {
|
||||
if (!Universe::heap()->is_in_reserved(p)) {
|
||||
_root_refs_stack->push(p);
|
||||
} else {
|
||||
_other_refs_stack->push(p);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
mark_and_push(_compaction_manager, p);
|
||||
}
|
||||
void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
|
||||
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
|
||||
|
||||
void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
|
||||
oop* p) {
|
||||
assert(Universe::heap()->is_in_reserved(p),
|
||||
"we should only be traversing objects here");
|
||||
oop m = *p;
|
||||
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
|
||||
if (mark_obj(m)) {
|
||||
m->follow_contents(cm); // Follow contents of the marked object
|
||||
}
|
||||
}
|
||||
}
|
||||
void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
|
||||
|
||||
// Anything associated with this variable is temporary.
|
||||
|
||||
void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
|
||||
oop* p) {
|
||||
// Push marked object, contents will be followed later
|
||||
oop m = *p;
|
||||
if (mark_obj(m)) {
|
||||
// This thread marked the object and
|
||||
// owns the subsequent processing of it.
|
||||
cm->save_for_scanning(m);
|
||||
}
|
||||
}
|
||||
void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
|
||||
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
|
||||
|
||||
void PSParallelCompact::post_initialize() {
|
||||
ParallelScavengeHeap* heap = gc_heap();
|
||||
|
@ -2751,23 +2728,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
|
|||
young_gen->move_and_update(cm);
|
||||
}
|
||||
|
||||
void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
|
||||
assert(!Universe::heap()->is_in_reserved(p),
|
||||
"roots shouldn't be things within the heap");
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
if (ValidateMarkSweep) {
|
||||
guarantee(!_root_refs_stack->contains(p), "should only be in here once");
|
||||
_root_refs_stack->push(p);
|
||||
}
|
||||
#endif
|
||||
oop m = *p;
|
||||
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
|
||||
if (mark_obj(m)) {
|
||||
m->follow_contents(cm); // Follow contents of the marked object
|
||||
}
|
||||
}
|
||||
follow_stack(cm);
|
||||
}
|
||||
|
||||
void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
|
||||
while(!cm->overflow_stack()->is_empty()) {
|
||||
|
@ -2807,7 +2767,7 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {
|
|||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
|
||||
void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
|
||||
void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
|
||||
if (!ValidateMarkSweep)
|
||||
return;
|
||||
|
||||
|
@ -2821,7 +2781,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
|
|||
if (index != -1) {
|
||||
int l = _root_refs_stack->length();
|
||||
if (l > 0 && l - 1 != index) {
|
||||
oop* last = _root_refs_stack->pop();
|
||||
void* last = _root_refs_stack->pop();
|
||||
assert(last != p, "should be different");
|
||||
_root_refs_stack->at_put(index, last);
|
||||
} else {
|
||||
|
@ -2832,7 +2792,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
|
|||
}
|
||||
|
||||
|
||||
void PSParallelCompact::check_adjust_pointer(oop* p) {
|
||||
void PSParallelCompact::check_adjust_pointer(void* p) {
|
||||
_adjusted_pointers->push(p);
|
||||
}
|
||||
|
||||
|
@ -2840,7 +2800,8 @@ void PSParallelCompact::check_adjust_pointer(oop* p) {
|
|||
class AdjusterTracker: public OopClosure {
|
||||
public:
|
||||
AdjusterTracker() {};
|
||||
void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
|
||||
void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
|
||||
void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
|
||||
};
|
||||
|
||||
|
||||
|
@ -2948,25 +2909,6 @@ void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
|
|||
}
|
||||
#endif //VALIDATE_MARK_SWEEP
|
||||
|
||||
void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
|
||||
oop obj = *p;
|
||||
VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
|
||||
if (obj != NULL) {
|
||||
oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
|
||||
assert(new_pointer != NULL || // is forwarding ptr?
|
||||
obj->is_shared(), // never forwarded?
|
||||
"should have a new location");
|
||||
// Just always do the update unconditionally?
|
||||
if (new_pointer != NULL) {
|
||||
*p = new_pointer;
|
||||
assert(Universe::heap()->is_in_reserved(new_pointer),
|
||||
"should be in object space");
|
||||
VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
|
||||
}
|
||||
}
|
||||
VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
|
||||
}
|
||||
|
||||
// Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
|
||||
void
|
||||
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
|
||||
|
|
|
@ -80,11 +80,11 @@ public:
|
|||
static const size_t ChunkSize;
|
||||
static const size_t ChunkSizeBytes;
|
||||
|
||||
// Mask for the bits in a size_t to get an offset within a chunk.
|
||||
// Mask for the bits in a size_t to get an offset within a chunk.
|
||||
static const size_t ChunkSizeOffsetMask;
|
||||
// Mask for the bits in a pointer to get an offset within a chunk.
|
||||
// Mask for the bits in a pointer to get an offset within a chunk.
|
||||
static const size_t ChunkAddrOffsetMask;
|
||||
// Mask for the bits in a pointer to get the address of the start of a chunk.
|
||||
// Mask for the bits in a pointer to get the address of the start of a chunk.
|
||||
static const size_t ChunkAddrMask;
|
||||
|
||||
static const size_t Log2BlockSize;
|
||||
|
@ -229,7 +229,7 @@ public:
|
|||
// 1 bit marks the end of an object.
|
||||
class BlockData
|
||||
{
|
||||
public:
|
||||
public:
|
||||
typedef short int blk_ofs_t;
|
||||
|
||||
blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }
|
||||
|
@ -269,7 +269,7 @@ public:
|
|||
return !_first_is_start_bit;
|
||||
}
|
||||
|
||||
private:
|
||||
private:
|
||||
blk_ofs_t _offset;
|
||||
// This is temporary until the mark_bitmap is separated into
|
||||
// a start bit array and an end bit array.
|
||||
|
@ -277,7 +277,7 @@ public:
|
|||
#ifdef ASSERT
|
||||
short _set_phase;
|
||||
static short _cur_phase;
|
||||
public:
|
||||
public:
|
||||
static void set_cur_phase(short v) { _cur_phase = v; }
|
||||
#endif
|
||||
};
|
||||
|
@ -729,48 +729,51 @@ class PSParallelCompact : AllStatic {
|
|||
} SpaceId;
|
||||
|
||||
public:
|
||||
// In line closure decls
|
||||
// Inline closure decls
|
||||
//
|
||||
|
||||
class IsAliveClosure: public BoolObjectClosure {
|
||||
public:
|
||||
void do_object(oop p) { assert(false, "don't call"); }
|
||||
bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
|
||||
virtual void do_object(oop p);
|
||||
virtual bool do_object_b(oop p);
|
||||
};
|
||||
|
||||
class KeepAliveClosure: public OopClosure {
|
||||
private:
|
||||
ParCompactionManager* _compaction_manager;
|
||||
protected:
|
||||
template <class T> inline void do_oop_work(T* p);
|
||||
public:
|
||||
KeepAliveClosure(ParCompactionManager* cm) {
|
||||
_compaction_manager = cm;
|
||||
}
|
||||
void do_oop(oop* p);
|
||||
KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
class FollowRootClosure: public OopsInGenClosure{
|
||||
// Current unused
|
||||
class FollowRootClosure: public OopsInGenClosure {
|
||||
private:
|
||||
ParCompactionManager* _compaction_manager;
|
||||
public:
|
||||
FollowRootClosure(ParCompactionManager* cm) {
|
||||
_compaction_manager = cm;
|
||||
}
|
||||
void do_oop(oop* p) { follow_root(_compaction_manager, p); }
|
||||
FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
};
|
||||
|
||||
class FollowStackClosure: public VoidClosure {
|
||||
private:
|
||||
ParCompactionManager* _compaction_manager;
|
||||
public:
|
||||
FollowStackClosure(ParCompactionManager* cm) {
|
||||
_compaction_manager = cm;
|
||||
}
|
||||
void do_void() { follow_stack(_compaction_manager); }
|
||||
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
class AdjustPointerClosure: public OopsInGenClosure {
|
||||
private:
|
||||
bool _is_root;
|
||||
public:
|
||||
AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
|
||||
void do_oop(oop* p) { adjust_pointer(p, _is_root); }
|
||||
AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
// Closure for verifying update of pointers. Does not
|
||||
|
@ -805,8 +808,6 @@ class PSParallelCompact : AllStatic {
|
|||
friend class instanceKlassKlass;
|
||||
friend class RefProcTaskProxy;
|
||||
|
||||
static void mark_and_push_internal(ParCompactionManager* cm, oop* p);
|
||||
|
||||
private:
|
||||
static elapsedTimer _accumulated_time;
|
||||
static unsigned int _total_invocations;
|
||||
|
@ -838,9 +839,9 @@ class PSParallelCompact : AllStatic {
|
|||
|
||||
private:
|
||||
// Closure accessors
|
||||
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
|
||||
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
|
||||
static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
|
||||
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
|
||||
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
|
||||
|
||||
static void initialize_space_info();
|
||||
|
||||
|
@ -859,10 +860,11 @@ class PSParallelCompact : AllStatic {
|
|||
static void follow_stack(ParCompactionManager* cm);
|
||||
static void follow_weak_klass_links(ParCompactionManager* cm);
|
||||
|
||||
static void adjust_pointer(oop* p, bool is_root);
|
||||
template <class T> static inline void adjust_pointer(T* p, bool is_root);
|
||||
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
|
||||
|
||||
static void follow_root(ParCompactionManager* cm, oop* p);
|
||||
template <class T>
|
||||
static inline void follow_root(ParCompactionManager* cm, T* p);
|
||||
|
||||
// Compute the dense prefix for the designated space. This is an experimental
|
||||
// implementation currently not used in production.
|
||||
|
@ -971,14 +973,14 @@ class PSParallelCompact : AllStatic {
|
|||
|
||||
protected:
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
static GrowableArray<oop*>* _root_refs_stack;
|
||||
static GrowableArray<void*>* _root_refs_stack;
|
||||
static GrowableArray<oop> * _live_oops;
|
||||
static GrowableArray<oop> * _live_oops_moved_to;
|
||||
static GrowableArray<size_t>* _live_oops_size;
|
||||
static size_t _live_oops_index;
|
||||
static size_t _live_oops_index_at_perm;
|
||||
static GrowableArray<oop*>* _other_refs_stack;
|
||||
static GrowableArray<oop*>* _adjusted_pointers;
|
||||
static GrowableArray<void*>* _other_refs_stack;
|
||||
static GrowableArray<void*>* _adjusted_pointers;
|
||||
static bool _pointer_tracking;
|
||||
static bool _root_tracking;
|
||||
|
||||
|
@ -999,12 +1001,12 @@ class PSParallelCompact : AllStatic {
|
|||
|
||||
public:
|
||||
class MarkAndPushClosure: public OopClosure {
|
||||
private:
|
||||
ParCompactionManager* _compaction_manager;
|
||||
public:
|
||||
MarkAndPushClosure(ParCompactionManager* cm) {
|
||||
_compaction_manager = cm;
|
||||
}
|
||||
void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
|
||||
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
};
|
||||
|
||||
|
@ -1038,21 +1040,9 @@ class PSParallelCompact : AllStatic {
|
|||
|
||||
// Marking support
|
||||
static inline bool mark_obj(oop obj);
|
||||
static bool mark_obj(oop* p) {
|
||||
if (*p != NULL) {
|
||||
return mark_obj(*p);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
static void mark_and_push(ParCompactionManager* cm, oop* p) {
|
||||
// Check mark and maybe push on
|
||||
// marking stack
|
||||
oop m = *p;
|
||||
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
|
||||
mark_and_push_internal(cm, p);
|
||||
}
|
||||
}
|
||||
// Check mark and maybe push on marking stack
|
||||
template <class T> static inline void mark_and_push(ParCompactionManager* cm,
|
||||
T* p);
|
||||
|
||||
// Compaction support.
|
||||
// Return true if p is in the range [beg_addr, end_addr).
|
||||
|
@ -1127,13 +1117,17 @@ class PSParallelCompact : AllStatic {
|
|||
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
|
||||
|
||||
// Mark pointer and follow contents.
|
||||
static void mark_and_follow(ParCompactionManager* cm, oop* p);
|
||||
template <class T>
|
||||
static inline void mark_and_follow(ParCompactionManager* cm, T* p);
|
||||
|
||||
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
|
||||
static ParallelCompactData& summary_data() { return _summary_data; }
|
||||
|
||||
static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
|
||||
static inline void adjust_pointer(oop* p,
|
||||
static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
|
||||
static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
|
||||
|
||||
template <class T>
|
||||
static inline void adjust_pointer(T* p,
|
||||
HeapWord* beg_addr,
|
||||
HeapWord* end_addr);
|
||||
|
||||
|
@ -1147,8 +1141,8 @@ class PSParallelCompact : AllStatic {
|
|||
static jlong millis_since_last_gc();
|
||||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
|
||||
static void check_adjust_pointer(oop* p); // Adjust this pointer
|
||||
static void track_adjusted_pointer(void* p, bool isroot);
|
||||
static void check_adjust_pointer(void* p);
|
||||
static void track_interior_pointers(oop obj);
|
||||
static void check_interior_pointers();
|
||||
|
||||
|
@ -1185,7 +1179,7 @@ class PSParallelCompact : AllStatic {
|
|||
#endif // #ifdef ASSERT
|
||||
};
|
||||
|
||||
bool PSParallelCompact::mark_obj(oop obj) {
|
||||
inline bool PSParallelCompact::mark_obj(oop obj) {
|
||||
const int obj_size = obj->size();
|
||||
if (mark_bitmap()->mark_obj(obj, obj_size)) {
|
||||
_summary_data.add_obj(obj, obj_size);
|
||||
|
@ -1195,13 +1189,94 @@ bool PSParallelCompact::mark_obj(oop obj) {
|
|||
}
|
||||
}
|
||||
|
||||
inline bool PSParallelCompact::print_phases()
{
template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  assert(!Universe::heap()->is_in_reserved(p),
         "roots shouldn't be things within the heap");
#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    guarantee(!_root_refs_stack->contains(p), "should only be in here once");
    _root_refs_stack->push(p);
  }
#endif
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
  follow_stack(cm);
}

template <class T>
inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
                                               T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        obj->follow_contents(cm);
      }
    }
  }
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (mark_bitmap()->is_unmarked(obj)) {
      if (mark_obj(obj)) {
        // This thread marked the object and owns the subsequent processing of it.
        cm->save_for_scanning(obj);
      }
    }
  }
}

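mark_obj succeeds for exactly one thread per object, which is what lets the winner above defer the object to its own compaction manager with save_for_scanning and guarantees each object is scanned once. A minimal, self-contained sketch of that claim-by-marking idea using an atomic bitmap word; ParMarkBitMap's real implementation differs, so treat this purely as an analogy.

// --- sketch (not part of the patch) ---
#include <atomic>
#include <cstdint>
#include <cstdio>

// One bit per object; try_claim() returns true only for the thread that
// actually flipped the bit from 0 to 1, i.e. the thread that owns the object.
static std::atomic<uint64_t> mark_word{0};

bool try_claim(unsigned bit) {
  const uint64_t mask = uint64_t(1) << bit;
  return (mark_word.fetch_or(mask, std::memory_order_acq_rel) & mask) == 0;
}

int main() {
  std::printf("first claim:  %d\n", try_claim(42));  // 1 -> caller scans the object
  std::printf("second claim: %d\n", try_claim(42));  // 0 -> another thread already owns it
}
// --- end sketch ---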
template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
    assert(new_obj != NULL ||          // is forwarding ptr?
           obj->is_shared(),           // never forwarded?
           "should be forwarded");
    // Just always do the update unconditionally?
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
  }
  VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
}

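Unlike the serial mark-compact collector, the parallel compactor does not keep a forwarding pointer in each object; calc_new_pointer recomputes the destination from per-chunk summary data, roughly the chunk's post-compaction destination plus the live bytes that precede the object within its chunk, which is why adjust_pointer can run in any order and in parallel. The sketch below is a deliberately simplified model of that calculation under assumed data structures (fixed-size chunks, a precomputed live-bytes value), not the actual ParallelCompactData code.

// --- sketch (not part of the patch) ---
#include <cstddef>
#include <cstdio>

const size_t kChunkSize = 4096;          // assumed fixed chunk size

struct ChunkSummary {
  size_t destination;   // post-compaction start address of this chunk's live data
};

size_t calc_new_address(size_t old_addr,
                        const ChunkSummary* summaries,
                        size_t live_bytes_before_in_chunk) {
  const size_t chunk_index = old_addr / kChunkSize;
  return summaries[chunk_index].destination + live_bytes_before_in_chunk;
}

int main() {
  ChunkSummary summaries[2] = { {0}, {1024} };   // chunk 1's live data lands at 1024
  // An object at old address 5000 (chunk 1) preceded by 96 live bytes in that
  // chunk moves to 1024 + 96.
  std::printf("new address: %zu\n", calc_new_address(5000, summaries, 96));
}
// --- end sketch ---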
template <class T>
|
||||
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
if (ValidateMarkSweep) {
|
||||
if (!Universe::heap()->is_in_reserved(p)) {
|
||||
_root_refs_stack->push(p);
|
||||
} else {
|
||||
_other_refs_stack->push(p);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
mark_and_push(_compaction_manager, p);
|
||||
}
|
||||
|
||||
inline bool PSParallelCompact::print_phases() {
|
||||
return _print_phases;
|
||||
}
|
||||
|
||||
inline double PSParallelCompact::normal_distribution(double density)
|
||||
{
|
||||
inline double PSParallelCompact::normal_distribution(double density) {
|
||||
assert(_dwl_initialized, "uninitialized");
|
||||
const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
|
||||
return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
|
||||
|
@ -1257,10 +1332,11 @@ inline bool PSParallelCompact::should_update_klass(klassOop k) {
|
|||
return ((HeapWord*) k) >= dense_prefix(perm_space_id);
|
||||
}
|
||||
|
||||
inline void PSParallelCompact::adjust_pointer(oop* p,
|
||||
template <class T>
|
||||
inline void PSParallelCompact::adjust_pointer(T* p,
|
||||
HeapWord* beg_addr,
|
||||
HeapWord* end_addr) {
|
||||
if (is_in(p, beg_addr, end_addr)) {
|
||||
if (is_in((HeapWord*)p, beg_addr, end_addr)) {
|
||||
adjust_pointer(p);
|
||||
}
|
||||
}
|
||||
|
@ -1332,18 +1408,18 @@ class UpdateOnlyClosure: public ParMarkBitMapClosure {
|
|||
inline void do_addr(HeapWord* addr);
|
||||
};
|
||||
|
||||
inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
|
||||
inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
|
||||
{
|
||||
_start_array->allocate_block(addr);
|
||||
oop(addr)->update_contents(compaction_manager());
|
||||
}
|
||||
|
||||
class FillClosure: public ParMarkBitMapClosure {
|
||||
public:
|
||||
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
|
||||
public:
|
||||
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
|
||||
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
|
||||
_space_id(space_id),
|
||||
_start_array(PSParallelCompact::start_array(space_id))
|
||||
{
|
||||
_start_array(PSParallelCompact::start_array(space_id)) {
|
||||
assert(_space_id == PSParallelCompact::perm_space_id ||
|
||||
_space_id == PSParallelCompact::old_space_id,
|
||||
"cannot use FillClosure in the young gen");
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_psPromotionLAB.cpp.incl"
|
||||
|
||||
const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
|
||||
size_t PSPromotionLAB::filler_header_size;
|
||||
|
||||
// This is the shared initialization code. It sets up the basic pointers,
|
||||
// and allows enough extra space for a filler object. We call a virtual
|
||||
|
@ -41,6 +41,10 @@ void PSPromotionLAB::initialize(MemRegion lab) {
|
|||
set_end(end);
|
||||
set_top(bottom);
|
||||
|
||||
// Initialize after VM starts up because header_size depends on compressed
|
||||
// oops.
|
||||
filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
|
||||
|
||||
// We can be initialized to a zero size!
|
||||
if (free() > 0) {
|
||||
if (ZapUnusedHeapArea) {
|
||||
|
|
|
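filler_header_size stops being a compile-time constant because the header of the int-array filler depends on UseCompressedOops, and that flag is only known after argument parsing, i.e. after static initializers have already run. The arithmetic below illustrates why the value differs; the layout numbers are the usual 64-bit figures quoted from memory, assumptions of the example rather than something this patch defines.

// --- sketch (not part of the patch) ---
#include <cstdio>

// Illustrative 64-bit int-array header size (mark word + klass + length),
// rounded up to 8-byte object alignment. Assumed layout, not authoritative.
int filler_header_bytes(bool compressed_oops) {
  int header = 8 /* mark */ + (compressed_oops ? 4 : 8) /* klass */ + 4 /* length */;
  return (header + 7) & ~7;   // byte-level analogue of align_object_size
}

int main() {
  std::printf("compressed oops: %d bytes, full oops: %d bytes\n",
              filler_header_bytes(true), filler_header_bytes(false));
  // prints 16 vs 24 -- computing this before the flag is parsed would bake in
  // the wrong size, hence the runtime initialization in PSPromotionLAB::initialize.
}
// --- end sketch ---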
@ -32,7 +32,7 @@ class ObjectStartArray;
|
|||
|
||||
class PSPromotionLAB : public CHeapObj {
|
||||
protected:
|
||||
static const size_t filler_header_size;
|
||||
static size_t filler_header_size;
|
||||
|
||||
enum LabState {
|
||||
needs_flush,
|
||||
|
|
|
@ -182,7 +182,7 @@ PSPromotionManager::PSPromotionManager() {
|
|||
claimed_stack_depth()->initialize();
|
||||
queue_size = claimed_stack_depth()->max_elems();
|
||||
// We want the overflow stack to be permanent
|
||||
_overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
|
||||
_overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
|
||||
_overflow_stack_breadth = NULL;
|
||||
} else {
|
||||
claimed_stack_breadth()->initialize();
|
||||
|
@ -240,6 +240,7 @@ void PSPromotionManager::reset() {
|
|||
#endif // PS_PM_STATS
|
||||
}
|
||||
|
||||
|
||||
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
|
||||
assert(depth_first(), "invariant");
|
||||
assert(overflow_stack_depth() != NULL, "invariant");
|
||||
|
@ -254,13 +255,15 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
|
|||
#endif /* ASSERT */
|
||||
|
||||
do {
|
||||
oop* p;
|
||||
StarTask p;
|
||||
|
||||
// Drain overflow stack first, so other threads can steal from
|
||||
// claimed stack while we work.
|
||||
while(!overflow_stack_depth()->is_empty()) {
|
||||
p = overflow_stack_depth()->pop();
|
||||
process_popped_location_depth(p);
|
||||
// linux compiler wants different overloaded operator= in taskqueue to
|
||||
// assign to p that the other compilers don't like.
|
||||
StarTask ptr = overflow_stack_depth()->pop();
|
||||
process_popped_location_depth(ptr);
|
||||
}
|
||||
|
||||
if (totally_drain) {
|
||||
|
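The overflow stack changes element type from oop* to StarTask because a scanning task can now be either a full-width slot (oop*) or a compressed slot (narrowOop*), and the queue has to remember which it was handed. StarTask packs that discriminator into the low bits of the pointer. A self-contained sketch of that kind of tagged task follows; the class and constant names are invented for the example, while the real StarTask lives in the task queue code.

// --- sketch (not part of the patch) ---
#include <cassert>
#include <cstdint>
#include <cstdio>

// A task is either a full-width slot or a narrow (compressed) slot.
// Tag bit 0 records which, so one queue can carry both kinds.
class TaggedSlotTask {
  void* _holder;                        // pointer with the tag in bit 0
  enum { NARROW_BIT = 0x1 };
 public:
  TaggedSlotTask() : _holder(nullptr) {}
  explicit TaggedSlotTask(uint64_t* full_slot)    // slots are at least 4-byte aligned,
      : _holder(full_slot) {}                     // so bit 0 is free to use as a tag
  explicit TaggedSlotTask(uint32_t* narrow_slot)
      : _holder((void*)((uintptr_t)narrow_slot | NARROW_BIT)) {}
  bool is_narrow() const { return ((uintptr_t)_holder & NARROW_BIT) != 0; }
  uint64_t* as_full() const { assert(!is_narrow(), "wrong kind"); return (uint64_t*)_holder; }
  uint32_t* as_narrow() const {
    assert(is_narrow(), "wrong kind");
    return (uint32_t*)((uintptr_t)_holder & ~(uintptr_t)NARROW_BIT);
  }
};

int main() {
  uint32_t narrow_slot = 0;
  TaggedSlotTask t(&narrow_slot);
  std::printf("narrow? %d, same address? %d\n",
              t.is_narrow(), t.as_narrow() == &narrow_slot);
}
// --- end sketch ---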
@ -365,7 +368,7 @@ void PSPromotionManager::flush_labs() {
|
|||
//
|
||||
|
||||
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
|
||||
assert(PSScavenge::should_scavenge(o), "Sanity");
|
||||
assert(PSScavenge::should_scavenge(&o), "Sanity");
|
||||
|
||||
oop new_obj = NULL;
|
||||
|
||||
|
@ -530,16 +533,30 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
|
|||
// This code must come after the CAS test, or it will print incorrect
|
||||
// information.
|
||||
if (TraceScavenge) {
|
||||
gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
|
||||
PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
|
||||
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
|
||||
PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
|
||||
new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
return new_obj;
|
||||
}
|
||||
|
||||
template <class T> void PSPromotionManager::process_array_chunk_work(
|
||||
oop obj,
|
||||
int start, int end) {
|
||||
assert(start < end, "invariant");
|
||||
T* const base = (T*)objArrayOop(obj)->base();
|
||||
T* p = base + start;
|
||||
T* const chunk_end = base + end;
|
||||
while (p < chunk_end) {
|
||||
if (PSScavenge::should_scavenge(p)) {
|
||||
claim_or_forward_depth(p);
|
||||
}
|
||||
++p;
|
||||
}
|
||||
}
|
||||
|
||||
void PSPromotionManager::process_array_chunk(oop old) {
|
||||
assert(PSChunkLargeArrays, "invariant");
|
||||
assert(old->is_objArray(), "invariant");
|
||||
|
@ -569,15 +586,10 @@ void PSPromotionManager::process_array_chunk(oop old) {
|
|||
arrayOop(old)->set_length(actual_length);
|
||||
}
|
||||
|
||||
assert(start < end, "invariant");
|
||||
oop* const base = objArrayOop(obj)->base();
|
||||
oop* p = base + start;
|
||||
oop* const chunk_end = base + end;
|
||||
while (p < chunk_end) {
|
||||
if (PSScavenge::should_scavenge(*p)) {
|
||||
claim_or_forward_depth(p);
|
||||
}
|
||||
++p;
|
||||
if (UseCompressedOops) {
|
||||
process_array_chunk_work<narrowOop>(obj, start, end);
|
||||
} else {
|
||||
process_array_chunk_work<oop>(obj, start, end);
|
||||
}
|
||||
}
|
||||
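process_array_chunk_work is one half of the large-array strategy: rather than scanning a huge object array as a single task, the manager scans a bounded slice and, as far as these hunks show, re-pushes the remainder as a masked continuation (temporarily using the array's length field as a progress cursor) so other GC threads can help. A compact, self-contained sketch of the chunking idea using plain indices instead of HotSpot's masked-oop trick:

// --- sketch (not part of the patch) ---
#include <cstdio>
#include <deque>
#include <utility>
#include <vector>

// Scan a large array in bounded chunks, re-queueing the remainder so the work
// can be interleaved with (or stolen by) other workers.
const int kChunkElems = 4;

typedef std::pair<const std::vector<int>*, int> ArrayTask;   // (array, next index)

void scan_element(int v) { std::printf("%d ", v); }

void process_array_chunk(std::deque<ArrayTask>& queue, ArrayTask task) {
  const std::vector<int>& a = *task.first;
  int start = task.second;
  int end = start + kChunkElems < (int)a.size() ? start + kChunkElems : (int)a.size();
  if (end < (int)a.size()) {
    queue.push_back(ArrayTask(task.first, end));   // continuation for the rest
  }
  for (int i = start; i < end; ++i) scan_element(a[i]);
}

int main() {
  std::vector<int> big(10);
  for (int i = 0; i < 10; ++i) big[i] = i;
  std::deque<ArrayTask> queue;
  queue.push_back(ArrayTask(&big, 0));
  while (!queue.empty()) {
    ArrayTask t = queue.front(); queue.pop_front();
    process_array_chunk(queue, t);
  }
  std::printf("\n");
}
// --- end sketch ---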
|
||||
|
|
|
@ -42,8 +42,6 @@ class MutableSpace;
|
|||
class PSOldGen;
|
||||
class ParCompactionManager;
|
||||
|
||||
#define PS_CHUNKED_ARRAY_OOP_MASK 1
|
||||
|
||||
#define PS_PM_STATS 0
|
||||
|
||||
class PSPromotionManager : public CHeapObj {
|
||||
|
@ -80,7 +78,7 @@ class PSPromotionManager : public CHeapObj {
|
|||
PrefetchQueue _prefetch_queue;
|
||||
|
||||
OopStarTaskQueue _claimed_stack_depth;
|
||||
GrowableArray<oop*>* _overflow_stack_depth;
|
||||
GrowableArray<StarTask>* _overflow_stack_depth;
|
||||
OopTaskQueue _claimed_stack_breadth;
|
||||
GrowableArray<oop>* _overflow_stack_breadth;
|
||||
|
||||
|
@ -92,13 +90,15 @@ class PSPromotionManager : public CHeapObj {
|
|||
uint _min_array_size_for_chunking;
|
||||
|
||||
// Accessors
|
||||
static PSOldGen* old_gen() { return _old_gen; }
|
||||
static MutableSpace* young_space() { return _young_space; }
|
||||
static PSOldGen* old_gen() { return _old_gen; }
|
||||
static MutableSpace* young_space() { return _young_space; }
|
||||
|
||||
inline static PSPromotionManager* manager_array(int index);
|
||||
template <class T> inline void claim_or_forward_internal_depth(T* p);
|
||||
template <class T> inline void claim_or_forward_internal_breadth(T* p);
|
||||
|
||||
GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
|
||||
GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
|
||||
GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
|
||||
GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
|
||||
|
||||
// On the task queues we push reference locations as well as
|
||||
// partially-scanned arrays (in the latter case, we push an oop to
|
||||
|
@ -116,27 +116,37 @@ class PSPromotionManager : public CHeapObj {
|
|||
// (oop). We do all the necessary casting in the mask / unmask
|
||||
// methods to avoid sprinkling the rest of the code with more casts.
|
||||
|
||||
bool is_oop_masked(oop* p) {
|
||||
return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
|
||||
// These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
|
||||
// future masks) can't conflict with COMPRESSED_OOP_MASK
|
||||
#define PS_CHUNKED_ARRAY_OOP_MASK 0x2
|
||||
|
||||
bool is_oop_masked(StarTask p) {
|
||||
// If something is marked chunked it's always treated like wide oop*
|
||||
return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
|
||||
PS_CHUNKED_ARRAY_OOP_MASK;
|
||||
}
|
||||
|
||||
oop* mask_chunked_array_oop(oop obj) {
|
||||
assert(!is_oop_masked((oop*) obj), "invariant");
|
||||
oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
|
||||
oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
|
||||
assert(is_oop_masked(ret), "invariant");
|
||||
return ret;
|
||||
}
|
||||
|
||||
oop unmask_chunked_array_oop(oop* p) {
|
||||
oop unmask_chunked_array_oop(StarTask p) {
|
||||
assert(is_oop_masked(p), "invariant");
|
||||
oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
|
||||
assert(!p.is_narrow(), "chunked array oops cannot be narrow");
|
||||
oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
|
||||
oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
|
||||
assert(!is_oop_masked((oop*) ret), "invariant");
|
||||
return ret;
|
||||
}
|
||||
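PS_CHUNKED_ARRAY_OOP_MASK moves up to 0x2 because the low bit of a queued entry is now taken by the compressed-slot tag (COMPRESSED_OOP_MASK), so the "partially scanned array" marker has to live one bit higher; the assert that a chunked entry is never narrow reflects that a chunked task always carries a full-width oop. A quick illustration of the two-bit tagging scheme: the 0x2 value mirrors this hunk, while the 0x1 value for the compressed tag is an assumption of the sketch, since only the non-conflict requirement is stated here.

// --- sketch (not part of the patch) ---
#include <cstdint>
#include <cstdio>

// Low-bit tags on queued entries; objects and slots are at least 4-byte
// aligned, so the two low bits of a pointer are available.
const uintptr_t kCompressedMask   = 0x1;  // entry is a narrow (compressed) slot -- assumed value
const uintptr_t kChunkedArrayMask = 0x2;  // entry is a partially scanned array   -- value from this hunk

bool is_narrow(uintptr_t e)  { return (e & kCompressedMask) != 0; }
bool is_chunked(uintptr_t e) { return (e & kChunkedArrayMask) != 0; }

int main() {
  uintptr_t fake_obj = 0x10000;                       // stand-in object address
  uintptr_t chunked  = fake_obj | kChunkedArrayMask;
  std::printf("chunked? %d  narrow? %d  unmasked back: %#lx\n",
              is_chunked(chunked), is_narrow(chunked),
              (unsigned long)(chunked & ~kChunkedArrayMask));
}
// --- end sketch ---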
|
||||
template <class T> void process_array_chunk_work(oop obj,
|
||||
int start, int end);
|
||||
void process_array_chunk(oop old);
|
||||
|
||||
void push_depth(oop* p) {
|
||||
template <class T> void push_depth(T* p) {
|
||||
assert(depth_first(), "pre-condition");
|
||||
|
||||
#if PS_PM_STATS
|
||||
|
@ -175,7 +185,7 @@ class PSPromotionManager : public CHeapObj {
|
|||
}
|
||||
|
||||
protected:
|
||||
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
|
||||
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
|
||||
static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }
|
||||
|
||||
public:
|
||||
|
@ -227,6 +237,7 @@ class PSPromotionManager : public CHeapObj {
|
|||
drain_stacks_breadth(totally_drain);
|
||||
}
|
||||
}
|
||||
public:
|
||||
void drain_stacks_cond_depth() {
|
||||
if (claimed_stack_depth()->size() > _target_stack_size) {
|
||||
drain_stacks_depth(false);
|
||||
|
@ -256,15 +267,11 @@ class PSPromotionManager : public CHeapObj {
|
|||
return _depth_first;
|
||||
}
|
||||
|
||||
inline void process_popped_location_depth(oop* p);
|
||||
inline void process_popped_location_depth(StarTask p);
|
||||
|
||||
inline void flush_prefetch_queue();
|
||||
|
||||
inline void claim_or_forward_depth(oop* p);
|
||||
inline void claim_or_forward_internal_depth(oop* p);
|
||||
|
||||
inline void claim_or_forward_breadth(oop* p);
|
||||
inline void claim_or_forward_internal_breadth(oop* p);
|
||||
template <class T> inline void claim_or_forward_depth(T* p);
|
||||
template <class T> inline void claim_or_forward_breadth(T* p);
|
||||
|
||||
#if PS_PM_STATS
|
||||
void increment_steals(oop* p = NULL) {
|
||||
|
|
|
@ -28,64 +28,68 @@ inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
|
|||
return _manager_array[index];
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) {
|
||||
if (p != NULL) {
|
||||
oop o = *p;
|
||||
template <class T>
|
||||
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
|
||||
if (p != NULL) { // XXX: error if p != NULL here
|
||||
oop o = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
if (o->is_forwarded()) {
|
||||
o = o->forwardee();
|
||||
|
||||
// Card mark
|
||||
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
|
||||
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
|
||||
}
|
||||
*p = o;
|
||||
oopDesc::encode_store_heap_oop_not_null(p, o);
|
||||
} else {
|
||||
push_depth(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
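claim_or_forward_internal_depth has two outcomes: if the object has already been copied, the slot is updated to the forwardee immediately, with a card mark when the forwardee is still in the young generation; otherwise the slot itself is pushed so the copy happens when the task is popped. The toy model below shows that decision shape only; the 512-byte card size is the conventional value and, like all the names, an assumption of the sketch.

// --- sketch (not part of the patch) ---
#include <cstdint>
#include <cstdio>
#include <vector>

struct Obj { Obj* forwardee; };                // null until the object is copied

const size_t kCardShift = 9;                   // 512-byte cards (assumed)
std::vector<uint8_t> card_table(1 << 14, 0);

void dirty_card_for(void* slot) {
  // Index wrapped into the toy table; real code maps the covered heap range.
  card_table[((uintptr_t)slot >> kCardShift) % card_table.size()] = 1;
}

void claim_or_forward(Obj** slot, std::vector<Obj**>& queue) {
  Obj* o = *slot;
  if (o == nullptr) return;
  if (o->forwardee != nullptr) {
    *slot = o->forwardee;     // already copied: fix the slot now
    dirty_card_for(slot);     // real code only does this when the forwardee is still young
  } else {
    queue.push_back(slot);    // copy later, when the task is popped
  }
}

int main() {
  Obj target = {nullptr}, moved = {nullptr};
  target.forwardee = &moved;
  Obj* slot = &target;
  std::vector<Obj**> queue;
  claim_or_forward(&slot, queue);
  std::printf("slot now points at forwardee? %d, deferred tasks: %zu\n",
              slot == &moved, queue.size());
}
// --- end sketch ---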
|
||||
inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) {
|
||||
if (p != NULL) {
|
||||
oop o = *p;
|
||||
template <class T>
|
||||
inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
|
||||
if (p != NULL) { // XXX: error if p != NULL here
|
||||
oop o = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
if (o->is_forwarded()) {
|
||||
o = o->forwardee();
|
||||
} else {
|
||||
o = copy_to_survivor_space(o, false);
|
||||
}
|
||||
|
||||
// Card mark
|
||||
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
|
||||
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
|
||||
}
|
||||
*p = o;
|
||||
oopDesc::encode_store_heap_oop_not_null(p, o);
|
||||
}
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::flush_prefetch_queue() {
|
||||
assert(!depth_first(), "invariant");
|
||||
for (int i=0; i<_prefetch_queue.length(); i++) {
|
||||
claim_or_forward_internal_breadth(_prefetch_queue.pop());
|
||||
for (int i = 0; i < _prefetch_queue.length(); i++) {
|
||||
claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
|
||||
}
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::claim_or_forward_depth(oop* p) {
|
||||
template <class T>
|
||||
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
|
||||
assert(depth_first(), "invariant");
|
||||
assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
|
||||
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
|
||||
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
|
||||
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
|
||||
"Sanity");
|
||||
assert(Universe::heap()->is_in(p), "pointer outside heap");
|
||||
|
||||
claim_or_forward_internal_depth(p);
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
|
||||
template <class T>
|
||||
inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
|
||||
assert(!depth_first(), "invariant");
|
||||
assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
|
||||
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
|
||||
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
|
||||
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
|
||||
"Sanity");
|
||||
assert(Universe::heap()->is_in(p), "pointer outside heap");
|
||||
|
||||
if (UsePrefetchQueue) {
|
||||
claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p));
|
||||
claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
|
||||
} else {
|
||||
// This option is used for testing. The use of the prefetch
|
||||
// queue can delay the processing of the objects and thus
|
||||
|
@ -106,12 +110,16 @@ inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
|
|||
}
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::process_popped_location_depth(oop* p) {
|
||||
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
|
||||
if (is_oop_masked(p)) {
|
||||
assert(PSChunkLargeArrays, "invariant");
|
||||
oop const old = unmask_chunked_array_oop(p);
|
||||
process_array_chunk(old);
|
||||
} else {
|
||||
PSScavenge::copy_and_push_safe_barrier(this, p);
|
||||
if (p.is_narrow()) {
|
||||
PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
|
||||
} else {
|
||||
PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -65,16 +65,18 @@ public:
|
|||
assert(_promotion_manager != NULL, "Sanity");
|
||||
}
|
||||
|
||||
void do_oop(oop* p) {
|
||||
assert (*p != NULL, "expected non-null ref");
|
||||
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
assert (!oopDesc::is_null(*p), "expected non-null ref");
|
||||
assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
|
||||
"expected an oop while scanning weak refs");
|
||||
|
||||
oop obj = oop(*p);
|
||||
// Weak refs may be visited more than once.
|
||||
if (PSScavenge::should_scavenge(obj, _to_space)) {
|
||||
if (PSScavenge::should_scavenge(p, _to_space)) {
|
||||
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
|
||||
}
|
||||
}
|
||||
virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
|
||||
virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
|
||||
};
|
||||
|
||||
class PSEvacuateFollowersClosure: public VoidClosure {
|
||||
|
@ -83,7 +85,7 @@ class PSEvacuateFollowersClosure: public VoidClosure {
|
|||
public:
|
||||
PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
|
||||
|
||||
void do_void() {
|
||||
virtual void do_void() {
|
||||
assert(_promotion_manager != NULL, "Sanity");
|
||||
_promotion_manager->drain_stacks(true);
|
||||
guarantee(_promotion_manager->stacks_empty(),
|
||||
|
|
|
@ -116,16 +116,16 @@ class PSScavenge: AllStatic {
|
|||
// If an attempt to promote fails, this method is invoked
|
||||
static void oop_promotion_failed(oop obj, markOop obj_mark);
|
||||
|
||||
static inline bool should_scavenge(oop p);
|
||||
template <class T> static inline bool should_scavenge(T* p);
|
||||
|
||||
// These call should_scavenge() above and, if it returns true, also check that
|
||||
// the object was not newly copied into to_space. The version with the bool
|
||||
// argument is a convenience wrapper that fetches the to_space pointer from
|
||||
// the heap and calls the other version (if the arg is true).
|
||||
static inline bool should_scavenge(oop p, MutableSpace* to_space);
|
||||
static inline bool should_scavenge(oop p, bool check_to_space);
|
||||
template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
|
||||
template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
|
||||
|
||||
inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
|
||||
template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
|
||||
|
||||
// Is an object in the young generation
|
||||
// This assumes that the HeapWord argument is in the heap,
|
||||
|
|
|
@ -22,28 +22,33 @@
|
|||
*
|
||||
*/
|
||||
|
||||
|
||||
inline void PSScavenge::save_to_space_top_before_gc() {
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
|
||||
}
|
||||
|
||||
inline bool PSScavenge::should_scavenge(oop p) {
|
||||
return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p);
|
||||
template <class T> inline bool PSScavenge::should_scavenge(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (oopDesc::is_null(heap_oop)) return false;
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
return PSScavenge::is_obj_in_young((HeapWord*)obj);
|
||||
}
|
||||
|
||||
inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) {
|
||||
template <class T>
|
||||
inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
|
||||
if (should_scavenge(p)) {
|
||||
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
// Skip objects copied to to_space since the scavenge started.
|
||||
HeapWord* const addr = (HeapWord*) p;
|
||||
HeapWord* const addr = (HeapWord*)obj;
|
||||
return addr < to_space_top_before_gc() || addr >= to_space->end();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
|
||||
template <class T>
|
||||
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
|
||||
if (check_to_space) {
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
return should_scavenge(p, heap->young_gen()->to_space());
|
||||
}
|
||||
return should_scavenge(p);
|
||||
|
@ -52,24 +57,23 @@ inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
|
|||
// Attempt to "claim" oop at p via CAS, push the new obj if successful
|
||||
// This version tests the oop* to make sure it is within the heap before
|
||||
// attempting marking.
|
||||
template <class T>
|
||||
inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
|
||||
oop* p) {
|
||||
assert(should_scavenge(*p, true), "revisiting object?");
|
||||
T* p) {
|
||||
assert(should_scavenge(p, true), "revisiting object?");
|
||||
|
||||
oop o = *p;
|
||||
if (o->is_forwarded()) {
|
||||
*p = o->forwardee();
|
||||
} else {
|
||||
*p = pm->copy_to_survivor_space(o, pm->depth_first());
|
||||
}
|
||||
oop o = oopDesc::load_decode_heap_oop_not_null(p);
|
||||
oop new_obj = o->is_forwarded()
|
||||
? o->forwardee()
|
||||
: pm->copy_to_survivor_space(o, pm->depth_first());
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
|
||||
// We cannot mark without test, as some code passes us pointers
|
||||
// that are outside the heap.
|
||||
if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
|
||||
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
|
||||
Universe::heap()->is_in_reserved(p)) {
|
||||
o = *p;
|
||||
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
|
||||
card_table()->inline_write_ref_field_gc(p, o);
|
||||
if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
|
||||
card_table()->inline_write_ref_field_gc(p, new_obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,15 +34,17 @@ class PSScavengeRootsClosure: public OopClosure {
|
|||
private:
|
||||
PSPromotionManager* _promotion_manager;
|
||||
|
||||
public:
|
||||
PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
|
||||
|
||||
virtual void do_oop(oop* p) {
|
||||
if (PSScavenge::should_scavenge(*p)) {
|
||||
protected:
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
if (PSScavenge::should_scavenge(p)) {
|
||||
// We never card mark roots, maybe call a func without test?
|
||||
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
|
||||
}
|
||||
}
|
||||
public:
|
||||
PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
|
||||
void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); }
|
||||
void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
|
||||
};
|
||||
|
||||
void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
|
||||
|
@ -135,7 +137,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
|
|||
int random_seed = 17;
|
||||
if (pm->depth_first()) {
|
||||
while(true) {
|
||||
oop* p;
|
||||
StarTask p;
|
||||
if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
|
||||
#if PS_PM_STATS
|
||||
pm->increment_steals(p);
|
||||
|
@ -164,8 +166,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
|
|||
}
|
||||
}
|
||||
}
|
||||
guarantee(pm->stacks_empty(),
|
||||
"stacks should be empty at this point");
|
||||
guarantee(pm->stacks_empty(), "stacks should be empty at this point");
|
||||
}
|
||||
|
||||
//
|
||||
|
|
|
@ -36,16 +36,16 @@ PreservedMark* MarkSweep::_preserved_marks = NULL;
|
|||
ReferenceProcessor* MarkSweep::_ref_processor = NULL;
|
||||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
GrowableArray<oop*>* MarkSweep::_root_refs_stack = NULL;
|
||||
GrowableArray<void*>* MarkSweep::_root_refs_stack = NULL;
|
||||
GrowableArray<oop> * MarkSweep::_live_oops = NULL;
|
||||
GrowableArray<oop> * MarkSweep::_live_oops_moved_to = NULL;
|
||||
GrowableArray<size_t>* MarkSweep::_live_oops_size = NULL;
|
||||
size_t MarkSweep::_live_oops_index = 0;
|
||||
size_t MarkSweep::_live_oops_index_at_perm = 0;
|
||||
GrowableArray<oop*>* MarkSweep::_other_refs_stack = NULL;
|
||||
GrowableArray<oop*>* MarkSweep::_adjusted_pointers = NULL;
|
||||
bool MarkSweep::_pointer_tracking = false;
|
||||
bool MarkSweep::_root_tracking = true;
|
||||
GrowableArray<void*>* MarkSweep::_other_refs_stack = NULL;
|
||||
GrowableArray<void*>* MarkSweep::_adjusted_pointers = NULL;
|
||||
bool MarkSweep::_pointer_tracking = false;
|
||||
bool MarkSweep::_root_tracking = true;
|
||||
|
||||
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops = NULL;
|
||||
GrowableArray<HeapWord*>* MarkSweep::_cur_gc_live_oops_moved_to = NULL;
|
||||
|
@ -59,7 +59,6 @@ void MarkSweep::revisit_weak_klass_link(Klass* k) {
|
|||
_revisit_klass_stack->push(k);
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::follow_weak_klass_links() {
|
||||
// All klasses on the revisit stack are marked at this point.
|
||||
// Update and follow all subklass, sibling and implementor links.
|
||||
|
@ -69,44 +68,15 @@ void MarkSweep::follow_weak_klass_links() {
|
|||
follow_stack();
|
||||
}
|
||||
|
||||
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
|
||||
|
||||
void MarkSweep::mark_and_follow(oop* p) {
|
||||
assert(Universe::heap()->is_in_reserved(p),
|
||||
"we should only be traversing objects here");
|
||||
oop m = *p;
|
||||
if (m != NULL && !m->mark()->is_marked()) {
|
||||
mark_object(m);
|
||||
m->follow_contents(); // Follow contents of the marked object
|
||||
}
|
||||
}
|
||||
|
||||
void MarkSweep::_mark_and_push(oop* p) {
|
||||
// Push marked object, contents will be followed later
|
||||
oop m = *p;
|
||||
mark_object(m);
|
||||
_marking_stack->push(m);
|
||||
}
|
||||
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
|
||||
void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
|
||||
|
||||
MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
|
||||
|
||||
void MarkSweep::follow_root(oop* p) {
|
||||
assert(!Universe::heap()->is_in_reserved(p),
|
||||
"roots shouldn't be things within the heap");
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
if (ValidateMarkSweep) {
|
||||
guarantee(!_root_refs_stack->contains(p), "should only be in here once");
|
||||
_root_refs_stack->push(p);
|
||||
}
|
||||
#endif
|
||||
oop m = *p;
|
||||
if (m != NULL && !m->mark()->is_marked()) {
|
||||
mark_object(m);
|
||||
m->follow_contents(); // Follow contents of the marked object
|
||||
}
|
||||
follow_stack();
|
||||
}
|
||||
|
||||
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
|
||||
|
||||
void MarkSweep::follow_stack() {
|
||||
while (!_marking_stack->is_empty()) {
|
||||
|
@ -118,6 +88,7 @@ void MarkSweep::follow_stack() {
|
|||
|
||||
MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;
|
||||
|
||||
void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
|
||||
|
||||
// We preserve the mark which should be replaced at the end and the location that it
|
||||
// will go. Note that the object that this markOop belongs to isn't currently at that
|
||||
|
@ -142,6 +113,9 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
|
|||
MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
|
||||
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
|
||||
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
|
||||
|
||||
void MarkSweep::adjust_marks() {
|
||||
assert(_preserved_oop_stack == NULL ||
|
||||
_preserved_oop_stack->length() == _preserved_mark_stack->length(),
|
||||
|
@ -187,7 +161,7 @@ void MarkSweep::restore_marks() {
|
|||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
|
||||
void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
|
||||
void MarkSweep::track_adjusted_pointer(void* p, bool isroot) {
|
||||
if (!ValidateMarkSweep)
|
||||
return;
|
||||
|
||||
|
@ -201,7 +175,7 @@ void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
|
|||
if (index != -1) {
|
||||
int l = _root_refs_stack->length();
|
||||
if (l > 0 && l - 1 != index) {
|
||||
oop* last = _root_refs_stack->pop();
|
||||
void* last = _root_refs_stack->pop();
|
||||
assert(last != p, "should be different");
|
||||
_root_refs_stack->at_put(index, last);
|
||||
} else {
|
||||
|
@ -211,19 +185,17 @@ void MarkSweep::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::check_adjust_pointer(oop* p) {
|
||||
void MarkSweep::check_adjust_pointer(void* p) {
|
||||
_adjusted_pointers->push(p);
|
||||
}
|
||||
|
||||
|
||||
class AdjusterTracker: public OopClosure {
|
||||
public:
|
||||
AdjusterTracker() {};
|
||||
void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
|
||||
AdjusterTracker() {}
|
||||
void do_oop(oop* o) { MarkSweep::check_adjust_pointer(o); }
|
||||
void do_oop(narrowOop* o) { MarkSweep::check_adjust_pointer(o); }
|
||||
};
|
||||
|
||||
|
||||
void MarkSweep::track_interior_pointers(oop obj) {
|
||||
if (ValidateMarkSweep) {
|
||||
_adjusted_pointers->clear();
|
||||
|
@ -234,7 +206,6 @@ void MarkSweep::track_interior_pointers(oop obj) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::check_interior_pointers() {
|
||||
if (ValidateMarkSweep) {
|
||||
_pointer_tracking = false;
|
||||
|
@ -242,7 +213,6 @@ void MarkSweep::check_interior_pointers() {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::reset_live_oop_tracking(bool at_perm) {
|
||||
if (ValidateMarkSweep) {
|
||||
guarantee((size_t)_live_oops->length() == _live_oops_index, "should be at end of live oops");
|
||||
|
@ -250,7 +220,6 @@ void MarkSweep::reset_live_oop_tracking(bool at_perm) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::register_live_oop(oop p, size_t size) {
|
||||
if (ValidateMarkSweep) {
|
||||
_live_oops->push(p);
|
||||
|
@ -283,7 +252,6 @@ void MarkSweep::live_oop_moved_to(HeapWord* q, size_t size,
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::compaction_complete() {
|
||||
if (RecordMarkSweepCompaction) {
|
||||
GrowableArray<HeapWord*>* _tmp_live_oops = _cur_gc_live_oops;
|
||||
|
@ -299,7 +267,6 @@ void MarkSweep::compaction_complete() {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
|
||||
if (!RecordMarkSweepCompaction) {
|
||||
tty->print_cr("Requires RecordMarkSweepCompaction to be enabled");
|
||||
|
@ -318,7 +285,7 @@ void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
|
|||
HeapWord* new_oop = _last_gc_live_oops_moved_to->at(i);
|
||||
size_t offset = (q - old_oop);
|
||||
tty->print_cr("Address " PTR_FORMAT, q);
|
||||
tty->print_cr(" Was in oop " PTR_FORMAT ", size %d, at offset %d", old_oop, sz, offset);
|
||||
tty->print_cr(" Was in oop " PTR_FORMAT ", size " SIZE_FORMAT ", at offset " SIZE_FORMAT, old_oop, sz, offset);
|
||||
tty->print_cr(" Now in oop " PTR_FORMAT ", actual address " PTR_FORMAT, new_oop, new_oop + offset);
|
||||
return;
|
||||
}
|
||||
|
@ -328,23 +295,16 @@ void MarkSweep::print_new_location_of_heap_address(HeapWord* q) {
|
|||
}
|
||||
#endif //VALIDATE_MARK_SWEEP
|
||||
|
||||
MarkSweep::IsAliveClosure MarkSweep::is_alive;
|
||||
MarkSweep::IsAliveClosure MarkSweep::is_alive;
|
||||
|
||||
void MarkSweep::KeepAliveClosure::do_oop(oop* p) {
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
if (ValidateMarkSweep) {
|
||||
if (!Universe::heap()->is_in_reserved(p)) {
|
||||
_root_refs_stack->push(p);
|
||||
} else {
|
||||
_other_refs_stack->push(p);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
mark_and_push(p);
|
||||
}
|
||||
void MarkSweep::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
|
||||
bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
|
||||
|
||||
MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
|
||||
|
||||
void MarkSweep::KeepAliveClosure::do_oop(oop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
|
||||
void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
|
||||
|
||||
void marksweep_init() { /* empty */ }
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
|
|
@ -46,55 +46,59 @@ class ReferenceProcessor;
|
|||
#define VALIDATE_MARK_SWEEP_ONLY(code)
|
||||
#endif
|
||||
|
||||
|
||||
// declared at end
|
||||
class PreservedMark;
|
||||
|
||||
class MarkSweep : AllStatic {
|
||||
//
|
||||
// In line closure decls
|
||||
// Inline closure decls
|
||||
//
|
||||
|
||||
class FollowRootClosure: public OopsInGenClosure{
|
||||
class FollowRootClosure: public OopsInGenClosure {
|
||||
public:
|
||||
void do_oop(oop* p) { follow_root(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
};
|
||||
|
||||
class MarkAndPushClosure: public OopClosure {
|
||||
public:
|
||||
void do_oop(oop* p) { mark_and_push(p); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
virtual const bool do_nmethods() const { return true; }
|
||||
};
|
||||
|
||||
class FollowStackClosure: public VoidClosure {
|
||||
public:
|
||||
void do_void() { follow_stack(); }
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
class AdjustPointerClosure: public OopsInGenClosure {
|
||||
private:
|
||||
bool _is_root;
|
||||
public:
|
||||
AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
|
||||
void do_oop(oop* p) { _adjust_pointer(p, _is_root); }
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
// Used for java/lang/ref handling
|
||||
class IsAliveClosure: public BoolObjectClosure {
|
||||
public:
|
||||
void do_object(oop p) { assert(false, "don't call"); }
|
||||
bool do_object_b(oop p) { return p->is_gc_marked(); }
|
||||
virtual void do_object(oop p);
|
||||
virtual bool do_object_b(oop p);
|
||||
};
|
||||
|
||||
class KeepAliveClosure: public OopClosure {
|
||||
protected:
|
||||
template <class T> void do_oop_work(T* p);
|
||||
public:
|
||||
void do_oop(oop* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
};
|
||||
|
||||
//
|
||||
// Friend decls
|
||||
//
|
||||
|
||||
friend class AdjustPointerClosure;
|
||||
friend class KeepAliveClosure;
|
||||
friend class VM_MarkSweep;
|
||||
|
@ -120,14 +124,14 @@ class MarkSweep : AllStatic {
|
|||
static ReferenceProcessor* _ref_processor;
|
||||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
static GrowableArray<oop*>* _root_refs_stack;
|
||||
static GrowableArray<void*>* _root_refs_stack;
|
||||
static GrowableArray<oop> * _live_oops;
|
||||
static GrowableArray<oop> * _live_oops_moved_to;
|
||||
static GrowableArray<size_t>* _live_oops_size;
|
||||
static size_t _live_oops_index;
|
||||
static size_t _live_oops_index_at_perm;
|
||||
static GrowableArray<oop*>* _other_refs_stack;
|
||||
static GrowableArray<oop*>* _adjusted_pointers;
|
||||
static GrowableArray<void*>* _other_refs_stack;
|
||||
static GrowableArray<void*>* _adjusted_pointers;
|
||||
static bool _pointer_tracking;
|
||||
static bool _root_tracking;
|
||||
|
||||
|
@ -146,9 +150,8 @@ class MarkSweep : AllStatic {
|
|||
static GrowableArray<size_t>* _last_gc_live_oops_size;
|
||||
#endif
|
||||
|
||||
|
||||
// Non public closures
|
||||
static IsAliveClosure is_alive;
|
||||
static IsAliveClosure is_alive;
|
||||
static KeepAliveClosure keep_alive;
|
||||
|
||||
// Class unloading. Update subklass/sibling/implementor links at end of marking phase.
|
||||
|
@ -159,9 +162,9 @@ class MarkSweep : AllStatic {
|
|||
|
||||
public:
|
||||
// Public closures
|
||||
static FollowRootClosure follow_root_closure;
|
||||
static MarkAndPushClosure mark_and_push_closure;
|
||||
static FollowStackClosure follow_stack_closure;
|
||||
static FollowRootClosure follow_root_closure;
|
||||
static MarkAndPushClosure mark_and_push_closure;
|
||||
static FollowStackClosure follow_stack_closure;
|
||||
static AdjustPointerClosure adjust_root_pointer_closure;
|
||||
static AdjustPointerClosure adjust_pointer_closure;
|
||||
|
||||
|
@ -170,39 +173,29 @@ class MarkSweep : AllStatic {
|
|||
|
||||
// Call backs for marking
|
||||
static void mark_object(oop obj);
|
||||
static void follow_root(oop* p); // Mark pointer and follow contents. Empty marking
|
||||
// Mark pointer and follow contents. Empty marking stack afterwards.
|
||||
template <class T> static inline void follow_root(T* p);
|
||||
// Mark pointer and follow contents.
|
||||
template <class T> static inline void mark_and_follow(T* p);
|
||||
// Check mark and maybe push on marking stack
|
||||
template <class T> static inline void mark_and_push(T* p);
|
||||
|
||||
// stack afterwards.
|
||||
static void follow_stack(); // Empty marking stack.
|
||||
|
||||
static void mark_and_follow(oop* p); // Mark pointer and follow contents.
|
||||
static void _mark_and_push(oop* p); // Mark pointer and push obj on
|
||||
// marking stack.
|
||||
static void preserve_mark(oop p, markOop mark);
|
||||
// Save the mark word so it can be restored later
|
||||
static void adjust_marks(); // Adjust the pointers in the preserved marks table
|
||||
static void restore_marks(); // Restore the marks that we saved in preserve_mark
|
||||
|
||||
template <class T> static inline void adjust_pointer(T* p, bool isroot);
|
||||
|
||||
static void mark_and_push(oop* p) { // Check mark and maybe push on
|
||||
// marking stack
|
||||
// assert(Universe::is_reserved_heap((oop)p), "we should only be traversing objects here");
|
||||
oop m = *p;
|
||||
if (m != NULL && !m->mark()->is_marked()) {
|
||||
_mark_and_push(p);
|
||||
}
|
||||
}
|
||||
|
||||
static void follow_stack(); // Empty marking stack.
|
||||
|
||||
|
||||
static void preserve_mark(oop p, markOop mark); // Save the mark word so it can be restored later
|
||||
static void adjust_marks(); // Adjust the pointers in the preserved marks table
|
||||
static void restore_marks(); // Restore the marks that we saved in preserve_mark
|
||||
|
||||
static void _adjust_pointer(oop* p, bool isroot);
|
||||
|
||||
static void adjust_root_pointer(oop* p) { _adjust_pointer(p, true); }
|
||||
static void adjust_pointer(oop* p) { _adjust_pointer(p, false); }
|
||||
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
|
||||
static void adjust_pointer(oop* p) { adjust_pointer(p, false); }
|
||||
static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
|
||||
|
||||
#ifdef VALIDATE_MARK_SWEEP
|
||||
static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
|
||||
static void check_adjust_pointer(oop* p); // Adjust this pointer
|
||||
static void track_adjusted_pointer(void* p, bool isroot);
|
||||
static void check_adjust_pointer(void* p);
|
||||
static void track_interior_pointers(oop obj);
|
||||
static void check_interior_pointers();
|
||||
|
||||
|
@ -223,7 +216,6 @@ class MarkSweep : AllStatic {
|
|||
static void revisit_weak_klass_link(Klass* k); // Update subklass/sibling/implementor links at end of marking.
|
||||
};
|
||||
|
||||
|
||||
class PreservedMark VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
oop _obj;
|
||||
|
|
|
@ -22,32 +22,11 @@
|
|||
*
|
||||
*/
|
||||
|
||||
inline void MarkSweep::_adjust_pointer(oop* p, bool isroot) {
|
||||
oop obj = *p;
|
||||
VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
if (obj != NULL) {
oop new_pointer = oop(obj->mark()->decode_pointer());
assert(new_pointer != NULL || // is forwarding ptr?
obj->mark() == markOopDesc::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked?
obj->is_shared(), // never forwarded?
"should contain a forwarding pointer");
if (new_pointer != NULL) {
*p = new_pointer;
assert(Universe::heap()->is_in_reserved(new_pointer),
"should be in object space");
VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
}
}
VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
}

inline void MarkSweep::mark_object(oop obj) {

#ifndef SERIALGC
if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
"Should be marked in the marking bitmap");
}
#endif // SERIALGC

@ -60,3 +39,80 @@ inline void MarkSweep::mark_object(oop obj) {
preserve_mark(obj, mark);
}
}

template <class T> inline void MarkSweep::follow_root(T* p) {
assert(!Universe::heap()->is_in_reserved(p),
"roots shouldn't be things within the heap");
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
guarantee(!_root_refs_stack->contains(p), "should only be in here once");
_root_refs_stack->push(p);
}
#endif
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
obj->follow_contents();
}
}
follow_stack();
}

template <class T> inline void MarkSweep::mark_and_follow(T* p) {
// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
obj->follow_contents();
}
}
}

template <class T> inline void MarkSweep::mark_and_push(T* p) {
// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
_marking_stack->push(obj);
}
}
}

template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
oop new_obj = oop(obj->mark()->decode_pointer());
assert(new_obj != NULL || // is forwarding ptr?
obj->mark() == markOopDesc::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark()->has_bias_pattern()) || // not gc marked?
obj->is_shared(), // never forwarded?
"should be forwarded");
if (new_obj != NULL) {
assert(Universe::heap()->is_in_reserved(new_obj),
"should be in object space");
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
}
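Editor's note: the templated helpers above work for both oop* and narrowOop* fields only because every load and store goes through oopDesc::load_heap_oop, decode_heap_oop_not_null and encode_store_heap_oop_not_null. The following is a minimal standalone sketch of the underlying compress/decompress idea; the base address, shift and type names are illustrative assumptions, not the HotSpot implementation.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for oop / narrowOop (assumed, not the real types).
typedef uintptr_t oop_t;      // full-width reference
typedef uint32_t  narrow_t;   // 32-bit compressed reference

static uintptr_t heap_base  = 0;  // assumed heap base chosen at VM startup
static const int heap_shift = 3;  // 8-byte object alignment leaves 3 spare low bits

// Compress: (addr - base) >> shift fits in 32 bits for heaps up to 32 GB.
inline narrow_t encode(oop_t obj) {
  assert(obj != 0 && "null would need a reserved encoding");
  return (narrow_t)((obj - heap_base) >> heap_shift);
}

// Decompress: base + ((uintptr_t)narrow << shift).
inline oop_t decode(narrow_t n) {
  return heap_base + ((uintptr_t)n << heap_shift);
}

int main() {
  heap_base = 0x100000000ULL;             // pretend reserved heap start
  oop_t obj = heap_base + 0x12345678ULL;  // some 8-byte-aligned object address
  narrow_t n = encode(obj);
  assert(decode(n) == obj);               // the round trip is lossless
  return 0;
}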

template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
if (!Universe::heap()->is_in_reserved(p)) {
_root_refs_stack->push(p);
} else {
_other_refs_stack->push(p);
}
}
#endif
mark_and_push(p);
}
@ -35,7 +35,6 @@ int CollectedHeap::_fire_out_of_memory_count = 0;
CollectedHeap::CollectedHeap() :
_reserved(), _barrier_set(NULL), _is_gc_active(false),
_total_collections(0), _total_full_collections(0),
_max_heap_capacity(0),
_gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
NOT_PRODUCT(_promotion_failure_alot_count = 0;)
NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

@ -53,7 +53,6 @@ class CollectedHeap : public CHeapObj {
bool _is_gc_active;
unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
size_t _max_heap_capacity;
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

@ -149,10 +148,7 @@ class CollectedHeap : public CHeapObj {
virtual void post_initialize() = 0;

MemRegion reserved_region() const { return _reserved; }

// Return the number of bytes currently reserved, committed, and used,
// respectively, for holding objects.
size_t reserved_obj_bytes() const { return _reserved.byte_size(); }
address base() const { return (address)reserved_region().start(); }

// Future cleanup here. The following functions should specify bytes or
// heapwords as part of their signature.

@ -61,7 +61,10 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
obj->set_klass(klass());
assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
"missing blueprint");
}

// Support for jvmti and dtrace
inline void post_allocation_notify(KlassHandle klass, oop obj) {
// support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj);

@ -79,18 +82,22 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
post_allocation_setup_common(klass, obj, size);
assert(Universe::is_bootstrapping() ||
!((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
// notify jvmti and dtrace
post_allocation_notify(klass, (oop)obj);
}

void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj,
size_t size,
int length) {
// Set array length before posting jvmti object alloc event
// in post_allocation_setup_common()
assert(length >= 0, "length should be non-negative");
((arrayOop)obj)->set_length(length);
post_allocation_setup_common(klass, obj, size);
// Must set length after installing klass as set_klass zeros the length
// field in UseCompressedOops
((arrayOop)obj)->set_length(length);
assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace)
post_allocation_notify(klass, (oop)obj);
}
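Editor's note: the array setup above writes the length twice because, with compressed oops, the narrow klass reference and the array length end up sharing one header word and set_klass clears that word. A small standalone sketch of the header-overlap idea follows; the Header type and field sizes are illustrative assumptions, not HotSpot's actual object layout.

#include <cassert>
#include <cstdint>

// Illustrative header layout (assumed): with a compressed klass the 32-bit
// narrow klass and the 32-bit array length pack into the same 64-bit slot.
struct ArrayHeader {
  uint64_t mark;          // mark word
  uint32_t narrow_klass;  // compressed klass reference
  uint32_t length;        // array length lives in the "klass gap"
};

void install_klass(ArrayHeader* h, uint32_t nk) {
  // Models a set_klass that rewrites the whole klass/length word:
  // installing the klass zeroes the gap where the length was stored.
  h->narrow_klass = nk;
  h->length = 0;
}

int main() {
  ArrayHeader h = {};
  h.length = 42;          // set length first, so early observers see a sane value
  install_klass(&h, 7);   // installing the klass wipes the length...
  assert(h.length == 0);
  h.length = 42;          // ...so it has to be set again afterwards
  assert(h.narrow_klass == 7 && h.length == 42);
  return 0;
}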

HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
@ -191,7 +191,6 @@ array.hpp allocation.inline.hpp
arrayKlass.cpp arrayKlass.hpp
arrayKlass.cpp arrayKlassKlass.hpp
arrayKlass.cpp arrayOop.hpp
arrayKlass.cpp collectedHeap.hpp
arrayKlass.cpp collectedHeap.inline.hpp
arrayKlass.cpp gcLocker.hpp
arrayKlass.cpp instanceKlass.hpp

@ -211,6 +210,7 @@ arrayKlass.hpp universe.hpp
arrayKlassKlass.cpp arrayKlassKlass.hpp
arrayKlassKlass.cpp handles.inline.hpp
arrayKlassKlass.cpp javaClasses.hpp
arrayKlassKlass.cpp markSweep.inline.hpp
arrayKlassKlass.cpp oop.inline.hpp

arrayKlassKlass.hpp arrayKlass.hpp

@ -250,7 +250,7 @@ assembler.inline.hpp threadLocalStorage.hpp
assembler_<arch_model>.cpp assembler_<arch_model>.inline.hpp
assembler_<arch_model>.cpp biasedLocking.hpp
assembler_<arch_model>.cpp cardTableModRefBS.hpp
assembler_<arch_model>.cpp collectedHeap.hpp
assembler_<arch_model>.cpp collectedHeap.inline.hpp
assembler_<arch_model>.cpp interfaceSupport.hpp
assembler_<arch_model>.cpp interpreter.hpp
assembler_<arch_model>.cpp objectMonitor.hpp

@ -331,9 +331,8 @@ bitMap.hpp top.hpp
bitMap.inline.hpp atomic.hpp
bitMap.inline.hpp bitMap.hpp

blockOffsetTable.cpp blockOffsetTable.hpp
blockOffsetTable.cpp blockOffsetTable.inline.hpp
blockOffsetTable.cpp collectedHeap.hpp
blockOffsetTable.cpp collectedHeap.inline.hpp
blockOffsetTable.cpp iterator.hpp
blockOffsetTable.cpp java.hpp
blockOffsetTable.cpp oop.inline.hpp

@ -990,6 +989,7 @@ codeCache.cpp methodOop.hpp
codeCache.cpp mutexLocker.hpp
codeCache.cpp nmethod.hpp
codeCache.cpp objArrayOop.hpp
codeCache.cpp oop.inline.hpp
codeCache.cpp pcDesc.hpp
codeCache.cpp resourceArea.hpp

@ -1124,7 +1124,7 @@ compiledICHolderKlass.cpp collectedHeap.inline.hpp
compiledICHolderKlass.cpp compiledICHolderKlass.hpp
compiledICHolderKlass.cpp handles.inline.hpp
compiledICHolderKlass.cpp javaClasses.hpp
compiledICHolderKlass.cpp markSweep.hpp
compiledICHolderKlass.cpp markSweep.inline.hpp
compiledICHolderKlass.cpp oop.inline.hpp
compiledICHolderKlass.cpp oop.inline2.hpp
compiledICHolderKlass.cpp permGen.hpp

@ -1192,6 +1192,7 @@ constMethodKlass.cpp constMethodOop.hpp
constMethodKlass.cpp gcLocker.hpp
constMethodKlass.cpp handles.inline.hpp
constMethodKlass.cpp interpreter.hpp
constMethodKlass.cpp markSweep.inline.hpp
constMethodKlass.cpp oop.inline.hpp
constMethodKlass.cpp oop.inline2.hpp
constMethodKlass.cpp resourceArea.hpp

@ -1210,6 +1211,8 @@ constantPoolKlass.cpp collectedHeap.inline.hpp
constantPoolKlass.cpp constantPoolKlass.hpp
constantPoolKlass.cpp constantPoolOop.hpp
constantPoolKlass.cpp handles.inline.hpp
constantPoolKlass.cpp javaClasses.hpp
constantPoolKlass.cpp markSweep.inline.hpp
constantPoolKlass.cpp oop.inline.hpp
constantPoolKlass.cpp oop.inline2.hpp
constantPoolKlass.cpp oopFactory.hpp

@ -1261,7 +1264,8 @@ cpCacheKlass.cpp collectedHeap.hpp
cpCacheKlass.cpp constantPoolOop.hpp
cpCacheKlass.cpp cpCacheKlass.hpp
cpCacheKlass.cpp handles.inline.hpp
cpCacheKlass.cpp markSweep.hpp
cpCacheKlass.cpp javaClasses.hpp
cpCacheKlass.cpp markSweep.inline.hpp
cpCacheKlass.cpp oop.inline.hpp
cpCacheKlass.cpp permGen.hpp

@ -1273,7 +1277,6 @@ cpCacheOop.cpp cpCacheOop.hpp
cpCacheOop.cpp handles.inline.hpp
cpCacheOop.cpp interpreter.hpp
cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp
cpCacheOop.cpp markSweep.hpp
cpCacheOop.cpp markSweep.inline.hpp
cpCacheOop.cpp objArrayOop.hpp
cpCacheOop.cpp oop.inline.hpp

@ -1385,7 +1388,6 @@ debug_<arch>.cpp top.hpp

defNewGeneration.cpp collectorCounters.hpp
defNewGeneration.cpp copy.hpp
defNewGeneration.cpp defNewGeneration.hpp
defNewGeneration.cpp defNewGeneration.inline.hpp
defNewGeneration.cpp gcLocker.inline.hpp
defNewGeneration.cpp gcPolicyCounters.hpp

@ -1397,7 +1399,6 @@ defNewGeneration.cpp iterator.hpp
defNewGeneration.cpp java.hpp
defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp
defNewGeneration.cpp space.hpp
defNewGeneration.cpp space.inline.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp

@ -1406,6 +1407,7 @@ defNewGeneration.hpp cSpaceCounters.hpp
defNewGeneration.hpp generation.inline.hpp
defNewGeneration.hpp generationCounters.hpp

defNewGeneration.inline.hpp cardTableRS.hpp
defNewGeneration.inline.hpp defNewGeneration.hpp
defNewGeneration.inline.hpp space.hpp

@ -1956,6 +1958,7 @@ instanceKlass.cpp javaClasses.hpp
instanceKlass.cpp jvmti.h
instanceKlass.cpp jvmtiExport.hpp
instanceKlass.cpp jvmtiRedefineClassesTrace.hpp
instanceKlass.cpp markSweep.inline.hpp
instanceKlass.cpp methodOop.hpp
instanceKlass.cpp mutexLocker.hpp
instanceKlass.cpp objArrayKlassKlass.hpp

@ -1991,6 +1994,7 @@ instanceKlassKlass.cpp instanceKlassKlass.hpp
instanceKlassKlass.cpp instanceRefKlass.hpp
instanceKlassKlass.cpp javaClasses.hpp
instanceKlassKlass.cpp jvmtiExport.hpp
instanceKlassKlass.cpp markSweep.inline.hpp
instanceKlassKlass.cpp objArrayKlassKlass.hpp
instanceKlassKlass.cpp objArrayOop.hpp
instanceKlassKlass.cpp oop.inline.hpp

@ -2012,7 +2016,7 @@ instanceRefKlass.cpp genCollectedHeap.hpp
instanceRefKlass.cpp genOopClosures.inline.hpp
instanceRefKlass.cpp instanceRefKlass.hpp
instanceRefKlass.cpp javaClasses.hpp
instanceRefKlass.cpp markSweep.hpp
instanceRefKlass.cpp markSweep.inline.hpp
instanceRefKlass.cpp oop.inline.hpp
instanceRefKlass.cpp preserveException.hpp
instanceRefKlass.cpp systemDictionary.hpp

@ -2492,7 +2496,7 @@ klassKlass.cpp instanceKlass.hpp
klassKlass.cpp instanceOop.hpp
klassKlass.cpp klassKlass.hpp
klassKlass.cpp klassOop.hpp
klassKlass.cpp markSweep.hpp
klassKlass.cpp markSweep.inline.hpp
klassKlass.cpp methodKlass.hpp
klassKlass.cpp objArrayKlass.hpp
klassKlass.cpp oop.inline.hpp

@ -2519,7 +2523,7 @@ klassVtable.cpp instanceKlass.hpp
klassVtable.cpp jvmtiRedefineClassesTrace.hpp
klassVtable.cpp klassOop.hpp
klassVtable.cpp klassVtable.hpp
klassVtable.cpp markSweep.hpp
klassVtable.cpp markSweep.inline.hpp
klassVtable.cpp methodOop.hpp
klassVtable.cpp objArrayOop.hpp
klassVtable.cpp oop.inline.hpp

@ -2632,6 +2636,9 @@ markOop.inline.hpp klassOop.hpp
markOop.inline.hpp markOop.hpp

markSweep.cpp compileBroker.hpp

markSweep.hpp collectedHeap.hpp

memRegion.cpp globals.hpp
memRegion.cpp memRegion.hpp

@ -2731,7 +2738,7 @@ methodDataKlass.cpp collectedHeap.inline.hpp
methodDataKlass.cpp gcLocker.hpp
methodDataKlass.cpp handles.inline.hpp
methodDataKlass.cpp klassOop.hpp
methodDataKlass.cpp markSweep.hpp
methodDataKlass.cpp markSweep.inline.hpp
methodDataKlass.cpp methodDataKlass.hpp
methodDataKlass.cpp methodDataOop.hpp
methodDataKlass.cpp oop.inline.hpp

@ -2746,7 +2753,6 @@ methodDataOop.cpp bytecodeStream.hpp
methodDataOop.cpp deoptimization.hpp
methodDataOop.cpp handles.inline.hpp
methodDataOop.cpp linkResolver.hpp
methodDataOop.cpp markSweep.hpp
methodDataOop.cpp markSweep.inline.hpp
methodDataOop.cpp methodDataOop.hpp
methodDataOop.cpp oop.inline.hpp

@ -2764,7 +2770,7 @@ methodKlass.cpp handles.inline.hpp
methodKlass.cpp interpreter.hpp
methodKlass.cpp javaClasses.hpp
methodKlass.cpp klassOop.hpp
methodKlass.cpp markSweep.hpp
methodKlass.cpp markSweep.inline.hpp
methodKlass.cpp methodDataOop.hpp
methodKlass.cpp methodKlass.hpp
methodKlass.cpp oop.inline.hpp

@ -2941,6 +2947,7 @@ objArrayKlass.cpp systemDictionary.hpp
objArrayKlass.cpp universe.inline.hpp
objArrayKlass.cpp vmSymbols.hpp

objArrayKlass.hpp arrayKlass.hpp
objArrayKlass.hpp instanceKlass.hpp
objArrayKlass.hpp specialized_oop_closures.hpp

@ -2948,6 +2955,7 @@ objArrayKlass.hpp specialized_oop_closures.hpp
objArrayKlassKlass.cpp collectedHeap.inline.hpp
objArrayKlassKlass.cpp instanceKlass.hpp
objArrayKlassKlass.cpp javaClasses.hpp
objArrayKlassKlass.cpp markSweep.inline.hpp
objArrayKlassKlass.cpp objArrayKlassKlass.hpp
objArrayKlassKlass.cpp oop.inline.hpp
objArrayKlassKlass.cpp oop.inline2.hpp

@ -2956,6 +2964,7 @@ objArrayKlassKlass.cpp systemDictionary.hpp
objArrayKlassKlass.hpp arrayKlassKlass.hpp
objArrayKlassKlass.hpp objArrayKlass.hpp

objArrayOop.cpp objArrayKlass.hpp
objArrayOop.cpp objArrayOop.hpp
objArrayOop.cpp oop.inline.hpp

@ -3005,7 +3014,6 @@ oop.inline.hpp generation.hpp
oop.inline.hpp klass.hpp
oop.inline.hpp klassOop.hpp
oop.inline.hpp markOop.inline.hpp
oop.inline.hpp markSweep.hpp
oop.inline.hpp markSweep.inline.hpp
oop.inline.hpp oop.hpp
oop.inline.hpp os.hpp

@ -4536,6 +4544,7 @@ vtableStubs.cpp handles.inline.hpp
vtableStubs.cpp instanceKlass.hpp
vtableStubs.cpp jvmtiExport.hpp
vtableStubs.cpp klassVtable.hpp
vtableStubs.cpp oop.inline.hpp
vtableStubs.cpp mutexLocker.hpp
vtableStubs.cpp resourceArea.hpp
vtableStubs.cpp sharedRuntime.hpp
@ -35,7 +35,10 @@ class InterpreterRuntime: AllStatic {
static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); }
static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); }
static void set_bcp_and_mdp(address bcp, JavaThread*thread);
static Bytecodes::Code code(JavaThread *thread) { return Bytecodes::code_at(bcp(thread)); }
static Bytecodes::Code code(JavaThread *thread) {
// pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
return Bytecodes::code_at(bcp(thread), method(thread));
}
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
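Editor's note: the context above also shows how operand indices are read straight from the bytecode stream: two_byte_index fetches a big-endian u2 at bcp + 1. A standalone sketch of that read follows; the function name is only a stand-in for Bytes::get_Java_u2, but the big-endian layout of Java bytecode operands is real.

#include <cassert>
#include <cstdint>

// Java bytecode operands are big-endian; assemble the u2 byte by byte so the
// read behaves the same on little-endian hosts.
inline uint16_t get_java_u2(const uint8_t* p) {
  return (uint16_t)((p[0] << 8) | p[1]);
}

int main() {
  // invokevirtual #0x0102: opcode 0xB6 followed by a two-byte constant-pool index.
  const uint8_t bcp[] = { 0xB6, 0x01, 0x02 };
  assert(get_java_u2(bcp + 1) == 0x0102);  // the "two_byte_index" of this bytecode
  return 0;
}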
@ -54,9 +54,9 @@ public:

// These functions indicate whether a particular access of the given
// kinds requires a barrier.
virtual bool read_ref_needs_barrier(oop* field) = 0;
virtual bool read_ref_needs_barrier(void* field) = 0;
virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
virtual bool write_ref_needs_barrier(oop* field, oop new_val) = 0;
virtual bool write_ref_needs_barrier(void* field, oop new_val) = 0;
virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes, juint val1, juint val2) = 0;

// The first four operations provide a direct implementation of the

@ -64,7 +64,7 @@ public:
// directly, as appropriate.

// Invoke the barrier, if any, necessary when reading the given ref field.
virtual void read_ref_field(oop* field) = 0;
virtual void read_ref_field(void* field) = 0;

// Invoke the barrier, if any, necessary when reading the given primitive
// "field" of "bytes" bytes in "obj".

@ -75,9 +75,9 @@ public:
// (For efficiency reasons, this operation is specialized for certain
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
inline void write_ref_field(oop* field, oop new_val);
inline void write_ref_field(void* field, oop new_val);
protected:
virtual void write_ref_field_work(oop* field, oop new_val) = 0;
virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:

// Invoke the barrier, if any, necessary when writing the "bytes"-byte

@ -26,7 +26,7 @@
// performance-critical calls when when the barrier is the most common
// card-table kind.

void BarrierSet::write_ref_field(oop* field, oop new_val) {
void BarrierSet::write_ref_field(void* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
} else {

@ -294,7 +294,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(oop* field, oop newVal) {
void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
inline_write_ref_field(field, newVal);
}

@ -273,7 +273,7 @@ public:

// *** Barrier set functions.

inline bool write_ref_needs_barrier(oop* field, oop new_val) {
inline bool write_ref_needs_barrier(void* field, oop new_val) {
// Note that this assumes the perm gen is the highest generation
// in the address space
return new_val != NULL && !new_val->is_perm();

@ -285,7 +285,7 @@ public:
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
void write_ref_field_work(oop* field, oop newVal);
void write_ref_field_work(void* field, oop newVal);
public:

bool has_write_ref_array_opt() { return true; }

@ -315,7 +315,7 @@ public:

// *** Card-table-barrier-specific things.

inline void inline_write_ref_field(oop* field, oop newVal) {
inline void inline_write_ref_field(void* field, oop newVal) {
jbyte* byte = byte_for(field);
*byte = dirty_card;
}
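Editor's note: the barrier interfaces above move from oop* to void* because a reference field may now be either a full-width oop* or a 32-bit narrowOop*, and the card-marking barrier only needs the field's address. Below is a standalone sketch of the byte_for / dirty_card idea; the card size, table sizing and names are illustrative assumptions, not the HotSpot CardTableModRefBS code.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative card-table write barrier (assumed constants).
static const int    kCardShift = 9;   // 512-byte cards
static const int8_t kDirty     = 0;
static const int8_t kClean     = 1;

struct CardTable {
  uintptr_t           heap_start;
  std::vector<int8_t> cards;

  CardTable(uintptr_t start, size_t heap_bytes)
    : heap_start(start), cards((heap_bytes >> kCardShift) + 1, kClean) {}

  // byte_for: map a field address (oop* or narrowOop*, hence void*) to its card.
  int8_t* byte_for(const void* field) {
    return &cards[((uintptr_t)field - heap_start) >> kCardShift];
  }

  // The post-write barrier: just dirty the card covering the updated field.
  void write_ref_field(const void* field) { *byte_for(field) = kDirty; }
};

int main() {
  std::vector<uint8_t> fake_heap(64 * 1024);
  CardTable ct((uintptr_t)fake_heap.data(), fake_heap.size());
  void* field = fake_heap.data() + 5000;   // some reference field in the heap
  ct.write_ref_field(field);
  assert(*ct.byte_for(field) == kDirty);   // its card is now dirty
  return 0;
}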
@ -191,7 +191,7 @@ public:
// prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(oop* field, oop new_val) {
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
jbyte* entry = ct_bs()->byte_for(field);
do {
jbyte entry_val = *entry;

@ -290,28 +290,36 @@ void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,

class VerifyCleanCardClosure: public OopClosure {
HeapWord* boundary;
HeapWord* begin; HeapWord* end;
public:
void do_oop(oop* p) {
private:
HeapWord* _boundary;
HeapWord* _begin;
HeapWord* _end;
protected:
template <class T> void do_oop_work(T* p) {
HeapWord* jp = (HeapWord*)p;
if (jp >= begin && jp < end) {
guarantee(*p == NULL || (HeapWord*)p < boundary
|| (HeapWord*)(*p) >= boundary,
if (jp >= _begin && jp < _end) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL ||
(HeapWord*)p < _boundary ||
(HeapWord*)obj >= _boundary,
"pointer on clean card crosses boundary");
}
}
VerifyCleanCardClosure(HeapWord* b, HeapWord* _begin, HeapWord* _end) :
boundary(b), begin(_begin), end(_end) {}
public:
VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
_boundary(b), _begin(begin), _end(end) {}
virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};
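Editor's note: VerifyCleanCardClosure shows the pattern applied across this change: the logic moves into a template <class T> do_oop_work(T* p), and the two virtual do_oop overloads, one for oop* and one for narrowOop*, simply forward to it. A minimal standalone sketch of that double-dispatch pattern follows; the type names are stand-ins, not HotSpot's.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Stand-in field types (assumed): a wide reference and a compressed one.
typedef uintptr_t oop_t;
typedef uint32_t  narrow_t;

struct OopClosure {
  virtual ~OopClosure() {}
  virtual void do_oop(oop_t* p) = 0;     // wide fields
  virtual void do_oop(narrow_t* p) = 0;  // compressed fields
};

class CountNonNullClosure : public OopClosure {
  size_t _count;
  // One templated body serves both field widths.
  template <class T> void do_oop_work(T* p) {
    if (*p != 0) ++_count;
  }
public:
  CountNonNullClosure() : _count(0) {}
  virtual void do_oop(oop_t* p)    { do_oop_work(p); }
  virtual void do_oop(narrow_t* p) { do_oop_work(p); }
  size_t count() const { return _count; }
};

int main() {
  oop_t    wide[3]   = { 0, 12345, 67890 };
  narrow_t narrow[3] = { 0, 0, 42 };
  CountNonNullClosure cl;
  for (int i = 0; i < 3; i++) cl.do_oop(&wide[i]);
  for (int i = 0; i < 3; i++) cl.do_oop(&narrow[i]);
  std::cout << cl.count() << "\n";  // prints 3
  return 0;
}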

class VerifyCTSpaceClosure: public SpaceClosure {
private:
CardTableRS* _ct;
HeapWord* _boundary;
public:
VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
_ct(ct), _boundary(boundary) {}
void do_space(Space* s) { _ct->verify_space(s, _boundary); }
virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {

@ -106,18 +106,18 @@ public:
// closure application.
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);

void inline_write_ref_field_gc(oop* field, oop new_val) {
void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = _ct_bs.byte_for(field);
*byte = youngergen_card;
}
void write_ref_field_gc_work(oop* field, oop new_val) {
void write_ref_field_gc_work(void* field, oop new_val) {
inline_write_ref_field_gc(field, new_val);
}

// Override. Might want to devirtualize this in the same fashion as
// above. Ensures that the value of the card for field says that it's
// a younger card in the current collection.
virtual void write_ref_field_gc_par(oop* field, oop new_val);
virtual void write_ref_field_gc_par(void* field, oop new_val);

void resize_covered_region(MemRegion new_region);
@ -49,9 +49,9 @@ public:
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
public:
void do_oop(oop* o) {
oop obj = *o;
protected:
template <class T> inline void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->is_shared_readwrite()) {
if (obj->mark()->is_marked()) {
obj->init_mark(); // Don't revisit this object.

@ -71,7 +71,10 @@ public:
}
}
}
};
}
public:
virtual void do_oop(oop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};

@ -86,9 +89,9 @@ public:
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
public:
void do_oop(oop* o) {
oop obj = *o;
protected:
template <class T> inline void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (obj->klass() == Universe::symbolKlassObj() &&
obj->is_shared_readonly()) {
symbolHandle sym((symbolOop) obj);

@ -99,6 +102,10 @@ class TraversePlaceholdersClosure : public OopClosure {
}
}
}
public:
virtual void do_oop(oop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }

};
@ -47,31 +47,9 @@ KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
_rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
// We never expect to see a null reference being processed
// as a weak reference.
assert (*p != NULL, "expected non-null ref");
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

_cl->do_oop_nv(p);

// Card marking is trickier for weak refs.
// This oop is a 'next' field which was filled in while we
// were discovering weak references. While we might not need
// to take a special action to keep this reference alive, we
// will need to dirty a card as the field was modified.
//
// Alternatively, we could create a method which iterates through
// each generation, allowing them in turn to examine the modified
// field.
//
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// extra check probably isn't worthwhile.
if (Universe::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, *p);
}
}

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :

@ -79,19 +57,8 @@ FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
_boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
assert (*p != NULL, "expected non-null ref");
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");

_cl->do_oop_nv(p);

// Optimized for Defnew generation if it's the youngest generation:
// we set a younger_gen card if we have an older->youngest
// generation pointer.
if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, *p);
}
}
void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,

@ -132,6 +99,9 @@ ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
_boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{

@ -139,6 +109,9 @@ FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
_boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
OopClosure(g->ref_processor()), _g(g)
{

@ -146,6 +119,11 @@ ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
_boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,

@ -656,7 +634,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
}
}

oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
oop DefNewGeneration::copy_to_survivor_space(oop old) {
assert(is_in_reserved(old) && !old->is_forwarded(),
"shouldn't be scavenging this oop");
size_t s = old->size();

@ -669,7 +647,7 @@ oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {

// Otherwise try allocating obj tenured
if (obj == NULL) {
obj = _next_gen->promote(old, s, from);
obj = _next_gen->promote(old, s);
if (obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag

@ -862,3 +840,69 @@ void DefNewGeneration::print_on(outputStream* st) const {
const char* DefNewGeneration::name() const {
return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
bool is_tlab) {
// This is the slow-path allocation for the DefNewGeneration.
// Most allocations are fast-path in compiled code.
// We try to allocate from the eden. If that works, we are happy.
// Note that since DefNewGeneration supports lock-free allocation, we
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result != NULL) {
return result;
}
do {
HeapWord* old_limit = eden()->soft_end();
if (old_limit < eden()->end()) {
// Tell the next generation we reached a limit.
HeapWord* new_limit =
next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
if (new_limit != NULL) {
Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
} else {
assert(eden()->soft_end() == eden()->end(),
"invalid state after allocation_limit_reached returned null");
}
} else {
// The allocation failed and the soft limit is equal to the hard limit,
// there are no reasons to do an attempt to allocate
assert(old_limit == eden()->end(), "sanity check");
break;
}
// Try to allocate until succeeded or the soft limit can't be adjusted
result = eden()->par_allocate(word_size);
} while (result == NULL);

// If the eden is full and the last collection bailed out, we are running
// out of heap space, and we try to allocate the from-space, too.
// allocate_from_space can't be inlined because that would introduce a
// circular dependency at compile time.
if (result == NULL) {
result = allocate_from_space(word_size);
}
return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
bool is_tlab) {
return eden()->par_allocate(word_size);
}
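Editor's note: the slow path above leans on eden()->par_allocate(), which is lock-free: threads race to bump the top pointer with a compare-and-swap and retry on contention. Below is a standalone sketch of that bump-pointer CAS loop; the Eden type, sizes and names are illustrative assumptions, not the HotSpot ContiguousSpace code.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative lock-free bump-pointer space (assumed layout).
struct Eden {
  std::atomic<uintptr_t> top;  // next free address
  uintptr_t              end;  // hard limit

  // par_allocate: CAS-bump 'top'; returns 0 when the space is exhausted.
  uintptr_t par_allocate(size_t bytes) {
    uintptr_t old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      uintptr_t new_top = old_top + bytes;
      if (new_top > end) return 0;                        // out of space
      // On failure old_top is reloaded with the current value and we retry.
      if (top.compare_exchange_weak(old_top, new_top)) return old_top;
    }
  }
};

int main() {
  std::vector<uint8_t> backing(4096);
  Eden eden;
  eden.top.store((uintptr_t)backing.data());
  eden.end = (uintptr_t)backing.data() + backing.size();

  uintptr_t a = eden.par_allocate(64);
  uintptr_t b = eden.par_allocate(64);
  return (a != 0 && b == a + 64) ? 0 : 1;  // second block follows the first
}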

void DefNewGeneration::gc_prologue(bool full) {
// Ensure that _end and _soft_end are the same in eden space.
eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
return unsafe_max_alloc_nogc();
}
@ -24,6 +24,7 @@

class EdenSpace;
class ContiguousSpace;
class ScanClosure;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

@ -155,17 +156,21 @@ protected:
protected:
ScanWeakRefClosure* _cl;
CardTableRS* _rs;
template <class T> void do_oop_work(T* p);
public:
KeepAliveClosure(ScanWeakRefClosure* cl);
void do_oop(oop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};

class FastKeepAliveClosure: public KeepAliveClosure {
protected:
HeapWord* _boundary;
template <class T> void do_oop_work(T* p);
public:
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
void do_oop(oop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};

class EvacuateFollowersClosure: public VoidClosure {

@ -206,7 +211,7 @@ protected:
ContiguousSpace* from() const { return _from_space; }
ContiguousSpace* to() const { return _to_space; }

inline CompactibleSpace* first_compaction_space() const;
virtual CompactibleSpace* first_compaction_space() const;

// Space enquiries
size_t capacity() const;

@ -226,8 +231,8 @@ protected:

// Thread-local allocation buffers
bool supports_tlab_allocation() const { return true; }
inline size_t tlab_capacity() const;
inline size_t unsafe_max_tlab_alloc() const;
size_t tlab_capacity() const;
size_t unsafe_max_tlab_alloc() const;

// Grow the generation by the specified number of bytes.
// The size of bytes is assumed to be properly aligned.

@ -265,13 +270,13 @@ protected:
return result;
}

inline HeapWord* allocate(size_t word_size, bool is_tlab);
HeapWord* allocate(size_t word_size, bool is_tlab);
HeapWord* allocate_from_space(size_t word_size);

inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
HeapWord* par_allocate(size_t word_size, bool is_tlab);

// Prologue & Epilogue
inline virtual void gc_prologue(bool full);
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);

// Doesn't require additional work during GC prologue and epilogue

@ -307,7 +312,7 @@ protected:
bool is_tlab,
bool parallel = false);

oop copy_to_survivor_space(oop old, oop* from);
oop copy_to_survivor_space(oop old);
int tenuring_threshold() { return _tenuring_threshold; }

// Performance Counter support
@ -22,67 +22,60 @@
*
*/

CompactibleSpace* DefNewGeneration::first_compaction_space() const {
return eden();
}
// Methods of protected closure types

HeapWord* DefNewGeneration::allocate(size_t word_size,
bool is_tlab) {
// This is the slow-path allocation for the DefNewGeneration.
// Most allocations are fast-path in compiled code.
// We try to allocate from the eden. If that works, we are happy.
// Note that since DefNewGeneration supports lock-free allocation, we
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result != NULL) {
return result;
template <class T>
inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
{
// We never expect to see a null reference being processed
// as a weak reference.
assert (!oopDesc::is_null(*p), "expected non-null ref");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
assert (obj->is_oop(), "expected an oop while scanning weak refs");
}
do {
HeapWord* old_limit = eden()->soft_end();
if (old_limit < eden()->end()) {
// Tell the next generation we reached a limit.
HeapWord* new_limit =
next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
if (new_limit != NULL) {
Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
} else {
assert(eden()->soft_end() == eden()->end(),
"invalid state after allocation_limit_reached returned null");
}
} else {
// The allocation failed and the soft limit is equal to the hard limit,
// there are no reasons to do an attempt to allocate
assert(old_limit == eden()->end(), "sanity check");
break;
}
// Try to allocate until succeeded or the soft limit can't be adjusted
result = eden()->par_allocate(word_size);
} while (result == NULL);
#endif // ASSERT

// If the eden is full and the last collection bailed out, we are running
// out of heap space, and we try to allocate the from-space, too.
// allocate_from_space can't be inlined because that would introduce a
// circular dependency at compile time.
if (result == NULL) {
result = allocate_from_space(word_size);
_cl->do_oop_nv(p);

// Card marking is trickier for weak refs.
// This oop is a 'next' field which was filled in while we
// were discovering weak references. While we might not need
// to take a special action to keep this reference alive, we
// will need to dirty a card as the field was modified.
//
// Alternatively, we could create a method which iterates through
// each generation, allowing them in turn to examine the modified
// field.
//
// We could check that p is also in an older generation, but
// dirty cards in the youngest gen are never scanned, so the
// extra check probably isn't worthwhile.
if (Universe::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
_rs->inline_write_ref_field_gc(p, obj);
}
return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
bool is_tlab) {
return eden()->par_allocate(word_size);
}
template <class T>
inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
{
// We never expect to see a null reference being processed
// as a weak reference.
assert (!oopDesc::is_null(*p), "expected non-null ref");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
assert (obj->is_oop(), "expected an oop while scanning weak refs");
}
#endif // ASSERT

void DefNewGeneration::gc_prologue(bool full) {
// Ensure that _end and _soft_end are the same in eden space.
eden()->set_soft_end(eden()->end());
}
_cl->do_oop_nv(p);

size_t DefNewGeneration::tlab_capacity() const {
return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
return unsafe_max_alloc_nogc();
// Optimized for Defnew generation if it's the youngest generation:
// we set a younger_gen card if we have an older->youngest
// generation pointer.
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, obj);
}
}
@ -60,9 +60,9 @@ public:
hash_offset = java_lang_String::hash_offset_in_bytes();
}

void do_oop(oop* pobj) {
if (pobj != NULL) {
oop obj = *pobj;
void do_oop(oop* p) {
if (p != NULL) {
oop obj = *p;
if (obj->klass() == SystemDictionary::string_klass()) {

int hash;

@ -79,6 +79,7 @@ public:
}
}
}
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

@ -121,9 +122,8 @@ static bool mark_object(oop obj) {

class MarkObjectsOopClosure : public OopClosure {
public:
void do_oop(oop* pobj) {
mark_object(*pobj);
}
void do_oop(oop* p) { mark_object(*p); }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

@ -136,6 +136,7 @@ public:
mark_object(obj);
}
}
void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};

@ -554,6 +555,7 @@ public:
}
}
}
void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }
};

@ -690,6 +692,8 @@ public:
++top;
}

void do_oop(narrowOop* pobj) { ShouldNotReachHere(); }

void do_int(int* p) {
check_space();
*top = (oop)(intptr_t)*p;

@ -624,6 +624,7 @@ public:
void do_oop(oop* p) {
assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
}
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
@ -1300,8 +1301,7 @@ void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
oop obj,
size_t obj_size,
oop* ref) {
size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL;

@ -452,8 +452,7 @@ public:
// gen; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* gen,
oop obj,
size_t obj_size,
oop* ref);
size_t obj_size);

private:
// Accessor for memory state verification support

@ -73,8 +73,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,

VALIDATE_MARK_SWEEP_ONLY(
if (ValidateMarkSweep) {
guarantee(_root_refs_stack->length() == 0,
"should be empty by now");
guarantee(_root_refs_stack->length() == 0, "should be empty by now");
}
)

@ -165,9 +164,9 @@ void GenMarkSweep::allocate_stacks() {

#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
_root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
_other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
_adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<oop*>(100, true);
_root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
_other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
_adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true);
_live_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
_live_oops_size = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true);
@ -28,6 +28,11 @@ class CardTableRS;
class CardTableModRefBS;
class DefNewGeneration;

template<class E> class GenericTaskQueue;
typedef GenericTaskQueue<oop> OopTaskQueue;
template<class E> class GenericTaskQueueSet;
typedef GenericTaskQueueSet<oop> OopTaskQueueSet;

// Closure for iterating roots from a particular generation
// Note: all classes deriving from this MUST call this do_barrier
// method at the end of their own do_oop method!

@ -35,13 +40,13 @@ class DefNewGeneration;

class OopsInGenClosure : public OopClosure {
private:
Generation* _orig_gen; // generation originally set in ctor
Generation* _gen; // generation being scanned
Generation* _orig_gen; // generation originally set in ctor
Generation* _gen; // generation being scanned

protected:
// Some subtypes need access.
HeapWord* _gen_boundary; // start of generation
CardTableRS* _rs; // remembered set
HeapWord* _gen_boundary; // start of generation
CardTableRS* _rs; // remembered set

// For assertions
Generation* generation() { return _gen; }

@ -49,7 +54,7 @@ class OopsInGenClosure : public OopClosure {

// Derived classes that modify oops so that they might be old-to-young
// pointers must call the method below.
void do_barrier(oop* p);
template <class T> void do_barrier(T* p);

public:
OopsInGenClosure() : OopClosure(NULL),

@ -75,14 +80,17 @@ class OopsInGenClosure : public OopClosure {
// This closure will perform barrier store calls for ALL
// pointers in scanned oops.
class ScanClosure: public OopsInGenClosure {
protected:
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
bool _gc_barrier;
public:
HeapWord* _boundary;
bool _gc_barrier;
template <class T> inline void do_oop_work(T* p);
public:
ScanClosure(DefNewGeneration* g, bool gc_barrier);
void do_oop(oop* p);
void do_oop_nv(oop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;

@ -95,14 +103,17 @@ public:
// pointers into the DefNewGeneration. This is less
// precise, but faster, than a ScanClosure
class FastScanClosure: public OopsInGenClosure {
protected:
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
bool _gc_barrier;
public:
HeapWord* _boundary;
bool _gc_barrier;
template <class T> inline void do_oop_work(T* p);
public:
FastScanClosure(DefNewGeneration* g, bool gc_barrier);
void do_oop(oop* p);
void do_oop_nv(oop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
bool do_header() { return false; }
Prefetch::style prefetch_style() {
return Prefetch::do_write;

@ -110,19 +121,27 @@ public:
};

class FilteringClosure: public OopClosure {
HeapWord* _boundary;
private:
HeapWord* _boundary;
OopClosure* _cl;
public:
protected:
template <class T> inline void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
_cl->do_oop(p);
}
}
}
public:
FilteringClosure(HeapWord* boundary, OopClosure* cl) :
OopClosure(cl->_ref_processor), _boundary(boundary),
_cl(cl) {}
void do_oop(oop* p);
void do_oop_nv(oop* p) {
oop obj = *p;
if ((HeapWord*)obj < _boundary && obj != NULL) {
_cl->do_oop(p);
}
}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { FilteringClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p); }
bool do_header() { return false; }
};

@ -131,19 +150,26 @@ public:
// OopsInGenClosure -- weak references are processed all
// at once, with no notion of which generation they were in.
class ScanWeakRefClosure: public OopClosure {
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
public:
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
template <class T> inline void do_oop_work(T* p);
public:
ScanWeakRefClosure(DefNewGeneration* g);
void do_oop(oop* p);
void do_oop_nv(oop* p);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p);
inline void do_oop_nv(narrowOop* p);
};

class VerifyOopClosure: public OopClosure {
public:
void do_oop(oop* p) {
guarantee((*p)->is_oop_or_null(), "invalid oop");
protected:
template <class T> inline void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj->is_oop_or_null(), "invalid oop");
}
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
static VerifyOopClosure verify_oop;
};
@ -38,10 +38,10 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
}
}

inline void OopsInGenClosure::do_barrier(oop* p) {
template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
oop obj = *p;
assert(obj != NULL, "expected non-null object");
assert(!oopDesc::is_null(*p), "expected non-null object");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);

@ -49,18 +49,17 @@ inline void OopsInGenClosure::do_barrier(oop* p) {
}

// NOTE! Any changes made here should also be made
// in FastScanClosure::do_oop();
inline void ScanClosure::do_oop(oop* p) {
oop obj = *p;
// in FastScanClosure::do_oop_work()
template <class T> inline void ScanClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
if (obj != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
if (obj->is_forwarded()) {
*p = obj->forwardee();
} else {
*p = _g->copy_to_survivor_space(obj, p);
}
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (_gc_barrier) {
// Now call parent closure

@ -69,23 +68,21 @@ inline void ScanClosure::do_oop(oop* p) {
}
}

inline void ScanClosure::do_oop_nv(oop* p) {
ScanClosure::do_oop(p);
}
inline void ScanClosure::do_oop_nv(oop* p) { ScanClosure::do_oop_work(p); }
inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p); }
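Editor's note: ScanClosure::do_oop_work above decides, per reference, whether to reuse an existing forwarding pointer or copy the object to survivor space, and then writes the new location back through the encode helper so the same body works for both field widths. Below is a standalone sketch of that forward-or-copy step; the structures and names are illustrative, not DefNewGeneration's.

#include <cassert>
#include <cstddef>

// Illustrative object with a forwarding slot (assumed layout).
struct Obj {
  Obj* forwardee;  // non-null once the object has been evacuated
  int  payload;
};

// Pretend "copy to survivor space": clone the object and install a forwarding pointer.
Obj* copy_to_survivor_space(Obj* old_obj, Obj* to_space, size_t* to_used) {
  Obj* copy = &to_space[(*to_used)++];
  copy->forwardee = nullptr;
  copy->payload   = old_obj->payload;
  old_obj->forwardee = copy;   // later references reuse this
  return copy;
}

// The per-field step: forward if already copied, otherwise copy now, then update the field.
void scan_field(Obj** field, Obj* to_space, size_t* to_used) {
  Obj* obj = *field;
  if (obj == nullptr) return;
  Obj* new_obj = (obj->forwardee != nullptr)
                   ? obj->forwardee
                   : copy_to_survivor_space(obj, to_space, to_used);
  *field = new_obj;            // models encode_store_heap_oop_not_null
}

int main() {
  Obj from[2] = { {nullptr, 7}, {nullptr, 9} };
  Obj to[2];
  size_t used = 0;
  Obj* f1 = &from[0];
  Obj* f2 = &from[0];          // two fields referencing the same object
  scan_field(&f1, to, &used);
  scan_field(&f2, to, &used);
  assert(f1 == f2 && used == 1);  // copied once, second field just forwarded
  assert(f1->payload == 7);
  return 0;
}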

// NOTE! Any changes made here should also be made
// in ScanClosure::do_oop();
inline void FastScanClosure::do_oop(oop* p) {
oop obj = *p;
// in ScanClosure::do_oop_work()
template <class T> inline void FastScanClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
// Should we copy the obj?
if (obj != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
if (obj->is_forwarded()) {
*p = obj->forwardee();
} else {
*p = _g->copy_to_survivor_space(obj, p);
}
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (_gc_barrier) {
// Now call parent closure
do_barrier(p);

@ -94,26 +91,22 @@ inline void FastScanClosure::do_oop(oop* p) {
}
}

inline void FastScanClosure::do_oop_nv(oop* p) {
FastScanClosure::do_oop(p);
}
inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_work(p); }
inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }

// Note similarity to ScanClosure; the difference is that
// the barrier set is taken care of outside this closure.
inline void ScanWeakRefClosure::do_oop(oop* p) {
oop obj = *p;
assert (obj != NULL, "null weak reference?");
template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
assert(!oopDesc::is_null(*p), "null weak reference?");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
if (obj->is_forwarded()) {
*p = obj->forwardee();
} else {
*p = _g->copy_to_survivor_space(obj, p);
}
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}

inline void ScanWeakRefClosure::do_oop_nv(oop* p) {
ScanWeakRefClosure::do_oop(p);
}
inline void ScanWeakRefClosure::do_oop_nv(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
inline void ScanWeakRefClosure::do_oop_nv(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }