Mirror of https://github.com/openjdk/jdk.git
6700789: G1: Enable use of compressed oops with G1 heaps

Modifications to G1 so as to allow the use of compressed oops.

Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp

parent 50d7db1805
commit 075c1335cb

58 changed files with 1233 additions and 1175 deletions
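Editorial note: the heart of this change is that G1's oop buffers and task queues stop carrying raw oop* slots and instead carry StarTask entries, each of which can hold either an oop* or a narrowOop*. Below is a minimal, self-contained sketch of that idea; the stand-in types, the tag value, and main() are illustrative assumptions, not HotSpot's actual taskqueue code.

#include <cstdint>
#include <cstdio>

// Hypothetical, simplified model of StarTask: one word holds either an
// oop* or a narrowOop*, discriminated by the low bit. Pointers into the
// heap are at least 4-byte aligned, so that bit is always free.
struct oopDesc_ {};
using  oop       = oopDesc_*;
using  narrowOop = uint32_t;

class StarTask {
  uintptr_t _holder;
  enum { NARROW_MASK = 1 };
public:
  StarTask() : _holder(0) {}
  StarTask(oop* p)       : _holder((uintptr_t)p) {}
  StarTask(narrowOop* p) : _holder((uintptr_t)p | NARROW_MASK) {}
  bool is_narrow() const { return (_holder & NARROW_MASK) != 0; }
  operator oop*()       const { return (oop*)_holder; }
  operator narrowOop*() const { return (narrowOop*)(_holder & ~(uintptr_t)NARROW_MASK); }
};

// The dispatch idiom the diff uses in process_buffer() and do_void():
static void visit(StarTask t) {
  if (t.is_narrow()) std::printf("narrowOop* slot at %p\n", (void*)(narrowOop*)t);
  else               std::printf("oop* slot at %p\n",       (void*)(oop*)t);
}

int main() {
  narrowOop n = 0;
  oop       w = nullptr;
  visit(StarTask(&n));   // compressed slot
  visit(StarTask(&w));   // full-width slot
  return 0;
}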
@@ -4208,6 +4208,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
            PtrQueue::byte_offset_of_active()),
       tmp);
   }

   // Check on whether to annul.
   br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
   delayed() -> nop();
@@ -4215,13 +4216,13 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
   // satb_log_barrier_work1(tmp, offset);
   if (index == noreg) {
     if (Assembler::is_simm13(offset)) {
-      ld_ptr(obj, offset, tmp);
+      load_heap_oop(obj, offset, tmp);
     } else {
       set(offset, tmp);
-      ld_ptr(obj, tmp, tmp);
+      load_heap_oop(obj, tmp, tmp);
     }
   } else {
-    ld_ptr(obj, index, tmp);
+    load_heap_oop(obj, index, tmp);
   }

   // satb_log_barrier_work2(obj, tmp, offset);
@@ -6805,14 +6805,18 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   jcc(Assembler::equal, done);

   // if (x.f == NULL) goto done;
-  cmpptr(Address(obj, 0), NULL_WORD);
+#ifdef _LP64
+  load_heap_oop(tmp2, Address(obj, 0));
+#else
+  movptr(tmp2, Address(obj, 0));
+#endif
+  cmpptr(tmp2, (int32_t) NULL_WORD);
   jcc(Assembler::equal, done);

   // Can we store original value in the thread's buffer?

-  LP64_ONLY(movslq(tmp, index);)
-  movptr(tmp2, Address(obj, 0));
 #ifdef _LP64
+  movslq(tmp, index);
+  cmpq(tmp, 0);
+#else
+  cmpl(index, 0);
@@ -6834,8 +6838,7 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   if(tosca_live) push(rax);
   push(obj);
 #ifdef _LP64
-  movq(c_rarg0, Address(obj, 0));
-  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, r15_thread);
 #else
   push(thread);
   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
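For orientation, here is a C++-level rendering of the SATB pre-barrier that the two g1_write_barrier_pre hunks above emit. It is a hedged sketch under stand-in names (marking_active, enqueue_pre_val), not HotSpot source; the compressed-oops change is confined to the load of the previous value, which must decode a 32-bit narrow oop (load_heap_oop) instead of reading a raw pointer (ld_ptr/movptr).

#include <cstdio>

struct oopDesc {};
using oop = oopDesc*;

static bool marking_active = true;          // the PtrQueue "active" flag

static void enqueue_pre_val(oop pre_val) {  // SATB queue stand-in
  std::printf("logged pre-value %p\n", (void*)pre_val);
}

// g1_write_barrier_pre at C++ level: before a reference field is
// overwritten, log its previous value if concurrent marking is active.
static void g1_pre_barrier(oop* field) {
  if (!marking_active) return;  // cheap filter, checked first
  oop pre_val = *field;         // load_heap_oop: with compressed oops this
                                // load also decodes a 32-bit narrow oop
  if (pre_val != nullptr) {
    enqueue_pre_val(pre_val);   // satb_log_barrier_work*
  }
}

int main() {
  oopDesc obj;
  oop slot = &obj;
  g1_pre_barrier(&slot);
  return 0;
}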
@@ -269,11 +269,11 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,

 #ifndef PRODUCT
 void trace_method_handle_stub(const char* adaptername,
-                              oop mh,
+                              oopDesc* mh,
                               intptr_t* entry_sp,
                               intptr_t* saved_sp) {
   // called as a leaf from native code: do not block the JVM!
-  printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, mh, entry_sp, entry_sp - saved_sp);
+  printf("MH %s "PTR_FORMAT" "PTR_FORMAT" "INTX_FORMAT"\n", adaptername, (void*)mh, entry_sp, entry_sp - saved_sp);
 }
 #endif //PRODUCT
@@ -709,7 +709,7 @@ class StubGenerator: public StubCodeGenerator {
   //
   //  Input:
   //     start    - starting address
-  //     end      - element count
+  //     count    - element count
   void gen_write_ref_array_pre_barrier(Register start, Register count) {
     assert_different_registers(start, count);
     BarrierSet* bs = Universe::heap()->barrier_set();
@@ -757,7 +757,6 @@ class StubGenerator: public StubCodeGenerator {
           __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
           __ addptr(rsp, 2*wordSize);
           __ popa();
-
         }
         break;
@@ -1207,9 +1207,9 @@ class StubGenerator: public StubCodeGenerator {
            __ pusha();             // push registers (overkill)
            // must compute element count unless barrier set interface is changed (other platforms supply count)
            assert_different_registers(start, end, scratch);
-           __ lea(scratch, Address(end, wordSize));
-           __ subptr(scratch, start);
-           __ shrptr(scratch, LogBytesPerWord);
+           __ lea(scratch, Address(end, BytesPerHeapOop));
+           __ subptr(scratch, start);               // subtract start to get #bytes
+           __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, scratch);
            __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
@@ -1225,6 +1225,7 @@ class StubGenerator: public StubCodeGenerator {
       Label L_loop;

       __ shrptr(start, CardTableModRefBS::card_shift);
+      __ addptr(end, BytesPerHeapOop);
       __ shrptr(end, CardTableModRefBS::card_shift);
       __ subptr(end, start); // number of bytes to copy
@@ -2251,6 +2252,7 @@ class StubGenerator: public StubCodeGenerator {
     // and report their number to the caller.
     assert_different_registers(rax, r14_length, count, to, end_to, rcx);
     __ lea(end_to, to_element_addr);
+    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
     gen_write_ref_array_post_barrier(to, end_to, rscratch1);
     __ movptr(rax, r14_length);           // original oops
     __ addptr(rax, count);                // K = (original - remaining) oops
@@ -2259,7 +2261,7 @@ class StubGenerator: public StubCodeGenerator {

   // Come here on success only.
   __ BIND(L_do_card_marks);
-  __ addptr(end_to, -wordSize);         // make an inclusive end pointer
+  __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
   gen_write_ref_array_post_barrier(to, end_to, rscratch1);
   __ xorptr(rax, rax);                  // return 0 on success
@@ -42,35 +42,40 @@ protected:
     BufferLength = 1024
   };

-  oop      *_buffer[BufferLength];
-  oop     **_buffer_top;
-  oop     **_buffer_curr;
+  StarTask  _buffer[BufferLength];
+  StarTask* _buffer_top;
+  StarTask* _buffer_curr;

   OopClosure* _oc;
   double      _closure_app_seconds;

   void process_buffer () {
     double start = os::elapsedTime();
-    for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
-      _oc->do_oop(*curr);
+    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
+      if (curr->is_narrow()) {
+        assert(UseCompressedOops, "Error");
+        _oc->do_oop((narrowOop*)(*curr));
+      } else {
+        _oc->do_oop((oop*)(*curr));
+      }
     }
     _buffer_curr = _buffer;
     _closure_app_seconds += (os::elapsedTime() - start);
   }

 public:
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop *p) {
+  template <class T> inline void do_oop_work(T* p) {
     if (_buffer_curr == _buffer_top) {
       process_buffer();
     }
-    *_buffer_curr = p;
+    StarTask new_ref(p);
+    *_buffer_curr = new_ref;
     ++_buffer_curr;
   }

+public:
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

   void done () {
     if (_buffer_curr > _buffer) {
       process_buffer();
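The rewrite above is the template this commit applies to closure after closure: the old pair — do_oop(oop*) carrying the logic and do_oop(narrowOop*) that failed with "NYI" — becomes one do_oop_work template plus two thin virtual forwarders, so the same logic serves both pointer widths. A self-contained sketch of the pattern (the types are stand-ins, not HotSpot's):

#include <cstdint>
#include <cstdio>

using narrowOop = uint32_t;
struct oopDesc {};
using oop = oopDesc*;

class OopClosure {
public:
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
  virtual ~OopClosure() {}
};

// New shape: one template worker, two thin virtual forwarders.
class CountingClosure : public OopClosure {
  int _n;
  template <class T> void do_oop_work(T* p) { if (*p) _n++; }  // shared logic
public:
  CountingClosure() : _n(0) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  int count() const { return _n; }
};

int main() {
  CountingClosure cl;
  oopDesc obj;
  oop w = &obj;         // full-width slot
  narrowOop n = 42;     // compressed slot
  cl.do_oop(&w);
  cl.do_oop(&n);
  std::printf("%d\n", cl.count());  // prints 2
  return 0;
}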
@@ -88,18 +93,17 @@ public:
 class BufferingOopsInGenClosure: public OopsInGenClosure {
   BufferingOopClosure _boc;
   OopsInGenClosure* _oc;
+protected:
+  template <class T> inline void do_oop_work(T* p) {
+    assert(generation()->is_in_reserved((void*)p), "Must be in!");
+    _boc.do_oop(p);
+  }
 public:
   BufferingOopsInGenClosure(OopsInGenClosure *oc) :
     _boc(oc), _oc(oc) {}

-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  virtual void do_oop(oop* p) {
-    assert(generation()->is_in_reserved(p), "Must be in!");
-    _boc.do_oop(p);
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }

   void done() {
     _boc.done();
@@ -130,9 +134,9 @@ private:
     BufferLength = 1024
   };

-  oop          *_buffer[BufferLength];
-  oop         **_buffer_top;
-  oop         **_buffer_curr;
+  StarTask      _buffer[BufferLength];
+  StarTask*     _buffer_top;
+  StarTask*     _buffer_curr;

   HeapRegion*  _hr_buffer[BufferLength];
   HeapRegion** _hr_curr;
@@ -148,13 +152,18 @@ private:
     double start = os::elapsedTime();
     HeapRegion** hr_curr = _hr_buffer;
     HeapRegion*  hr_prev = NULL;
-    for (oop **curr = _buffer; curr < _buffer_curr; ++curr) {
+    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
       HeapRegion* region = *hr_curr;
       if (region != hr_prev) {
         _oc->set_region(region);
         hr_prev = region;
       }
-      _oc->do_oop(*curr);
+      if (curr->is_narrow()) {
+        assert(UseCompressedOops, "Error");
+        _oc->do_oop((narrowOop*)(*curr));
+      } else {
+        _oc->do_oop((oop*)(*curr));
+      }
       ++hr_curr;
     }
     _buffer_curr = _buffer;
@@ -163,17 +172,16 @@ private:
   }

 public:
-  virtual void do_oop(narrowOop *p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  virtual void do_oop(oop *p) {
+  template <class T> void do_oop_work(T* p) {
     if (_buffer_curr == _buffer_top) {
       assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
       process_buffer();
     }
-    *_buffer_curr = p;
+    StarTask new_ref(p);
+    *_buffer_curr = new_ref;
     ++_buffer_curr;
     *_hr_curr = _from;
     ++_hr_curr;
@@ -452,13 +452,10 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _regionStack.allocate(G1MarkRegionStackSize);

   // Create & start a ConcurrentMark thread.
-  if (G1ConcMark) {
     _cmThread = new ConcurrentMarkThread(this);
     assert(cmThread() != NULL, "CM Thread should have been created");
     assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
-  } else {
-    _cmThread = NULL;
-  }

   _g1h = G1CollectedHeap::heap();
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
@@ -783,18 +780,18 @@ public:
            bool do_barrier) : _cm(cm), _g1h(g1h),
                               _do_barrier(do_barrier) { }

-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  virtual void do_oop(oop* p) {
-    oop thisOop = *p;
-    if (thisOop != NULL) {
-      assert(thisOop->is_oop() || thisOop->mark() == NULL,
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      assert(obj->is_oop() || obj->mark() == NULL,
              "expected an oop, possibly with mark word displaced");
-      HeapWord* addr = (HeapWord*)thisOop;
+      HeapWord* addr = (HeapWord*)obj;
       if (_g1h->is_in_g1_reserved(addr)) {
-        _cm->grayRoot(thisOop);
+        _cm->grayRoot(obj);
       }
     }
     if (_do_barrier) {
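The oopDesc::load_heap_oop / decode_heap_oop_not_null calls introduced here hide whether a field holds a full pointer or a 32-bit compressed oop. A minimal, self-contained model of the compression arithmetic, assuming a 3-bit alignment shift and an arbitrary pretend heap base (the real accessors live elsewhere in HotSpot and also handle zero-based and unscaled modes):

#include <cstdint>
#include <cstdio>

using narrowOop = uint32_t;

static uintptr_t heap_base = 0;
static const unsigned kLogMinObjAlignment = 3;   // 8-byte-aligned objects

static narrowOop encode_heap_oop_not_null(uintptr_t addr) {
  // store the 64-bit address as a shifted 32-bit offset from the base
  return (narrowOop)((addr - heap_base) >> kLogMinObjAlignment);
}
static uintptr_t decode_heap_oop_not_null(narrowOop v) {
  // reverse: scale the offset back up and rebase it
  return heap_base + ((uintptr_t)v << kLogMinObjAlignment);
}

int main() {
  heap_base = 0x100000000ULL;               // pretend 4 GB heap base
  uintptr_t obj = heap_base + 0x1234 * 8;   // an aligned object address
  narrowOop n = encode_heap_oop_not_null(obj);
  std::printf("narrow 0x%x -> %p\n", n, (void*)decode_heap_oop_not_null(n));
  return decode_heap_oop_not_null(n) == obj ? 0 : 1;
}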
@@ -850,16 +847,6 @@ void ConcurrentMark::checkpointRootsInitial() {
   double start = os::elapsedTime();
   GCOverheadReporter::recordSTWStart(start);

-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // YSR: we won't do this for now -- it's an optimization to be
-  // done post-beta.
-
-  // YSR: ignoring weak refs for now; will do at bug fixing stage
-  // EVM: assert(discoveredRefsAreClear());
-
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_init_start();
   checkpointRootsInitialPre();
@@ -1135,6 +1122,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     return;
   }

+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(before)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(true, false, true);
+  }
+
   G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
@@ -1159,8 +1153,10 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   JavaThread::satb_mark_queue_set().set_active_all_threads(false);

   if (VerifyDuringGC) {
-    g1h->prepare_for_verify();
-    g1h->verify(/* allow_dirty */      true,
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(after)");
+    Universe::heap()->prepare_for_verify();
+    Universe::heap()->verify(/* allow_dirty */ true,
                 /* silent */           false,
                 /* use_prev_marking */ false);
   }
@@ -1658,6 +1654,15 @@ void ConcurrentMark::cleanup() {
     return;
   }

+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(before)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(/* allow dirty  */ true,
+                     /* silent       */ false,
+                     /* prev marking */ true);
+  }
+
   _cleanup_co_tracker.disable();

   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@@ -1790,10 +1795,12 @@ void ConcurrentMark::cleanup() {
   g1h->increment_total_collections();

   if (VerifyDuringGC) {
-    g1h->prepare_for_verify();
-    g1h->verify(/* allow_dirty */      true,
+    HandleMark hm;  // handle scope
+    gclog_or_tty->print(" VerifyDuringGC:(after)");
+    Universe::heap()->prepare_for_verify();
+    Universe::verify(/* allow dirty  */ true,
                 /* silent */           false,
-                /* use_prev_marking */ true);
+                /* prev marking */     true);
   }
 }
@@ -1852,12 +1859,11 @@ class G1CMKeepAliveClosure: public OopClosure {
                        _g1(g1), _cm(cm),
                        _bitMap(bitMap) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop thisOop = *p;
+  template <class T> void do_oop_work(T* p) {
+    oop thisOop = oopDesc::load_decode_heap_oop(p);
     HeapWord* addr = (HeapWord*)thisOop;
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) {
       _bitMap->mark(addr);
@@ -2016,12 +2022,11 @@ public:
   ReachablePrinterOopClosure(CMBitMapRO* bitmap, outputStream* out) :
     _bitmap(bitmap), _g1h(G1CollectedHeap::heap()), _out(out) { }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(      oop* p) { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop obj = *p;
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
     const char* str = NULL;
     const char* str2 = "";
@@ -2163,6 +2168,7 @@ void ConcurrentMark::deal_with_reference(oop obj) {

   HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
     tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
     HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -2380,7 +2386,7 @@ class CSMarkOopClosure: public OopClosure {
     }
   }

-  bool drain() {
+  template <class T> bool drain() {
     while (_ms_ind > 0) {
       oop obj = pop();
       assert(obj != NULL, "Since index was non-zero.");
@@ -2394,9 +2400,8 @@ class CSMarkOopClosure: public OopClosure {
         }
         // Now process this portion of this one.
         int lim = MIN2(next_arr_ind, len);
-        assert(!UseCompressedOops, "This needs to be fixed");
         for (int j = arr_ind; j < lim; j++) {
-          do_oop(aobj->obj_at_addr<oop>(j));
+          do_oop(aobj->obj_at_addr<T>(j));
         }

       } else {
@@ -2423,13 +2428,13 @@ public:
     FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
   }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj == NULL) return;
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (oopDesc::is_null(heap_oop)) return;
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (obj->is_forwarded()) {
       // If the object has already been forwarded, we have to make sure
       // that it's marked. So follow the forwarding pointer. Note that
@@ -2478,7 +2483,11 @@ public:
       oop obj = oop(addr);
       if (!obj->is_forwarded()) {
         if (!_oop_cl.push(obj)) return false;
-        if (!_oop_cl.drain()) return false;
+        if (UseCompressedOops) {
+          if (!_oop_cl.drain<narrowOop>()) return false;
+        } else {
+          if (!_oop_cl.drain<oop>()) return false;
+        }
       }
       // Otherwise...
       return true;
@@ -2636,9 +2645,6 @@ void ConcurrentMark::disable_co_trackers() {

 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // If we're not marking, nothing to do.
-  if (!G1ConcMark) return;
-
   // Clear all marks to force marking thread to do nothing
   _nextMarkBitMap->clearAll();
   // Empty mark stack
@@ -2814,14 +2820,14 @@ private:
   CMTask* _task;

 public:
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }

-  void do_oop(oop* p) {
+  template <class T> void do_oop_work(T* p) {
     tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
     tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );

-    oop obj = *p;
+    oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] we're looking at location "
                              "*"PTR_FORMAT" = "PTR_FORMAT,
@@ -2967,6 +2973,7 @@ void CMTask::deal_with_reference(oop obj) {
   ++_refs_reached;

   HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
     tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
     HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -3030,6 +3037,7 @@ void CMTask::deal_with_reference(oop obj) {
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
+  tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
   tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
   tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );
@@ -3275,6 +3283,8 @@ void CMTask::drain_local_queue(bool partially) {

       tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
                         "invariant" );
+      tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
+                        "invariant" );

       scan_object(obj);
@@ -763,6 +763,7 @@ private:
   CMBitMap*                   _nextMarkBitMap;
   // the task queue of this task
   CMTaskQueue*                _task_queue;
+private:
   // the task queue set---needed for stealing
   CMTaskQueueSet*             _task_queues;
   // indicates whether the task has been claimed---this is only for
@@ -424,7 +424,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
     while (n <= next_boundary) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += obj->size();
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@@ -436,7 +436,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
     while (n <= next_boundary) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += _sp->block_size(q);
     }
     assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@@ -96,14 +96,14 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
     while (n <= addr) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += obj->size();
     }
   } else {
     while (n <= addr) {
       q = n;
       oop obj = oop(q);
-      if (obj->klass() == NULL) return q;
+      if (obj->klass_or_null() == NULL) return q;
       n += _sp->block_size(q);
     }
   }
@@ -115,7 +115,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
 inline HeapWord*
 G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                      const void* addr) {
-  if (oop(q)->klass() == NULL) return q;
+  if (oop(q)->klass_or_null() == NULL) return q;
   HeapWord* n = q + _sp->block_size(q);
   // In the normal case, where the query "addr" is a card boundary, and the
   // offset table chunks are the same size as cards, the block starting at
@@ -1655,11 +1655,14 @@ void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
 // Computes the sum of the storage used by the various regions.

 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
+  // Temporarily, until 6859911 is fixed. XXX
+  // assert(Heap_lock->owner() != NULL,
+  //        "Should be owned on this thread's behalf.");
   size_t result = _summary_bytes_used;
-  if (_cur_alloc_region != NULL)
-    result += _cur_alloc_region->used();
+  // Read only once in case it is set to NULL concurrently
+  HeapRegion* hr = _cur_alloc_region;
+  if (hr != NULL)
+    result += hr->used();
   return result;
 }
@@ -2133,12 +2136,12 @@ public:
   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
     g1h = _g1h;
   }
-  void do_oop(narrowOop *p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop *p) {
-    oop obj = *p;
-    assert(obj == NULL || !g1h->is_obj_dead(obj),
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
            "Dead object referenced by a not dead object");
   }
 };
@@ -2206,8 +2209,10 @@ public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
-    : _allow_dirty(allow_dirty), _par(par),
+    : _allow_dirty(allow_dirty),
+      _par(par),
       _use_prev_marking(use_prev_marking) {}

   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
@@ -2231,18 +2236,16 @@ public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRootsClosure(bool use_prev_marking) :
-    _g1h(G1CollectedHeap::heap()), _failures(false),
+    _g1h(G1CollectedHeap::heap()),
+    _failures(false),
     _use_prev_marking(use_prev_marking) { }

   bool failures() { return _failures; }

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-
-  void do_oop(oop* p) {
-    oop obj = *p;
-    if (obj != NULL) {
+  template <class T> void do_oop_nv(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
@@ -2251,6 +2254,9 @@ public:
       }
     }
   }
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
 };

 // This is the task used for parallel heap verification.
@@ -2267,7 +2273,8 @@ public:
   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
                   bool use_prev_marking) :
     AbstractGangTask("Parallel verify task"),
-    _g1h(g1h), _allow_dirty(allow_dirty),
+    _g1h(g1h),
+    _allow_dirty(allow_dirty),
     _use_prev_marking(use_prev_marking) { }

   void work(int worker_i) {
@@ -2479,14 +2486,12 @@ void G1CollectedHeap::do_collection_pause() {

 void
 G1CollectedHeap::doConcurrentMark() {
-  if (G1ConcMark) {
     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
     if (!_cmThread->in_progress()) {
       _cmThread->set_started();
       CGC_lock->notify();
     }
-  }
 }

 class VerifyMarkedObjsClosure: public ObjectClosure {
   G1CollectedHeap* _g1h;
@@ -2561,9 +2566,11 @@ G1CollectedHeap::setup_surviving_young_words() {
               "Not enough space for young surv words summary.");
   }
   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+#ifdef ASSERT
   for (size_t i = 0;  i < array_length; ++i) {
-    guarantee( _surviving_young_words[i] == 0, "invariant" );
+    assert( _surviving_young_words[i] == 0, "memset above" );
   }
+#endif // !ASSERT
 }

 void
@@ -2649,7 +2656,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     COMPILER2_PRESENT(DerivedPointerTable::clear());

     // We want to turn off ref discovery, if necessary, and turn it back on
-    // on again later if we do.
+    // on again later if we do. XXX Dubious: why is discovery disabled?
     bool was_enabled = ref_processor()->discovery_enabled();
     if (was_enabled) ref_processor()->disable_discovery();
@@ -2662,9 +2669,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     double start_time_sec = os::elapsedTime();
     GCOverheadReporter::recordSTWStart(start_time_sec);
     size_t start_used_bytes = used();
-    if (!G1ConcMark) {
-      do_sync_mark();
-    }

     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
@@ -2775,6 +2779,13 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
         g1_policy()->should_initiate_conc_mark()) {
       concurrent_mark()->checkpointRootsInitialPost();
       set_marking_started();
+      // CAUTION: after the doConcurrentMark() call below,
+      // the concurrent marking thread(s) could be running
+      // concurrently with us. Make sure that anything after
+      // this point does not assume that we are the only GC thread
+      // running. Note: of course, the actual marking work will
+      // not start until the safepoint itself is released in
+      // ConcurrentGCThread::safepoint_desynchronize().
       doConcurrentMark();
     }
@@ -3123,9 +3134,7 @@ class G1KeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
+  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop(      oop* p) {
     oop obj = *p;
 #ifdef G1_DEBUG
@@ -3138,7 +3147,6 @@ public:
     if (_g1->obj_in_cs(obj)) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
-
 #ifdef G1_DEBUG
       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
                              (void*) obj, (void*) *p);
@@ -3155,12 +3163,12 @@ public:
   UpdateRSetImmediate(G1CollectedHeap* g1) :
     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (*p != NULL && !_from->is_survivor()) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
       _g1_rem_set->par_write_ref(_from, p, 0);
     }
   }
@@ -3176,12 +3184,12 @@ public:
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}

-  void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  void do_oop(oop* p) {
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  template <class T> void do_oop_work(T* p) {
     assert(_from->is_in_reserved(p), "paranoia");
-    if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
+    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
+        !_from->is_survivor()) {
       size_t card_index = _ct_bs->index_for(p);
       if (_ct_bs->mark_card_deferred(card_index)) {
         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
@@ -3536,316 +3544,15 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
     fill_with_object(block, free_words);
 }

-#define use_local_bitmaps 1
-#define verify_local_bitmaps 0
-
-#ifndef PRODUCT
-
-class GCLabBitMap;
-class GCLabBitMapClosure: public BitMapClosure {
-private:
-  ConcurrentMark* _cm;
-  GCLabBitMap*    _bitmap;
-
-public:
-  GCLabBitMapClosure(ConcurrentMark* cm,
-                     GCLabBitMap* bitmap) {
-    _cm     = cm;
-    _bitmap = bitmap;
-  }
-
-  virtual bool do_bit(size_t offset);
-};
-
-#endif // PRODUCT
-
-#define oop_buffer_length 256
-
-class GCLabBitMap: public BitMap {
-private:
-  ConcurrentMark* _cm;
-
-  int       _shifter;
-  size_t    _bitmap_word_covers_words;
-
-  // beginning of the heap
-  HeapWord* _heap_start;
-
-  // this is the actual start of the GCLab
-  HeapWord* _real_start_word;
-
-  // this is the actual end of the GCLab
-  HeapWord* _real_end_word;
-
-  // this is the first word, possibly located before the actual start
-  // of the GCLab, that corresponds to the first bit of the bitmap
-  HeapWord* _start_word;
-
-  // size of a GCLab in words
-  size_t _gclab_word_size;
-
-  static int shifter() {
-    return MinObjAlignment - 1;
-  }
-
-  // how many heap words does a single bitmap word corresponds to?
-  static size_t bitmap_word_covers_words() {
-    return BitsPerWord << shifter();
-  }
-
-  static size_t gclab_word_size() {
-    return G1ParallelGCAllocBufferSize / HeapWordSize;
-  }
-
-  static size_t bitmap_size_in_bits() {
-    size_t bits_in_bitmap = gclab_word_size() >> shifter();
-    // We are going to ensure that the beginning of a word in this
-    // bitmap also corresponds to the beginning of a word in the
-    // global marking bitmap. To handle the case where a GCLab
-    // starts from the middle of the bitmap, we need to add enough
-    // space (i.e. up to a bitmap word) to ensure that we have
-    // enough bits in the bitmap.
-    return bits_in_bitmap + BitsPerWord - 1;
-  }
-public:
-  GCLabBitMap(HeapWord* heap_start)
-    : BitMap(bitmap_size_in_bits()),
-      _cm(G1CollectedHeap::heap()->concurrent_mark()),
-      _shifter(shifter()),
-      _bitmap_word_covers_words(bitmap_word_covers_words()),
-      _heap_start(heap_start),
-      _gclab_word_size(gclab_word_size()),
-      _real_start_word(NULL),
-      _real_end_word(NULL),
-      _start_word(NULL)
-  {
-    guarantee( size_in_words() >= bitmap_size_in_words(),
-               "just making sure");
-  }
-
-  inline unsigned heapWordToOffset(HeapWord* addr) {
-    unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
-    assert(offset < size(), "offset should be within bounds");
-    return offset;
-  }
-
-  inline HeapWord* offsetToHeapWord(size_t offset) {
-    HeapWord* addr = _start_word + (offset << _shifter);
-    assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
-    return addr;
-  }
-
-  bool fields_well_formed() {
-    bool ret1 = (_real_start_word == NULL) &&
-                (_real_end_word == NULL) &&
-                (_start_word == NULL);
-    if (ret1)
-      return true;
-
-    bool ret2 = _real_start_word >= _start_word &&
-      _start_word < _real_end_word &&
-      (_real_start_word + _gclab_word_size) == _real_end_word &&
-      (_start_word + _gclab_word_size + _bitmap_word_covers_words)
-                                                          > _real_end_word;
-    return ret2;
-  }
-
-  inline bool mark(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    if (addr >= _real_start_word && addr < _real_end_word) {
-      assert(!isMarked(addr), "should not have already been marked");
-
-      // first mark it on the bitmap
-      at_put(heapWordToOffset(addr), true);
-
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  inline bool isMarked(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    return at(heapWordToOffset(addr));
-  }
-
-  void set_buffer(HeapWord* start) {
-    guarantee(use_local_bitmaps, "invariant");
-    clear();
-
-    assert(start != NULL, "invariant");
-    _real_start_word = start;
-    _real_end_word   = start + _gclab_word_size;
-
-    size_t diff =
-      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
-    _start_word = start - diff;
-
-    assert(fields_well_formed(), "invariant");
-  }
-
-#ifndef PRODUCT
-  void verify() {
-    // verify that the marks have been propagated
-    GCLabBitMapClosure cl(_cm, this);
-    iterate(&cl);
-  }
-#endif // PRODUCT
-
-  void retire() {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(fields_well_formed(), "invariant");
-
-    if (_start_word != NULL) {
-      CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
-
-      // this means that the bitmap was set up for the GCLab
-      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
-
-      mark_bitmap->mostly_disjoint_range_union(this,
-                                0, // always start from the start of the bitmap
-                                _start_word,
-                                size_in_words());
-      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
-
-#ifndef PRODUCT
-      if (use_local_bitmaps && verify_local_bitmaps)
-        verify();
-#endif // PRODUCT
-    } else {
-      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
-    }
-  }
-
-  static size_t bitmap_size_in_words() {
-    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
-  }
-};
-
-#ifndef PRODUCT
-
-bool GCLabBitMapClosure::do_bit(size_t offset) {
-  HeapWord* addr = _bitmap->offsetToHeapWord(offset);
-  guarantee(_cm->isMarked(oop(addr)), "it should be!");
-  return true;
-}
-
-#endif // PRODUCT
-
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool        _retired;
-  bool        _during_marking;
-  GCLabBitMap _bitmap;
-
-public:
-  G1ParGCAllocBuffer() :
-    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
-    _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
-    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
-    _retired(false)
-  { }
-
-  inline bool mark(HeapWord* addr) {
-    guarantee(use_local_bitmaps, "invariant");
-    assert(_during_marking, "invariant");
-    return _bitmap.mark(addr);
-  }
-
-  inline void set_buf(HeapWord* buf) {
-    if (use_local_bitmaps && _during_marking)
-      _bitmap.set_buffer(buf);
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  inline void retire(bool end_of_gc, bool retain) {
-    if (_retired)
-      return;
-    if (use_local_bitmaps && _during_marking) {
-      _bitmap.retire();
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
-class G1ParScanThreadState : public StackObj {
-protected:
-  G1CollectedHeap* _g1h;
-  RefToScanQueue*  _refs;
-  DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
-
-  typedef GrowableArray<oop*> OverflowQueue;
-  OverflowQueue* _overflowed_refs;
-
-  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
-  ageTable           _age_table;
-
-  size_t _alloc_buffer_waste;
-  size_t _undo_waste;
-
-  OopsInHeapRegionClosure*      _evac_failure_cl;
-  G1ParScanHeapEvacClosure*     _evac_cl;
-  G1ParScanPartialArrayClosure* _partial_scan_cl;
-
-  int _hash_seed;
-  int _queue_num;
-
-  int _term_attempts;
-#if G1_DETAILED_STATS
-  int _pushes, _pops, _steals, _steal_attempts;
-  int _overflow_pushes;
-#endif
-
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Map from young-age-index (0 == not young, 1 is youngest) to
-  // surviving words. base is what we get back from the malloc call
-  size_t* _surviving_young_words_base;
-  // this points into the array, as we use the first few entries for padding
-  size_t* _surviving_young_words;
-
-#define PADDING_ELEM_NUM (64 / sizeof(size_t))
-
-  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
-  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
-
-  DirtyCardQueue& dirty_card_queue() { return _dcq;  }
-  CardTableModRefBS* ctbs()          { return _ct_bs; }
-
-  void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
-
-  void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(*p) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
-public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -3882,268 +3589,21 @@ public:
   _start = os::elapsedTime();
 }

-  ~G1ParScanThreadState() {
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
-  }
-
-  RefToScanQueue* refs()            { return _refs;            }
-  OverflowQueue*  overflowed_refs() { return _overflowed_refs; }
-  ageTable*       age_table()       { return &_age_table;      }
-
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return &_alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
-  size_t undo_waste()         { return _undo_waste; }
-
-  void push_on_queue(oop* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
-
-    if (!refs()->push(ref)) {
-      overflowed_refs()->push(ref);
-      IF_G1_DETAILED_STATS(note_overflow_push());
-    } else {
-      IF_G1_DETAILED_STATS(note_push());
-    }
-  }
-
-  void pop_from_queue(oop*& ref) {
-    if (!refs()->pop_local(ref)) {
-      ref = NULL;
-    } else {
-      assert(ref != NULL, "invariant");
-      assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
-             "invariant");
-
-      IF_G1_DETAILED_STATS(note_pop());
-    }
-  }
-
-  void pop_from_overflow_queue(oop*& ref) {
-    ref = overflowed_refs()->pop();
-  }
-
-  int refs_to_scan()            { return refs()->size();              }
-  int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
-
-  void update_rs(HeapRegion* from, oop* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
-
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    if (word_sz * 100 <
-        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
-                                                  ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false, false);
-
-      HeapWord* buf =
-        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_buf(buf);
-
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-                "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
-
-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
-
-  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
-    _evac_cl = evac_cl;
-  }
-
-  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
-    _partial_scan_cl = partial_scan_cl;
-  }
-
-  int* hash_seed() { return &_hash_seed; }
-  int  queue_num() { return _queue_num; }
-
-  int  term_attempts()     { return _term_attempts; }
-  void note_term_attempt() { _term_attempts++; }
-
-#if G1_DETAILED_STATS
-  int pushes()          { return _pushes; }
-  int pops()            { return _pops; }
-  int steals()          { return _steals; }
-  int steal_attempts()  { return _steal_attempts; }
-  int overflow_pushes() { return _overflow_pushes; }
-
-  void note_push()          { _pushes++; }
-  void note_pop()           { _pops++; }
-  void note_steal()         { _steals++; }
-  void note_steal_attempt() { _steal_attempts++; }
-  void note_overflow_push() { _overflow_pushes++; }
-#endif
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() { return _strong_roots_time; }
-
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() { return _term_time; }
-
-  double elapsed() {
-    return os::elapsedTime() - _start;
-  }
-
-  size_t* surviving_young_words() {
-    // We add on to hide entry 0 which accumulates surviving words for
-    // age -1 regions (i.e. non-young ones)
-    return _surviving_young_words;
-  }
-
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap].words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap].retire(true, false);
-    }
-  }
-
-private:
-  void deal_with_reference(oop* ref_to_scan) {
-    if (has_partial_array_mask(ref_to_scan)) {
-      _partial_scan_cl->do_oop_nv(ref_to_scan);
-    } else {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      _evac_cl->set_region(r);
-      _evac_cl->do_oop_nv(ref_to_scan);
-    }
-  }
-
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        oop *ref_to_scan = NULL;
-        pop_from_overflow_queue(ref_to_scan);
-        assert(ref_to_scan != NULL, "invariant");
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert(ref_to_scan != NULL, "sanity");
-        assert(has_partial_array_mask(ref_to_scan) ||
-               _g1h->obj_in_cs(*ref_to_scan), "sanity");
-
-        deal_with_reference(ref_to_scan);
-      }
-
-      while (refs_to_scan() > 0) {
-        oop *ref_to_scan = NULL;
-        pop_from_queue(ref_to_scan);
-
-        if (ref_to_scan != NULL) {
-          // We shouldn't have pushed it on the queue if it was not
-          // pointing into the CSet.
-          assert(has_partial_array_mask(ref_to_scan) ||
-                 _g1h->obj_in_cs(*ref_to_scan), "sanity");
-
-          deal_with_reference(ref_to_scan);
-        }
-      }
-    }
-  }
-};

 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }

 // This closure is applied to the fields of the objects that have just been copied.
-// Should probably be made inline and moved in g1OopClosures.inline.hpp.
-void G1ParScanClosure::do_oop_nv(oop* p) {
-  oop obj = *p;
-
-  if (obj != NULL) {
-    if (_g1->in_cset_fast_test(obj)) {
-      // We're not going to even bother checking whether the object is
-      // already forwarded or not, as this usually causes an immediate
-      // stall. We'll try to prefetch the object (for write, given that
-      // we might need to install the forwarding reference) and we'll
-      // get back to it when pop it from the queue
-      Prefetch::write(obj->mark_addr(), 0);
-      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
-
-      // slightly paranoid test; I'm trying to catch potential
-      // problems before we go into push_on_queue to know where the
-      // problem is coming from
-      assert(obj == *p, "the value of *p should not have changed");
-      _par_scan_state->push_on_queue(p);
-    } else {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
-    }
-  }
-}

-void G1ParCopyHelper::mark_forwardee(oop* p) {
+template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
   // This is called _after_ do_oop_work has been called, hence after
   // the object has been relocated to its new location and *p points
   // to its new location.

-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop(heap_oop);
+    assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
            "shouldn't still be in the CSet if evacuation didn't fail.");
-    HeapWord* addr = (HeapWord*)thisOop;
+    HeapWord* addr = (HeapWord*)obj;
     if (_g1->is_in_g1_reserved(addr))
       _cm->grayRoot(oop(addr));
   }
@@ -4226,7 +3686,8 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {

     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue(set_partial_array_mask(old));
+      oop* old_p = set_partial_array_mask(old);
+      _par_scan_state->push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
@@ -4240,11 +3701,11 @@ oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
   return obj;
 }

-template<bool do_gen_barrier, G1Barrier barrier,
-         bool do_mark_forwardee, bool skip_cset_test>
-void G1ParCopyClosure<do_gen_barrier, barrier,
-                      do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
-  oop obj = *p;
+template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
+template <class T>
+void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
+::do_oop_work(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
@@ -4261,9 +3722,10 @@ void G1ParCopyClosure<do_gen_barrier, barrier,
                            "into CS.", p, (void*) obj);
 #endif
     if (obj->is_forwarded()) {
-      *p = obj->forwardee();
+      oopDesc::encode_store_heap_oop(p, obj->forwardee());
     } else {
-      *p = copy_to_survivor_space(obj);
+      oop copy_oop = copy_to_survivor_space(obj);
+      oopDesc::encode_store_heap_oop(p, copy_oop);
     }
     // When scanning the RS, we only care about objs in CS.
     if (barrier == G1BarrierRS) {
@@ -4282,21 +3744,9 @@ void G1ParCopyClosure<do_gen_barrier, barrier,
 }

+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);

-template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
-  oop obj, int start, int end) {
-  // process our set of indices (include header in first chunk)
-  assert(start < end, "invariant");
-  T* const base       = (T*)objArrayOop(obj)->base();
-  T* const start_addr = (start == 0) ? (T*) obj : base + start;
-  T* const end_addr   = base + end;
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
-}
-
-void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
-  assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
+template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
   oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
@@ -4316,19 +3766,19 @@ void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue(set_partial_array_mask(old));
+    oop* old_p = set_partial_array_mask(old);
+    assert(arrayOop(old)->length() < obj->length(), "Empty push?");
+    _par_scan_state->push_on_queue(old_p);
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
     arrayOop(old)->set_length(end);
   }

   _scanner.set_region(_g1->heap_region_containing_raw(obj));
-  // process our set of indices (include header in first chunk)
-  process_array_chunk<oop>(obj, start, end);
+  obj->oop_iterate_range(&_scanner, start, end);
 }

-int G1ScanAndBalanceClosure::_nq = 0;

 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
   G1CollectedHeap* _g1h;
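set_partial_array_mask / has_partial_array_mask / clear_partial_array_mask, used in the two hunks above, let the scan queue carry "resume scanning this array" tasks alongside ordinary "scan this location" pointers by tagging the array's own address. A sketch of the trick; the mask constant here is an assumption rather than HotSpot's value:

#include <cstdint>
#include <cassert>
#include <cstdio>

struct oopDesc { int pad; };   // 4-byte aligned, so bit 1 is free
using oop = oopDesc*;

static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

static oop* set_partial_array_mask(oop obj) {
  // tag the object's own address so it looks like a queue slot pointer
  return (oop*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}
static bool has_partial_array_mask(void* p) {
  return ((uintptr_t)p & PARTIAL_ARRAY_MASK) != 0;
}
static oop clear_partial_array_mask(oop* p) {
  return (oop)((uintptr_t)p & ~PARTIAL_ARRAY_MASK);
}

int main() {
  oopDesc array;
  oop obj = &array;
  oop* task = set_partial_array_mask(obj);      // push "resume this array"
  assert(has_partial_array_mask(task));
  assert(clear_partial_array_mask(task) == obj);
  std::printf("partial-array task round-trips\n");
  return 0;
}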
@@ -4351,21 +3801,28 @@ public:
   void do_void() {
     G1ParScanThreadState* pss = par_scan_state();
     while (true) {
-      oop* ref_to_scan;
       pss->trim_queue();
       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
-      if (queues()->steal(pss->queue_num(),
-                          pss->hash_seed(),
-                          ref_to_scan)) {
+
+      StarTask stolen_task;
+      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
         IF_G1_DETAILED_STATS(pss->note_steal());

         // slightly paranoid tests; I'm trying to catch potential
         // problems before we go into push_on_queue to know where the
         // problem is coming from
-        assert(ref_to_scan != NULL, "invariant");
-        assert(has_partial_array_mask(ref_to_scan) ||
-               _g1h->obj_in_cs(*ref_to_scan), "invariant");
-        pss->push_on_queue(ref_to_scan);
+        assert((oop*)stolen_task != NULL, "Error");
+        if (stolen_task.is_narrow()) {
+          assert(UseCompressedOops, "Error");
+          narrowOop* p = (narrowOop*) stolen_task;
+          assert(has_partial_array_mask(p) ||
+                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
+          pss->push_on_queue(p);
+        } else {
+          oop* p = (oop*) stolen_task;
+          assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
+          pss->push_on_queue(p);
+        }
         continue;
       }
       pss->start_term_time();
@@ -4382,6 +3839,7 @@ protected:
   G1CollectedHeap* _g1h;
   RefToScanQueueSet *_queues;
   ParallelTaskTerminator _terminator;
+  int _n_workers;

   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@@ -4397,7 +3855,8 @@ public:
       _g1h(g1h),
       _queues(task_queues),
       _terminator(workers, _queues),
-      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
+      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
+      _n_workers(workers)
   {}

   RefToScanQueueSet* queues() { return _queues; }
@ -4407,6 +3866,7 @@ public:
|
|||
}
|
||||
|
||||
void work(int i) {
|
||||
if (i >= _n_workers) return; // no work needed this round
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
|
||||
|
@ -4504,23 +3964,6 @@ public:
|
|||
|
||||
// *** Common G1 Evacuation Stuff
|
||||
|
||||
class G1CountClosure: public OopsInHeapRegionClosure {
|
||||
public:
|
||||
int n;
|
||||
G1CountClosure() : n(0) {}
|
||||
void do_oop(narrowOop* p) {
|
||||
guarantee(false, "NYI");
|
||||
}
|
||||
void do_oop(oop* p) {
|
||||
oop obj = *p;
|
||||
assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
|
||||
"Rem set closure called on non-rem-set pointer.");
|
||||
n++;
|
||||
}
|
||||
};
|
||||
|
||||
static G1CountClosure count_closure;
|
||||
|
||||
void
|
||||
G1CollectedHeap::
|
||||
g1_process_strong_roots(bool collecting_perm_gen,
|
||||
|
@ -5570,8 +5013,3 @@ bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
|
|||
void G1CollectedHeap::g1_unimplemented() {
|
||||
// Unimplemented();
|
||||
}
|
||||
|
||||
|
||||
// Local Variables: ***
|
||||
// c-indentation-style: gnu ***
|
||||
// End: ***
|
||||
|
|
|
@ -56,8 +56,8 @@ class ConcurrentZFThread;
|
|||
# define IF_G1_DETAILED_STATS(code)
|
||||
#endif
|
||||
|
||||
typedef GenericTaskQueue<oop*> RefToScanQueue;
|
||||
typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
|
||||
typedef GenericTaskQueue<StarTask> RefToScanQueue;
|
||||
typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
|
||||
|
||||
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
|
||||
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
|
||||
|
@@ -1271,6 +1271,552 @@ public:

};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
#define use_local_bitmaps         1
#define verify_local_bitmaps      0
#define oop_buffer_length 256

#ifndef PRODUCT
class GCLabBitMap;
class GCLabBitMapClosure: public BitMapClosure {
private:
  ConcurrentMark* _cm;
  GCLabBitMap*    _bitmap;

public:
  GCLabBitMapClosure(ConcurrentMark* cm,
                     GCLabBitMap* bitmap) {
    _cm     = cm;
    _bitmap = bitmap;
  }

  virtual bool do_bit(size_t offset);
};
#endif // !PRODUCT

class GCLabBitMap: public BitMap {
private:
  ConcurrentMark* _cm;

  int       _shifter;
  size_t    _bitmap_word_covers_words;

  // beginning of the heap
  HeapWord* _heap_start;

  // this is the actual start of the GCLab
  HeapWord* _real_start_word;

  // this is the actual end of the GCLab
  HeapWord* _real_end_word;

  // this is the first word, possibly located before the actual start
  // of the GCLab, that corresponds to the first bit of the bitmap
  HeapWord* _start_word;

  // size of a GCLab in words
  size_t _gclab_word_size;

  static int shifter() {
    return MinObjAlignment - 1;
  }

  // how many heap words does a single bitmap word correspond to?
  static size_t bitmap_word_covers_words() {
    return BitsPerWord << shifter();
  }

  static size_t gclab_word_size() {
    return G1ParallelGCAllocBufferSize / HeapWordSize;
  }

  static size_t bitmap_size_in_bits() {
    size_t bits_in_bitmap = gclab_word_size() >> shifter();
    // We are going to ensure that the beginning of a word in this
    // bitmap also corresponds to the beginning of a word in the
    // global marking bitmap. To handle the case where a GCLab
    // starts from the middle of the bitmap, we need to add enough
    // space (i.e. up to a bitmap word) to ensure that we have
    // enough bits in the bitmap.
    return bits_in_bitmap + BitsPerWord - 1;
  }
public:
  GCLabBitMap(HeapWord* heap_start)
    : BitMap(bitmap_size_in_bits()),
      _cm(G1CollectedHeap::heap()->concurrent_mark()),
      _shifter(shifter()),
      _bitmap_word_covers_words(bitmap_word_covers_words()),
      _heap_start(heap_start),
      _gclab_word_size(gclab_word_size()),
      _real_start_word(NULL),
      _real_end_word(NULL),
      _start_word(NULL)
  {
    guarantee( size_in_words() >= bitmap_size_in_words(),
               "just making sure");
  }

  inline unsigned heapWordToOffset(HeapWord* addr) {
    unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
    assert(offset < size(), "offset should be within bounds");
    return offset;
  }

  inline HeapWord* offsetToHeapWord(size_t offset) {
    HeapWord* addr = _start_word + (offset << _shifter);
    assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
    return addr;
  }

  bool fields_well_formed() {
    bool ret1 = (_real_start_word == NULL) &&
                (_real_end_word == NULL) &&
                (_start_word == NULL);
    if (ret1)
      return true;

    bool ret2 = _real_start_word >= _start_word &&
      _start_word < _real_end_word &&
      (_real_start_word + _gclab_word_size) == _real_end_word &&
      (_start_word + _gclab_word_size + _bitmap_word_covers_words)
                                                              > _real_end_word;
    return ret2;
  }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (addr >= _real_start_word && addr < _real_end_word) {
      assert(!isMarked(addr), "should not have already been marked");

      // first mark it on the bitmap
      at_put(heapWordToOffset(addr), true);

      return true;
    } else {
      return false;
    }
  }

  inline bool isMarked(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    return at(heapWordToOffset(addr));
  }

  void set_buffer(HeapWord* start) {
    guarantee(use_local_bitmaps, "invariant");
    clear();

    assert(start != NULL, "invariant");
    _real_start_word = start;
    _real_end_word   = start + _gclab_word_size;

    size_t diff =
      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
    _start_word = start - diff;

    assert(fields_well_formed(), "invariant");
  }

#ifndef PRODUCT
  void verify() {
    // verify that the marks have been propagated
    GCLabBitMapClosure cl(_cm, this);
    iterate(&cl);
  }
#endif // PRODUCT

  void retire() {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (_start_word != NULL) {
      CMBitMap*       mark_bitmap = _cm->nextMarkBitMap();

      // this means that the bitmap was set up for the GCLab
      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");

      mark_bitmap->mostly_disjoint_range_union(this,
                                0, // always start from the start of the bitmap
                                _start_word,
                                size_in_words());
      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));

#ifndef PRODUCT
      if (use_local_bitmaps && verify_local_bitmaps)
        verify();
#endif // PRODUCT
    } else {
      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
    }
  }

  static size_t bitmap_size_in_words() {
    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
  }
};
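bitmap_size_in_bits() pads by BitsPerWord - 1 so that _start_word can be rounded down to a bitmap-word boundary without running out of bits. A worked check of that arithmetic under assumed values (64-bit bitmap words, a 1024-word GCLab, shifter() == 0); the numbers are illustrative only:

#include <cassert>
#include <cstddef>

int main() {
  const size_t BitsPerWord = 64;
  const size_t gclab_words = 1024;
  const int    shifter     = 0;       // assumes MinObjAlignment == 1 heap word

  size_t bits_in_bitmap = gclab_words >> shifter;          // one bit per word
  size_t padded_bits    = bits_in_bitmap + BitsPerWord - 1;
  size_t unpadded_words = (bits_in_bitmap + BitsPerWord - 1) / BitsPerWord;
  size_t bitmap_words   = (padded_bits   + BitsPerWord - 1) / BitsPerWord;

  assert(unpadded_words == 16);   // 1024 bits fit exactly in 16 words
  assert(bitmap_words   == 17);   // padding buys one extra word for misalignment
  return 0;
}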

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool        _retired;
  bool        _during_marking;
  GCLabBitMap _bitmap;

public:
  G1ParGCAllocBuffer() :
    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
    _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
    _retired(false)
  { }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(_during_marking, "invariant");
    return _bitmap.mark(addr);
  }

  inline void set_buf(HeapWord* buf) {
    if (use_local_bitmaps && _during_marking)
      _bitmap.set_buffer(buf);
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  inline void retire(bool end_of_gc, bool retain) {
    if (_retired)
      return;
    if (use_local_bitmaps && _during_marking) {
      _bitmap.retire();
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParScanThreadState : public StackObj {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  CardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  typedef GrowableArray<StarTask> OverflowQueue;
  OverflowQueue* _overflowed_refs;

  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
  ageTable           _age_table;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  OopsInHeapRegionClosure*      _evac_failure_cl;
  G1ParScanHeapEvacClosure*     _evac_cl;
  G1ParScanPartialArrayClosure* _partial_scan_cl;

  int _hash_seed;
  int _queue_num;

  int _term_attempts;
#if G1_DETAILED_STATS
  int _pushes, _pops, _steals, _steal_attempts;
  int _overflow_pushes;
#endif

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. base is what we get back from the malloc call
  size_t* _surviving_young_words_base;
  // this points into the array, as we use the first few entries for padding
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (64 / sizeof(size_t))

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue() { return _dcq;  }
  CardTableModRefBS* ctbs()          { return _ct_bs; }

  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
    if (!from->is_survivor()) {
      _g1_rem->par_write_ref(from, p, tid);
    }
  }

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points to the same region or
    // is the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }
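deferred_rs_update() records the card holding the updated field instead of touching the remembered set at the store; the mark_card_deferred() check makes each card enter the queue at most once. A minimal sketch of that defer-by-card idea; the card size and all types are assumptions for this illustration, not HotSpot's:

#include <cstdint>
#include <vector>

static const int kCardShift = 9;               // assumed 512-byte cards

struct DeferredCards {
  std::vector<uint8_t> table;                  // one byte per card
  std::vector<size_t>  queue;                  // cards awaiting processing

  explicit DeferredCards(size_t cards) : table(cards, 0) {}

  // Record the card covering 'field_addr'; the address is assumed to lie
  // within the range the table covers.
  void record(uintptr_t field_addr) {
    size_t card = field_addr >> kCardShift;
    if (table[card] == 0) {                    // first write to this card
      table[card] = 1;                         // analogous to mark_card_deferred
      queue.push_back(card);                   // enqueue exactly once
    }
  }
};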

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);

  ~G1ParScanThreadState() {
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  }

  RefToScanQueue* refs()            { return _refs;            }
  OverflowQueue*  overflowed_refs() { return _overflowed_refs; }
  ageTable*       age_table()       { return &_age_table;      }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return &_alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste()         { return _undo_waste; }

  template <class T> void push_on_queue(T* ref) {
    assert(ref != NULL, "invariant");
    assert(has_partial_array_mask(ref) ||
           _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(ref)), "invariant");
#ifdef ASSERT
    if (has_partial_array_mask(ref)) {
      oop p = clear_partial_array_mask(ref);
      // Verify that we point into the CS
      assert(_g1h->obj_in_cs(p), "Should be in CS");
    }
#endif
    if (!refs()->push(ref)) {
      overflowed_refs()->push(ref);
      IF_G1_DETAILED_STATS(note_overflow_push());
    } else {
      IF_G1_DETAILED_STATS(note_push());
    }
  }

  void pop_from_queue(StarTask& ref) {
    if (refs()->pop_local(ref)) {
      assert((oop*)ref != NULL, "pop_local() returned true");
      assert(UseCompressedOops || !ref.is_narrow(), "Error");
      assert(has_partial_array_mask((oop*)ref) ||
             _g1h->obj_in_cs(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
                                             : oopDesc::load_decode_heap_oop((oop*)ref)),
             "invariant");
      IF_G1_DETAILED_STATS(note_pop());
    } else {
      StarTask null_task;
      ref = null_task;
    }
  }

  void pop_from_overflow_queue(StarTask& ref) {
    StarTask new_ref = overflowed_refs()->pop();
    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
    assert(has_partial_array_mask((oop*)new_ref) ||
           _g1h->obj_in_cs(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
                                               : oopDesc::load_decode_heap_oop((oop*)new_ref)),
           "invariant");
    ref = new_ref;
  }

  int refs_to_scan()            { return refs()->size();              }
  int overflowed_refs_to_scan() { return overflowed_refs()->length(); }

  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {

    HeapWord* obj = NULL;
    if (word_sz * 100 <
        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
                                                  ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
      alloc_buf->retire(false, false);

      HeapWord* buf =
        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise.
      alloc_buf->set_buf(buf);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }
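The guard in allocate_slow() retires and refills the buffer only when the request is a small fraction of it, which bounds the space wasted by abandoning the old buffer. A worked check of the inequality under assumed values (an 8192-word buffer and ParallelGCBufferWastePct == 10; both are illustrative):

#include <cassert>
#include <cstddef>

int main() {
  const size_t buffer_words = 8192;
  const size_t waste_pct    = 10;

  // word_sz * 100 < buffer_words * waste_pct  <=>  word_sz < 819.2 words,
  // so requests up to 819 words refill the buffer; larger requests go
  // directly to the shared allocator and keep the current buffer.
  size_t small_request = 819;
  size_t large_request = 820;
  assert(  small_request * 100 < buffer_words * waste_pct );
  assert(!(large_request * 100 < buffer_words * waste_pct));
  return 0;
}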

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
    if (alloc_buffer(purpose)->contains(obj)) {
      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }
  OopsInHeapRegionClosure* evac_failure_closure() {
    return _evac_failure_cl;
  }

  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
    _evac_cl = evac_cl;
  }

  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
    _partial_scan_cl = partial_scan_cl;
  }

  int* hash_seed() { return &_hash_seed; }
  int  queue_num() { return _queue_num; }

  int  term_attempts()     { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

#if G1_DETAILED_STATS
  int pushes()          { return _pushes; }
  int pops()            { return _pops; }
  int steals()          { return _steals; }
  int steal_attempts()  { return _steal_attempts; }
  int overflow_pushes() { return _overflow_pushes; }

  void note_push()          { _pushes++; }
  void note_pop()           { _pops++; }
  void note_steal()         { _steals++; }
  void note_steal_attempt() { _steal_attempts++; }
  void note_overflow_push() { _overflow_pushes++; }
#endif

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() { return _term_time; }

  double elapsed() {
    return os::elapsedTime() - _start;
  }

  size_t* surviving_young_words() {
    // We add on to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

  void retire_alloc_buffers() {
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      size_t waste = _alloc_buffers[ap].words_remaining();
      add_to_alloc_buffer_waste(waste);
      _alloc_buffers[ap].retire(true, false);
    }
  }

private:
  template <class T> void deal_with_reference(T* ref_to_scan) {
    if (has_partial_array_mask(ref_to_scan)) {
      _partial_scan_cl->do_oop_nv(ref_to_scan);
    } else {
      // Note: we can use "raw" versions of "region_containing" because
      // "obj_to_scan" is definitely in the heap, and is not in a
      // humongous region.
      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
      _evac_cl->set_region(r);
      _evac_cl->do_oop_nv(ref_to_scan);
    }
  }

public:
  void trim_queue() {
    // I've replicated the loop twice, first to drain the overflow
    // queue, second to drain the task queue. This is better than
    // having a single loop, which checks both conditions and, inside
    // it, either pops the overflow queue or the task queue, as each
    // loop is tighter. Also, the decision to drain the overflow queue
    // first is not arbitrary, as the overflow queue is not visible
    // to the other workers, whereas the task queue is. So, we want to
    // drain the "invisible" entries first, while allowing the other
    // workers to potentially steal the "visible" entries.

    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
      while (overflowed_refs_to_scan() > 0) {
        StarTask ref_to_scan;
        assert((oop*)ref_to_scan == NULL, "Constructed above");
        pop_from_overflow_queue(ref_to_scan);
        // We shouldn't have pushed it on the queue if it was not
        // pointing into the CSet.
        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
        if (ref_to_scan.is_narrow()) {
          assert(UseCompressedOops, "Error");
          narrowOop* p = (narrowOop*)ref_to_scan;
          assert(!has_partial_array_mask(p) &&
                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
          deal_with_reference(p);
        } else {
          oop* p = (oop*)ref_to_scan;
          assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
                 _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
          deal_with_reference(p);
        }
      }

      while (refs_to_scan() > 0) {
        StarTask ref_to_scan;
        assert((oop*)ref_to_scan == NULL, "Constructed above");
        pop_from_queue(ref_to_scan);
        if ((oop*)ref_to_scan != NULL) {
          if (ref_to_scan.is_narrow()) {
            assert(UseCompressedOops, "Error");
            narrowOop* p = (narrowOop*)ref_to_scan;
            assert(!has_partial_array_mask(p) &&
                   _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
            deal_with_reference(p);
          } else {
            oop* p = (oop*)ref_to_scan;
            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
                   _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "sanity");
            deal_with_reference(p);
          }
        }
      }
    }
  }
};
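The comment in trim_queue() motivates the two tight loops and the drain order: private overflow entries first, since no other worker can see them, then the shared task queue, whose entries other workers may steal. The control flow in isolation, with container types standing in for the HotSpot queues:

// Drain-order sketch. deal_with() may push new entries onto either
// container, which is why the outer loop re-checks both.
template <class Overflow, class Queue, class Fn>
void trim(Overflow& overflow, Queue& queue, Fn deal_with) {
  while (!overflow.empty() || !queue.empty()) {
    while (!overflow.empty()) {        // tight loop 1: invisible entries
      auto t = overflow.back();
      overflow.pop_back();
      deal_with(t);
    }
    while (!queue.empty()) {           // tight loop 2: stealable entries
      auto t = queue.front();
      queue.pop_front();
      deal_with(t);
    }
  }
}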


@@ -293,10 +293,6 @@ void G1CollectorPolicy::init() {
  if (G1SteadyStateUsed < 50) {
    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
  }
  if (UseConcMarkSweepGC) {
    vm_exit_during_initialization("-XX:+UseG1GC is incompatible with "
                                  "-XX:+UseConcMarkSweepGC.");
  }

  initialize_gc_policy_counters();


@@ -42,18 +42,6 @@ public:
  virtual void set_region(HeapRegion* from) { _from = from; }
};


class G1ScanAndBalanceClosure : public OopClosure {
  G1CollectedHeap* _g1;
  static int _nq;
public:
  G1ScanAndBalanceClosure(G1CollectedHeap* g1) : _g1(g1) { }
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
};

class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;

@@ -69,34 +57,32 @@ class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state) { }
  void do_oop_nv(oop* p);   // should be made inline
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

#define G1_PARTIAL_ARRAY_MASK 1
#define G1_PARTIAL_ARRAY_MASK 0x2

inline bool has_partial_array_mask(oop* ref) {
  return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

inline oop* set_partial_array_mask(oop obj) {
  return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
template <class T> inline T* set_partial_array_mask(T obj) {
  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}

inline oop clear_partial_array_mask(oop* ref) {
template <class T> inline oop clear_partial_array_mask(T* ref) {
  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}

class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;
  template <class T> void process_array_chunk(oop obj, int start, int end);
public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
  void do_oop_nv(oop* p);
  void do_oop_nv(narrowOop* p)      { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

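The mask moves from bit 0 to bit 1 (0x2), and the new assert in set_partial_array_mask() documents the alignment assumption that makes the tag lossless. A quick round-trip check of the three helpers; the address is arbitrary and the example assumes a 64-bit build with at least 8-byte alignment:

#include <cassert>
#include <cstdint>

static const uintptr_t MASK = 0x2;   // G1_PARTIAL_ARRAY_MASK in the patch

int main() {
  // An 8-byte-aligned address has its low three bits clear, so bit 1 is
  // free to carry the "partial array" tag.
  uintptr_t addr   = 0x7f0000001000;
  uintptr_t tagged = addr | MASK;
  assert((tagged & MASK)  == MASK);   // has_partial_array_mask
  assert((tagged & ~MASK) == addr);   // clear_partial_array_mask round-trips
  return 0;
}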
@@ -105,7 +91,7 @@ public:
class G1ParCopyHelper : public G1ParClosureSuper {
  G1ParScanClosure *_scanner;
protected:
  void mark_forwardee(oop* p);
  template <class T> void mark_forwardee(T* p);
  oop copy_to_survivor_space(oop obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,

@@ -117,36 +103,35 @@ template<bool do_gen_barrier, G1Barrier barrier,
         bool do_mark_forwardee, bool skip_cset_test>
class G1ParCopyClosure : public G1ParCopyHelper {
  G1ParScanClosure _scanner;
  void do_oop_work(oop* p);
  void do_oop_work(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_work(T* p);
public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
  inline void do_oop_nv(oop* p) {
  template <class T> void do_oop_nv(T* p) {
    do_oop_work(p);
    if (do_mark_forwardee)
      mark_forwardee(p);
  }
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
// would not have pushed it on the queue.
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// would not have pushed it on the queue. The following is defined in
// g1_specialized_oop_closures.hpp.
// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
// failure processing, as it cannot assume that the reference already
// points to the collection set (like G1ParScanHeapEvacClosure does).
// failure processing, as we cannot assume that the reference already
// points into the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public OopClosure {

@@ -158,10 +143,9 @@ public:
                        G1CollectedHeap* g1, OopClosure* oc) :
    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
  {}
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
};

@@ -174,10 +158,9 @@ public:
                                   OopsInHeapRegionClosure* oc) :
    _g1(g1), _oc(oc)
  {}
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  void set_region(HeapRegion* from) {

@@ -195,10 +178,9 @@ public:
                                             ConcurrentMark* cm)
  : _g1(g1), _oc(oc), _cm(cm) { }

  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  void set_region(HeapRegion* from) {

@@ -213,10 +195,9 @@ class FilterOutOfRegionClosure: public OopClosure {
  int _out_of_region;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p) { guarantee(false, "NYI"); }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { guarantee(false, "NYI"); }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  int out_of_region() { return _out_of_region; }


@@ -31,9 +31,10 @@
// perf-critical inner loop.
#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0

inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
  oop obj = *p;
  if (obj != NULL && _g1->obj_in_cs(obj)) {
template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop) &&
      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
    _oc->do_oop(p);
#if FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT
    _dcto_cl->incr_count();

@@ -41,44 +42,32 @@ inline void FilterIntoCSClosure::do_oop_nv(oop* p) {
  }
}

inline void FilterIntoCSClosure::do_oop(oop* p)
{
  do_oop_nv(p);
}

#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0

inline void FilterOutOfRegionClosure::do_oop_nv(oop* p) {
  oop obj = *p;
  HeapWord* obj_hw = (HeapWord*)obj;
  if (obj_hw != NULL && (obj_hw < _r_bottom || obj_hw >= _r_end)) {
template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
    if (obj_hw < _r_bottom || obj_hw >= _r_end) {
      _oc->do_oop(p);
#if FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT
      _out_of_region++;
#endif
    }
  }

inline void FilterOutOfRegionClosure::do_oop(oop* p)
{
  do_oop_nv(p);
}

inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
  oop obj = *p;
  if (obj != NULL && _g1->obj_in_cs(obj))
template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop) &&
      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
    _oc->do_oop(p);
}

inline void FilterInHeapRegionAndIntoCSClosure::do_oop(oop* p)
{
  do_oop_nv(p);
}


inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
  oop obj = *p;
  if (obj != NULL) {
template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
    if (hr != NULL) {
      if (hr->in_collection_set())

@@ -89,24 +78,29 @@ inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(oop* p) {
  }
}

inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop(oop* p)
{
  do_oop_nv(p);
}
// This closure is applied to the fields of the objects that have just been copied.
template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

inline void G1ScanAndBalanceClosure::do_oop_nv(oop* p) {
  RefToScanQueue* q;
  if (ParallelGCThreads > 0) {
    // Deal the work out equally.
    _nq = (_nq + 1) % ParallelGCThreads;
    q = _g1->task_queue(_nq);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (_g1->in_cset_fast_test(obj)) {
      // We're not going to even bother checking whether the object is
      // already forwarded or not, as this usually causes an immediate
      // stall. We'll try to prefetch the object (for write, given that
      // we might need to install the forwarding reference) and we'll
      // get back to it when we pop it from the queue
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // slightly paranoid test; I'm trying to catch potential
      // problems before we go into push_on_queue to know where the
      // problem is coming from
      assert(obj == oopDesc::load_decode_heap_oop(p),
             "p should still be pointing to obj");
      _par_scan_state->push_on_queue(p);
    } else {
      q = _g1->task_queue(0);
      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
    }
  bool nooverflow = q->push(p);
  guarantee(nooverflow, "Overflow during popularity region processing");
}

inline void G1ScanAndBalanceClosure::do_oop(oop* p) {
  do_oop_nv(p);
}

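The prefetch-then-push pattern in G1ParScanClosure::do_oop_nv above touches the object's header with a write prefetch before queuing, so the forwarding-pointer check is warm by the time the entry is popped. The same pattern in isolation; __builtin_prefetch (a GCC/Clang builtin) stands in for HotSpot's Prefetch::write, and the queue type is likewise a stand-in:

#include <deque>

inline void queue_for_copy(void* obj, std::deque<void*>& q) {
  __builtin_prefetch(obj, /*rw=*/1, /*locality=*/0);  // header likely written soon
  q.push_back(obj);                                   // process when popped later
}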


@@ -65,11 +65,10 @@ public:
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }

@@ -110,11 +109,10 @@ class VerifyRSCleanCardOopClosure: public OopClosure {
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");

@@ -129,9 +127,9 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<OopOrNarrowOopStar>(8192,true);
  }
}

@@ -140,7 +138,7 @@ HRInto_G1RemSet::~HRInto_G1RemSet() {
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
  FREE_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, _new_refs);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {

@@ -428,15 +426,15 @@ public:
  }
};

void
HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
template <class T> void
HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
                               int worker_i) {
  double scan_new_refs_start_sec = os::elapsedTime();
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
    oop* p = _new_refs[worker_i]->at(i);
    oop obj = *p;
    T* p = (T*) _new_refs[worker_i]->at(i);
    oop obj = oopDesc::load_decode_heap_oop(p);
    // *p was in the collection set when p was pushed on "_new_refs", but
    // another thread may have processed this location from an RS, so it
    // might not point into the CS any longer. If so, it's obviously been

@@ -549,11 +547,10 @@ class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    HeapRegion* to = _g1->heap_region_containing(*p);
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    HeapRegion* to = _g1->heap_region_containing(oopDesc::load_decode_heap_oop(p));
    if (to->in_collection_set()) {
      to->rem_set()->add_reference(p, 0);
    }

@@ -567,11 +564,10 @@ class UpdateRSetOopsIntoCSDeferred : public OopClosure {
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {

@@ -581,10 +577,10 @@ public:
  }
};

void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
template <class T> void HRInto_G1RemSet::new_refs_iterate_work(OopClosure* cl) {
  for (size_t i = 0; i < n_workers(); i++) {
    for (int j = 0; j < _new_refs[i]->length(); j++) {
      oop* p = _new_refs[i]->at(j);
      T* p = (T*) _new_refs[i]->at(j);
      cl->do_oop(p);
    }
  }


@@ -62,10 +62,12 @@ public:
  // If "this" is of the given subtype, return "this", else "NULL".
  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }

  // Record, if necessary, the fact that *p (where "p" is in region "from")
  // has changed to its new value.
  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // and is, a fortiori, required to be non-NULL) has changed to its new value.
  virtual void write_ref(HeapRegion* from, oop* p) = 0;
  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding

@@ -105,7 +107,9 @@ public:

  // Nothing is necessary in the version below.
  void write_ref(HeapRegion* from, oop* p) {}
  void write_ref(HeapRegion* from, narrowOop* p) {}
  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}

  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  void scrub_par(BitMap* region_bm, BitMap* card_bm,

@@ -143,8 +147,19 @@ protected:
  // their references into the collection summarized in "_new_refs".
  bool _par_traversal_in_progress;
  void set_par_traversal(bool b) { _par_traversal_in_progress = b; }
  GrowableArray<oop*>** _new_refs;
  void new_refs_iterate(OopClosure* cl);
  GrowableArray<OopOrNarrowOopStar>** _new_refs;
  template <class T> void new_refs_iterate_work(OopClosure* cl);
  void new_refs_iterate(OopClosure* cl) {
    if (UseCompressedOops) {
      new_refs_iterate_work<narrowOop>(cl);
    } else {
      new_refs_iterate_work<oop>(cl);
    }
  }

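This once-per-call branch on UseCompressedOops, followed by a fully typed template body, is the dispatch idiom the patch applies throughout (new_refs_iterate, scanNewRefsRS, and the paired do_oop overloads of the closures). In isolation the idiom looks like the sketch below; the flag and the pointer types are stand-ins, not HotSpot's:

#include <cstdint>

bool use_compressed = true;          // stand-in for UseCompressedOops

// Typed workhorse: the loop knows the exact pointer width, so each
// instantiation compiles to straight-line code.
template <class T>
void process_refs_work(void** refs, int len, void (*visit)(T*)) {
  for (int i = 0; i < len; i++) {
    visit((T*)refs[i]);
  }
}

// Untyped entry point: branch once on the mode, not once per element.
void process_refs(void** refs, int len,
                  void (*visit_narrow)(uint32_t*),
                  void (*visit_wide)(uintptr_t*)) {
  if (use_compressed) {
    process_refs_work<uint32_t>(refs, len, visit_narrow);
  } else {
    process_refs_work<uintptr_t>(refs, len, visit_wide);
  }
}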
protected:
  template <class T> void write_ref_nv(HeapRegion* from, T* p);
  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

public:
  // This is called to reset dual hash tables after the gc pause

@@ -161,7 +176,14 @@ public:
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i);
  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
    if (UseCompressedOops) {
      scanNewRefsRS_work<narrowOop>(oc, worker_i);
    } else {
      scanNewRefsRS_work<oop>(oc, worker_i);
    }
  }
  void updateRS(int worker_i);
  HeapRegion* calculateStartRegion(int i);


@@ -172,12 +194,22 @@ public:

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  inline void write_ref(HeapRegion* from, oop* p);
  // The "_nv" version is the same; it exists just so that it is not virtual.
  inline void write_ref_nv(HeapRegion* from, oop* p);
  // [Below the virtual version calls a non-virtual protected
  // workhorse that is templatified for narrow vs wide oop.]
  inline void write_ref(HeapRegion* from, oop* p) {
    write_ref_nv(from, p);
  }
  inline void write_ref(HeapRegion* from, narrowOop* p) {
    write_ref_nv(from, p);
  }
  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }
  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }

  inline bool self_forwarded(oop obj);
  inline void par_write_ref(HeapRegion* from, oop* p, int tid);
  bool self_forwarded(oop obj);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,

@@ -208,6 +240,9 @@ class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;
  HRInto_G1RemSet* _rs;
  int _worker_i;

  template <class T> void do_oop_work(T* p);

public:
  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i) {

@@ -219,11 +254,10 @@ public:
    _from = from;
  }

  virtual void do_oop(narrowOop* p);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};


@@ -30,12 +30,8 @@ inline size_t G1RemSet::n_workers() {
  }
}

inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, oop* p) {
  par_write_ref(from, p, 0);
}

inline void HRInto_G1RemSet::write_ref(HeapRegion* from, oop* p) {
  write_ref_nv(from, p);
template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
  par_write_ref_nv(from, p, 0);
}

inline bool HRInto_G1RemSet::self_forwarded(oop obj) {

@@ -43,8 +39,8 @@ inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
  return result;
}

inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
  oop obj = *p;
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

@@ -71,7 +67,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
    // false during the evacuation failure handling.
    if (_par_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {
      _new_refs[tid]->push(p);
      _new_refs[tid]->push((void*)p);
      // Deferred updates to the Cset are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.

@@ -89,11 +85,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
  }
}

inline void UpdateRSOopClosure::do_oop(narrowOop* p) {
  guarantee(false, "NYI");
}

inline void UpdateRSOopClosure::do_oop(oop* p) {
template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
  assert(_from != NULL, "from region must be non-NULL");
  _rs->par_write_ref(_from, p, _worker_i);
}


@@ -34,6 +34,7 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,


void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  assert(pre_val->is_oop_or_null(true), "Error");
  if (!JavaThread::satb_mark_queue_set().active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {

@@ -46,31 +47,30 @@ void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
}

// When we know the current java thread:
void
G1SATBCardTableModRefBS::write_ref_field_pre_static(void* field,
                                                    oop newVal,
template <class T> void
G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
                                                    oop new_val,
                                                    JavaThread* jt) {
  if (!JavaThread::satb_mark_queue_set().active()) return;
  assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
  oop preVal = *(oop*)field;
  if (preVal != NULL) {
    jt->satb_mark_queue().enqueue(preVal);
  T heap_oop = oopDesc::load_heap_oop(field);
  if (!oopDesc::is_null(heap_oop)) {
    oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(pre_val->is_oop(true /* ignore mark word */), "Error");
    jt->satb_mark_queue().enqueue(pre_val);
  }
}

void
G1SATBCardTableModRefBS::write_ref_array_pre(MemRegion mr) {
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().active()) return;
  assert(!UseCompressedOops, "Else will need to modify this to deal with narrowOop");
  oop* elem_ptr = (oop*)mr.start();
  while ((HeapWord*)elem_ptr < mr.end()) {
    oop elem = *elem_ptr;
    if (elem != NULL) enqueue(elem);
    elem_ptr++;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}



G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,


@@ -47,31 +47,41 @@ public:

  // This notes that we don't need to access any BarrierSet data
  // structures, so this can be called from a static context.
  static void write_ref_field_pre_static(void* field, oop newVal) {
    assert(!UseCompressedOops, "Else needs to be templatized");
    oop preVal = *((oop*)field);
    if (preVal != NULL) {
      enqueue(preVal);
  template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
    T heap_oop = oopDesc::load_heap_oop(field);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop(heap_oop));
    }
  }

  // When we know the current java thread:
  static void write_ref_field_pre_static(void* field, oop newVal,
  template <class T> static void write_ref_field_pre_static(T* field, oop newVal,
                                         JavaThread* jt);

  // We export this to make it available in cases where the static
  // type of the barrier set is known. Note that it is non-virtual.
  inline void inline_write_ref_field_pre(void* field, oop newVal) {
  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
    write_ref_field_pre_static(field, newVal);
  }

  // This is the more general virtual version.
  void write_ref_field_pre_work(void* field, oop new_val) {
  // These are the more general virtual versions.
  virtual void write_ref_field_pre_work(oop* field, oop new_val) {
    inline_write_ref_field_pre(field, new_val);
  }
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
    inline_write_ref_field_pre(field, new_val);
  }
  virtual void write_ref_field_pre_work(void* field, oop new_val) {
    guarantee(false, "Not needed");
  }

  virtual void write_ref_array_pre(MemRegion mr);

  template <class T> void write_ref_array_pre_work(T* dst, int count);
  virtual void write_ref_array_pre(oop* dst, int count) {
    write_ref_array_pre_work(dst, count);
  }
  virtual void write_ref_array_pre(narrowOop* dst, int count) {
    write_ref_array_pre_work(dst, count);
  }
};

// Adds card-table logging to the post-barrier.

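The pre-barrier above records the value a reference field held before the store, so that concurrent marking still sees the snapshot taken when marking began (SATB). A schematic of that behavior, with integer types and a global vector standing in for oops and the per-thread SATB queue:

#include <cstdint>
#include <vector>

static std::vector<uintptr_t> satb_queue;  // stand-in for the per-thread queue
static bool marking_active = true;         // stand-in for satb_mark_queue_set().active()

// Schematic SATB pre-barrier: before a reference field is overwritten,
// enqueue its current value so the old target stays reachable (gray) for
// the concurrent marker.
template <class T>
void write_ref_field_pre(T* field) {
  if (!marking_active) return;
  T old_val = *field;                      // load the pre-image (narrow or wide)
  if (old_val != 0) {
    satb_queue.push_back((uintptr_t)old_val);
  }
}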
@@ -80,9 +80,6 @@
  develop(bool, G1TraceConcurrentRefinement, false, \
          "Trace G1 concurrent refinement") \
  \
  develop(bool, G1ConcMark, true, \
          "If true, run concurrent marking for G1") \
  \
  product(intx, G1MarkStackSize, 2 * 1024 * 1024, \
          "Size of the mark stack for concurrent marking.") \
  \


@@ -37,14 +37,12 @@ template<bool do_gen_barrier, G1Barrier barrier,
class G1ParCopyClosure;
class G1ParScanClosure;

typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
                                                      G1ParScanHeapEvacClosure;
typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;

class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class FilterInHeapRegionAndIntoCSClosure;
class FilterAndMarkInHeapRegionAndIntoCSClosure;
class G1ScanAndBalanceClosure;

#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."

@@ -56,8 +54,7 @@ class G1ScanAndBalanceClosure;
      f(FilterIntoCSClosure,_nv) \
      f(FilterOutOfRegionClosure,_nv) \
      f(FilterInHeapRegionAndIntoCSClosure,_nv) \
      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv) \
      f(G1ScanAndBalanceClosure,_nv)
      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)

#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."

@ -66,16 +66,16 @@ public:
|
|||
bool failures() { return _failures; }
|
||||
int n_failures() { return _n_failures; }
|
||||
|
||||
virtual void do_oop(narrowOop* p) {
|
||||
guarantee(false, "NYI");
|
||||
}
|
||||
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
virtual void do_oop( oop* p) { do_oop_work(p); }
|
||||
|
||||
void do_oop(oop* p) {
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
assert(_containing_obj != NULL, "Precondition");
|
||||
assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
|
||||
"Precondition");
|
||||
oop obj = *p;
|
||||
if (obj != NULL) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
bool failed = false;
|
||||
if (!_g1h->is_in_closed_subset(obj) ||
|
||||
_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
|
||||
|
@ -106,8 +106,8 @@ public:
|
|||
}
|
||||
|
||||
if (!_g1h->full_collection()) {
|
||||
HeapRegion* from = _g1h->heap_region_containing(p);
|
||||
HeapRegion* to = _g1h->heap_region_containing(*p);
|
||||
HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
|
||||
HeapRegion* to = _g1h->heap_region_containing(obj);
|
||||
if (from != NULL && to != NULL &&
|
||||
from != to &&
|
||||
!to->isHumongous()) {
|
||||
|
@ -534,13 +534,13 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
|
|||
// Otherwise, find the obj that extends onto mr.start().
|
||||
|
||||
assert(cur <= mr.start()
|
||||
&& (oop(cur)->klass() == NULL ||
|
||||
&& (oop(cur)->klass_or_null() == NULL ||
|
||||
cur + oop(cur)->size() > mr.start()),
|
||||
"postcondition of block_start");
|
||||
oop obj;
|
||||
while (cur < mr.end()) {
|
||||
obj = oop(cur);
|
||||
if (obj->klass() == NULL) {
|
||||
if (obj->klass_or_null() == NULL) {
|
||||
// Ran into an unparseable point.
|
||||
return cur;
|
||||
} else if (!g1h->is_obj_dead(obj)) {
|
||||
|
@ -577,7 +577,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
|||
assert(cur <= mr.start(), "Postcondition");
|
||||
|
||||
while (cur <= mr.start()) {
|
||||
if (oop(cur)->klass() == NULL) {
|
||||
if (oop(cur)->klass_or_null() == NULL) {
|
||||
// Ran into an unparseable point.
|
||||
return cur;
|
||||
}
|
||||
|
@ -591,7 +591,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
|||
obj = oop(cur);
|
||||
// If we finish this loop...
|
||||
assert(cur <= mr.start()
|
||||
&& obj->klass() != NULL
|
||||
&& obj->klass_or_null() != NULL
|
||||
&& cur + obj->size() > mr.start(),
|
||||
"Loop postcondition");
|
||||
if (!g1h->is_obj_dead(obj)) {
|
||||
|
@ -601,7 +601,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
|||
HeapWord* next;
|
||||
while (cur < mr.end()) {
|
||||
obj = oop(cur);
|
||||
if (obj->klass() == NULL) {
|
||||
if (obj->klass_or_null() == NULL) {
|
||||
// Ran into an unparseable point.
|
||||
return cur;
|
||||
};
|
||||
|
@ -781,8 +781,13 @@ void G1OffsetTableContigSpace::set_saved_mark() {
|
|||
// will pick up the right saved_mark_word() as the high water mark
|
||||
// of the region. Either way, the behaviour will be correct.
|
||||
ContiguousSpace::set_saved_mark();
|
||||
OrderAccess::storestore();
|
||||
_gc_time_stamp = curr_gc_time_stamp;
|
||||
OrderAccess::fence();
|
||||
// The following fence is to force a flush of the writes above, but
|
||||
// is strictly not needed because when an allocating worker thread
|
||||
// calls set_saved_mark() it does so under the ParGCRareEvent_lock;
|
||||
// when the lock is released, the write will be flushed.
|
||||
// OrderAccess::fence();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -126,7 +126,7 @@ protected:
}
}

void add_reference_work(oop* from, bool par) {
void add_reference_work(OopOrNarrowOopStar from, bool par) {
// Must make this robust in case "from" is not in "_hr", because of
// concurrency.

@ -173,11 +173,11 @@ public:
_bm.clear();
}

void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
add_reference_work(from, /*parallel*/ true);
}

void seq_add_reference(oop* from) {
void seq_add_reference(OopOrNarrowOopStar from) {
add_reference_work(from, /*parallel*/ false);
}

@ -220,7 +220,7 @@ public:
}

// Requires "from" to be in "hr()".
bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
assert(hr()->is_in_reserved(from), "Precondition.");
size_t card_ind = pointer_delta(from, hr()->bottom(),
CardTableModRefBS::card_size);

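For readers following the card arithmetic: pointer_delta(from, hr()->bottom(), CardTableModRefBS::card_size) is simply the byte offset of the reference within the region, divided by the card size. A minimal sketch, assuming the usual 512-byte (2^9) card geometry:

#include <cstdint>
#include <cstddef>

const size_t kCardShift = 9;                       // assumed: 512-byte cards
const size_t kCardSize  = size_t(1) << kCardShift;

// What pointer_delta(from, bottom, card_size) yields: the index of the
// card covering 'from' in a region that starts at 'bottom'.
size_t card_index(const void* from, const void* bottom) {
  size_t delta = (uintptr_t)from - (uintptr_t)bottom;  // byte offset
  return delta / kCardSize;                            // == delta >> kCardShift
}
// Example: bottom = 0x1000, from = 0x1a00 -> delta = 0xa00 -> card 5.
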
@ -394,7 +394,7 @@ public:
void set_next(PosParPRT* nxt) { _next = nxt; }
PosParPRT** next_addr() { return &_next; }

void add_reference(oop* from, int tid) {
void add_reference(OopOrNarrowOopStar from, int tid) {
// Expand if necessary.
PerRegionTable** pt = par_tables();
if (par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region()) {

@ -447,7 +447,7 @@ public:
return res;
}

bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
if (PerRegionTable::contains_reference(from)) return true;
if (_par_tables != NULL) {
for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {

@ -564,12 +564,15 @@ void OtherRegionsTable::print_from_card_cache() {
}
#endif

void OtherRegionsTable::add_reference(oop* from, int tid) {
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
size_t cur_hrs_ind = hr()->hrs_index();

#if HRRS_VERBOSE
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
from, *from);
from,
UseCompressedOops
? oopDesc::load_decode_heap_oop((narrowOop*)from)
: oopDesc::load_decode_heap_oop((oop*)from));
#endif

int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

@ -1021,13 +1024,13 @@ bool OtherRegionsTable::del_single_region_table(size_t ind,
}
}

bool OtherRegionsTable::contains_reference(oop* from) const {
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(oop* from) const {
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
if (hr == NULL) return false;
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();

@ -1288,7 +1291,7 @@ bool HeapRegionRemSetIterator::has_next(size_t& card_index) {

oop** HeapRegionRemSet::_recorded_oops = NULL;
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;

@ -1297,13 +1300,13 @@ HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;

void HeapRegionRemSet::record(HeapRegion* hr, oop* f) {
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
if (_recorded_oops == NULL) {
assert(_n_recorded == 0
&& _recorded_cards == NULL
&& _recorded_regions == NULL,
"Inv");
_recorded_oops = NEW_C_HEAP_ARRAY(oop*, MaxRecorded);
_recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
_recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
_recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
}

@ -1408,21 +1411,21 @@ void HeapRegionRemSet::test() {
HeapRegionRemSet* hrrs = hr0->rem_set();

// Make three references from region 0x101...
hrrs->add_reference((oop*)hr1_start);
hrrs->add_reference((oop*)hr1_mid);
hrrs->add_reference((oop*)hr1_last);
hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

hrrs->add_reference((oop*)hr2_start);
hrrs->add_reference((oop*)hr2_mid);
hrrs->add_reference((oop*)hr2_last);
hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

hrrs->add_reference((oop*)hr3_start);
hrrs->add_reference((oop*)hr3_mid);
hrrs->add_reference((oop*)hr3_last);
hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

// Now cause a coarsening.
hrrs->add_reference((oop*)hr4->bottom());
hrrs->add_reference((oop*)hr5->bottom());
hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

// Now, does iteration yield these three?
HeapRegionRemSetIterator iter;

@ -116,9 +116,9 @@ public:

// For now. Could "expand" some tables in the future, so that this made
// sense.
void add_reference(oop* from, int tid);
void add_reference(OopOrNarrowOopStar from, int tid);

void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
return add_reference(from, 0);
}

@ -140,8 +140,8 @@ public:
static size_t static_mem_size();
static size_t fl_mem_size();

bool contains_reference(oop* from) const;
bool contains_reference_locked(oop* from) const;
bool contains_reference(OopOrNarrowOopStar from) const;
bool contains_reference_locked(OopOrNarrowOopStar from) const;

void clear();

@ -192,7 +192,7 @@ private:
// Unused unless G1RecordHRRSOops is true.

static const int MaxRecorded = 1000000;
static oop** _recorded_oops;
static OopOrNarrowOopStar* _recorded_oops;
static HeapWord** _recorded_cards;
static HeapRegion** _recorded_regions;
static int _n_recorded;

@ -231,13 +231,13 @@ public:

/* Used in the sequential case. Returns "true" iff this addition causes
the size limit to be reached. */
void add_reference(oop* from) {
void add_reference(OopOrNarrowOopStar from) {
_other_regions.add_reference(from);
}

/* Used in the parallel case. Returns "true" iff this addition causes
the size limit to be reached. */
void add_reference(oop* from, int tid) {
void add_reference(OopOrNarrowOopStar from, int tid) {
_other_regions.add_reference(from, tid);
}

@ -301,7 +301,7 @@ public:
return OtherRegionsTable::fl_mem_size();
}

bool contains_reference(oop* from) const {
bool contains_reference(OopOrNarrowOopStar from) const {
return _other_regions.contains_reference(from);
}
void print() const;

@ -329,7 +329,7 @@ public:
}
#endif

static void record(HeapRegion* hr, oop* f);
static void record(HeapRegion* hr, OopOrNarrowOopStar f);
static void print_recorded();
static void record_event(Event evnt);

@ -43,6 +43,18 @@ void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
}
}
}

#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
if (_buf == NULL) return;
for (size_t i = _index; i < _sz; i += oopSize) {
oop obj = (oop)_buf[byte_index_to_index((int)i)];
assert(obj != NULL && obj->is_oop(true /* ignore mark word */),
"Not an oop");
}
}
#endif

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

@ -66,6 +78,7 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,

void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
}

@ -143,7 +156,7 @@ void SATBMarkQueueSet::abandon_partial_marking() {
}
_completed_buffers_tail = NULL;
_n_completed_buffers = 0;
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
CompletedBufferNode* nd = buffers_to_delete;

@ -39,6 +39,7 @@ public:
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);

void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};

@ -27,6 +27,7 @@
bufferingOopClosure.hpp genOopClosures.hpp
bufferingOopClosure.hpp generation.hpp
bufferingOopClosure.hpp os.hpp
bufferingOopClosure.hpp taskqueue.hpp

cardTableRS.cpp concurrentMark.hpp
cardTableRS.cpp g1SATBCardTableModRefBS.hpp

@ -139,7 +140,7 @@ g1CollectedHeap.cpp concurrentZFThread.hpp
g1CollectedHeap.cpp g1CollectedHeap.inline.hpp
g1CollectedHeap.cpp g1CollectorPolicy.hpp
g1CollectedHeap.cpp g1MarkSweep.hpp
g1CollectedHeap.cpp g1RemSet.hpp
g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp

@ -151,13 +152,14 @@ g1CollectedHeap.cpp icBuffer.hpp
g1CollectedHeap.cpp isGCActiveMark.hpp
g1CollectedHeap.cpp oop.inline.hpp
g1CollectedHeap.cpp oop.pcgc.inline.hpp
g1CollectedHeap.cpp parGCAllocBuffer.hpp
g1CollectedHeap.cpp vm_operations_g1.hpp
g1CollectedHeap.cpp vmThread.hpp

g1CollectedHeap.hpp barrierSet.hpp
g1CollectedHeap.hpp g1RemSet.hpp
g1CollectedHeap.hpp heapRegion.hpp
g1CollectedHeap.hpp memRegion.hpp
g1CollectedHeap.hpp parGCAllocBuffer.hpp
g1CollectedHeap.hpp sharedHeap.hpp

g1CollectedHeap.inline.hpp concurrentMark.hpp

@ -245,6 +247,7 @@ g1RemSet.cpp intHisto.hpp
g1RemSet.cpp iterator.hpp
g1RemSet.cpp oop.inline.hpp

g1RemSet.inline.hpp oop.inline.hpp
g1RemSet.inline.hpp g1RemSet.hpp
g1RemSet.inline.hpp heapRegionRemSet.hpp

@ -255,6 +258,7 @@ g1SATBCardTableModRefBS.cpp thread.hpp
g1SATBCardTableModRefBS.cpp thread_<os_family>.inline.hpp
g1SATBCardTableModRefBS.cpp satbQueue.hpp

g1SATBCardTableModRefBS.hpp oop.inline.hpp
g1SATBCardTableModRefBS.hpp cardTableModRefBS.hpp
g1SATBCardTableModRefBS.hpp memRegion.hpp

@ -31,8 +31,9 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
bool clear,
int n_threads) {
if (n_threads > 0) {
assert(n_threads == (int)ParallelGCThreads, "# worker threads != # requested!");

assert((n_threads == 1 && ParallelGCThreads == 0) ||
n_threads <= (int)ParallelGCThreads,
"# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;

@ -885,7 +885,7 @@ void ParallelScavengeHeap::print_tracing_info() const {
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
// Why do we need the total_collections()-filter below?
if (total_collections() > 0) {
if (!silent) {

@ -217,7 +217,7 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;

void verify(bool allow_dirty, bool silent);
void verify(bool allow_dirty, bool silent, bool /* option */);

void print_heap_change(size_t prev_used);

@ -117,6 +117,7 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
process_array_chunk(old);
} else {
if (p.is_narrow()) {
assert(UseCompressedOops, "Error");
PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
} else {
PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);

@ -533,7 +533,7 @@ class CollectedHeap : public CHeapObj {
virtual void print_tracing_info() const = 0;

// Heap verification
virtual void verify(bool allow_dirty, bool silent) = 0;
virtual void verify(bool allow_dirty, bool silent, bool option) = 0;

// Non product verification and debugging.
#ifndef PRODUCT

@ -554,7 +554,6 @@ ciEnv.cpp jvmtiExport.hpp
ciEnv.cpp linkResolver.hpp
ciEnv.cpp methodDataOop.hpp
ciEnv.cpp objArrayKlass.hpp
ciEnv.cpp oop.hpp
ciEnv.cpp oop.inline.hpp
ciEnv.cpp oop.inline2.hpp
ciEnv.cpp oopFactory.hpp

@ -785,7 +784,6 @@ ciObjectFactory.hpp growableArray.hpp
ciSignature.cpp allocation.inline.hpp
ciSignature.cpp ciSignature.hpp
ciSignature.cpp ciUtilities.hpp
ciSignature.cpp oop.hpp
ciSignature.cpp oop.inline.hpp
ciSignature.cpp signature.hpp

@ -950,7 +948,6 @@ classLoadingService.hpp perfData.hpp
classify.cpp classify.hpp
classify.cpp systemDictionary.hpp

classify.hpp oop.hpp
classify.hpp oop.inline.hpp

codeBlob.cpp allocation.inline.hpp

@ -1185,7 +1182,6 @@ compilerOracle.cpp handles.inline.hpp
compilerOracle.cpp jniHandles.hpp
compilerOracle.cpp klass.hpp
compilerOracle.cpp methodOop.hpp
compilerOracle.cpp oop.hpp
compilerOracle.cpp oop.inline.hpp
compilerOracle.cpp oopFactory.hpp
compilerOracle.cpp resourceArea.hpp

@ -1629,7 +1625,6 @@ frame.cpp methodDataOop.hpp
frame.cpp methodOop.hpp
frame.cpp monitorChunk.hpp
frame.cpp nativeInst_<arch>.hpp
frame.cpp oop.hpp
frame.cpp oop.inline.hpp
frame.cpp oop.inline2.hpp
frame.cpp oopMapCache.hpp

@ -1797,7 +1792,6 @@ generation.cpp genOopClosures.inline.hpp
generation.cpp generation.hpp
generation.cpp generation.inline.hpp
generation.cpp java.hpp
generation.cpp oop.hpp
generation.cpp oop.inline.hpp
generation.cpp spaceDecorator.hpp
generation.cpp space.inline.hpp

@ -2270,7 +2264,6 @@ java.cpp jvmtiExport.hpp
java.cpp memprofiler.hpp
java.cpp methodOop.hpp
java.cpp objArrayOop.hpp
java.cpp oop.hpp
java.cpp oop.inline.hpp
java.cpp oopFactory.hpp
java.cpp sharedRuntime.hpp

@ -2947,7 +2940,7 @@ mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp
nativeInst_<arch>.cpp assembler_<arch>.inline.hpp
nativeInst_<arch>.cpp handles.hpp
nativeInst_<arch>.cpp nativeInst_<arch>.hpp
nativeInst_<arch>.cpp oop.hpp
nativeInst_<arch>.cpp oop.inline.hpp
nativeInst_<arch>.cpp ostream.hpp
nativeInst_<arch>.cpp resourceArea.hpp
nativeInst_<arch>.cpp sharedRuntime.hpp

@ -3842,7 +3835,7 @@ stackMapTable.hpp stackMapFrame.hpp
stackValue.cpp debugInfo.hpp
stackValue.cpp frame.inline.hpp
stackValue.cpp handles.inline.hpp
stackValue.cpp oop.hpp
stackValue.cpp oop.inline.hpp
stackValue.cpp stackValue.hpp

stackValue.hpp handles.hpp

@ -4329,7 +4322,6 @@ typeArrayOop.hpp typeArrayKlass.hpp
unhandledOops.cpp collectedHeap.hpp
unhandledOops.cpp gcLocker.inline.hpp
unhandledOops.cpp globalDefinitions.hpp
unhandledOops.cpp oop.hpp
unhandledOops.cpp oop.inline.hpp
unhandledOops.cpp thread.hpp
unhandledOops.cpp unhandledOops.hpp

@ -4465,7 +4457,6 @@ vframe.cpp javaClasses.hpp
vframe.cpp nmethod.hpp
vframe.cpp objectMonitor.hpp
vframe.cpp objectMonitor.inline.hpp
vframe.cpp oop.hpp
vframe.cpp oop.inline.hpp
vframe.cpp oopMapCache.hpp
vframe.cpp pcDesc.hpp

@ -4577,7 +4568,6 @@ vmThread.cpp events.hpp
vmThread.cpp interfaceSupport.hpp
vmThread.cpp methodOop.hpp
vmThread.cpp mutexLocker.hpp
vmThread.cpp oop.hpp
vmThread.cpp oop.inline.hpp
vmThread.cpp os.hpp
vmThread.cpp resourceArea.hpp

@ -47,7 +47,7 @@ dump.cpp javaCalls.hpp
dump.cpp javaClasses.hpp
dump.cpp loaderConstraints.hpp
dump.cpp methodDataOop.hpp
dump.cpp oop.hpp
dump.cpp oop.inline.hpp
dump.cpp oopFactory.hpp
dump.cpp resourceArea.hpp
dump.cpp signature.hpp

@ -237,7 +237,7 @@ serialize.cpp compactingPermGenGen.hpp
serialize.cpp compiledICHolderOop.hpp
serialize.cpp methodDataOop.hpp
serialize.cpp objArrayOop.hpp
serialize.cpp oop.hpp
serialize.cpp oop.inline.hpp
serialize.cpp symbolTable.hpp
serialize.cpp systemDictionary.hpp

@ -295,7 +295,7 @@ vmStructs.cpp nmethod.hpp
vmStructs.cpp objArrayKlass.hpp
vmStructs.cpp objArrayKlassKlass.hpp
vmStructs.cpp objArrayOop.hpp
vmStructs.cpp oop.hpp
vmStructs.cpp oop.inline.hpp
vmStructs.cpp oopMap.hpp
vmStructs.cpp pcDesc.hpp
vmStructs.cpp perfMemory.hpp

@ -25,12 +25,27 @@
# include "incls/_precompiled.incl"
# include "incls/_barrierSet.cpp.incl"

// count is in HeapWord's
// count is number of array elements being written
void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
Universe::heap()->barrier_set()->write_ref_array_pre(MemRegion(start, start + count));
assert(count <= (size_t)max_intx, "count too large");
#if 0
warning("Pre: \t" INTPTR_FORMAT "[" SIZE_FORMAT "]\t",
start, count);
#endif
if (UseCompressedOops) {
Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count);
} else {
Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count);
}
}

// count is in HeapWord's
// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, start + count));
assert(count <= (size_t)max_intx, "count too large");
HeapWord* end = start + objArrayOopDesc::array_size((int)count);
#if 0
warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
start, count, start, end);
#endif
Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
}

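The corrected comments above carry the point of this file's change: count is an element count, and with compressed oops an element is a 4-byte narrowOop, so element counts and HeapWord counts no longer coincide. A small sketch of the size arithmetic involved (the constants are assumptions for a 64-bit VM, not HotSpot API):

#include <cstddef>

const size_t kNarrowOopSize = 4;   // assumed narrowOop size
const size_t kOopSize       = 8;   // assumed full oop size, 64-bit VM

// Bytes actually spanned by 'count' reference elements.
size_t ref_array_bytes(size_t count, bool use_compressed_oops) {
  return count * (use_compressed_oops ? kNarrowOopSize : kOopSize);
}
// With compressed oops, 6 elements span 24 bytes = 3 HeapWords, not 6;
// treating 'count' as a word count would cover the wrong card range.
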
@ -81,9 +81,13 @@ public:
// barrier types. Semantically, it should be thought of as a call to the
// virtual "_work" function below, which must implement the barrier.)
// First the pre-write versions...
inline void write_ref_field_pre(void* field, oop new_val);
template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
// Keep this private so as to catch violations at build time.
virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
virtual void write_ref_field_pre_work(void* field, oop new_val) {};
virtual void write_ref_field_pre_work( oop* field, oop new_val) {};
virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:

// ...then the post-write version.

@ -117,12 +121,17 @@ public:
virtual void read_ref_array(MemRegion mr) = 0;
virtual void read_prim_array(MemRegion mr) = 0;

virtual void write_ref_array_pre(MemRegion mr) {}
virtual void write_ref_array_pre( oop* dst, int length) {}
virtual void write_ref_array_pre(narrowOop* dst, int length) {}
inline void write_ref_array(MemRegion mr);

// Static versions, suitable for calling from generated code.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
// Narrow oop versions of the above; count is # of array elements being written,
// starting with "start", which is HeapWord-aligned.
static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);

protected:
virtual void write_ref_array_work(MemRegion mr) = 0;

@ -23,10 +23,10 @@
*/

// Inline functions of BarrierSet, which de-virtualize certain
// performance-critical calls when when the barrier is the most common
// performance-critical calls when the barrier is the most common
// card-table kind.

void BarrierSet::write_ref_field_pre(void* field, oop new_val) {
template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
} else {

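The de-virtualization pattern referred to here tests the barrier's dynamic kind once and, on the common card-table path, casts to the concrete type so the inline fast path can be used. A minimal standalone analog (the types are illustrative, not the real BarrierSet hierarchy):

struct BarrierSketch {
  enum Kind { CardTableModRef, Other };
  virtual ~BarrierSketch() {}
  virtual Kind kind() const = 0;
  virtual void write_ref_field_work(void* field) = 0;   // generic, virtual
};

struct CardTableSketch : public BarrierSketch {
  Kind kind() const { return CardTableModRef; }
  void inline_write_ref_field(void* field) { (void)field; /* dirty the covering card */ }
  virtual void write_ref_field_work(void* field) { inline_write_ref_field(field); }
};

inline void write_ref_field(BarrierSketch* bs, void* field) {
  if (bs->kind() == BarrierSketch::CardTableModRef) {
    // Receiver type is now statically known, so the call can inline.
    ((CardTableSketch*)bs)->inline_write_ref_field(field);
  } else {
    bs->write_ref_field_work(field);   // fall back to virtual dispatch
  }
}
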
@ -287,7 +287,7 @@ public:
// these functions here for performance.
protected:
void write_ref_field_work(oop obj, size_t offset, oop newVal);
void write_ref_field_work(void* field, oop newVal);
virtual void write_ref_field_work(void* field, oop newVal);
public:

bool has_write_ref_array_opt() { return true; }

@ -317,10 +317,10 @@ public:

// *** Card-table-barrier-specific things.

inline void inline_write_ref_field_pre(void* field, oop newVal) {}
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

inline void inline_write_ref_field(void* field, oop newVal) {
jbyte* byte = byte_for(field);
template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
jbyte* byte = byte_for((void*)field);
*byte = dirty_card;
}

@ -1194,7 +1194,7 @@ GCStats* GenCollectedHeap::gc_stats(int level) const {
return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
if (!silent) {
gclog_or_tty->print("permgen ");
}

@ -325,7 +325,7 @@ public:
void prepare_for_verify();

// Override.
void verify(bool allow_dirty, bool silent);
void verify(bool allow_dirty, bool silent, bool /* option */);

// Override.
void print() const;

@ -57,7 +57,7 @@ class OopsInGenClosure : public OopClosure {
template <class T> void do_barrier(T* p);

// Version for use by closures that may be called in parallel code.
void par_do_barrier(oop* p);
template <class T> void par_do_barrier(T* p);

public:
OopsInGenClosure() : OopClosure(NULL),

@ -40,18 +40,20 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {

template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
assert(!oopDesc::is_null(*p), "expected non-null object");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);
}
}

inline void OopsInGenClosure::par_do_barrier(oop* p) {
template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
oop obj = *p;
assert(obj != NULL, "expected non-null object");
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);

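Both rewritten barriers follow the same compressed-oops idiom: load the raw T-typed value once, null-check it in its own representation, and only then decode it to a full oop. A simplified, self-contained rendering of that sequence (the types and encoding parameters are stand-ins for narrowOop and the VM's global state):

#include <cstdint>
#include <cstddef>

typedef uint32_t NarrowRef;   // stand-in for narrowOop

// Decode-not-null: base + (v << shift); valid only when v != 0.
inline void* decode_not_null(NarrowRef v, char* base, int shift) {
  return base + ((uintptr_t)v << shift);
}

inline void* load_ref(const NarrowRef* p, char* base, int shift) {
  NarrowRef heap_oop = *p;        // single load of the raw value
  if (heap_oop == 0) return NULL; // null is tested before decoding
  return decode_not_null(heap_oop, base, shift);
}
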
@ -1013,12 +1013,19 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
// discovered_addr.
oop current_head = refs_list.head();

// Note: In the case of G1, this pre-barrier is strictly
// Note: In the case of G1, this specific pre-barrier is strictly
// not necessary because the only case we are interested in
// here is when *discovered_addr is NULL, so this will expand to
// nothing. As a result, I am just manually eliding this out for G1.
// here is when *discovered_addr is NULL (see the CAS further below),
// so this will expand to nothing. As a result, we have manually
// elided this out for G1, but left in the test for some future
// collector that might have need for a pre-barrier here.
if (_discovered_list_needs_barrier && !UseG1GC) {
_bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
if (UseCompressedOops) {
_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
} else {
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
}
guarantee(false, "Need to check non-G1 collector");
}
oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
NULL);

@ -1029,9 +1036,8 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
refs_list.set_head(obj);
refs_list.inc_length(1);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
_bs->write_ref_field((void*)discovered_addr, current_head);
}

} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...

@ -1177,11 +1183,16 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// pre-value, we can safely elide the pre-barrier here for the case of G1.
assert(discovered == NULL, "control point invariant");
if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
if (UseCompressedOops) {
_bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
} else {
_bs->write_ref_field_pre((oop*)discovered_addr, current_head);
}
guarantee(false, "Need to check non-G1 collector");
}
oop_store_raw(discovered_addr, current_head);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((oop*)discovered_addr, current_head);
_bs->write_ref_field((void*)discovered_addr, current_head);
}
list->set_head(obj);
list->inc_length(1);

@ -106,6 +106,7 @@ class Space: public CHeapObj {
virtual void set_end(HeapWord* value) { _end = value; }

virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

MemRegionClosure* preconsumptionDirtyCardClosure() const {

@ -1170,7 +1170,7 @@ void Universe::print_heap_after_gc(outputStream* st) {
st->print_cr("}");
}

void Universe::verify(bool allow_dirty, bool silent) {
void Universe::verify(bool allow_dirty, bool silent, bool option) {
if (SharedSkipVerify) {
return;
}

@ -1194,7 +1194,7 @@ void Universe::verify(bool allow_dirty, bool silent) {
if (!silent) gclog_or_tty->print("[Verifying ");
if (!silent) gclog_or_tty->print("threads ");
Threads::verify();
heap()->verify(allow_dirty, silent);
heap()->verify(allow_dirty, silent, option);

if (!silent) gclog_or_tty->print("syms ");
SymbolTable::verify();

@ -398,7 +398,7 @@ class Universe: AllStatic {

// Debugging
static bool verify_in_progress() { return _verify_in_progress; }
static void verify(bool allow_dirty = true, bool silent = false);
static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
static int verify_count() { return _verify_count; }
static void print();
static void print_on(outputStream* st);

@ -28,13 +28,14 @@
template <class T>
static void specialized_oop_follow_contents(instanceRefKlass* ref, oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
oop referent = oopDesc::load_decode_heap_oop(referent_addr);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() &&
MarkSweep::ref_processor()->
discover_reference(obj, ref->reference_type())) {

@ -81,13 +82,14 @@ static void specialized_oop_follow_contents(instanceRefKlass* ref,
ParCompactionManager* cm,
oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
oop referent = oopDesc::load_decode_heap_oop(referent_addr);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("instanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
}
)
if (referent != NULL) {
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
PSParallelCompact::ref_processor()->
discover_reference(obj, ref->reference_type())) {

@ -182,9 +184,10 @@ int instanceRefKlass::oop_adjust_pointers(oop obj) {
} \
\
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
oop referent = oopDesc::load_decode_heap_oop(referent_addr); \
if (referent != NULL && contains(referent_addr)) { \
T heap_oop = oopDesc::load_heap_oop(referent_addr); \
if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) { \
ReferenceProcessor* rp = closure->_ref_processor; \
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
if (!referent->is_gc_marked() && (rp != NULL) && \
rp->discover_reference(obj, reference_type())) { \
return size; \

@ -84,8 +84,6 @@ oop objArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
arrayOop d, T* dst, int length, TRAPS) {

const size_t word_len = objArrayOopDesc::array_size(length);

BarrierSet* bs = Universe::heap()->barrier_set();
// For performance reasons, we assume we are that the write barrier we
// are using has optimized modes for arrays of references. At least one

@ -93,11 +91,10 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");

MemRegion dst_mr = MemRegion((HeapWord*)dst, word_len);
if (s == d) {
// since source and destination are equal we do not need conversion checks.
assert(length > 0, "sanity check");
bs->write_ref_array_pre(dst_mr);
bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// We have to make sure all elements conform to the destination array

@ -105,7 +102,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
klassOop stype = objArrayKlass::cast(s->klass())->element_klass();
if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
// elements are guaranteed to be subtypes, so no check necessary
bs->write_ref_array_pre(dst_mr);
bs->write_ref_array_pre(dst, length);
Copy::conjoint_oops_atomic(src, dst, length);
} else {
// slow case: need individual subtype checks

@ -137,6 +134,7 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
}
}
}
const size_t word_len = objArrayOopDesc::array_size(length);
bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
}

@ -148,12 +148,14 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
assert(decode_heap_oop(result) == v, "reversibility");
return (narrowOop)result;
}

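The encoding added above is plain base-relative arithmetic, and the new reversibility assert checks its inverse. A worked example, under an assumed heap base and a 3-bit shift (8-byte object alignment):

// Assumed parameters: base = 0x0000000100000000, shift = 3.
//   v      = 0x0000000180000010              (an oop inside the heap)
//   pd     = v - base        = 0x80000010
//   narrow = pd >> 3         = 0x10000002    (fits in 32 bits)
//   decode = base + (narrow << 3)
//          = 0x0000000100000000 + 0x80000010 = v   (reversibility holds)
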
@ -449,7 +451,7 @@ inline void update_barrier_set(void* p, oop v) {
oopDesc::bs()->write_ref_field(p, v);
}

inline void update_barrier_set_pre(void* p, oop v) {
template <class T> inline void update_barrier_set_pre(T* p, oop v) {
oopDesc::bs()->write_ref_field_pre(p, v);
}

@ -459,15 +461,15 @@ template <class T> inline void oop_store(T* p, oop v) {
} else {
update_barrier_set_pre(p, v);
oopDesc::encode_store_heap_oop(p, v);
update_barrier_set(p, v);
update_barrier_set((void*)p, v); // cast away type
}
}

template <class T> inline void oop_store(volatile T* p, oop v) {
update_barrier_set_pre((void*)p, v);
update_barrier_set_pre((T*)p, v); // cast away volatile
// Used by release_obj_field_put, so use release_store_ptr.
oopDesc::release_encode_store_heap_oop(p, v);
update_barrier_set((void*)p, v);
update_barrier_set((void*)p, v); // cast away type
}

template <class T> inline void oop_store_without_check(T* p, oop v) {

@ -29,6 +29,7 @@
typedef juint narrowOop; // Offset instead of address for an oop within a java object
typedef class klassOopDesc* wideKlassOop; // to keep SA happy and unhandled oop
// detector happy.
typedef void* OopOrNarrowOopStar;

#ifndef CHECK_UNHANDLED_OOPS

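This typedef underpins most of the signature changes in the commit: a remembered-set entry is just "the address of a reference field", which may be an oop* or a narrowOop* once compressed oops are enabled, so the tables erase the distinction to void* and readers re-type it on UseCompressedOops (as the HRRS_VERBOSE code earlier in this diff does). A hedged sketch of such a reader, with stand-in types and state:

#include <cstdint>
#include <cstddef>

typedef void* OopOrNarrowOopStar;   // as introduced above

// Illustrative reader; 'Heap' stands in for the VM's global encoding state.
struct Heap { bool use_compressed_oops; char* base; int shift; };

void* read_through(const Heap& h, OopOrNarrowOopStar from) {
  if (h.use_compressed_oops) {
    uint32_t narrow = *(uint32_t*)from;   // the slot holds a narrowOop
    return narrow ? h.base + ((uintptr_t)narrow << h.shift) : NULL;
  }
  return *(void**)from;                   // the slot holds a full oop
}
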
@ -1789,7 +1789,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#ifdef _LP64
// Push DecodeN down through phi.
// The rest of phi graph will transform by split EncodeP node though phis up.
if (UseCompressedOops && can_reshape && progress == NULL) {
if (UseNewCode && UseCompressedOops && can_reshape && progress == NULL) {
bool may_push = true;
bool has_decodeN = false;
Node* in_decodeN = NULL;

@ -1048,7 +1048,11 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSwapObject(JNIEnv *env, jobject unsafe,
oop e = JNIHandles::resolve(e_h);
oop p = JNIHandles::resolve(obj);
HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
update_barrier_set_pre((void*)addr, e);
if (UseCompressedOops) {
update_barrier_set_pre((narrowOop*)addr, e);
} else {
update_barrier_set_pre((oop*)addr, e);
}
oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e);
jboolean success = (res == e);
if (success)

@ -1202,18 +1202,13 @@ void Arguments::set_ergonomics_flags() {
}

#ifdef _LP64
// Compressed Headers do not work with CMS, which uses a bit in the klass
// field offset to determine free list chunk markers.
// Check that UseCompressedOops can be set with the max heap size allocated
// by ergonomics.
if (MaxHeapSize <= max_heap_for_compressed_oops()) {
if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
// Turn off until bug is fixed.
// the following line to return it to default status.
// FLAG_SET_ERGO(bool, UseCompressedOops, true);
} else if (UseCompressedOops && UseG1GC) {
warning(" UseCompressedOops does not currently work with UseG1GC; switching off UseCompressedOops. ");
FLAG_SET_DEFAULT(UseCompressedOops, false);
}
#ifdef _WIN64
if (UseLargePages && UseCompressedOops) {

@ -1454,6 +1449,7 @@ bool Arguments::check_gc_consistency() {
if (UseSerialGC) i++;
if (UseConcMarkSweepGC || UseParNewGC) i++;
if (UseParallelGC || UseParallelOldGC) i++;
if (UseG1GC) i++;
if (i > 1) {
jio_fprintf(defaultStream::error_stream(),
"Conflicting collector combinations in option list; "

@ -2603,22 +2599,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
return result;
}

// These are hacks until G1 is fully supported and tested
// but lets you force -XX:+UseG1GC in PRT and get it where it (mostly) works
if (UseG1GC) {
if (UseConcMarkSweepGC || UseParNewGC || UseParallelGC || UseParallelOldGC || UseSerialGC) {
#ifndef PRODUCT
tty->print_cr("-XX:+UseG1GC is incompatible with other collectors, using UseG1GC");
#endif // PRODUCT
UseConcMarkSweepGC = false;
UseParNewGC = false;
UseParallelGC = false;
UseParallelOldGC = false;
UseSerialGC = false;
}
no_shared_spaces();
}

#ifndef PRODUCT
if (TraceBytecodesAt != 0) {
TraceBytecodes = true;

@ -2676,10 +2656,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
} else if (UseParNewGC) {
// Set some flags for ParNew
set_parnew_gc_flags();
}
// Temporary; make the "if" an "else-if" before
// we integrate G1. XXX
if (UseG1GC) {
} else if (UseG1GC) {
// Set some flags for garbage-first, if needed.
set_g1_gc_flags();
}

@ -49,7 +49,7 @@ void SafepointSynchronize::begin() {
// In the future we should investigate whether CMS can use the
// more-general mechanism below. DLD (01/05).
ConcurrentMarkSweepThread::synchronize(false);
} else {
} else if (UseG1GC) {
ConcurrentGCThread::safepoint_synchronize();
}
#endif // SERIALGC

@ -400,7 +400,7 @@ void SafepointSynchronize::end() {
// If there are any concurrent GC threads resume them.
if (UseConcMarkSweepGC) {
ConcurrentMarkSweepThread::desynchronize(false);
} else {
} else if (UseG1GC) {
ConcurrentGCThread::safepoint_desynchronize();
}
#endif // SERIALGC

@ -119,6 +119,7 @@ JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
assert(false, "should be optimized out");
return;
}
assert(orig->is_oop(true /* ignore mark word */), "Error");
// store the original value that was in the field reference
thread->satb_mark_queue().enqueue(orig);
JRT_END

@ -64,15 +64,18 @@ bool ParallelTaskTerminator::peek_in_queue_set() {
}

void ParallelTaskTerminator::yield() {
assert(_offered_termination <= _n_threads, "Invariant");
os::yield();
}

void ParallelTaskTerminator::sleep(uint millis) {
assert(_offered_termination <= _n_threads, "Invariant");
os::sleep(Thread::current(), millis, false);
}

bool
ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
assert(_offered_termination < _n_threads, "Invariant");
Atomic::inc(&_offered_termination);

uint yield_count = 0;

@ -96,6 +99,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
// Loop waiting for all threads to offer termination or
// more work.
while (true) {
assert(_offered_termination <= _n_threads, "Invariant");
// Are all threads offering termination?
if (_offered_termination == _n_threads) {
return true;

@ -151,6 +155,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
if (peek_in_queue_set() ||
(terminator != NULL && terminator->should_exit_termination())) {
Atomic::dec(&_offered_termination);
assert(_offered_termination < _n_threads, "Invariant");
return false;
}
}

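The asserts added here document the protocol's invariant: _offered_termination only ever moves between 0 and _n_threads, incremented when a worker offers termination and decremented when the offer is revoked because work reappeared. A compact C++11 analog of that offer/revoke loop (std::atomic standing in for HotSpot's Atomic class; a sketch, not the real implementation):

#include <atomic>

class TerminatorSketch {
  std::atomic<int> _offered_termination;
  const int _n_threads;
public:
  explicit TerminatorSketch(int n) : _offered_termination(0), _n_threads(n) {}

  // Returns true once every worker has offered termination; returns false
  // (after revoking its own offer) when more work shows up.
  bool offer_termination(bool (*work_available)()) {
    _offered_termination.fetch_add(1);           // make the offer
    for (;;) {
      if (_offered_termination.load() == _n_threads)
        return true;                             // all workers agree: done
      if (work_available()) {
        _offered_termination.fetch_sub(1);       // revoke, go back to work
        return false;
      }
      // yield()/sleep() back-off elided from this sketch
    }
  }
};
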
@ -560,8 +560,14 @@ typedef GenericTaskQueueSet<Task> OopTaskQueueSet;
class StarTask {
void* _holder; // either union oop* or narrowOop*
public:
StarTask(narrowOop *p) { _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK); }
StarTask(oop *p) { _holder = (void*)p; }
StarTask(narrowOop* p) {
assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
_holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
}
StarTask(oop* p) {
assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
_holder = (void*)p;
}
StarTask() { _holder = NULL; }
operator oop*() { return (oop*)_holder; }
operator narrowOop*() {

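The new asserts make the representation trick explicit: both pointer flavors are at least word-aligned, so the low bit of _holder is free to tag which flavor is stored, and a pointer that already had that bit set would be silently corrupted. A standalone sketch of the same low-bit tagging (taking the mask value to be 1, an assumption):

#include <cassert>
#include <cstdint>

const uintptr_t kNarrowTag = 1;   // assumed value of COMPRESSED_OOP_MASK

struct StarTaskSketch {
  void* _holder;

  void set_narrow(uint32_t* p) {                                   // narrowOop* flavor
    assert(((uintptr_t)p & kNarrowTag) == 0 && "information loss");
    _holder = (void*)((uintptr_t)p | kNarrowTag);
  }
  void set_wide(void** p) {                                        // oop* flavor
    assert(((uintptr_t)p & kNarrowTag) == 0 && "information loss");
    _holder = (void*)p;
  }
  bool is_narrow() const { return ((uintptr_t)_holder & kNarrowTag) != 0; }
  uint32_t* as_narrow() const { return (uint32_t*)((uintptr_t)_holder & ~kNarrowTag); }
  void** as_wide() const { return (void**)_holder; }
};
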