Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-21 19:44:41 +02:00
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv. Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold

This commit is contained in:
parent 680ecf1611
commit 4a831d45f0

273 changed files with 6585 additions and 2993 deletions
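For context, the mechanism this changeset introduces: on 64-bit VMs, object references held in instances, arrays, and object headers are stored as 32-bit scaled offsets (narrowOop) from a fixed heap base and widened back to full pointers (oop) on use. With 8-byte object alignment, 2^32 slots cover 32 GB of heap, which is where the limit in the synopsis comes from. A minimal sketch of the arithmetic, under illustrative names (heap_base, encode, decode, and kAlignShift are stand-ins, not HotSpot's exact API):

#include <cstdint>

// Illustrative stand-ins: HotSpot derives the shift from the 8-byte
// minimum object alignment and keeps the base handy in a register.
typedef uint32_t narrow_ref;          // what gets stored in the heap
static char*     heap_base = 0;       // start of the reserved heap
static const int kAlignShift = 3;     // 8-byte alignment -> shift by 3

// encode: full pointer -> 32-bit scaled offset from heap_base
static narrow_ref encode(void* p) {
  return (narrow_ref)(((char*)p - heap_base) >> kAlignShift);
}

// decode: 32-bit scaled offset -> full pointer
static void* decode(narrow_ref v) {
  return heap_base + ((uintptr_t)v << kAlignShift);
}
// 2^32 slots * 8-byte granularity = 32 GB of addressable heap.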
@@ -29,22 +29,34 @@ class ConcurrentMarkSweepGeneration;
 class CMSBitMap;
 class CMSMarkStack;
 class CMSCollector;
+template<class E> class GenericTaskQueue;
+typedef GenericTaskQueue<oop> OopTaskQueue;
+template<class E> class GenericTaskQueueSet;
+typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
 class MarkFromRootsClosure;
 class Par_MarkFromRootsClosure;
 
+// Decode the oop and call do_oop on it.
+#define DO_OOP_WORK_DEFN \
+  void do_oop(oop obj); \
+  template <class T> inline void do_oop_work(T* p) { \
+    T heap_oop = oopDesc::load_heap_oop(p); \
+    if (!oopDesc::is_null(heap_oop)) { \
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
+      do_oop(obj); \
+    } \
+  }
+
 class MarkRefsIntoClosure: public OopsInGenClosure {
-  const MemRegion _span;
-  CMSBitMap*      _bitMap;
-  const bool      _should_do_nmethods;
+ private:
+  const MemRegion _span;
+  CMSBitMap*      _bitMap;
+  const bool      _should_do_nmethods;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
                      bool should_do_nmethods);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const {
     return _should_do_nmethods;
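Since every closure in this header now stamps out DO_OOP_WORK_DEFN, it is worth spelling out the shape it creates: two virtual do_oop overloads let iteration code visit both flat (oop*) and compressed (narrowOop*) reference slots, while a single do_oop_work template loads the slot at its native width, null-checks it, and decodes it to a full reference before the closure-specific work runs. A self-contained toy with the same structure (all names are illustrative, not the real oopDesc API):

#include <cstdint>

struct Obj {};                        // some heap object
typedef Obj*     toy_oop;             // full-width reference ("oop")
typedef uint32_t toy_narrow;          // compressed slot ("narrowOop")

static Obj* toy_base = 0;             // pretend heap base

static bool    is_null(toy_oop o)    { return o == 0; }
static bool    is_null(toy_narrow v) { return v == 0; }
static toy_oop decode_not_null(toy_oop o)    { return o; }
static toy_oop decode_not_null(toy_narrow v) { return toy_base + v; }

class ToyClosure {
 public:
  // One virtual entry point per slot width, as in the patch...
  virtual void do_oop(toy_oop* p)    { do_oop_work(p); }
  virtual void do_oop(toy_narrow* p) { do_oop_work(p); }
  virtual ~ToyClosure() {}
 protected:
  // ...funneled through one template that loads, null-checks, and widens.
  template <class T> void do_oop_work(T* p) {
    T slot = *p;
    if (!is_null(slot)) {
      mark(decode_not_null(slot));
    }
  }
  virtual void mark(toy_oop obj) = 0; // the closure-specific marking work
};

This is why the bulk of the diff below is mechanical: each closure's single do_oop(oop* p) becomes the overload pair plus an oop-typed worker.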
@@ -57,15 +69,20 @@ class MarkRefsIntoClosure: public OopsInGenClosure {
 // A variant of the above used in certain kinds of CMS
 // marking verification.
 class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
-  const MemRegion _span;
-  CMSBitMap*      _verification_bm;
-  CMSBitMap*      _cms_bm;
-  const bool      _should_do_nmethods;
+ private:
+  const MemRegion _span;
+  CMSBitMap*      _verification_bm;
+  CMSBitMap*      _cms_bm;
+  const bool      _should_do_nmethods;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm, bool should_do_nmethods);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const {
     return _should_do_nmethods;

@@ -75,37 +92,40 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
   }
 };
 
 
 // The non-parallel version (the parallel version appears further below).
 class PushAndMarkClosure: public OopClosure {
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  CMSBitMap*    _mod_union_table;
-  CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
-  bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
+ private:
+  CMSCollector* _collector;
+  MemRegion     _span;
+  CMSBitMap*    _bit_map;
+  CMSBitMap*    _mod_union_table;
+  CMSMarkStack* _mark_stack;
+  CMSMarkStack* _revisit_stack;
+  bool          _concurrent_precleaning;
+  bool const    _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
-                    CMSMarkStack* mark_stack,
-                    CMSMarkStack* revisit_stack,
-                    bool concurrent_precleaning);
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); }
+                    CMSMarkStack*  mark_stack,
+                    CMSMarkStack*  revisit_stack,
+                    bool           concurrent_precleaning);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  const bool should_remember_klasses() const {
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
 };
 
 // In the parallel case, the revisit stack, the bit map and the

@@ -115,12 +135,15 @@ class PushAndMarkClosure: public OopClosure {
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
 class Par_PushAndMarkClosure: public OopClosure {
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
+ private:
+  CMSCollector* _collector;
+  MemRegion     _span;
+  CMSBitMap*    _bit_map;
+  OopTaskQueue* _work_queue;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
@@ -128,43 +151,48 @@ class Par_PushAndMarkClosure: public OopClosure {
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
   bool do_header() { return true; }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  const bool should_remember_klasses() const {
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
 };
 
 
 // The non-parallel version (the parallel version appears further below).
 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
-  MemRegion          _span;
-  CMSBitMap*         _bit_map;
-  CMSMarkStack*      _mark_stack;
-  PushAndMarkClosure _pushAndMarkClosure;
-  CMSCollector*      _collector;
-  bool               _yield;
-  Mutex*             _freelistLock;
-  bool               _concurrent_precleaning;
+ private:
+  MemRegion          _span;
+  CMSBitMap*         _bit_map;
+  CMSMarkStack*      _mark_stack;
+  PushAndMarkClosure _pushAndMarkClosure;
+  CMSCollector*      _collector;
+  Mutex*             _freelistLock;
+  bool               _yield;
+  // Whether closure is being used for concurrent precleaning
+  bool               _concurrent_precleaning;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
-                            CMSMarkStack* mark_stack,
-                            CMSMarkStack* revisit_stack,
+                            CMSMarkStack*  mark_stack,
+                            CMSMarkStack*  revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
   Prefetch::style prefetch_style() {

@@ -185,11 +213,14 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 // sycnhronized. An OopTaskQueue structure, supporting efficient
 // workstealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
-  MemRegion              _span;
-  CMSBitMap*             _bit_map;
-  OopTaskQueue*          _work_queue;
-  const uint             _low_water_mark;
-  Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ private:
+  MemRegion              _span;
+  CMSBitMap*             _bit_map;
+  OopTaskQueue*          _work_queue;
+  const uint             _low_water_mark;
+  Par_PushAndMarkClosure _par_pushAndMarkClosure;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,

@@ -197,8 +228,10 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
   Prefetch::style prefetch_style() {

@@ -211,28 +244,34 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
 class PushOrMarkClosure: public OopClosure {
-  CMSCollector*   _collector;
-  MemRegion       _span;
-  CMSBitMap*      _bitMap;
-  CMSMarkStack*   _markStack;
-  CMSMarkStack*   _revisitStack;
-  HeapWord* const _finger;
-  MarkFromRootsClosure* const _parent;
-  bool const      _should_remember_klasses;
+ private:
+  CMSCollector*   _collector;
+  MemRegion       _span;
+  CMSBitMap*      _bitMap;
+  CMSMarkStack*   _markStack;
+  CMSMarkStack*   _revisitStack;
+  HeapWord* const _finger;
+  MarkFromRootsClosure* const
+                  _parent;
+  bool const      _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
-                   CMSMarkStack* markStack,
-                   CMSMarkStack* revisitStack,
-                   HeapWord* finger,
+                   CMSMarkStack*  markStack,
+                   CMSMarkStack*  revisitStack,
+                   HeapWord*      finger,
                    MarkFromRootsClosure* parent);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); }
-  const bool should_remember_klasses() const {
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 private:
@@ -244,6 +283,7 @@ class PushOrMarkClosure: public OopClosure {
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
 class Par_PushOrMarkClosure: public OopClosure {
+ private:
   CMSCollector*    _collector;
   MemRegion        _whole_span;
   MemRegion        _span;        // local chunk

@@ -253,24 +293,29 @@ class Par_PushOrMarkClosure: public OopClosure {
   CMSMarkStack*    _revisit_stack;
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
-  Par_MarkFromRootsClosure* const _parent;
-  bool const       _should_remember_klasses;
+  Par_MarkFromRootsClosure* const
+                   _parent;
+  bool const       _should_remember_klasses;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
-                       MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack* mark_stack,
-                       CMSMarkStack* revisit_stack,
-                       HeapWord* finger,
-                       HeapWord** global_finger_addr,
-                       Par_MarkFromRootsClosure* parent);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); }
-  const bool should_remember_klasses() const {
+                        MemRegion span,
+                        CMSBitMap* bit_map,
+                        OopTaskQueue* work_queue,
+                        CMSMarkStack* mark_stack,
+                        CMSMarkStack* revisit_stack,
+                        HeapWord* finger,
+                        HeapWord** global_finger_addr,
+                        Par_MarkFromRootsClosure* parent);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+  virtual const bool should_remember_klasses() const {
     return _should_remember_klasses;
   }
-  void remember_klass(Klass* k);
+  virtual void remember_klass(Klass* k);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 private:

@@ -282,10 +327,13 @@ class Par_PushOrMarkClosure: public OopClosure {
 // This is currently used during the (weak) reference object
 // processing phase of the CMS final checkpoint step.
 class CMSKeepAliveClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack):

@@ -293,16 +341,20 @@ class CMSKeepAliveClosure: public OopClosure {
     _span(span),
     _bit_map(bit_map),
     _mark_stack(mark_stack) { }
-
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };
 
 class CMSInnerParMarkAndPushClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,

@@ -311,24 +363,32 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
     _span(span),
     _bit_map(bit_map),
     _work_queue(work_queue) { }
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
 };
 
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
 class CMSParKeepAliveClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
-  CMSInnerParMarkAndPushClosure _mark_and_push;
+  CMSInnerParMarkAndPushClosure
+                _mark_and_push;
   const uint    _low_water_mark;
   void trim_queue(uint max);
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
-  void do_oop(oop* p);
-  void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
 };
@@ -177,7 +177,7 @@ HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   }
 
-  debug_only(MarkSweep::register_live_oop(q, adjusted_size));
+  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   compact_top += adjusted_size;
 
   // we need to update the offset table so that the beginnings of objects can be

@@ -1211,7 +1211,7 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
   return fc;
 }
 
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
+oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   assert_locked();
 

@@ -2116,7 +2116,6 @@ void CompactibleFreeListSpace::split(size_t from, size_t to1) {
   splitBirth(to2);
 }
 
-
 void CompactibleFreeListSpace::print() const {
   tty->print(" CompactibleFreeListSpace");
   Space::print();

@@ -2130,6 +2129,7 @@ void CompactibleFreeListSpace::prepare_for_verify() {
 }
 
 class VerifyAllBlksClosure: public BlkClosure {
+ private:
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
 

@@ -2137,7 +2137,7 @@ class VerifyAllBlksClosure: public BlkClosure {
  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
                       MemRegion span) : _sp(sp), _span(span) { }
 
-  size_t do_blk(HeapWord* addr) {
+  virtual size_t do_blk(HeapWord* addr) {
     size_t res;
     if (_sp->block_is_obj(addr)) {
       oop p = oop(addr);

@@ -2160,12 +2160,54 @@
 };
 
 class VerifyAllOopsClosure: public OopClosure {
 private:
   const CMSCollector*             _collector;
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
   const bool                      _past_remark;
   const CMSBitMap*                _bit_map;
 
+ protected:
+  void do_oop(void* p, oop obj) {
+    if (_span.contains(obj)) { // the interior oop points into CMS heap
+      if (!_span.contains(p)) { // reference from outside CMS heap
+        // Should be a valid object; the first disjunct below allows
+        // us to sidestep an assertion in block_is_obj() that insists
+        // that p be in _sp. Note that several generations (and spaces)
+        // are spanned by _span (CMS heap) above.
+        guarantee(!_sp->is_in_reserved(obj) ||
+                  _sp->block_is_obj((HeapWord*)obj),
+                  "Should be an object");
+        guarantee(obj->is_oop(), "Should be an oop");
+        obj->verify();
+        if (_past_remark) {
+          // Remark has been completed, the object should be marked
+          _bit_map->isMarked((HeapWord*)obj);
+        }
+      } else { // reference within CMS heap
+        if (_past_remark) {
+          // Remark has been completed -- so the referent should have
+          // been marked, if referring object is.
+          if (_bit_map->isMarked(_collector->block_start(p))) {
+            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
+          }
+        }
+      }
+    } else if (_sp->is_in_reserved(p)) {
+      // the reference is from FLS, and points out of FLS
+      guarantee(obj->is_oop(), "Should be an oop");
+      obj->verify();
+    }
+  }
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      do_oop(p, obj);
+    }
+  }
+
 public:
  VerifyAllOopsClosure(const CMSCollector* collector,
                       const CompactibleFreeListSpace* sp, MemRegion span,

@@ -2173,40 +2215,8 @@ class VerifyAllOopsClosure: public OopClosure {
     OopClosure(), _collector(collector), _sp(sp), _span(span),
     _past_remark(past_remark), _bit_map(bit_map) { }
 
-  void do_oop(oop* ptr) {
-    oop p = *ptr;
-    if (p != NULL) {
-      if (_span.contains(p)) { // the interior oop points into CMS heap
-        if (!_span.contains(ptr)) { // reference from outside CMS heap
-          // Should be a valid object; the first disjunct below allows
-          // us to sidestep an assertion in block_is_obj() that insists
-          // that p be in _sp. Note that several generations (and spaces)
-          // are spanned by _span (CMS heap) above.
-          guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
-                    "Should be an object");
-          guarantee(p->is_oop(), "Should be an oop");
-          p->verify();
-          if (_past_remark) {
-            // Remark has been completed, the object should be marked
-            _bit_map->isMarked((HeapWord*)p);
-          }
-        }
-        else { // reference within CMS heap
-          if (_past_remark) {
-            // Remark has been completed -- so the referent should have
-            // been marked, if referring object is.
-            if (_bit_map->isMarked(_collector->block_start(ptr))) {
-              guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
-            }
-          }
-        }
-      } else if (_sp->is_in_reserved(ptr)) {
-        // the reference is from FLS, and points out of FLS
-        guarantee(p->is_oop(), "Should be an oop");
-        p->verify();
-      }
-    }
-  }
+  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
 };
 
 void CompactibleFreeListSpace::verify(bool ignored) const {
@@ -540,7 +540,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   HeapWord* allocate(size_t size);
   HeapWord* par_allocate(size_t size);
 
-  oop       promote(oop obj, size_t obj_size, oop* ref);
+  oop       promote(oop obj, size_t obj_size);
   void      gc_prologue();
   void      gc_epilogue();
 
@@ -1226,7 +1226,7 @@ CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
   return NULL;
 }
 
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
+oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   // allocate, copy and if necessary update promoinfo --
   // delegate to underlying space.

@@ -1238,7 +1238,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
   }
 #endif  // #ifndef PRODUCT
 
-  oop res = _cmsSpace->promote(obj, obj_size, ref);
+  oop res = _cmsSpace->promote(obj, obj_size);
   if (res == NULL) {
     // expand and retry
     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords

@@ -1249,7 +1249,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
     assert(next_gen() == NULL, "assumption, based upon which no attempt "
                                "is made to pass on a possibly failing "
                                "promotion to next generation");
-    res = _cmsSpace->promote(obj, obj_size, ref);
+    res = _cmsSpace->promote(obj, obj_size);
   }
   if (res != NULL) {
     // See comment in allocate() about when objects should

@@ -3922,13 +3922,15 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
 }
 
 class Par_ConcMarkingClosure: public OopClosure {
+ private:
   CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
   CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
   OopTaskQueue* _work_queue;
-
+ protected:
+  DO_OOP_WORK_DEFN
 public:
  Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):

@@ -3937,8 +3939,8 @@ class Par_ConcMarkingClosure: public OopClosure {
     _work_queue(work_queue),
     _bit_map(bit_map),
     _overflow_stack(overflow_stack) { }  // need to initialize revisit stack etc.
-
-  void do_oop(oop* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
   void handle_stack_overflow(HeapWord* lost);
 };

@@ -3947,11 +3949,9 @@ class Par_ConcMarkingClosure: public OopClosure {
 // the salient assumption here is that stolen oops must
 // always be initialized, so we do not need to check for
 // uninitialized objects before scanning here.
-void Par_ConcMarkingClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  assert(this_oop->is_oop_or_null(),
-         "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+void Par_ConcMarkingClosure::do_oop(oop obj) {
+  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
@@ -3970,7 +3970,7 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
       }
     )
     if (simulate_overflow ||
-        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
       // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
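The push pattern in this hunk, try the private work queue first, fall back to the shared overflow stack, and treat failure of both as a benign recoverable overflow, recurs throughout the rest of the patch. In outline (illustrative types; the real TaskQueue and CMSMarkStack APIs differ):

// Illustrative outline of the bounded-queue-with-overflow idiom.
template <class Queue, class Stack, class T>
bool push_with_fallback(Queue& work_queue, Stack& overflow_stack, T obj) {
  if (work_queue.push(obj))         return true;  // fast, thread-local path
  if (overflow_stack.par_push(obj)) return true;  // shared, synchronized path
  return false;  // caller records the overflow and arranges a rescan
}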
@@ -3987,6 +3987,9 @@ void Par_ConcMarkingClosure::do_oop(oop* p) {
   }
 }
 
+void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
+void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+
 void Par_ConcMarkingClosure::trim_queue(size_t max) {
   while (_work_queue->size() > max) {
     oop new_oop;

@@ -4086,8 +4089,8 @@ void CMSConcMarkingTask::coordinator_yield() {
   //
   // Tony 2006.06.29
   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+                   ConcurrentMarkSweepThread::should_yield() &&
+                   !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }

@@ -6048,8 +6051,8 @@ void CMSCollector::reset(bool asynch) {
 
     // See the comment in coordinator_yield()
     for (unsigned i = 0; i < CMSYieldSleepCount &&
-                         ConcurrentMarkSweepThread::should_yield() &&
-                         !CMSCollector::foregroundGCIsActive(); ++i) {
+                     ConcurrentMarkSweepThread::should_yield() &&
+                     !CMSCollector::foregroundGCIsActive(); ++i) {
       os::sleep(Thread::current(), 1, false);
       ConcurrentMarkSweepThread::acknowledge_yield_request();
     }

@@ -6362,19 +6365,19 @@ MarkRefsIntoClosure::MarkRefsIntoClosure(
   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
-void MarkRefsIntoClosure::do_oop(oop* p) {
+void MarkRefsIntoClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
-  oop thisOop = *p;
-  if (thisOop != NULL) {
-    assert(thisOop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)thisOop;
-    if (_span.contains(addr)) {
-      // this should be made more efficient
-      _bitMap->mark(addr);
-    }
+  assert(obj->is_oop(), "expected an oop");
+  HeapWord* addr = (HeapWord*)obj;
+  if (_span.contains(addr)) {
+    // this should be made more efficient
+    _bitMap->mark(addr);
   }
 }
 
+void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
+void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+
 // A variant of the above, used for CMS marking verification.
 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,

@@ -6387,23 +6390,23 @@ MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
 }
 
-void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
+void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    if (_span.contains(addr)) {
-      _verification_bm->mark(addr);
-      if (!_cms_bm->isMarked(addr)) {
-        oop(addr)->print();
-        gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
-        fatal("... aborting");
-      }
+  assert(obj->is_oop(), "expected an oop");
+  HeapWord* addr = (HeapWord*)obj;
+  if (_span.contains(addr)) {
+    _verification_bm->mark(addr);
+    if (!_cms_bm->isMarked(addr)) {
+      oop(addr)->print();
+      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
+      fatal("... aborting");
     }
   }
 }
 
+void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+
 //////////////////////////////////////////////////
 // MarkRefsIntoAndScanClosure
 //////////////////////////////////////////////////

@@ -6438,13 +6441,13 @@ MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
 // The marks are made in the marking bit map and the marking stack is
 // used for keeping the (newly) grey objects during the scan.
 // The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
-    assert(this_oop->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
-    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(), "should be empty");
+void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+  if (obj != NULL) {
+    assert(obj->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)obj;
+    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
+    assert(_collector->overflow_list_is_empty(),
+           "overflow list should be empty");
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
       // mark bit map (object is now grey)

@@ -6452,7 +6455,7 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
       // push on marking stack (stack should be empty), and drain the
       // stack by applying this closure to the oops in the oops popped
       // from the stack (i.e. blacken the grey objects)
-      bool res = _mark_stack->push(this_oop);
+      bool res = _mark_stack->push(obj);
       assert(res, "Should have space to push on empty stack");
       do {
         oop new_oop = _mark_stack->pop();

@@ -6488,6 +6491,9 @@ void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
   }
 }
 
+void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
 void MarkRefsIntoAndScanClosure::do_yield_work() {
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");

@@ -6506,9 +6512,11 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
   _collector->icms_wait();
 
   // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+  for (unsigned i = 0;
+       i < CMSYieldSleepCount &&
+       ConcurrentMarkSweepThread::should_yield() &&
+       !CMSCollector::foregroundGCIsActive();
+       ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
@@ -6545,13 +6553,12 @@ Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
 // the scan phase whence they are also available for stealing by parallel
 // threads. Since the marking bit map is shared, updates are
 // synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  if (this_oop != NULL) {
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+  if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
-    assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
-    HeapWord* addr = (HeapWord*)this_oop;
+    assert(obj->is_oop(), "expected an oop");
+    HeapWord* addr = (HeapWord*)obj;
     if (_span.contains(addr) &&
         !_bit_map->isMarked(addr)) {
       // mark bit map (object will become grey):
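The "synchronized (via CAS)" remark above is the key to sharing one mark bit map among parallel markers: a thread claims an object by atomically setting its bit, and only the winner pushes the object onto its work queue, so each object is scanned once. A freestanding sketch of that claim step (illustrative; HotSpot's CMSBitMap::par_mark differs in detail):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative shared mark bit map: one bit per heap word.
struct ToyBitMap {
  std::atomic<uintptr_t>* words;   // zero-initialized backing storage

  // Atomically set bit `i`; true iff this caller is the one that set it.
  bool par_mark(size_t i) {
    const size_t bits = 8 * sizeof(uintptr_t);
    uintptr_t mask = uintptr_t(1) << (i % bits);
    uintptr_t old  = words[i / bits].fetch_or(mask, std::memory_order_acq_rel);
    return (old & mask) == 0;      // non-zero: another thread won the race
  }
};
// Usage mirrors the closure: if (bm.par_mark(bit_index(addr))) work_queue.push(obj);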
@@ -6565,7 +6572,7 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
       // queue to an appropriate length by applying this closure to
       // the oops in the oops popped from the stack (i.e. blacken the
       // grey objects)
-      bool res = _work_queue->push(this_oop);
+      bool res = _work_queue->push(obj);
       assert(res, "Low water mark should be less than capacity?");
       trim_queue(_low_water_mark);
     } // Else, another thread claimed the object

@@ -6573,6 +6580,9 @@ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
   }
 }
 
+void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
 // This closure is used to rescan the marked objects on the dirty cards
 // in the mod union table and the card table proper.
 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(

@@ -6675,8 +6685,8 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
+                   ConcurrentMarkSweepThread::should_yield() &&
+                   !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
     ConcurrentMarkSweepThread::acknowledge_yield_request();
   }

@@ -6928,13 +6938,13 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
   assert(_markStack->isEmpty(),
          "should drain stack to limit stack usage");
   // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
+  oop obj = oop(ptr);
   // Ignore mark word in verification below, since we
   // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
+  assert(obj->is_oop(true), "should be an oop");
   assert(_finger <= ptr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
+  _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
   // the marking phase (especially if running iCMS). During

@@ -6980,7 +6990,7 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
                          _span, _bitMap, _markStack,
                          _revisitStack,
                          _finger, this);
-  bool res = _markStack->push(this_oop);
+  bool res = _markStack->push(obj);
   assert(res, "Empty non-zero size stack should have space for single push");
   while (!_markStack->isEmpty()) {
     oop new_oop = _markStack->pop();

@@ -7052,13 +7062,13 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
   assert(_work_queue->size() == 0,
          "should drain stack to limit stack usage");
   // convert ptr to an oop preparatory to scanning
-  oop this_oop = oop(ptr);
+  oop obj = oop(ptr);
   // Ignore mark word in verification below, since we
   // may be running concurrent with mutators.
-  assert(this_oop->is_oop(true), "should be an oop");
+  assert(obj->is_oop(true), "should be an oop");
   assert(_finger <= ptr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = ptr + this_oop->size();
+  _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
   // the marking phase (especially if running iCMS). During

@@ -7106,7 +7116,7 @@ void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
                                  _revisit_stack,
                                  _finger,
                                  gfa, this);
-  bool res = _work_queue->push(this_oop); // overflow could occur here
+  bool res = _work_queue->push(obj); // overflow could occur here
   assert(res, "Will hold once we use workqueues");
   while (true) {
     oop new_oop;

@@ -7176,15 +7186,15 @@ void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
   assert(_mark_stack->isEmpty(),
          "should drain stack to limit stack usage");
   // convert addr to an oop preparatory to scanning
-  oop this_oop = oop(addr);
-  assert(this_oop->is_oop(), "should be an oop");
+  oop obj = oop(addr);
+  assert(obj->is_oop(), "should be an oop");
   assert(_finger <= addr, "_finger runneth ahead");
   // advance the finger to right end of this object
-  _finger = addr + this_oop->size();
+  _finger = addr + obj->size();
   assert(_finger > addr, "we just incremented it above");
   // Note: the finger doesn't advance while we drain
   // the stack below.
-  bool res = _mark_stack->push(this_oop);
+  bool res = _mark_stack->push(obj);
   assert(res, "Empty non-zero size stack should have space for single push");
   while (!_mark_stack->isEmpty()) {
     oop new_oop = _mark_stack->pop();
@@ -7207,6 +7217,8 @@ PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
   _mark_stack(mark_stack)
 { }
 
+void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
+void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
+
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded

@@ -7219,20 +7231,20 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
   _mark_stack->expand(); // expand the stack if possible
 }
 
-void PushAndMarkVerifyClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+void PushAndMarkVerifyClosure::do_oop(oop obj) {
+  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     _verification_bm->mark(addr);            // now grey
     if (!_cms_bm->isMarked(addr)) {
       oop(addr)->print();
-      gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
+      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
+                             addr);
       fatal("... aborting");
     }
 
-    if (!_mark_stack->push(this_oop)) { // stack overflow
+    if (!_mark_stack->push(obj)) { // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                SIZE_FORMAT, _mark_stack->capacity());

@@ -7285,7 +7297,6 @@ Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
   _should_remember_klasses(collector->should_unload_classes())
 { }
 
-
 void CMSCollector::lower_restart_addr(HeapWord* low) {
   assert(_span.contains(low), "Out of bounds addr");
   if (_restart_addr == NULL) {

@@ -7321,12 +7332,10 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   _overflow_stack->expand(); // expand the stack if possible
 }
 
-
-void PushOrMarkClosure::do_oop(oop* p) {
-  oop thisOop = *p;
+void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)thisOop;
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     _bitMap->mark(addr);          // now grey

@@ -7342,7 +7351,7 @@ void PushOrMarkClosure::do_oop(oop* p) {
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
+    if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                SIZE_FORMAT, _markStack->capacity());

@@ -7358,11 +7367,13 @@ void PushOrMarkClosure::do_oop(oop* p) {
   }
 }
 
-void Par_PushOrMarkClosure::do_oop(oop* p) {
-  oop this_oop = *p;
+void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
+void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
+
+void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
     // We read the global_finger (volatile read) strictly after marking oop

@@ -7391,7 +7402,7 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
       }
     )
     if (simulate_overflow ||
-        !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
+        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
       // stack overflow
       if (PrintCMSStatistics != 0) {
         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "

@@ -7408,6 +7419,8 @@ void Par_PushOrMarkClosure::do_oop(oop* p) {
   }
 }
 
+void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
+void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
@@ -7432,16 +7445,11 @@ PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
 
 // Grey object rescan during pre-cleaning and second checkpoint phases --
 // the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  // Ignore mark word verification. If during concurrent precleaning
-  // the object monitor may be locked. If during the checkpoint
-  // phases, the object may already have been reached by a different
-  // path and may be at the end of the global overflow list (so
-  // the mark word may be NULL).
-  assert(this_oop->is_oop_or_null(true/* ignore mark word */),
+void PushAndMarkClosure::do_oop(oop obj) {
+  // If _concurrent_precleaning, ignore mark word verification
+  assert(obj->is_oop_or_null(_concurrent_precleaning),
          "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {

@@ -7456,7 +7464,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
+    if (simulate_overflow || !_mark_stack->push(obj)) {
       if (_concurrent_precleaning) {
         // During precleaning we can just dirty the appropriate card
         // in the mod union table, thus ensuring that the object remains

@@ -7468,7 +7476,7 @@ void PushAndMarkClosure::do_oop(oop* p) {
       } else {
         // During the remark phase, we need to remember this oop
         // in the overflow list.
-        _collector->push_on_overflow_list(this_oop);
+        _collector->push_on_overflow_list(obj);
         _collector->_ser_pmc_remark_ovflw++;
       }
     }

@@ -7492,10 +7500,12 @@ Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
 
+void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
+void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
+
 // Grey object rescan during second checkpoint phase --
 // the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop* p) {
-  oop this_oop = *p;
+void Par_PushAndMarkClosure::do_oop(oop obj) {
   // In the assert below, we ignore the mark word because
   // this oop may point to an already visited object that is
   // on the overflow stack (in which case the mark word has

@@ -7507,9 +7517,9 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
   // value, by the time we get to examined this failing assert in
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
-  assert(this_oop->is_oop_or_null(true),
+  assert(obj->is_oop_or_null(true),
          "expected an oop or NULL");
-  HeapWord* addr = (HeapWord*)this_oop;
+  HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {

@@ -7527,14 +7537,17 @@ void Par_PushAndMarkClosure::do_oop(oop* p) {
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_work_queue->push(this_oop)) {
-      _collector->par_push_on_overflow_list(this_oop);
+    if (simulate_overflow || !_work_queue->push(obj)) {
+      _collector->par_push_on_overflow_list(obj);
       _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
     }
   } // Else, some other thread got there first
 }
 
+void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
+void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+
 void PushAndMarkClosure::remember_klass(Klass* k) {
   if (!_revisit_stack->push(oop(k))) {
     fatal("Revisit stack overflowed in PushAndMarkClosure");
@@ -8228,9 +8241,8 @@ bool CMSIsAliveClosure::do_object_b(oop obj) {
 }
 
 // CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSKeepAliveClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     _bit_map->mark(addr);

@@ -8242,26 +8254,28 @@ void CMSKeepAliveClosure::do_oop(oop* p) {
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_mark_stack->push(this_oop)) {
-      _collector->push_on_overflow_list(this_oop);
+    if (simulate_overflow || !_mark_stack->push(obj)) {
+      _collector->push_on_overflow_list(obj);
       _collector->_ser_kac_ovflw++;
     }
   }
 }
 
+void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
+void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+
 // CMSParKeepAliveClosure: a parallel version of the above.
 // The work queues are private to each closure (thread),
 // but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSParKeepAliveClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     // In general, during recursive tracing, several threads
     // may be concurrently getting here; the first one to
     // "tag" it, claims it.
     if (_bit_map->par_mark(addr)) {
-      bool res = _work_queue->push(this_oop);
+      bool res = _work_queue->push(obj);
       assert(res, "Low water mark should be much less than capacity");
       // Do a recursive trim in the hope that this will keep
       // stack usage lower, but leave some oops for potential stealers

@@ -8270,6 +8284,9 @@ void CMSParKeepAliveClosure::do_oop(oop* p) {
   }
 }
 
+void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
+void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+
 void CMSParKeepAliveClosure::trim_queue(uint max) {
   while (_work_queue->size() > max) {
     oop new_oop;

@@ -8285,9 +8302,8 @@ void CMSParKeepAliveClosure::trim_queue(uint max) {
   }
 }
 
-void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
-  oop this_oop = *p;
-  HeapWord* addr = (HeapWord*)this_oop;
+void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
       !_bit_map->isMarked(addr)) {
     if (_bit_map->par_mark(addr)) {

@@ -8299,14 +8315,17 @@ void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
         simulate_overflow = true;
       }
     )
-    if (simulate_overflow || !_work_queue->push(this_oop)) {
-      _collector->par_push_on_overflow_list(this_oop);
+    if (simulate_overflow || !_work_queue->push(obj)) {
+      _collector->par_push_on_overflow_list(obj);
       _collector->_par_kac_ovflw++;
     }
   } // Else another thread got there already
 }
 
+void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+
 //////////////////////////////////////////////////////////////////
 //  CMSExpansionCause                /////////////////////////////
 //////////////////////////////////////////////////////////////////

@@ -8337,12 +8356,12 @@ void CMSDrainMarkingStackClosure::do_void() {
   while (!_mark_stack->isEmpty() ||
          // if stack is empty, check the overflow list
          _collector->take_from_overflow_list(num, _mark_stack)) {
-    oop this_oop = _mark_stack->pop();
-    HeapWord* addr = (HeapWord*)this_oop;
+    oop obj = _mark_stack->pop();
+    HeapWord* addr = (HeapWord*)obj;
     assert(_span.contains(addr), "Should be within span");
     assert(_bit_map->isMarked(addr), "Should be marked");
-    assert(this_oop->is_oop(), "Should be an oop");
-    this_oop->oop_iterate(_keep_alive);
+    assert(obj->is_oop(), "Should be an oop");
+    obj->oop_iterate(_keep_alive);
   }
 }
@@ -1138,7 +1138,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Allocation support
   HeapWord* allocate(size_t size, bool tlab);
   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
-  oop       promote(oop obj, size_t obj_size, oop* ref);
+  oop       promote(oop obj, size_t obj_size);
   HeapWord* par_allocate(size_t size, bool tlab) {
     return allocate(size, tlab);
   }

@@ -1301,9 +1301,8 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
 // This closure is used to check that a certain set of oops is empty.
 class FalseClosure: public OopClosure {
  public:
-  void do_oop(oop* p) {
-    guarantee(false, "Should be an empty set");
-  }
+  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
+  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
 };
 
 // This closure is used to do concurrent marking from the roots

@@ -1380,6 +1379,12 @@ class PushAndMarkVerifyClosure: public OopClosure {
   CMSBitMap*    _verification_bm;
   CMSBitMap*    _cms_bm;
   CMSMarkStack* _mark_stack;
+ protected:
+  void do_oop(oop p);
+  template <class T> inline void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    do_oop(obj);
+  }
  public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,

@@ -1387,6 +1392,7 @@ class PushAndMarkVerifyClosure: public OopClosure {
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
   void do_oop(oop* p);
+  void do_oop(narrowOop* p);
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
 };