6420645: Create a VM that uses compressed oops for up to 32GB heap sizes

Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv

Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
This commit is contained in:
Coleen Phillimore 2008-04-13 17:43:42 -04:00
parent 680ecf1611
commit 4a831d45f0
273 changed files with 6585 additions and 2993 deletions

View file

@ -29,22 +29,34 @@ class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
template<class E> class GenericTaskQueue;
typedef GenericTaskQueue<oop> OopTaskQueue;
template<class E> class GenericTaskQueueSet;
typedef GenericTaskQueueSet<oop> OopTaskQueueSet;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;
// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
void do_oop(oop obj); \
template <class T> inline void do_oop_work(T* p) { \
T heap_oop = oopDesc::load_heap_oop(p); \
if (!oopDesc::is_null(heap_oop)) { \
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
do_oop(obj); \
} \
}
class MarkRefsIntoClosure: public OopsInGenClosure {
const MemRegion _span;
CMSBitMap* _bitMap;
const bool _should_do_nmethods;
private:
const MemRegion _span;
CMSBitMap* _bitMap;
const bool _should_do_nmethods;
protected:
DO_OOP_WORK_DEFN
public:
MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap,
bool should_do_nmethods);
void do_oop(oop* p);
void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@ -57,15 +69,20 @@ class MarkRefsIntoClosure: public OopsInGenClosure {
// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
const MemRegion _span;
CMSBitMap* _verification_bm;
CMSBitMap* _cms_bm;
const bool _should_do_nmethods;
private:
const MemRegion _span;
CMSBitMap* _verification_bm;
CMSBitMap* _cms_bm;
const bool _should_do_nmethods;
protected:
DO_OOP_WORK_DEFN
public:
MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
CMSBitMap* cms_bm, bool should_do_nmethods);
void do_oop(oop* p);
void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const {
return _should_do_nmethods;
@ -75,37 +92,40 @@ class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
}
};
// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public OopClosure {
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSBitMap* _mod_union_table;
CMSMarkStack* _mark_stack;
CMSMarkStack* _revisit_stack;
bool _concurrent_precleaning;
bool const _should_remember_klasses;
private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSBitMap* _mod_union_table;
CMSMarkStack* _mark_stack;
CMSMarkStack* _revisit_stack;
bool _concurrent_precleaning;
bool const _should_remember_klasses;
protected:
DO_OOP_WORK_DEFN
public:
PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
bool concurrent_precleaning);
void do_oop(oop* p);
void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop(p); }
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
bool concurrent_precleaning);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
const bool should_remember_klasses() const {
virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
void remember_klass(Klass* k);
virtual void remember_klass(Klass* k);
};
// In the parallel case, the revisit stack, the bit map and the
@ -115,12 +135,15 @@ class PushAndMarkClosure: public OopClosure {
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public OopClosure {
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
CMSMarkStack* _revisit_stack;
bool const _should_remember_klasses;
private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
CMSMarkStack* _revisit_stack;
bool const _should_remember_klasses;
protected:
DO_OOP_WORK_DEFN
public:
Par_PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
@ -128,43 +151,48 @@ class Par_PushAndMarkClosure: public OopClosure {
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
void do_oop(oop* p);
void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
bool do_header() { return true; }
Prefetch::style prefetch_style() {
return Prefetch::do_read;
}
const bool should_remember_klasses() const {
virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
void remember_klass(Klass* k);
virtual void remember_klass(Klass* k);
};
// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _mark_stack;
PushAndMarkClosure _pushAndMarkClosure;
CMSCollector* _collector;
bool _yield;
private:
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _mark_stack;
PushAndMarkClosure _pushAndMarkClosure;
CMSCollector* _collector;
Mutex* _freelistLock;
bool _yield;
// Whether closure is being used for concurrent precleaning
bool _concurrent_precleaning;
Mutex* _freelistLock;
bool _concurrent_precleaning;
protected:
DO_OOP_WORK_DEFN
public:
MarkRefsIntoAndScanClosure(MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
CMSBitMap* mod_union_table,
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
CMSCollector* collector,
bool should_yield,
bool concurrent_precleaning);
void do_oop(oop* p);
void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@ -185,11 +213,14 @@ class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
const uint _low_water_mark;
Par_PushAndMarkClosure _par_pushAndMarkClosure;
private:
MemRegion _span;
CMSBitMap* _bit_map;
OopTaskQueue* _work_queue;
const uint _low_water_mark;
Par_PushAndMarkClosure _par_pushAndMarkClosure;
protected:
DO_OOP_WORK_DEFN
public:
Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
MemRegion span,
@ -197,8 +228,10 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* revisit_stack);
void do_oop(oop* p);
void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
bool do_header() { return true; }
virtual const bool do_nmethods() const { return true; }
Prefetch::style prefetch_style() {
@ -211,28 +244,34 @@ class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public OopClosure {
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bitMap;
CMSMarkStack* _markStack;
CMSMarkStack* _revisitStack;
HeapWord* const _finger;
MarkFromRootsClosure* const _parent;
bool const _should_remember_klasses;
private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bitMap;
CMSMarkStack* _markStack;
CMSMarkStack* _revisitStack;
HeapWord* const _finger;
MarkFromRootsClosure* const _parent;
bool const _should_remember_klasses;
protected:
DO_OOP_WORK_DEFN
public:
PushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bitMap,
CMSMarkStack* markStack,
CMSMarkStack* revisitStack,
HeapWord* finger,
CMSMarkStack* markStack,
CMSMarkStack* revisitStack,
HeapWord* finger,
MarkFromRootsClosure* parent);
void do_oop(oop* p);
void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop(p); }
const bool should_remember_klasses() const {
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
void remember_klass(Klass* k);
virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@ -244,6 +283,7 @@ class PushOrMarkClosure: public OopClosure {
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public OopClosure {
private:
CMSCollector* _collector;
MemRegion _whole_span;
MemRegion _span; // local chunk
@ -253,24 +293,29 @@ class Par_PushOrMarkClosure: public OopClosure {
CMSMarkStack* _revisit_stack;
HeapWord* const _finger;
HeapWord** const _global_finger_addr;
Par_MarkFromRootsClosure* const _parent;
bool const _should_remember_klasses;
Par_MarkFromRootsClosure* const _parent;
bool const _should_remember_klasses;
protected:
DO_OOP_WORK_DEFN
public:
Par_PushOrMarkClosure(CMSCollector* cms_collector,
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent);
void do_oop(oop* p);
void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop(p); }
const bool should_remember_klasses() const {
MemRegion span,
CMSBitMap* bit_map,
OopTaskQueue* work_queue,
CMSMarkStack* mark_stack,
CMSMarkStack* revisit_stack,
HeapWord* finger,
HeapWord** global_finger_addr,
Par_MarkFromRootsClosure* parent);
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
virtual const bool should_remember_klasses() const {
return _should_remember_klasses;
}
void remember_klass(Klass* k);
virtual void remember_klass(Klass* k);
// Deal with a stack overflow condition
void handle_stack_overflow(HeapWord* lost);
private:
@ -282,10 +327,13 @@ class Par_PushOrMarkClosure: public OopClosure {
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step.
class CMSKeepAliveClosure: public OopClosure {
private:
CMSCollector* _collector;
MemRegion _span;
CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map;
protected:
DO_OOP_WORK_DEFN
public:
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack):
@ -293,16 +341,20 @@ class CMSKeepAliveClosure: public OopClosure {
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack) { }
void do_oop(oop* p);
void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
class CMSInnerParMarkAndPushClosure: public OopClosure {
private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
protected:
DO_OOP_WORK_DEFN
public:
CMSInnerParMarkAndPushClosure(CMSCollector* collector,
MemRegion span, CMSBitMap* bit_map,
@ -311,24 +363,32 @@ class CMSInnerParMarkAndPushClosure: public OopClosure {
_span(span),
_bit_map(bit_map),
_work_queue(work_queue) { }
void do_oop(oop* p);
void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public OopClosure {
private:
CMSCollector* _collector;
MemRegion _span;
OopTaskQueue* _work_queue;
CMSBitMap* _bit_map;
CMSInnerParMarkAndPushClosure _mark_and_push;
CMSInnerParMarkAndPushClosure _mark_and_push;
const uint _low_water_mark;
void trim_queue(uint max);
protected:
DO_OOP_WORK_DEFN
public:
CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, OopTaskQueue* work_queue);
void do_oop(oop* p);
void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop(p); }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};