Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-23 20:44:41 +02:00
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
This commit is contained in:
parent 680ecf1611
commit 4a831d45f0

273 changed files with 6585 additions and 2993 deletions
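Note on the title: the 32 GB limit follows directly from the encoding this commit introduces. A compressed oop (narrowOop) is a 32-bit, alignment-scaled offset from a fixed heap base, so with 8-byte object alignment it can reach 2^32 * 8 bytes = 32 GB. A minimal standalone sketch of that arithmetic, with hypothetical names standing in for the VM's internals:

#include <cassert>
#include <cstdint>

typedef uint32_t narrowOop;                      // compressed reference: 32 bits

static char* heap_base;                          // base of the reserved heap (set at startup)
static const int LogMinObjAlignmentInBytes = 3;  // objects are 8-byte aligned

// Compress: store the alignment-scaled offset from the heap base.
inline narrowOop encode_heap_oop_not_null(void* obj) {
  uint64_t offset = (uint64_t)((char*)obj - heap_base);
  assert((offset & 0x7) == 0 && "objects must be 8-byte aligned");
  return (narrowOop)(offset >> LogMinObjAlignmentInBytes);
}

// Decompress: shift back and rebase. 2^32 slots * 8 bytes = 32 GB of reach.
inline void* decode_heap_oop_not_null(narrowOop v) {
  return heap_base + ((uint64_t)v << LogMinObjAlignmentInBytes);
}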
@@ -28,17 +28,16 @@
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
PSYoungGen* _young_gen;
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;
HeapWord* _unmarked_addr;
jbyte* _unmarked_card;
HeapWord* _unmarked_addr;
jbyte* _unmarked_card;

public:
CheckForUnmarkedOops( PSYoungGen* young_gen, CardTableExtension* card_table ) :
_young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

virtual void do_oop(oop* p) {
if (_young_gen->is_in_reserved(*p) &&
protected:
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (_young_gen->is_in_reserved(obj) &&
!_card_table->addr_is_marked_imprecise(p)) {
// Don't overwrite the first missing card mark
if (_unmarked_addr == NULL) {

@@ -48,6 +47,13 @@ class CheckForUnmarkedOops : public OopClosure {
}
}

public:
CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
_young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

bool has_unmarked_oop() {
return _unmarked_addr != NULL;
}

@@ -56,7 +62,8 @@ class CheckForUnmarkedOops : public OopClosure {
// Checks all objects for the existance of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
PSYoungGen* _young_gen;
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;

public:

@@ -75,7 +82,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
// we test for missing precise marks first. If any are found, we don't
// fail unless the object head is also unmarked.
virtual void do_object(oop obj) {
CheckForUnmarkedOops object_check( _young_gen, _card_table );
CheckForUnmarkedOops object_check(_young_gen, _card_table);
obj->oop_iterate(&object_check);
if (object_check.has_unmarked_oop()) {
assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");

@@ -85,19 +92,25 @@ class CheckForUnmarkedObjects : public ObjectClosure {

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
PSYoungGen* _young_gen;
private:
PSYoungGen* _young_gen;
CardTableExtension* _card_table;

protected:
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
if (_young_gen->is_in_reserved(obj)) {
assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
_card_table->set_card_newgen(p);
}
}

public:
CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
_young_gen(young_gen), _card_table(card_table) { }

virtual void do_oop(oop* p) {
if (_young_gen->is_in_reserved(*p)) {
assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
_card_table->set_card_newgen(p);
}
}
virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
@@ -80,7 +80,7 @@ class CardTableExtension : public CardTableModRefBS {
static bool card_is_verify(int value) { return value == verify_card; }

// Card marking
void inline_write_ref_field_gc(oop* field, oop new_val) {
void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field);
*byte = youngergen_card;
}
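The hunk above widens inline_write_ref_field_gc from oop* to void* because card marking depends only on the address of the updated slot, not on its width; a narrowOop* and an oop* slot fall on the same card. A sketch of the underlying card lookup, with hypothetical names (HotSpot's real table biases the base so the shift can be applied to raw addresses):

#include <cstdint>

typedef signed char jbyte;

static jbyte* byte_map_base;            // hypothetical biased card-table base
static const int card_shift = 9;        // 2^9 = 512-byte cards

// Any field address, whatever the slot width, maps to exactly one card byte.
inline jbyte* byte_for(const void* field) {
  return &byte_map_base[(uintptr_t)field >> card_shift];
}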
@@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
@@ -33,8 +33,8 @@ const int PREFETCH_QUEUE_SIZE = 8;

class PrefetchQueue : public CHeapObj {
private:
oop* _prefetch_queue[PREFETCH_QUEUE_SIZE];
unsigned int _prefetch_index;
void* _prefetch_queue[PREFETCH_QUEUE_SIZE];
uint _prefetch_index;

public:
int length() { return PREFETCH_QUEUE_SIZE; }

@@ -46,20 +46,21 @@ class PrefetchQueue : public CHeapObj {
_prefetch_index = 0;
}

inline oop* push_and_pop(oop* p) {
Prefetch::write((*p)->mark_addr(), 0);
template <class T> inline void* push_and_pop(T* p) {
oop o = oopDesc::load_decode_heap_oop_not_null(p);
Prefetch::write(o->mark_addr(), 0);
// This prefetch is intended to make sure the size field of array
// oops is in cache. It assumes the the object layout is
// mark -> klass -> size, and that mark and klass are heapword
// sized. If this should change, this prefetch will need updating!
Prefetch::write((*p)->mark_addr() + (HeapWordSize*2), 0);
Prefetch::write(o->mark_addr() + (HeapWordSize*2), 0);
_prefetch_queue[_prefetch_index++] = p;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
}

// Stores a NULL pointer in the pop'd location.
inline oop* pop() {
inline void* pop() {
_prefetch_queue[_prefetch_index++] = NULL;
_prefetch_index &= (PREFETCH_QUEUE_SIZE-1);
return _prefetch_queue[_prefetch_index];
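push_and_pop above is a fixed-size ring buffer: pushing a slot issues prefetches for the object it references and returns the slot pushed PREFETCH_QUEUE_SIZE-1 calls earlier, whose prefetch has had time to complete. A self-contained sketch of just the ring mechanics (prefetch calls elided), assuming a power-of-two size:

static const unsigned QUEUE_SIZE = 8;   // must be a power of two for the mask

class RingQueue {
  void*    _slots[QUEUE_SIZE] = {};     // starts out all null
  unsigned _index = 0;
public:
  // Returns the entry pushed QUEUE_SIZE-1 calls ago, or null while filling.
  void* push_and_pop(void* p) {
    // (the real code issues Prefetch::write for the referenced object here)
    _slots[_index++] = p;
    _index &= (QUEUE_SIZE - 1);         // branch-free wraparound
    return _slots[_index];
  }
};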
@@ -168,7 +168,7 @@ void PSMarkSweepDecorator::precompact() {
start_array->allocate_block(compact_top);
}

debug_only(MarkSweep::register_live_oop(oop(q), size));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
compact_top += size;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");

@@ -234,7 +234,7 @@ void PSMarkSweepDecorator::precompact() {
start_array->allocate_block(compact_top);
}

debug_only(MarkSweep::register_live_oop(oop(q), sz));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
compact_top += sz;
assert(compact_top <= dest->space()->end(),
"Exceeding space in destination");

@@ -326,15 +326,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
HeapWord* end = _first_dead;

while (q < end) {
debug_only(MarkSweep::track_interior_pointers(oop(q)));

VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();

debug_only(MarkSweep::check_interior_pointers());

debug_only(MarkSweep::validate_live_oop(oop(q), size));

VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
q += size;
}

@@ -354,11 +350,11 @@ void PSMarkSweepDecorator::adjust_pointers() {
Prefetch::write(q, interval);
if (oop(q)->is_gc_marked()) {
// q is alive
debug_only(MarkSweep::track_interior_pointers(oop(q)));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
// point all the oops to the new location
size_t size = oop(q)->adjust_pointers();
debug_only(MarkSweep::check_interior_pointers());
debug_only(MarkSweep::validate_live_oop(oop(q), size));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
debug_only(prev_q = q);
q += size;
} else {

@@ -392,7 +388,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
while (q < end) {
size_t size = oop(q)->size();
assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
debug_only(MarkSweep::live_oop_moved_to(q, size, q));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
debug_only(prev_q = q);
q += size;
}

@@ -427,7 +423,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
Prefetch::write(compaction_top, copy_interval);

// copy object and reinit its mark
debug_only(MarkSweep::live_oop_moved_to(q, size, compaction_top));
VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
assert(q != compaction_top, "everything in this pass should be moving");
Copy::aligned_conjoint_words(q, compaction_top, size);
oop(compaction_top)->init_mark();
@@ -81,14 +81,14 @@ bool PSParallelCompact::_dwl_initialized = false;
#endif // #ifdef ASSERT

#ifdef VALIDATE_MARK_SWEEP
GrowableArray<oop*>* PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<void*>* PSParallelCompact::_root_refs_stack = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops = NULL;
GrowableArray<oop> * PSParallelCompact::_live_oops_moved_to = NULL;
GrowableArray<size_t>* PSParallelCompact::_live_oops_size = NULL;
size_t PSParallelCompact::_live_oops_index = 0;
size_t PSParallelCompact::_live_oops_index_at_perm = 0;
GrowableArray<oop*>* PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<oop*>* PSParallelCompact::_adjusted_pointers = NULL;
GrowableArray<void*>* PSParallelCompact::_other_refs_stack = NULL;
GrowableArray<void*>* PSParallelCompact::_adjusted_pointers = NULL;
bool PSParallelCompact::_pointer_tracking = false;
bool PSParallelCompact::_root_tracking = true;

@@ -811,46 +811,23 @@ ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

void PSParallelCompact::IsAliveClosure::do_object(oop p) { ShouldNotReachHere(); }
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }

PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);

void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) {
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
if (!Universe::heap()->is_in_reserved(p)) {
_root_refs_stack->push(p);
} else {
_other_refs_stack->push(p);
}
}
#endif
mark_and_push(_compaction_manager, p);
}
void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
oop* p) {
assert(Universe::heap()->is_in_reserved(p),
"we should only be traversing objects here");
oop m = *p;
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
if (mark_obj(m)) {
m->follow_contents(cm); // Follow contents of the marked object
}
}
}
void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }

// Anything associated with this variable is temporary.

void PSParallelCompact::mark_and_push_internal(ParCompactionManager* cm,
oop* p) {
// Push marked object, contents will be followed later
oop m = *p;
if (mark_obj(m)) {
// This thread marked the object and
// owns the subsequent processing of it.
cm->save_for_scanning(m);
}
}
void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }

void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = gc_heap();

@@ -2751,23 +2728,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
young_gen->move_and_update(cm);
}

void PSParallelCompact::follow_root(ParCompactionManager* cm, oop* p) {
assert(!Universe::heap()->is_in_reserved(p),
"roots shouldn't be things within the heap");
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
guarantee(!_root_refs_stack->contains(p), "should only be in here once");
_root_refs_stack->push(p);
}
#endif
oop m = *p;
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
if (mark_obj(m)) {
m->follow_contents(cm); // Follow contents of the marked object
}
}
follow_stack(cm);
}

void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
while(!cm->overflow_stack()->is_empty()) {

@@ -2807,7 +2767,7 @@ PSParallelCompact::revisit_weak_klass_link(ParCompactionManager* cm, Klass* k) {

#ifdef VALIDATE_MARK_SWEEP

void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot) {
void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
if (!ValidateMarkSweep)
return;

@@ -2821,7 +2781,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
if (index != -1) {
int l = _root_refs_stack->length();
if (l > 0 && l - 1 != index) {
oop* last = _root_refs_stack->pop();
void* last = _root_refs_stack->pop();
assert(last != p, "should be different");
_root_refs_stack->at_put(index, last);
} else {

@@ -2832,7 +2792,7 @@ void PSParallelCompact::track_adjusted_pointer(oop* p, oop newobj, bool isroot)
}


void PSParallelCompact::check_adjust_pointer(oop* p) {
void PSParallelCompact::check_adjust_pointer(void* p) {
_adjusted_pointers->push(p);
}

@@ -2840,7 +2800,8 @@ void PSParallelCompact::check_adjust_pointer(oop* p) {
class AdjusterTracker: public OopClosure {
public:
AdjusterTracker() {};
void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
void do_oop(oop* o) { PSParallelCompact::check_adjust_pointer(o); }
void do_oop(narrowOop* o) { PSParallelCompact::check_adjust_pointer(o); }
};

@@ -2948,25 +2909,6 @@ void PSParallelCompact::print_new_location_of_heap_address(HeapWord* q) {
}
#endif //VALIDATE_MARK_SWEEP

void PSParallelCompact::adjust_pointer(oop* p, bool isroot) {
oop obj = *p;
VALIDATE_MARK_SWEEP_ONLY(oop saved_new_pointer = NULL);
if (obj != NULL) {
oop new_pointer = (oop) summary_data().calc_new_pointer(obj);
assert(new_pointer != NULL || // is forwarding ptr?
obj->is_shared(), // never forwarded?
"should have a new location");
// Just always do the update unconditionally?
if (new_pointer != NULL) {
*p = new_pointer;
assert(Universe::heap()->is_in_reserved(new_pointer),
"should be in object space");
VALIDATE_MARK_SWEEP_ONLY(saved_new_pointer = new_pointer);
}
}
VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, saved_new_pointer, isroot));
}

// Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
void
PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
@@ -80,11 +80,11 @@ public:
static const size_t ChunkSize;
static const size_t ChunkSizeBytes;

// Mask for the bits in a size_t to get an offset within a chunk.
// Mask for the bits in a size_t to get an offset within a chunk.
static const size_t ChunkSizeOffsetMask;
// Mask for the bits in a pointer to get an offset within a chunk.
// Mask for the bits in a pointer to get an offset within a chunk.
static const size_t ChunkAddrOffsetMask;
// Mask for the bits in a pointer to get the address of the start of a chunk.
// Mask for the bits in a pointer to get the address of the start of a chunk.
static const size_t ChunkAddrMask;

static const size_t Log2BlockSize;

@@ -229,7 +229,7 @@ public:
// 1 bit marks the end of an object.
class BlockData
{
public:
public:
typedef short int blk_ofs_t;

blk_ofs_t offset() const { return _offset >= 0 ? _offset : -_offset; }

@@ -269,7 +269,7 @@ public:
return !_first_is_start_bit;
}

private:
private:
blk_ofs_t _offset;
// This is temporary until the mark_bitmap is separated into
// a start bit array and an end bit array.

@@ -277,7 +277,7 @@ public:
#ifdef ASSERT
short _set_phase;
static short _cur_phase;
public:
public:
static void set_cur_phase(short v) { _cur_phase = v; }
#endif
};

@@ -729,48 +729,51 @@ class PSParallelCompact : AllStatic {
} SpaceId;

public:
// In line closure decls
// Inline closure decls
//

class IsAliveClosure: public BoolObjectClosure {
public:
void do_object(oop p) { assert(false, "don't call"); }
bool do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
virtual void do_object(oop p);
virtual bool do_object_b(oop p);
};

class KeepAliveClosure: public OopClosure {
private:
ParCompactionManager* _compaction_manager;
protected:
template <class T> inline void do_oop_work(T* p);
public:
KeepAliveClosure(ParCompactionManager* cm) {
_compaction_manager = cm;
}
void do_oop(oop* p);
KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};

class FollowRootClosure: public OopsInGenClosure{
// Current unused
class FollowRootClosure: public OopsInGenClosure {
private:
ParCompactionManager* _compaction_manager;
public:
FollowRootClosure(ParCompactionManager* cm) {
_compaction_manager = cm;
}
void do_oop(oop* p) { follow_root(_compaction_manager, p); }
FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};

class FollowStackClosure: public VoidClosure {
private:
ParCompactionManager* _compaction_manager;
public:
FollowStackClosure(ParCompactionManager* cm) {
_compaction_manager = cm;
}
void do_void() { follow_stack(_compaction_manager); }
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_void();
};

class AdjustPointerClosure: public OopsInGenClosure {
private:
bool _is_root;
public:
AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
void do_oop(oop* p) { adjust_pointer(p, _is_root); }
AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
};

// Closure for verifying update of pointers. Does not

@@ -805,8 +808,6 @@ class PSParallelCompact : AllStatic {
friend class instanceKlassKlass;
friend class RefProcTaskProxy;

static void mark_and_push_internal(ParCompactionManager* cm, oop* p);

private:
static elapsedTimer _accumulated_time;
static unsigned int _total_invocations;

@@ -838,9 +839,9 @@ class PSParallelCompact : AllStatic {

private:
// Closure accessors
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }

static void initialize_space_info();

@@ -859,10 +860,11 @@ class PSParallelCompact : AllStatic {
static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(ParCompactionManager* cm);

static void adjust_pointer(oop* p, bool is_root);
template <class T> static inline void adjust_pointer(T* p, bool is_root);
static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }

static void follow_root(ParCompactionManager* cm, oop* p);
template <class T>
static inline void follow_root(ParCompactionManager* cm, T* p);

// Compute the dense prefix for the designated space. This is an experimental
// implementation currently not used in production.

@@ -971,14 +973,14 @@ class PSParallelCompact : AllStatic {

protected:
#ifdef VALIDATE_MARK_SWEEP
static GrowableArray<oop*>* _root_refs_stack;
static GrowableArray<void*>* _root_refs_stack;
static GrowableArray<oop> * _live_oops;
static GrowableArray<oop> * _live_oops_moved_to;
static GrowableArray<size_t>* _live_oops_size;
static size_t _live_oops_index;
static size_t _live_oops_index_at_perm;
static GrowableArray<oop*>* _other_refs_stack;
static GrowableArray<oop*>* _adjusted_pointers;
static GrowableArray<void*>* _other_refs_stack;
static GrowableArray<void*>* _adjusted_pointers;
static bool _pointer_tracking;
static bool _root_tracking;

@@ -999,12 +1001,12 @@ class PSParallelCompact : AllStatic {

public:
class MarkAndPushClosure: public OopClosure {
private:
ParCompactionManager* _compaction_manager;
public:
MarkAndPushClosure(ParCompactionManager* cm) {
_compaction_manager = cm;
}
void do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual const bool do_nmethods() const { return true; }
};

@@ -1038,21 +1040,9 @@ class PSParallelCompact : AllStatic {

// Marking support
static inline bool mark_obj(oop obj);
static bool mark_obj(oop* p) {
if (*p != NULL) {
return mark_obj(*p);
} else {
return false;
}
}
static void mark_and_push(ParCompactionManager* cm, oop* p) {
// Check mark and maybe push on
// marking stack
oop m = *p;
if (m != NULL && mark_bitmap()->is_unmarked(m)) {
mark_and_push_internal(cm, p);
}
}
// Check mark and maybe push on marking stack
template <class T> static inline void mark_and_push(ParCompactionManager* cm,
T* p);

// Compaction support.
// Return true if p is in the range [beg_addr, end_addr).

@@ -1127,13 +1117,17 @@ class PSParallelCompact : AllStatic {
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);

// Mark pointer and follow contents.
static void mark_and_follow(ParCompactionManager* cm, oop* p);
template <class T>
static inline void mark_and_follow(ParCompactionManager* cm, T* p);

static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
static ParallelCompactData& summary_data() { return _summary_data; }

static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
static inline void adjust_pointer(oop* p,
static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }

template <class T>
static inline void adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr);

@@ -1147,8 +1141,8 @@ class PSParallelCompact : AllStatic {
static jlong millis_since_last_gc();

#ifdef VALIDATE_MARK_SWEEP
static void track_adjusted_pointer(oop* p, oop newobj, bool isroot);
static void check_adjust_pointer(oop* p); // Adjust this pointer
static void track_adjusted_pointer(void* p, bool isroot);
static void check_adjust_pointer(void* p);
static void track_interior_pointers(oop obj);
static void check_interior_pointers();

@@ -1185,7 +1179,7 @@ class PSParallelCompact : AllStatic {
#endif // #ifdef ASSERT
};

bool PSParallelCompact::mark_obj(oop obj) {
inline bool PSParallelCompact::mark_obj(oop obj) {
const int obj_size = obj->size();
if (mark_bitmap()->mark_obj(obj, obj_size)) {
_summary_data.add_obj(obj, obj_size);

@@ -1195,13 +1189,94 @@ bool PSParallelCompact::mark_obj(oop obj) {
}
}

inline bool PSParallelCompact::print_phases()
{
template <class T>
inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
assert(!Universe::heap()->is_in_reserved(p),
"roots shouldn't be things within the heap");
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
guarantee(!_root_refs_stack->contains(p), "should only be in here once");
_root_refs_stack->push(p);
}
#endif
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (mark_bitmap()->is_unmarked(obj)) {
if (mark_obj(obj)) {
obj->follow_contents(cm);
}
}
}
follow_stack(cm);
}

template <class T>
inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (mark_bitmap()->is_unmarked(obj)) {
if (mark_obj(obj)) {
obj->follow_contents(cm);
}
}
}
}

template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (mark_bitmap()->is_unmarked(obj)) {
if (mark_obj(obj)) {
// This thread marked the object and owns the subsequent processing of it.
cm->save_for_scanning(obj);
}
}
}
}

template <class T>
inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
oop new_obj = (oop)summary_data().calc_new_pointer(obj);
assert(new_obj != NULL || // is forwarding ptr?
obj->is_shared(), // never forwarded?
"should be forwarded");
// Just always do the update unconditionally?
if (new_obj != NULL) {
assert(Universe::heap()->is_in_reserved(new_obj),
"should be in object space");
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
}
VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
}

template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
#ifdef VALIDATE_MARK_SWEEP
if (ValidateMarkSweep) {
if (!Universe::heap()->is_in_reserved(p)) {
_root_refs_stack->push(p);
} else {
_other_refs_stack->push(p);
}
}
#endif
mark_and_push(_compaction_manager, p);
}

inline bool PSParallelCompact::print_phases() {
return _print_phases;
}

inline double PSParallelCompact::normal_distribution(double density)
{
inline double PSParallelCompact::normal_distribution(double density) {
assert(_dwl_initialized, "uninitialized");
const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
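The hunks above show the commit's central refactoring pattern: each oop*-typed helper becomes a template over the slot type T (oop* or narrowOop*), and each closure keeps two virtual do_oop overloads that funnel into one do_oop_work<T>. A compressed standalone sketch of that shape, with the width-aware accessors reduced to hypothetical stand-ins:

#include <cstdint>

struct oopDesc; typedef oopDesc* oop;   // full-width reference
typedef uint32_t narrowOop;             // compressed reference

static char* heap_base;                 // hypothetical; see the encode/decode sketch near the top

inline oop load_and_decode(oop* p)       { return *p; }
inline oop load_and_decode(narrowOop* p) {
  return (oop)(heap_base + ((uint64_t)*p << 3));  // rebase and shift
}

struct OopClosure {
  virtual void do_oop(oop* p) = 0;
  virtual void do_oop(narrowOop* p) = 0;
};

struct ExampleClosure : public OopClosure {
  // One body serves both widths; overload resolution picks the accessor.
  template <class T> void do_oop_work(T* p) {
    oop obj = load_and_decode(p);
    (void)obj;                          // ... process obj ...
  }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};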
@@ -1257,10 +1332,11 @@ inline bool PSParallelCompact::should_update_klass(klassOop k) {
return ((HeapWord*) k) >= dense_prefix(perm_space_id);
}

inline void PSParallelCompact::adjust_pointer(oop* p,
template <class T>
inline void PSParallelCompact::adjust_pointer(T* p,
HeapWord* beg_addr,
HeapWord* end_addr) {
if (is_in(p, beg_addr, end_addr)) {
if (is_in((HeapWord*)p, beg_addr, end_addr)) {
adjust_pointer(p);
}
}

@@ -1332,18 +1408,18 @@ class UpdateOnlyClosure: public ParMarkBitMapClosure {
inline void do_addr(HeapWord* addr);
};

inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
{
_start_array->allocate_block(addr);
oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure {
public:
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id):
public:
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
_space_id(space_id),
_start_array(PSParallelCompact::start_array(space_id))
{
_start_array(PSParallelCompact::start_array(space_id)) {
assert(_space_id == PSParallelCompact::perm_space_id ||
_space_id == PSParallelCompact::old_space_id,
"cannot use FillClosure in the young gen");
@@ -25,7 +25,7 @@
#include "incls/_precompiled.incl"
#include "incls/_psPromotionLAB.cpp.incl"

const size_t PSPromotionLAB::filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));
size_t PSPromotionLAB::filler_header_size;

// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual

@@ -41,6 +41,10 @@ void PSPromotionLAB::initialize(MemRegion lab) {
set_end(end);
set_top(bottom);

// Initialize after VM starts up because header_size depends on compressed
// oops.
filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

// We can be initialized to a zero size!
if (free() > 0) {
if (ZapUnusedHeapArea) {
@@ -32,7 +32,7 @@ class ObjectStartArray;

class PSPromotionLAB : public CHeapObj {
protected:
static const size_t filler_header_size;
static size_t filler_header_size;

enum LabState {
needs_flush,
@@ -182,7 +182,7 @@ PSPromotionManager::PSPromotionManager() {
claimed_stack_depth()->initialize();
queue_size = claimed_stack_depth()->max_elems();
// We want the overflow stack to be permanent
_overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
_overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
_overflow_stack_breadth = NULL;
} else {
claimed_stack_breadth()->initialize();

@@ -240,6 +240,7 @@ void PSPromotionManager::reset() {
#endif // PS_PM_STATS
}


void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
assert(depth_first(), "invariant");
assert(overflow_stack_depth() != NULL, "invariant");

@@ -254,13 +255,15 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
#endif /* ASSERT */

do {
oop* p;
StarTask p;

// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack_depth()->is_empty()) {
p = overflow_stack_depth()->pop();
process_popped_location_depth(p);
// linux compiler wants different overloaded operator= in taskqueue to
// assign to p that the other compilers don't like.
StarTask ptr = overflow_stack_depth()->pop();
process_popped_location_depth(ptr);
}

if (totally_drain) {

@@ -365,7 +368,7 @@ void PSPromotionManager::flush_labs() {
//

oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
assert(PSScavenge::should_scavenge(o), "Sanity");
assert(PSScavenge::should_scavenge(&o), "Sanity");

oop new_obj = NULL;

@@ -530,16 +533,30 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());

}
#endif

return new_obj;
}

template <class T> void PSPromotionManager::process_array_chunk_work(
oop obj,
int start, int end) {
assert(start < end, "invariant");
T* const base = (T*)objArrayOop(obj)->base();
T* p = base + start;
T* const chunk_end = base + end;
while (p < chunk_end) {
if (PSScavenge::should_scavenge(p)) {
claim_or_forward_depth(p);
}
++p;
}
}

void PSPromotionManager::process_array_chunk(oop old) {
assert(PSChunkLargeArrays, "invariant");
assert(old->is_objArray(), "invariant");

@@ -569,15 +586,10 @@ void PSPromotionManager::process_array_chunk(oop old) {
arrayOop(old)->set_length(actual_length);
}

assert(start < end, "invariant");
oop* const base = objArrayOop(obj)->base();
oop* p = base + start;
oop* const chunk_end = base + end;
while (p < chunk_end) {
if (PSScavenge::should_scavenge(*p)) {
claim_or_forward_depth(p);
}
++p;
if (UseCompressedOops) {
process_array_chunk_work<narrowOop>(obj, start, end);
} else {
process_array_chunk_work<oop>(obj, start, end);
}
}
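process_array_chunk above shows the idiom used wherever the slot width is only known at run time: the loop is written once as a template and the UseCompressedOops flag selects the instantiation at the call site. A minimal sketch with hypothetical names:

#include <cstdint>

struct oopDesc; typedef oopDesc* oop;
typedef uint32_t narrowOop;

static bool UseCompressedOops = true;   // VM flag, fixed at startup

template <class T> void scan_slots(T* base, int n) {
  for (int i = 0; i < n; i++) {
    // width-specific load/decode of base[i] would go here
  }
}

void scan_array(void* base, int n) {
  if (UseCompressedOops) {
    scan_slots((narrowOop*)base, n);    // 4-byte slots
  } else {
    scan_slots((oop*)base, n);          // word-sized slots
  }
}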
@@ -42,8 +42,6 @@ class MutableSpace;
class PSOldGen;
class ParCompactionManager;

#define PS_CHUNKED_ARRAY_OOP_MASK 1

#define PS_PM_STATS 0

class PSPromotionManager : public CHeapObj {

@@ -80,7 +78,7 @@ class PSPromotionManager : public CHeapObj {
PrefetchQueue _prefetch_queue;

OopStarTaskQueue _claimed_stack_depth;
GrowableArray<oop*>* _overflow_stack_depth;
GrowableArray<StarTask>* _overflow_stack_depth;
OopTaskQueue _claimed_stack_breadth;
GrowableArray<oop>* _overflow_stack_breadth;

@@ -92,13 +90,15 @@ class PSPromotionManager : public CHeapObj {
uint _min_array_size_for_chunking;

// Accessors
static PSOldGen* old_gen() { return _old_gen; }
static MutableSpace* young_space() { return _young_space; }
static PSOldGen* old_gen() { return _old_gen; }
static MutableSpace* young_space() { return _young_space; }

inline static PSPromotionManager* manager_array(int index);
template <class T> inline void claim_or_forward_internal_depth(T* p);
template <class T> inline void claim_or_forward_internal_breadth(T* p);

GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }

// On the task queues we push reference locations as well as
// partially-scanned arrays (in the latter case, we push an oop to

@@ -116,27 +116,37 @@ class PSPromotionManager : public CHeapObj {
// (oop). We do all the necessary casting in the mask / unmask
// methods to avoid sprinkling the rest of the code with more casts.

bool is_oop_masked(oop* p) {
return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
// These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
// future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK 0x2

bool is_oop_masked(StarTask p) {
// If something is marked chunked it's always treated like wide oop*
return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
PS_CHUNKED_ARRAY_OOP_MASK;
}

oop* mask_chunked_array_oop(oop obj) {
assert(!is_oop_masked((oop*) obj), "invariant");
oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
assert(is_oop_masked(ret), "invariant");
return ret;
}

oop unmask_chunked_array_oop(oop* p) {
oop unmask_chunked_array_oop(StarTask p) {
assert(is_oop_masked(p), "invariant");
oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
assert(!p.is_narrow(), "chunked array oops cannot be narrow");
oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
assert(!is_oop_masked((oop*) ret), "invariant");
return ret;
}

template <class T> void process_array_chunk_work(oop obj,
int start, int end);
void process_array_chunk(oop old);

void push_depth(oop* p) {
template <class T> void push_depth(T* p) {
assert(depth_first(), "pre-condition");

#if PS_PM_STATS
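The renumbering of PS_CHUNKED_ARRAY_OOP_MASK from 1 to 0x2 above exists because StarTask now claims the low bit to record whether a queued slot is narrow, so the chunked-array tag must occupy a different bit; both fit because heap addresses are at least 4-byte aligned. A standalone sketch of the tagging scheme (the 0x1 value for the compressed-oop mask is an assumption here):

#include <cassert>
#include <cstdint>

static const uintptr_t COMPRESSED_OOP_MASK    = 0x1;  // assumed StarTask tag
static const uintptr_t CHUNKED_ARRAY_OOP_MASK = 0x2;  // value from the hunk above

inline void* mask_chunked(void* obj) {
  assert(((uintptr_t)obj & 0x3) == 0 && "low two bits must be free");
  return (void*)((uintptr_t)obj | CHUNKED_ARRAY_OOP_MASK);
}
inline bool is_chunked(void* task) {
  return ((uintptr_t)task & CHUNKED_ARRAY_OOP_MASK) != 0;
}
inline void* unmask_chunked(void* task) {
  return (void*)((uintptr_t)task & ~CHUNKED_ARRAY_OOP_MASK);
}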
@@ -175,7 +185,7 @@ class PSPromotionManager : public CHeapObj {
}

protected:
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }

public:

@@ -227,6 +237,7 @@ class PSPromotionManager : public CHeapObj {
drain_stacks_breadth(totally_drain);
}
}
public:
void drain_stacks_cond_depth() {
if (claimed_stack_depth()->size() > _target_stack_size) {
drain_stacks_depth(false);

@@ -256,15 +267,11 @@ class PSPromotionManager : public CHeapObj {
return _depth_first;
}

inline void process_popped_location_depth(oop* p);
inline void process_popped_location_depth(StarTask p);

inline void flush_prefetch_queue();

inline void claim_or_forward_depth(oop* p);
inline void claim_or_forward_internal_depth(oop* p);

inline void claim_or_forward_breadth(oop* p);
inline void claim_or_forward_internal_breadth(oop* p);
template <class T> inline void claim_or_forward_depth(T* p);
template <class T> inline void claim_or_forward_breadth(T* p);

#if PS_PM_STATS
void increment_steals(oop* p = NULL) {
@@ -28,64 +28,68 @@ inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
return _manager_array[index];
}

inline void PSPromotionManager::claim_or_forward_internal_depth(oop* p) {
if (p != NULL) {
oop o = *p;
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
if (p != NULL) { // XXX: error if p != NULL here
oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();

// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
*p = o;
oopDesc::encode_store_heap_oop_not_null(p, o);
} else {
push_depth(p);
}
}
}

inline void PSPromotionManager::claim_or_forward_internal_breadth(oop* p) {
if (p != NULL) {
oop o = *p;
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_breadth(T* p) {
if (p != NULL) { // XXX: error if p != NULL here
oop o = oopDesc::load_decode_heap_oop_not_null(p);
if (o->is_forwarded()) {
o = o->forwardee();
} else {
o = copy_to_survivor_space(o, false);
}

// Card mark
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
}
*p = o;
oopDesc::encode_store_heap_oop_not_null(p, o);
}
}

inline void PSPromotionManager::flush_prefetch_queue() {
assert(!depth_first(), "invariant");
for (int i=0; i<_prefetch_queue.length(); i++) {
claim_or_forward_internal_breadth(_prefetch_queue.pop());
for (int i = 0; i < _prefetch_queue.length(); i++) {
claim_or_forward_internal_breadth((oop*)_prefetch_queue.pop());
}
}

inline void PSPromotionManager::claim_or_forward_depth(oop* p) {
template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
assert(depth_first(), "invariant");
assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
"Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");

claim_or_forward_internal_depth(p);
}

inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
template <class T>
inline void PSPromotionManager::claim_or_forward_breadth(T* p) {
assert(!depth_first(), "invariant");
assert(PSScavenge::should_scavenge(*p, true), "revisiting object?");
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
"Sanity");
assert(Universe::heap()->is_in(p), "pointer outside heap");

if (UsePrefetchQueue) {
claim_or_forward_internal_breadth(_prefetch_queue.push_and_pop(p));
claim_or_forward_internal_breadth((T*)_prefetch_queue.push_and_pop(p));
} else {
// This option is used for testing. The use of the prefetch
// queue can delay the processing of the objects and thus

@@ -106,12 +110,16 @@ inline void PSPromotionManager::claim_or_forward_breadth(oop* p) {
}
}

inline void PSPromotionManager::process_popped_location_depth(oop* p) {
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
if (is_oop_masked(p)) {
assert(PSChunkLargeArrays, "invariant");
oop const old = unmask_chunked_array_oop(p);
process_array_chunk(old);
} else {
PSScavenge::copy_and_push_safe_barrier(this, p);
if (p.is_narrow()) {
PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
} else {
PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
}
}
}
@@ -65,16 +65,18 @@ public:
assert(_promotion_manager != NULL, "Sanity");
}

void do_oop(oop* p) {
assert (*p != NULL, "expected non-null ref");
assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
template <class T> void do_oop_work(T* p) {
assert (!oopDesc::is_null(*p), "expected non-null ref");
assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
"expected an oop while scanning weak refs");

oop obj = oop(*p);
// Weak refs may be visited more than once.
if (PSScavenge::should_scavenge(obj, _to_space)) {
if (PSScavenge::should_scavenge(p, _to_space)) {
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {

@@ -83,7 +85,7 @@ class PSEvacuateFollowersClosure: public VoidClosure {
public:
PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

void do_void() {
virtual void do_void() {
assert(_promotion_manager != NULL, "Sanity");
_promotion_manager->drain_stacks(true);
guarantee(_promotion_manager->stacks_empty(),
@@ -116,16 +116,16 @@ class PSScavenge: AllStatic {
// If an attempt to promote fails, this method is invoked
static void oop_promotion_failed(oop obj, markOop obj_mark);

static inline bool should_scavenge(oop p);
template <class T> static inline bool should_scavenge(T* p);

// These call should_scavenge() above and, if it returns true, also check that
// the object was not newly copied into to_space. The version with the bool
// argument is a convenience wrapper that fetches the to_space pointer from
// the heap and calls the other version (if the arg is true).
static inline bool should_scavenge(oop p, MutableSpace* to_space);
static inline bool should_scavenge(oop p, bool check_to_space);
template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
template <class T> static inline bool should_scavenge(T* p, bool check_to_space);

inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, oop* p);
template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);

// Is an object in the young generation
// This assumes that the HeapWord argument is in the heap,
@@ -22,28 +22,33 @@
*
*/


inline void PSScavenge::save_to_space_top_before_gc() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
_to_space_top_before_gc = heap->young_gen()->to_space()->top();
}

inline bool PSScavenge::should_scavenge(oop p) {
return p == NULL ? false : PSScavenge::is_obj_in_young((HeapWord*) p);
template <class T> inline bool PSScavenge::should_scavenge(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (oopDesc::is_null(heap_oop)) return false;
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
return PSScavenge::is_obj_in_young((HeapWord*)obj);
}

inline bool PSScavenge::should_scavenge(oop p, MutableSpace* to_space) {
template <class T>
inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
if (should_scavenge(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// Skip objects copied to to_space since the scavenge started.
HeapWord* const addr = (HeapWord*) p;
HeapWord* const addr = (HeapWord*)obj;
return addr < to_space_top_before_gc() || addr >= to_space->end();
}
return false;
}

inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
template <class T>
inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
if (check_to_space) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
return should_scavenge(p, heap->young_gen()->to_space());
}
return should_scavenge(p);

@@ -52,24 +57,23 @@ inline bool PSScavenge::should_scavenge(oop p, bool check_to_space) {
// Attempt to "claim" oop at p via CAS, push the new obj if successful
// This version tests the oop* to make sure it is within the heap before
// attempting marking.
template <class T>
inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
oop* p) {
assert(should_scavenge(*p, true), "revisiting object?");
T* p) {
assert(should_scavenge(p, true), "revisiting object?");

oop o = *p;
if (o->is_forwarded()) {
*p = o->forwardee();
} else {
*p = pm->copy_to_survivor_space(o, pm->depth_first());
}
oop o = oopDesc::load_decode_heap_oop_not_null(p);
oop new_obj = o->is_forwarded()
? o->forwardee()
: pm->copy_to_survivor_space(o, pm->depth_first());
oopDesc::encode_store_heap_oop_not_null(p, new_obj);

// We cannot mark without test, as some code passes us pointers
// that are outside the heap.
if ((!PSScavenge::is_obj_in_young((HeapWord*) p)) &&
if ((!PSScavenge::is_obj_in_young((HeapWord*)p)) &&
Universe::heap()->is_in_reserved(p)) {
o = *p;
if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
card_table()->inline_write_ref_field_gc(p, o);
if (PSScavenge::is_obj_in_young((HeapWord*)new_obj)) {
card_table()->inline_write_ref_field_gc(p, new_obj);
}
}
}
@@ -34,15 +34,17 @@ class PSScavengeRootsClosure: public OopClosure {
private:
PSPromotionManager* _promotion_manager;

public:
PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }

virtual void do_oop(oop* p) {
if (PSScavenge::should_scavenge(*p)) {
protected:
template <class T> void do_oop_work(T *p) {
if (PSScavenge::should_scavenge(p)) {
// We never card mark roots, maybe call a func without test?
PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
}
}
public:
PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); }
void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
};

void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {

@@ -135,7 +137,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
int random_seed = 17;
if (pm->depth_first()) {
while(true) {
oop* p;
StarTask p;
if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
#if PS_PM_STATS
pm->increment_steals(p);

@@ -164,8 +166,7 @@ void StealTask::do_it(GCTaskManager* manager, uint which) {
}
}
}
guarantee(pm->stacks_empty(),
"stacks should be empty at this point");
guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

//