Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-27 14:54:52 +02:00
Merge

commit 8d6035660e
223 changed files with 5079 additions and 2219 deletions
@@ -105,33 +105,6 @@ StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   _g1->heap_region_iterate(&rc);
 }
 
-class UpdateRSOopClosure: public OopClosure {
-  HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
-  int _worker_i;
-public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
-    _from(NULL), _rs(rs), _worker_i(worker_i) {
-    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
-  }
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  virtual void do_oop(narrowOop* p) {
-    guarantee(false, "NYI");
-  }
-  virtual void do_oop(oop* p) {
-    assert(_from != NULL, "from region must be non-NULL");
-    _rs->par_write_ref(_from, p, _worker_i);
-  }
-  // Override: this closure is idempotent.
-  // bool idempotent() { return true; }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-};
-
 class UpdateRSOutOfRegionClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mr_bs;
@@ -177,11 +150,19 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
     _cards_scanned(NULL), _total_cards_scanned(0)
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
-  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, ParallelGCThreads);
+  guarantee(n_workers() > 0, "There should be some workers");
+  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
+  }
 }
 
 HRInto_G1RemSet::~HRInto_G1RemSet() {
   delete _seq_task;
+  for (uint i = 0; i < n_workers(); i++) {
+    delete _new_refs[i];
+  }
+  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
 }
 
 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
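The constructor now sizes the per-worker reference buffers by n_workers() instead of ParallelGCThreads, and the matching destructor frees them. The added guarantee hints at the motivation: even a serial collection runs with at least one worker, whereas ParallelGCThreads can be zero, which would have left _new_refs without a usable buffer. A minimal standalone sketch of that sizing rule, using toy types and a hypothetical helper rather than the HotSpot code:

#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the apparent intent of n_workers(): never
// report zero workers, even when the parallel-GC thread count is 0.
inline size_t toy_n_workers(size_t parallel_gc_threads) {
  return parallel_gc_threads > 0 ? parallel_gc_threads : 1;
}

// Toy stand-in for the per-worker GrowableArray<oop*> buffers.
struct ToyNewRefsBuffers {
  std::vector<std::vector<int*>> per_worker;

  explicit ToyNewRefsBuffers(size_t parallel_gc_threads)
      : per_worker(toy_n_workers(parallel_gc_threads)) {
    // per_worker.size() >= 1, so worker 0 always has a buffer to push into,
    // matching the guarantee(n_workers() > 0, ...) in the diff above.
  }
};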
@@ -281,8 +262,9 @@ public:
       if (!_ct_bs->is_card_claimed(card_index) &&
           !_ct_bs->is_card_dirty(card_index)) {
         assert(_ct_bs->is_card_clean(card_index) ||
-               _ct_bs->is_card_claimed(card_index),
-               "Card is either dirty, clean, or claimed");
+               _ct_bs->is_card_claimed(card_index) ||
+               _ct_bs->is_card_deferred(card_index),
+               "Card is either clean, claimed or deferred");
         if (_ct_bs->claim_card(card_index))
           scanCard(card_index, card_region);
       }
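The assertion now admits a third non-dirty state, deferred, next to clean and claimed, while claim_card() still decides which thread actually scans a card. As a rough standalone illustration of card states driven by atomic transitions (a toy model with invented state values, not the real CardTableModRefBS encoding):

#include <atomic>
#include <cstdint>
#include <vector>

// Toy card table: each card is clean, dirty, claimed (owned by a scanning
// thread) or deferred (queued for a later remembered-set update).
enum ToyCardState : uint8_t { kClean = 0, kDirty = 1, kClaimed = 2, kDeferred = 3 };

class ToyCardTable {
  std::vector<std::atomic<uint8_t>> _cards;
public:
  explicit ToyCardTable(size_t n_cards) : _cards(n_cards) {}

  bool is_card_dirty(size_t i)    const { return _cards[i].load() == kDirty; }
  bool is_card_clean(size_t i)    const { return _cards[i].load() == kClean; }
  bool is_card_claimed(size_t i)  const { return _cards[i].load() == kClaimed; }
  bool is_card_deferred(size_t i) const { return _cards[i].load() == kDeferred; }

  // At most one thread wins the claim and scans the card.
  bool claim_card(size_t i) {
    uint8_t expected = kClean;
    return _cards[i].compare_exchange_strong(expected, kClaimed);
  }

  // Returns true only for the thread that performs the transition, so a
  // deferred card gets enqueued exactly once.
  bool mark_card_deferred(size_t i) {
    return _cards[i].exchange(kDeferred) != kDeferred;
  }
};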
@@ -338,14 +320,12 @@ void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
 
   _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
   _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
-  if (ParallelGCThreads > 0) {
-    // In this case, we called scanNewRefsRS and recorded the corresponding
-    // time.
-    double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
-    if (scan_new_refs_time_ms > 0.0) {
-      closure_app_time_ms += scan_new_refs_time_ms;
-    }
+
+  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
+  if (scan_new_refs_time_ms > 0.0) {
+    closure_app_time_ms += scan_new_refs_time_ms;
   }
+
   _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
 }
 
@@ -469,8 +449,8 @@ HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
   double scan_new_refs_start_sec = os::elapsedTime();
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
-  while (_new_refs[worker_i]->is_nonempty()) {
-    oop* p = _new_refs[worker_i]->pop();
+  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
+    oop* p = _new_refs[worker_i]->at(i);
     oop obj = *p;
     // *p was in the collection set when p was pushed on "_new_refs", but
     // another thread may have processed this location from an RS, so it
@@ -480,10 +460,6 @@ HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
     HeapRegion* r = g1h->heap_region_containing(p);
 
     DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
-    assert(ParallelGCThreads > 1
-           || to->rem_set()->contains_reference(p),
-           "Invariant: pushed after being added."
-           "(Not reliable in parallel code.)");
     oc->set_region(r);
     // If "p" has already been processed concurrently, this is
     // idempotent.
@@ -538,8 +514,8 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
     }
   } else {
     assert(worker_i == 0, "invariant");
-
     updateRS(0);
+    scanNewRefsRS(oc, 0);
     scanRS(oc, 0);
   }
 }
@@ -559,11 +535,7 @@ prepare_for_oops_into_collection_set_do() {
   assert(!_par_traversal_in_progress, "Invariant between iterations.");
   if (ParallelGCThreads > 0) {
     set_par_traversal(true);
-    int n_workers = _g1->workers()->total_workers();
-    _seq_task->set_par_threads(n_workers);
-    for (uint i = 0; i < ParallelGCThreads; i++)
-      _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192,true);
-
+    _seq_task->set_par_threads((int)n_workers());
     if (cg1r->do_traversal()) {
       updateRS(0);
       // Have to do this again after updaters
@@ -587,6 +559,53 @@ class cleanUpIteratorsClosure : public HeapRegionClosure {
   }
 };
 
+class UpdateRSetOopsIntoCSImmediate : public OopClosure {
+  G1CollectedHeap* _g1;
+public:
+  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    HeapRegion* to = _g1->heap_region_containing(*p);
+    if (to->in_collection_set()) {
+      if (to->rem_set()->add_reference(p, 0)) {
+        _g1->schedule_popular_region_evac(to);
+      }
+    }
+  }
+};
+
+class UpdateRSetOopsIntoCSDeferred : public OopClosure {
+  G1CollectedHeap* _g1;
+  CardTableModRefBS* _ct_bs;
+  DirtyCardQueue* _dcq;
+public:
+  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
+    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
+  virtual void do_oop(narrowOop* p) {
+    guarantee(false, "NYI");
+  }
+  virtual void do_oop(oop* p) {
+    oop obj = *p;
+    if (_g1->obj_in_cs(obj)) {
+      size_t card_index = _ct_bs->index_for(p);
+      if (_ct_bs->mark_card_deferred(card_index)) {
+        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+      }
+    }
+  }
+};
+
+void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
+  for (size_t i = 0; i < n_workers(); i++) {
+    for (int j = 0; j < _new_refs[i]->length(); j++) {
+      oop* p = _new_refs[i]->at(j);
+      cl->do_oop(p);
+    }
+  }
+}
+
 void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
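The two closures added here cover references that still point into the collection set when evacuation fails: the immediate variant pushes the reference straight into the target region's remembered set, while the deferred variant only marks the covering card as deferred and enqueues it on a dirty-card queue so the remembered-set work happens later. A condensed single-threaded sketch of the deferred path (all names below are invented for illustration; the real code goes through mark_card_deferred and DirtyCardQueue):

#include <cstddef>
#include <deque>
#include <vector>

// Toy deferred remembered-set updater: instead of updating remembered sets
// during the pause, remember which card covers the updated field and let a
// later phase redo the work from the queue.
struct ToyDeferredRSUpdater {
  std::vector<bool> deferred;          // one "deferred" bit per card
  std::deque<size_t> dirty_card_queue; // cards waiting to be reprocessed

  explicit ToyDeferredRSUpdater(size_t n_cards) : deferred(n_cards, false) {}

  // Called for each field whose referent is (still) in the collection set.
  void defer(size_t card_index) {
    if (!deferred[card_index]) {              // cf. mark_card_deferred()
      deferred[card_index] = true;
      dirty_card_queue.push_back(card_index); // cf. DirtyCardQueue::enqueue()
    }
  }

  // Later, a refinement pass drains the queue and performs the actual
  // remembered-set insertions for each card.
  template <typename ProcessCard>
  void drain(ProcessCard process_card) {
    while (!dirty_card_queue.empty()) {
      size_t c = dirty_card_queue.front();
      dirty_card_queue.pop_front();
      deferred[c] = false;
      process_card(c);
    }
  }
};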
@@ -609,11 +628,25 @@ void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
     if (cg1r->do_traversal()) {
       cg1r->cg1rThread()->set_do_traversal(false);
     }
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      delete _new_refs[i];
-    }
     set_par_traversal(false);
   }
+
+  if (_g1->evacuation_failed()) {
+    // Restore remembered sets for the regions pointing into
+    // the collection set.
+    if (G1DeferredRSUpdate) {
+      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
+      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
+      new_refs_iterate(&deferred_update);
+    } else {
+      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
+      new_refs_iterate(&immediate_update);
+    }
+  }
+  for (uint i = 0; i < n_workers(); i++) {
+    _new_refs[i]->clear();
+  }
+
   assert(!_par_traversal_in_progress, "Invariant between iterations.");
 }
 
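The cleanup path ties the pieces together: the per-worker _new_refs buffers are no longer deleted after every pause; when evacuation failed they are replayed through one of the two closures (deferred or immediate, depending on G1DeferredRSUpdate), and afterwards they are merely cleared for the next pause. A compact sketch of that lifecycle with standard containers (invented names, not the HotSpot API):

#include <functional>
#include <vector>

// Toy model of the end-of-pause bookkeeping for the per-worker buffers.
struct ToyPauseCleanup {
  std::vector<std::vector<int*>> new_refs;   // one buffer per GC worker

  explicit ToyPauseCleanup(size_t workers) : new_refs(workers) {}

  void cleanup_after_pause(bool evacuation_failed,
                           bool deferred_rs_update,
                           const std::function<void(int*)>& deferred_update,
                           const std::function<void(int*)>& immediate_update) {
    if (evacuation_failed) {
      // Replay every recorded reference, mirroring
      // new_refs_iterate(&deferred_update) / new_refs_iterate(&immediate_update).
      const auto& cl = deferred_rs_update ? deferred_update : immediate_update;
      for (auto& buf : new_refs) {
        for (int* p : buf) {
          cl(p);
        }
      }
    }
    // The buffers survive across pauses; only their contents are dropped.
    for (auto& buf : new_refs) {
      buf.clear();
    }
  }
};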
@@ -683,7 +716,8 @@ public:
   bool doHeapRegion(HeapRegion* r) {
     if (!r->in_collection_set() &&
         !r->continuesHumongous() &&
-        !r->is_young()) {
+        !r->is_young() &&
+        !r->is_survivor()) {
       _update_rs_oop_cl.set_from(r);
       UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);
 
@@ -820,7 +854,7 @@ void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
   // before all the cards on the region are dirtied. This is unlikely,
   // and it doesn't happen often, but it can happen. So, the extra
   // check below filters out those cards.
-  if (r->is_young()) {
+  if (r->is_young() || r->is_survivor()) {
     return;
   }
   // While we are processing RSet buffers during the collection, we