8065358: Refactor G1s usage of save_marks and reduce related races
Stop using save_marks in G1-related code and make setting the replacement field less racy.

Reviewed-by: brutisso, tschatzl
commit b5ef32af36 (parent d65f3c41b7)
5 changed files with 54 additions and 50 deletions
@@ -59,7 +59,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_top_and_timestamp();
+    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -94,6 +94,9 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+  if (_retained_old_gc_alloc_region != NULL) {
+    _retained_old_gc_alloc_region->record_retained_region();
+  }

   if (ResizePLAB) {
     _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
@@ -6530,7 +6530,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
     // We really only need to do this for old regions given that we
     // should never scan survivors. But it doesn't hurt to do it
     // for survivors too.
-    new_alloc_region->record_top_and_timestamp();
+    new_alloc_region->record_timestamp();
     if (survivor) {
       new_alloc_region->set_survivor();
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
@@ -140,11 +140,9 @@ public:

       // Set the "from" region in the closure.
       _oc->set_region(r);
-      HeapWord* card_start = _bot_shared->address_for_index(index);
-      HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
-      Space *sp = SharedHeap::heap()->space_containing(card_start);
-      MemRegion sm_region = sp->used_region_at_save_marks();
-      MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
+      MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
+      MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
+      MemRegion mr = pre_gc_allocated.intersection(card_region);
       if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
         // We make the card as "claimed" lazily (so races are possible
         // but they're benign), which reduces the number of duplicate
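The new code clamps each card to the window of memory allocated before the current GC. Here is the same clamping in miniature, with a hypothetical MemRegion stand-in rather than HotSpot's class: a card is scanned only where it overlaps [bottom, scan_top), so memory handed out by GC workers during the current pause is never read by the card scanner.

    // Hypothetical MemRegion stand-in; an illustration, not the HotSpot class.
    #include <algorithm>
    #include <cstdio>

    struct MemRegion {
      const char* start;
      const char* end; // half-open interval [start, end)

      bool is_empty() const { return start >= end; }

      MemRegion intersection(const MemRegion& other) const {
        MemRegion r{std::max(start, other.start), std::min(end, other.end)};
        if (r.is_empty()) {
          r = MemRegion{nullptr, nullptr};
        }
        return r;
      }
    };

    int main() {
      char heap[1024];
      MemRegion card_region{heap + 448, heap + 512};  // one 64-byte card
      MemRegion pre_gc_allocated{heap, heap + 480};   // [bottom, scan_top)
      MemRegion mr = pre_gc_allocated.intersection(card_region);
      if (!mr.is_empty()) {
        // only the 32 bytes allocated before the GC get scanned
        std::printf("scan %td bytes of the card\n", mr.end - mr.start);
      }
    }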
@@ -326,7 +326,7 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {

   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_top_and_timestamp();
+  record_timestamp();

   assert(mr.end() == orig_end(),
          err_msg("Given region end address " PTR_FORMAT " should match exactly "
@@ -416,9 +416,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,

   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
-  // parseable. Stop such at the "saved_mark" of the region.
+  // parseable. Stop such at the "scan_top" of the region.
   if (g1h->is_gc_active()) {
-    mr = mr.intersection(used_region_at_save_marks());
+    mr = mr.intersection(MemRegion(bottom(), scan_top()));
   } else {
     mr = mr.intersection(used_region());
   }
@@ -969,7 +969,7 @@ void HeapRegion::prepare_for_compaction(CompactPoint* cp) {

 void G1OffsetTableContigSpace::clear(bool mangle_space) {
   set_top(bottom());
-  set_saved_mark_word(bottom());
+  _scan_top = bottom();
   CompactibleSpace::clear(mangle_space);
   reset_bot();
 }
@@ -1001,41 +1001,42 @@ HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
   return _offsets.threshold();
 }

-HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
+HeapWord* G1OffsetTableContigSpace::scan_top() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
   HeapWord* local_top = top();
   OrderAccess::loadload();
-  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+  const unsigned local_time_stamp = _gc_time_stamp;
+  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
+  if (local_time_stamp < g1h->get_gc_time_stamp()) {
     return local_top;
   } else {
-    return Space::saved_mark_word();
+    return _scan_top;
   }
 }

-void G1OffsetTableContigSpace::record_top_and_timestamp() {
+void G1OffsetTableContigSpace::record_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

   if (_gc_time_stamp < curr_gc_time_stamp) {
-    // The order of these is important, as another thread might be
-    // about to start scanning this region. If it does so after
-    // set_saved_mark and before _gc_time_stamp = ..., then the latter
-    // will be false, and it will pick up top() as the high water mark
-    // of region. If it does so after _gc_time_stamp = ..., then it
-    // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behavior will be correct.
-    Space::set_saved_mark_word(top());
-    OrderAccess::storestore();
+    // Setting the time stamp here tells concurrent readers to look at
+    // scan_top to know the maximum allowed address to look at.
+
+    // scan_top should be bottom for all regions except for the
+    // retained old alloc region which should have scan_top == top
+    HeapWord* st = _scan_top;
+    guarantee(st == _bottom || st == _top, "invariant");
+
     _gc_time_stamp = curr_gc_time_stamp;
-    // No need to do another barrier to flush the writes above. If
-    // this is called in parallel with other threads trying to
-    // allocate into the region, the caller should call this while
-    // holding a lock and when the lock is released the writes will be
-    // flushed.
   }
 }

+void G1OffsetTableContigSpace::record_retained_region() {
+  // scan_top is the maximum address where it's safe for the next gc to
+  // scan this region.
+  _scan_top = top();
+}
+
 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
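The heart of the change is the reader-side protocol in scan_top(): sample top, fence, then sample the time stamp, and only trust _scan_top when the stamp is current. The sketch below expresses the same ordering with C++11 atomics as portable stand-ins for HotSpot's OrderAccess and the volatile _scan_top field; it is an illustration of the technique, not the actual implementation.

    // Hypothetical modeling of scan_top() with C++11 atomics.
    #include <atomic>

    struct Space {
      std::atomic<char*> top{nullptr};
      std::atomic<unsigned> gc_time_stamp{0};
      char* scan_top_word = nullptr; // published before the stamp advances

      char* scan_top(unsigned current_gc_time_stamp) const {
        char* local_top = top.load(std::memory_order_acquire);
        // The acquire load above keeps the stamp load below from being
        // reordered before it, mirroring OrderAccess::loadload().
        unsigned local_stamp = gc_time_stamp.load(std::memory_order_relaxed);
        if (local_stamp < current_gc_time_stamp) {
          // The region had not become a GC alloc region when top was
          // sampled, so that top is a safe scan limit.
          return local_top;
        }
        // Retained GC alloc region: use the limit saved at the last GC.
        return scan_top_word;
      }
    };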
@@ -1063,6 +1064,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
+  _scan_top = bottom();
+  set_saved_mark_word(NULL);
   reset_bot();
 }
@@ -101,28 +101,25 @@ public:
 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
 // be reconciled, then G1OffsetTableContigSpace could go away.

-// The idea behind time stamps is the following. Doing a save_marks on
-// all regions at every GC pause is time consuming (if I remember
-// well, 10ms or so). So, we would like to do that only for regions
-// that are GC alloc regions. To achieve this, we use time
-// stamps. For every evacuation pause, G1CollectedHeap generates a
-// unique time stamp (essentially a counter that gets
-// incremented). Every time we want to call save_marks on a region,
-// we set the saved_mark_word to top and also copy the current GC
-// time stamp to the time stamp field of the space. Reading the
-// saved_mark_word involves checking the time stamp of the
-// region. If it is the same as the current GC time stamp, then we
-// can safely read the saved_mark_word field, as it is valid. If the
-// time stamp of the region is not the same as the current GC time
-// stamp, then we instead read top, as the saved_mark_word field is
-// invalid. Time stamps (on the regions and also on the
-// G1CollectedHeap) are reset at every cleanup (we iterate over
-// the regions anyway) and at the end of a Full GC. The current scheme
-// that uses sequential unsigned ints will fail only if we have 4b
+// The idea behind time stamps is the following. We want to keep track of
+// the highest address where it's safe to scan objects for each region.
+// This is only relevant for current GC alloc regions so we keep a time stamp
+// per region to determine if the region has been allocated during the current
+// GC or not. If the time stamp is current we report a scan_top value which
+// was saved at the end of the previous GC for retained alloc regions and which is
+// equal to the bottom for all other regions.
+// There is a race between card scanners and allocating gc workers where we must ensure
+// that card scanners do not read the memory allocated by the gc workers.
+// In order to enforce that, we must not return a value of _top which is more recent than the
+// time stamp. This is due to the fact that a region may become a gc alloc region at
+// some point after we've read the timestamp value as being < the current time stamp.
+// The time stamps are re-initialized to zero at cleanup and at Full GCs.
+// The current scheme that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
   HeapWord* _top;
+  HeapWord* volatile _scan_top;
 protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
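To make the comment's race argument concrete, here is the writer side in the same hypothetical C++11 modeling as the reader sketch after the record_timestamp() hunk above. It assumes, as the patch's comments state, that _scan_top was already set earlier (to bottom at clear(), or to top via record_retained_region() at the end of the previous GC), which is why the old storestore barrier before the stamp update could be dropped. This is a sketch under those assumptions, not the HotSpot code.

    // Hypothetical modeling of record_timestamp(); not the HotSpot sources.
    #include <atomic>

    struct Space {
      std::atomic<unsigned> gc_time_stamp{0};
      char* scan_top = nullptr; // assumed already set before the pause
    };

    void record_timestamp(Space* s, unsigned current_gc_time_stamp) {
      unsigned stamp = s->gc_time_stamp.load(std::memory_order_relaxed);
      if (stamp < current_gc_time_stamp) {
        // Publishing the stamp is the only store concurrent readers
        // need to observe; release ordering keeps the earlier scan_top
        // write visible to any reader that sees the new stamp.
        s->gc_time_stamp.store(current_gc_time_stamp,
                               std::memory_order_release);
      }
    }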
@@ -166,10 +163,11 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);

-  virtual HeapWord* saved_mark_word() const;
-  void record_top_and_timestamp();
+  HeapWord* scan_top() const;
+  void record_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+  void record_retained_region();

   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
@@ -191,6 +189,8 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);

+  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+
   // MarkSweep support phase3
   virtual HeapWord* initialize_threshold();
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);