8197573: Remove concurrent cleanup and secondary free list handling

Remove secondary free list and all associated functionality, moving the cleanup work into the Cleanup pause instead.

Reviewed-by: sangheki, sjohanss
Thomas Schatzl 2018-03-28 16:39:32 +02:00
parent 3a5ca51fff
commit 83d7c657c4
22 changed files with 126 additions and 485 deletions
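
The diff below removes the concurrent "complete cleanup" hand-off (the secondary free list, SecondaryFreeList_lock, and the free_regions_coming() protocol) and instead reclaims completely empty regions inside the stop-the-world Cleanup pause, using a parallel G1CleanupTask that prepends them to the master free list. As a rough mental model only, here is a minimal, hypothetical C++ sketch (not HotSpot code; ToyHeap, Region and cleanup_worker are made-up stand-ins for the real FreeRegionList/G1CleanupTask machinery shown in the diff):

    // Toy model of the new flow: during a pause, each worker frees its share of
    // completely empty regions into a local list, then splices that list onto the
    // single master free list under a lock taken once per worker. There is no
    // secondary list and no "free regions coming" hand-shake with allocators.
    #include <cstddef>
    #include <iostream>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Region {
      std::size_t used_bytes;
      std::size_t live_bytes;   // live data found by marking
    };

    struct ToyHeap {
      std::vector<Region>  regions;     // fixed set of heap regions
      std::vector<Region*> free_list;   // single master free list
      std::mutex           rare_event_lock;
    };

    void cleanup_worker(ToyHeap& heap, std::size_t worker_id, std::size_t num_workers) {
      std::vector<Region*> local_free_list;              // "Local Cleanup List"
      for (std::size_t i = worker_id; i < heap.regions.size(); i += num_workers) {
        Region& r = heap.regions[i];
        if (r.used_bytes > 0 && r.live_bytes == 0) {     // completely empty after marking
          r.used_bytes = 0;
          local_free_list.push_back(&r);
        }
      }
      // Splice the local list onto the master free list, once per worker.
      std::lock_guard<std::mutex> x(heap.rare_event_lock);
      heap.free_list.insert(heap.free_list.end(),
                            local_free_list.begin(), local_free_list.end());
    }

    int main() {
      ToyHeap heap;
      heap.regions = {{1024, 0}, {1024, 512}, {2048, 0}, {4096, 4096}};

      std::vector<std::thread> workers;
      for (std::size_t w = 0; w < 2; w++) {
        workers.emplace_back(cleanup_worker, std::ref(heap), w, 2);
      }
      for (std::thread& t : workers) {
        t.join();
      }
      std::cout << "regions reclaimed during the pause: " << heap.free_list.size() << "\n";
      return 0;   // prints 2 for this toy heap
    }

Because the freeing now happens while the world is stopped, allocating threads never have to wait for regions to arrive from a concurrent freeing thread, which is why SecondaryFreeList_lock, the G1StressConcRegionFreeing stress flags and the free_regions_coming() wait/notify code can all be deleted below.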

View file

@@ -49,19 +49,18 @@
 STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
               ConcurrentGCPhaseManager::IDLE_PHASE);
 
 #define EXPAND_CONCURRENT_PHASES(expander)                                 \
   expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)     \
   expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)             \
   expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                          \
   expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")         \
   expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")             \
   expander(CONCURRENT_MARK,, "Concurrent Mark")                            \
   expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")                 \
   expander(BEFORE_REMARK,, NULL)                                           \
   expander(REMARK,, NULL)                                                  \
   expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
-  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")               \
   expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")     \
   /* */
 
 class G1ConcurrentPhase : public AllStatic {
@@ -357,85 +356,17 @@ void ConcurrentMarkThread::run_service() {
         double end_time = os::elapsedVTime();
         // Update the total virtual time before doing this, since it will try
-        // to measure it to get the vtime for this marking. We purposely
-        // neglect the presumably-short "completeCleanup" phase here.
+        // to measure it to get the vtime for this marking.
         _vtime_accum = (end_time - _vtime_start);
 
         if (!cm()->has_aborted()) {
           delay_to_keep_mmu(g1_policy, false /* cleanup */);
-
-          if (!cm()->has_aborted()) {
-            CMCleanUp cl_cl(_cm);
-            VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
-            VMThread::execute(&op);
-          }
-        } else {
-          // We don't want to update the marking status if a GC pause
-          // is already underway.
-          SuspendibleThreadSetJoiner sts_join;
-          g1h->collector_state()->set_mark_in_progress(false);
         }
 
-        // Check if cleanup set the free_regions_coming flag. If it
-        // hasn't, we can just skip the next step.
-        if (g1h->free_regions_coming()) {
-          // The following will finish freeing up any regions that we
-          // found to be empty during cleanup. We'll do this part
-          // without joining the suspendible set. If an evacuation pause
-          // takes place, then we would carry on freeing regions in
-          // case they are needed by the pause. If a Full GC takes
-          // place, it would wait for us to process the regions
-          // reclaimed by cleanup.
-
-          // Now do the concurrent cleanup operation.
-          G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
-          _cm->complete_cleanup();
-
-          // Notify anyone who's waiting that there are no more free
-          // regions coming. We have to do this before we join the STS
-          // (in fact, we should not attempt to join the STS in the
-          // interval between finishing the cleanup pause and clearing
-          // the free_regions_coming flag) otherwise we might deadlock:
-          // a GC worker could be blocked waiting for the notification
-          // whereas this thread will be blocked for the pause to finish
-          // while it's trying to join the STS, which is conditional on
-          // the GC workers finishing.
-          g1h->reset_free_regions_coming();
-        }
-        guarantee(cm()->cleanup_list_is_empty(),
-                  "at this point there should be no regions on the cleanup list");
-
-        // There is a tricky race before recording that the concurrent
-        // cleanup has completed and a potential Full GC starting around
-        // the same time. We want to make sure that the Full GC calls
-        // abort() on concurrent mark after
-        // record_concurrent_mark_cleanup_completed(), since abort() is
-        // the method that will reset the concurrent mark state. If we
-        // end up calling record_concurrent_mark_cleanup_completed()
-        // after abort() then we might incorrectly undo some of the work
-        // abort() did. Checking the has_aborted() flag after joining
-        // the STS allows the correct ordering of the two methods. There
-        // are two scenarios:
-        //
-        // a) If we reach here before the Full GC, the fact that we have
-        // joined the STS means that the Full GC cannot start until we
-        // leave the STS, so record_concurrent_mark_cleanup_completed()
-        // will complete before abort() is called.
-        //
-        // b) If we reach here during the Full GC, we'll be held up from
-        // joining the STS until the Full GC is done, which means that
-        // abort() will have completed and has_aborted() will return
-        // true to prevent us from calling
-        // record_concurrent_mark_cleanup_completed() (and, in fact, it's
-        // not needed any more as the concurrent mark state has been
-        // already reset).
-        {
-          SuspendibleThreadSetJoiner sts_join;
-          if (!cm()->has_aborted()) {
-            g1_policy->record_concurrent_mark_cleanup_completed();
-          } else {
-            log_info(gc, marking)("Concurrent Mark Abort");
-          }
+        if (!cm()->has_aborted()) {
+          CMCleanUp cl_cl(_cm);
+          VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
+          VMThread::execute(&op);
         }
 
         // We now want to allow clearing of the marking bitmap to be

View file

@@ -156,63 +156,13 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 // Private methods.
 
-HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
-  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "secondary_free_list has %u entries",
-                                      _secondary_free_list.length());
-      // It looks as if there are free regions available on the
-      // secondary_free_list. Let's move them to the free_list and try
-      // again to allocate from it.
-      append_secondary_free_list();
-
-      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
-             "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrm.allocate_free_region(is_old);
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "allocated " HR_FORMAT " from secondary_free_list",
-                                      HR_FORMAT_PARAMS(res));
-      return res;
-    }
-
-    // Wait here until we get notified either when (a) there are no
-    // more free regions coming or (b) some regions have been moved on
-    // the secondary_free_list.
-    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                  "could not allocate from secondary_free_list");
-  return NULL;
-}
-
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res;
-  if (G1StressConcRegionFreeing) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "forced to look at the secondary_free_list");
-      res = new_region_try_secondary_free_list(is_old);
-      if (res != NULL) {
-        return res;
-      }
-    }
-  }
-
-  res = _hrm.allocate_free_region(is_old);
-
-  if (res == NULL) {
-    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                    "res == NULL, trying the secondary_free_list");
-    res = new_region_try_secondary_free_list(is_old);
-  }
+  HeapRegion* res = _hrm.allocate_free_region(is_old);
+
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
     // do_expand to true. So, we should only reach here during a
@@ -380,17 +330,6 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
       first = hr->hrm_index();
     }
   } else {
-    // We can't allocate humongous regions spanning more than one region while
-    // cleanupComplete() is running, since some of the regions we find to be
-    // empty might not yet be added to the free list. It is not straightforward
-    // to know in which list they are on so that we can remove them. We only
-    // need to do this if we need to allocate more than one region to satisfy the
-    // current humongous allocation request. If we are only allocating one region
-    // we use the one-region region allocation code (see above), that already
-    // potentially waits for regions from the secondary free list.
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
     first = _hrm.find_contiguous_only_empty(obj_regions);
@@ -1026,11 +965,6 @@ void G1CollectedHeap::print_hrm_post_compaction() {
 }
 
 void G1CollectedHeap::abort_concurrent_cycle() {
-  // Note: When we have a more flexible GC logging framework that
-  // allows us to add optional attributes to a GC log record we
-  // could consider timing and reporting how long we wait in the
-  // following two methods.
-  wait_while_free_regions_coming();
-
   // If we start the compaction before the CM threads finish
   // scanning the root regions we might trip them over as we'll
   // be moving objects / updating references. So let's wait until
@@ -1038,7 +972,6 @@ void G1CollectedHeap::abort_concurrent_cycle() {
   // early.
   _cm->root_regions()->abort();
   _cm->root_regions()->wait_until_scan_finished();
-  append_secondary_free_list_if_not_empty_with_lock();
 
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
@@ -1476,13 +1409,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   _cr(NULL),
   _g1mm(NULL),
   _preserved_marks_set(true /* in_c_heap */),
-  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
-  _free_regions_coming(false),
   _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
@@ -2920,16 +2851,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
 
-    // If the secondary_free_list is not empty, append it to the
-    // free_list. No need to wait for the cleanup operation to finish;
-    // the region allocation code will check the secondary_free_list
-    // and wait if necessary. If the G1StressConcRegionFreeing flag is
-    // set, skip this step so that the region allocation code has to
-    // get entries from the secondary_free_list.
-    if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty_with_lock();
-    }
-
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
@@ -4424,12 +4345,11 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
 }
 
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
-                                            FreeRegionList* free_list,
-                                            bool skip_remset) {
+                                            FreeRegionList* free_list) {
   assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
 
   hr->clear_humongous();
-  free_region(hr, free_list, skip_remset);
+  free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
 }
 
 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
@@ -4821,7 +4741,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
       _humongous_regions_reclaimed++;
-      g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
+      g1h->free_humongous_region(r, _free_region_list);
       r = next;
     } while (r != NULL);
@@ -4893,44 +4813,6 @@ void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
   collection_set->stop_incremental_building();
 }
 
-void G1CollectedHeap::set_free_regions_coming() {
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
-
-  assert(!free_regions_coming(), "pre-condition");
-  _free_regions_coming = true;
-}
-
-void G1CollectedHeap::reset_free_regions_coming() {
-  assert(free_regions_coming(), "pre-condition");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_regions_coming = false;
-    SecondaryFreeList_lock->notify_all();
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
-}
-
-void G1CollectedHeap::wait_while_free_regions_coming() {
-  // Most of the time we won't have to wait, so let's do a quick test
-  // first before we take the lock.
-  if (!free_regions_coming()) {
-    return;
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
-
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    while (free_regions_coming()) {
-      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
-}
-
 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
   return _allocator->is_retained_old_region(hr);
 }

View file

@@ -163,11 +163,6 @@ private:
   static size_t _humongous_object_threshold_in_words;
 
-  // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to
-  // the master free list when appropriate.
-  FreeRegionList _secondary_free_list;
-
   // It keeps track of the old regions.
   HeapRegionSet _old_set;
@@ -380,13 +375,6 @@ private:
   G1CollectionSet _collection_set;
 
-  // This is the second level of trying to allocate a new region. If
-  // new_region() didn't find a region on the free_list, this call will
-  // check whether there's anything available on the
-  // secondary_free_list and/or wait for more regions to appear on
-  // that list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(bool is_old);
-
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
@@ -657,12 +645,11 @@ public:
   // and calling free_region() for each of them. The freed regions
   // will be added to the free list that's passed as a parameter (this
   // is usually a local list which will be appended to the master free
-  // list later). The used bytes of freed regions are accumulated in
-  // pre_used. If skip_remset is true, the region's RSet will not be freed
-  // up. The assumption is that this will be done later.
+  // list later).
+  // The method assumes that only a single thread is ever calling
+  // this for a particular region at once.
   void free_humongous_region(HeapRegion* hr,
-                             FreeRegionList* free_list,
-                             bool skip_remset);
+                             FreeRegionList* free_list);
 
   // Facility for allocating in 'archive' regions in high heap memory and
   // recording the allocated ranges. These should all be called from the
@@ -919,8 +906,6 @@ private:
   // discovery.
   G1CMIsAliveClosure _is_alive_closure_cm;
 
-  volatile bool _free_regions_coming;
-
 public:
 
   RefToScanQueue *task_queue(uint i) const;
@@ -1066,26 +1051,6 @@ public:
   }
 #endif // ASSERT
 
-  // Wrapper for the region list operations that can be called from
-  // methods outside this class.
-
-  void secondary_free_list_add(FreeRegionList* list) {
-    _secondary_free_list.add_ordered(list);
-  }
-
-  void append_secondary_free_list() {
-    _hrm.insert_list_into_free_list(&_secondary_free_list);
-  }
-
-  void append_secondary_free_list_if_not_empty_with_lock() {
-    // If the secondary free list looks empty there's no reason to
-    // take the lock and then try to append it.
-    if (!_secondary_free_list.is_empty()) {
-      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-      append_secondary_free_list();
-    }
-  }
-
   inline void old_set_add(HeapRegion* hr);
   inline void old_set_remove(HeapRegion* hr);
@@ -1093,11 +1058,6 @@ public:
     return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
   }
 
-  void set_free_regions_coming();
-  void reset_free_regions_coming();
-  bool free_regions_coming() { return _free_regions_coming; }
-  void wait_while_free_regions_coming();
-
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr);

View file

@@ -71,9 +71,6 @@ class G1CollectorState {
   bool _in_marking_window;
   bool _in_marking_window_im;
 
-  // Are we going into a mixed gc phase.
-  bool _mixed_gc_pending;
-
   bool _full_collection;
 
 public:
@@ -81,7 +78,6 @@ class G1CollectorState {
     _gcs_are_young(true),
     _last_gc_was_young(false),
     _last_young_gc(false),
-    _mixed_gc_pending(false),
 
     _during_initial_mark_pause(false),
     _initiate_conc_mark_if_possible(false),
@@ -95,14 +91,13 @@
   // Setters
   void set_gcs_are_young(bool v) { _gcs_are_young = v; }
   void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
-  void set_last_young_gc(bool v) { _last_young_gc = v; _mixed_gc_pending = false;}
+  void set_last_young_gc(bool v) { _last_young_gc = v; }
   void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
   void set_during_marking(bool v) { _during_marking = v; }
   void set_mark_in_progress(bool v) { _mark_in_progress = v; }
   void set_in_marking_window(bool v) { _in_marking_window = v; }
   void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
-  void set_mixed_gc_pending(bool v) { _mixed_gc_pending = v; }
   void set_full_collection(bool v) { _full_collection = v; }
 
   // Getters
@@ -115,7 +110,6 @@
   bool mark_in_progress() const { return _mark_in_progress; }
   bool in_marking_window() const { return _in_marking_window; }
   bool in_marking_window_im() const { return _in_marking_window_im; }
-  bool mixed_gc_pending() const { return _mixed_gc_pending; }
   bool full_collection() const { return _full_collection; }
 
   // Composite booleans (clients worry about flickering)

View file

@@ -342,7 +342,6 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
   _g1h(g1h),
   _completed_initialization(false),
 
-  _cleanup_list("Concurrent Mark Cleanup List"),
   _mark_bitmap_1(),
   _mark_bitmap_2(),
   _prev_mark_bitmap(&_mark_bitmap_1),
@@ -978,6 +977,7 @@ void G1ConcurrentMark::concurrent_cycle_end() {
   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 
   if (has_aborted()) {
+    log_info(gc, marking)("Concurrent Mark Abort");
     _gc_tracer_cm->report_concurrent_mode_failure();
   }
@@ -1137,94 +1137,82 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
 }
 
-class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
-  G1CollectedHeap* _g1;
-  size_t _freed_bytes;
-  FreeRegionList* _local_cleanup_list;
-  uint _old_regions_removed;
-  uint _humongous_regions_removed;
-  HRRSCleanupTask* _hrrs_cleanup_task;
-
-public:
-  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             FreeRegionList* local_cleanup_list,
-                             HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1),
-    _freed_bytes(0),
-    _local_cleanup_list(local_cleanup_list),
-    _old_regions_removed(0),
-    _humongous_regions_removed(0),
-    _hrrs_cleanup_task(hrrs_cleanup_task) { }
-
-  size_t freed_bytes() { return _freed_bytes; }
-  const uint old_regions_removed() { return _old_regions_removed; }
-  const uint humongous_regions_removed() { return _humongous_regions_removed; }
-
-  bool do_heap_region(HeapRegion *hr) {
-    _g1->reset_gc_time_stamps(hr);
-    hr->note_end_of_marking();
-
-    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
-      _freed_bytes += hr->used();
-      hr->set_containing_set(NULL);
-      if (hr->is_humongous()) {
-        _humongous_regions_removed++;
-        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
-      } else {
-        _old_regions_removed++;
-        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
-      }
-    } else {
-      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
-    }
-
-    return false;
-  }
-};
-
-class G1ParNoteEndTask: public AbstractGangTask {
-  friend class G1NoteEndOfConcMarkClosure;
-
-protected:
+class G1CleanupTask: public AbstractGangTask {
+  // Per-region work during the Cleanup pause.
+  class G1CleanupRegionsClosure : public HeapRegionClosure {
+    G1CollectedHeap* _g1;
+    size_t _freed_bytes;
+    FreeRegionList* _local_cleanup_list;
+    uint _old_regions_removed;
+    uint _humongous_regions_removed;
+    HRRSCleanupTask* _hrrs_cleanup_task;
+
+  public:
+    G1CleanupRegionsClosure(G1CollectedHeap* g1,
+                            FreeRegionList* local_cleanup_list,
+                            HRRSCleanupTask* hrrs_cleanup_task) :
+      _g1(g1),
+      _freed_bytes(0),
+      _local_cleanup_list(local_cleanup_list),
+      _old_regions_removed(0),
+      _humongous_regions_removed(0),
+      _hrrs_cleanup_task(hrrs_cleanup_task) { }
+
+    size_t freed_bytes() { return _freed_bytes; }
+    const uint old_regions_removed() { return _old_regions_removed; }
+    const uint humongous_regions_removed() { return _humongous_regions_removed; }
+
+    bool do_heap_region(HeapRegion *hr) {
+      _g1->reset_gc_time_stamps(hr);
+      hr->note_end_of_marking();
+
+      if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
+        _freed_bytes += hr->used();
+        hr->set_containing_set(NULL);
+        if (hr->is_humongous()) {
+          _humongous_regions_removed++;
+          _g1->free_humongous_region(hr, _local_cleanup_list);
+        } else {
+          _old_regions_removed++;
+          _g1->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
+        }
+        hr->clear_cardtable();
+      } else {
+        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
+      }
+
+      return false;
+    }
+  };
+
   G1CollectedHeap* _g1h;
   FreeRegionList* _cleanup_list;
   HeapRegionClaimer _hrclaimer;
 
 public:
-  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
-    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
+  G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
+    AbstractGangTask("G1 Cleanup"),
+    _g1h(g1h),
+    _cleanup_list(cleanup_list),
+    _hrclaimer(n_workers) {
+
+    HeapRegionRemSet::reset_for_cleanup_tasks();
   }
 
   void work(uint worker_id) {
     FreeRegionList local_cleanup_list("Local Cleanup List");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
-                                           &hrrs_cleanup_task);
-    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
-    assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
+    G1CleanupRegionsClosure cl(_g1h,
+                               &local_cleanup_list,
+                               &hrrs_cleanup_task);
+    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
+    assert(cl.is_complete(), "Shouldn't have aborted!");
 
-    // Now update the lists
-    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
+    // Now update the old/humongous region sets
+    _g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
     {
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
-
-      // If we iterate over the global cleanup list at the end of
-      // cleanup to do this printing we will not guarantee to only
-      // generate output for the newly-reclaimed regions (the list
-      // might not be empty at the beginning of cleanup; we might
-      // still be working on its previous contents). So we do the
-      // printing here, before we append the new regions to the global
-      // cleanup list.
-
-      G1HRPrinter* hr_printer = _g1h->hr_printer();
-      if (hr_printer->is_active()) {
-        FreeRegionListIterator iter(&local_cleanup_list);
-        while (iter.more_available()) {
-          HeapRegion* hr = iter.get_next();
-          hr_printer->cleanup(hr);
-        }
-      }
+      _g1h->decrement_summary_bytes(cl.freed_bytes());
 
       _cleanup_list->add_ordered(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
@@ -1234,6 +1222,28 @@
   }
 };
 
+void G1ConcurrentMark::reclaim_empty_regions() {
+  WorkGang* workers = _g1h->workers();
+  FreeRegionList empty_regions_list("Empty Regions After Mark List");
+
+  G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
+  workers->run_task(&cl);
+
+  if (!empty_regions_list.is_empty()) {
+    // Now print the empty regions list.
+    G1HRPrinter* hrp = _g1h->hr_printer();
+    if (hrp->is_active()) {
+      FreeRegionListIterator iter(&empty_regions_list);
+      while (iter.more_available()) {
+        HeapRegion* hr = iter.get_next();
+        hrp->cleanup(hr);
+      }
+    }
+    // And actually make them available.
+    _g1h->prepend_to_freelist(&empty_regions_list);
+  }
+}
+
 void G1ConcurrentMark::cleanup() {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
@@ -1258,8 +1268,6 @@ void G1ConcurrentMark::cleanup() {
   double start = os::elapsedTime();
 
-  HeapRegionRemSet::reset_for_cleanup_tasks();
-
   {
     GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
@@ -1277,29 +1285,19 @@
     _g1h->heap_region_iterate(&cl);
   }
 
-  // Install newly created mark bitMap as "prev".
-  swap_mark_bitmaps();
-
   g1h->reset_gc_time_stamp();
 
-  uint n_workers = _g1h->workers()->active_workers();
-
-  // Note end of marking in all heap regions.
-  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
-  g1h->workers()->run_task(&g1_par_note_end_task);
-  g1h->check_gc_time_stamps();
-
-  if (!cleanup_list_is_empty()) {
-    // The cleanup list is not empty, so we'll have to process it
-    // concurrently. Notify anyone else that might be wanting free
-    // regions that there will be more free regions coming soon.
-    g1h->set_free_regions_coming();
-  }
+  // Install newly created mark bitmap as "prev".
+  swap_mark_bitmaps();
+  {
+    GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
+    reclaim_empty_regions();
+  }
+
+  g1h->check_gc_time_stamps();
 
   {
     GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
-    // This will also free any regions totally full of garbage objects,
-    // and sort the regions.
     g1h->g1_policy()->record_concurrent_mark_cleanup_end();
   }
@@ -1307,7 +1305,7 @@
   double end = os::elapsedTime();
   _cleanup_times.add((end - start) * 1000.0);
 
-  // Clean up will have freed any regions completely full of garbage.
+  // Cleanup will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
@@ -1334,57 +1332,6 @@
   g1h->g1mm()->update_sizes();
 }
 
-void G1ConcurrentMark::complete_cleanup() {
-  if (has_aborted()) return;
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  _cleanup_list.verify_optional();
-  FreeRegionList tmp_free_list("Tmp Free List");
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                  "cleanup list has %u entries",
-                                  _cleanup_list.length());
-
-  // No one else should be accessing the _cleanup_list at this point,
-  // so it is not necessary to take any locks
-  while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
-    assert(hr != NULL, "Got NULL from a non-empty list");
-    hr->par_clear();
-    tmp_free_list.add_ordered(hr);
-
-    // Instead of adding one region at a time to the secondary_free_list,
-    // we accumulate them in the local list and move them a few at a
-    // time. This also cuts down on the number of notify_all() calls
-    // we do during this process. We'll also append the local list when
-    // _cleanup_list is empty (which means we just removed the last
-    // region from the _cleanup_list).
-    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
-        _cleanup_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
-                                      "appending %u entries to the secondary_free_list, "
-                                      "cleanup list still has %u entries",
-                                      tmp_free_list.length(),
-                                      _cleanup_list.length());
-
-      {
-        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-        g1h->secondary_free_list_add(&tmp_free_list);
-        SecondaryFreeList_lock->notify_all();
-      }
-#ifndef PRODUCT
-      if (G1StressConcRegionFreeing) {
-        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
-          os::sleep(Thread::current(), (jlong) 1, false);
-        }
-      }
-#endif
-    }
-  }
-  assert(tmp_free_list.is_empty(), "post-condition");
-}
-
 // Supporting Object and Oop closures for reference discovery
 // and processing in during marking

View file

@@ -291,8 +291,6 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
   G1CollectedHeap* _g1h;                 // The heap
   bool             _completed_initialization; // Set to true when initialization is complete
 
-  FreeRegionList   _cleanup_list;
-
   // Concurrent marking support structures
   G1CMBitMap       _mark_bitmap_1;
   G1CMBitMap       _mark_bitmap_2;
@@ -373,6 +371,8 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
 
   void swap_mark_bitmaps();
 
+  void reclaim_empty_regions();
+
   // Resets the global marking data structures, as well as the
   // task local ones; should be called during initial mark.
   void reset();
@@ -395,10 +395,6 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
   // Prints all gathered CM-related statistics
   void print_stats();
 
-  bool cleanup_list_is_empty() {
-    return _cleanup_list.is_empty();
-  }
-
   HeapWord* finger()     { return _finger; }
   bool concurrent()      { return _concurrent; }
   uint active_tasks()    { return _num_active_tasks; }
@@ -569,8 +565,6 @@ public:
   void checkpoint_roots_final_work();
 
   void cleanup();
-  void complete_cleanup();
-
   // Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
   // this carefully.
   inline void mark_in_prev_bitmap(oop p);

View file

@@ -103,7 +103,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(Heap
   hr->set_containing_set(NULL);
   _humongous_regions_removed++;
 
-  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
+  _g1h->free_humongous_region(hr, &dummy_free_list);
   prepare_for_compaction(hr);
   dummy_free_list.remove_all();
 }
@@ -111,8 +111,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(Heap
 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
   hr->reset_gc_time_stamp();
   hr->rem_set()->clear();
-
-  _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
+  hr->clear_cardtable();
 
   if (_g1h->g1_hot_card_cache()->use_cache()) {
     _g1h->g1_hot_card_cache()->reset_card_counts(hr);

View file

@@ -520,32 +520,6 @@ void G1HeapVerifier::verify_region_sets() {
   // First, check the explicit lists.
   _g1h->_hrm.verify();
 
-  {
-    // Given that a concurrent operation might be adding regions to
-    // the secondary free list we have to take the lock before
-    // verifying it.
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    _g1h->_secondary_free_list.verify_list();
-  }
-
-  // If a concurrent region freeing operation is in progress it will
-  // be difficult to correctly attributed any free regions we come
-  // across to the correct free list given that they might belong to
-  // one of several (free_list, secondary_free_list, any local lists,
-  // etc.). So, if that's the case we will skip the rest of the
-  // verification operation. Alternatively, waiting for the concurrent
-  // operation to complete will have a non-trivial effect on the GC's
-  // operation (no concurrent operation will last longer than the
-  // interval between two calls to verification) and it might hide
-  // any issues that we would like to catch during testing.
-  if (_g1h->free_regions_coming()) {
-    return;
-  }
-
-  // Make sure we append the secondary_free_list on the free_list so
-  // that all free regions we will come across can be safely
-  // attributed to the free_list.
-  _g1h->append_secondary_free_list_if_not_empty_with_lock();
-
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.

View file

@@ -501,11 +501,6 @@ void G1Policy::record_concurrent_mark_cleanup_start() {
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void G1Policy::record_concurrent_mark_cleanup_completed() {
-  collector_state()->set_last_young_gc(collector_state()->mixed_gc_pending());
-  collector_state()->set_in_marking_window(false);
-}
-
 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
   return phase_times()->average_time_ms(phase);
 }
@@ -533,7 +528,6 @@ CollectionSetChooser* G1Policy::cset_chooser() const {
 }
 
 bool G1Policy::about_to_start_mixed_phase() const {
-  guarantee(_g1->concurrent_mark()->cm_thread()->during_cycle() || !collector_state()->mixed_gc_pending(), "Pending mixed phase when CM is idle!");
   return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
 }
@@ -996,7 +990,8 @@ void G1Policy::record_concurrent_mark_cleanup_end() {
     clear_collection_set_candidates();
     abort_time_to_mixed_tracking();
   }
-  collector_state()->set_mixed_gc_pending(mixed_gc_pending);
+  collector_state()->set_last_young_gc(mixed_gc_pending);
+  collector_state()->set_in_marking_window(false);
 
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;

View file

@@ -323,7 +323,6 @@ public:
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
   void record_concurrent_mark_cleanup_end();
-  void record_concurrent_mark_cleanup_completed();
 
   void print_phases();

View file

@@ -75,8 +75,6 @@ private:
   static size_t chunk_size() { return M; }
 
   void work(uint worker_id) {
-    G1CardTable* ct = _g1h->card_table();
-
     while (_cur_dirty_regions < _num_dirty_regions) {
       size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
       size_t max = MIN2(next + _chunk_length, _num_dirty_regions);
@@ -84,7 +82,7 @@
       for (size_t i = next; i < max; i++) {
         HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
         if (!r->is_survivor()) {
-          ct->clear(MemRegion(r->bottom(), r->end()));
+          r->clear_cardtable();
         }
       }
     }
@@ -272,9 +270,6 @@ public:
   workers->run_task(&cl, num_workers);
 
 #ifndef PRODUCT
-  // Need to synchronize with concurrent cleanup since it needs to
-  // finish its card table clearing before we can verify.
-  G1CollectedHeap::heap()->wait_while_free_regions_coming();
   G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
 #endif
 }

View file

@@ -59,7 +59,7 @@ void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
 }
 
 void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
-  r->rem_set()->set_state_empty();
+  /* nothing to do */
 }
 
 bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {

View file

@@ -210,16 +210,6 @@
           "during RSet scanning.")                                         \
           range(1, max_uintx)                                              \
                                                                            \
-  develop(uintx, G1SecondaryFreeListAppendLength, 5,                       \
-          "The number of regions we will add to the secondary free list " \
-          "at every append operation")                                     \
-                                                                           \
-  develop(bool, G1StressConcRegionFreeing, false,                          \
-          "It stresses the concurrent region freeing operation")           \
-                                                                           \
-  develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                  \
-          "Artificial delay during concurrent region freeing")             \
-                                                                           \
   develop(uintx, G1DummyRegionsPerGC, 0,                                   \
           "The number of dummy regions G1 will allocate at the end of "    \
          "each evacuation pause in order to artificially fill up the "     \

View file

@@ -135,11 +135,7 @@ void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
   if (clear_space) clear(SpaceDecorator::Mangle);
 }
 
-void HeapRegion::par_clear() {
-  assert(used() == 0, "the region should have been already cleared");
-  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
-  HeapRegionRemSet* hrrs = rem_set();
-  hrrs->clear();
+void HeapRegion::clear_cardtable() {
   G1CardTable* ct = G1CollectedHeap::heap()->card_table();
   ct->clear(MemRegion(bottom(), end()));
 }

View file

@@ -506,10 +506,11 @@ class HeapRegion: public G1ContiguousSpace {
 
   // Reset the HeapRegion to default values.
   // If skip_remset is true, do not clear the remembered set.
+  // If clear_space is true, clear the HeapRegion's memory.
+  // If locked is true, assume we are the only thread doing this operation.
   void hr_clear(bool skip_remset, bool clear_space, bool locked = false);
-  // Clear the parts skipped by skip_remset in hr_clear() in the HeapRegion during
-  // a concurrent phase.
-  void par_clear();
+  // Clear the card table corresponding to this region.
+  void clear_cardtable();
 
   // Get the start of the unmarked area in this region.
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }

View file

@@ -833,8 +833,7 @@ void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
   _other_regions.do_cleanup_work(hrrs_cleanup_task);
 }
 
-void
-HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
+void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
 }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -321,14 +321,6 @@ void MasterFreeRegionListMtSafeChecker::check() {
   }
 }
 
-void SecondaryFreeRegionListMtSafeChecker::check() {
-  // Secondary Free List MT safety protocol:
-  // Operations on the secondary free list should always be invoked
-  // while holding the SecondaryFreeList_lock.
-
-  guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
-}
-
 void OldRegionSetMtSafeChecker::check() {
   // Master Old Set MT safety protocol:
   // (a) If we're at a safepoint, operations on the master old set

View file

@@ -59,7 +59,6 @@
 };
 
 class MasterFreeRegionListMtSafeChecker    : public HRSMtSafeChecker { public: void check(); };
-class SecondaryFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
 class HumongousRegionSetMtSafeChecker      : public HRSMtSafeChecker { public: void check(); };
 class OldRegionSetMtSafeChecker            : public HRSMtSafeChecker { public: void check(); };

View file

@@ -116,7 +116,6 @@ Mutex* PerfDataManager_lock = NULL;
 Mutex*   OopMapCacheAlloc_lock      = NULL;
 
 Mutex*   FreeList_lock              = NULL;
-Monitor* SecondaryFreeList_lock     = NULL;
 Mutex*   OldSets_lock               = NULL;
 Monitor* RootRegionScan_lock        = NULL;
@@ -194,7 +193,6 @@ void mutex_init() {
   def(Shared_DirtyCardQ_lock    , PaddedMutex  , access + 1, true, Monitor::_safepoint_check_never);
 
   def(FreeList_lock             , PaddedMutex  , leaf      , true, Monitor::_safepoint_check_never);
-  def(SecondaryFreeList_lock    , PaddedMonitor, leaf      , true, Monitor::_safepoint_check_never);
   def(OldSets_lock              , PaddedMutex  , leaf      , true, Monitor::_safepoint_check_never);
   def(RootRegionScan_lock       , PaddedMonitor, leaf      , true, Monitor::_safepoint_check_never);

View file

@@ -117,7 +117,6 @@ extern Mutex* ParkerFreeList_lock;
 extern Mutex*   OopMapCacheAlloc_lock;        // protects allocation of oop_map caches
 
 extern Mutex*   FreeList_lock;                // protects the free region list during safepoints
-extern Monitor* SecondaryFreeList_lock;       // protects the secondary free region list
 extern Mutex*   OldSets_lock;                 // protects the old region sets
 extern Monitor* RootRegionScan_lock;          // used to notify that the CM threads have finished scanning the IM snapshot regions

View file

@@ -54,8 +54,6 @@ public class TestConcurrentPhaseControlG1 {
         {"BEFORE_REMARK", null},
         {"REMARK", "Pause Remark"},
         {"REBUILD_REMEMBERED_SETS", "Concurrent Rebuild Remembered Sets"},
-        // "COMPLETE_CLEANUP",  -- optional phase, not reached by this test
-        {"CLEANUP_FOR_NEXT_MARK", "Concurrent Cleanup for Next Mark"},
         // Clear request
         {"IDLE", null},
         {"ANY", null},

View file

@@ -54,7 +54,6 @@ public class TestConcurrentPhaseControlG1Basics {
         "BEFORE_REMARK",
         "REMARK",
         "REBUILD_REMEMBERED_SETS",
-        "COMPLETE_CLEANUP",
         "CLEANUP_FOR_NEXT_MARK",
     };