Mirror of https://github.com/openjdk/jdk.git
8197573: Remove concurrent cleanup and secondary free list handling
Remove secondary free list and all associated functionality, moving the cleanup work into the Cleanup pause instead. Reviewed-by: sangheki, sjohanss
parent 3a5ca51fff
commit 83d7c657c4
22 changed files with 126 additions and 485 deletions
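For orientation before the diff: this change drops the old concurrent "complete cleanup" hand-off (the secondary free list, the free_regions_coming flag, and SecondaryFreeList_lock) and instead reclaims empty regions directly inside the stop-the-world Cleanup pause via the new G1CleanupTask and G1ConcurrentMark::reclaim_empty_regions shown below. The following is a minimal, self-contained sketch of that flow, not HotSpot code; all types, names, and the locking scheme are illustrative stand-ins.

// Simplified sketch (not HotSpot code) of the new Cleanup pause flow: workers
// claim regions, gather the empty ones on per-worker local lists, merge the
// local lists once, and put the regions straight back on the master free list.
// Everything happens inside the pause, so no secondary free list or
// "free regions coming" notification protocol is needed.
#include <atomic>
#include <cstddef>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

struct Region {
  bool is_empty;  // stands in for "no live objects found by marking"
};

class Heap {
  std::vector<Region*> regions_;    // all heap regions
  std::vector<Region*> free_list_;  // master free list
  std::mutex free_list_lock_;
  std::atomic<std::size_t> claim_{0};  // region claimer shared by the workers

 public:
  explicit Heap(std::vector<Region*> regions) : regions_(std::move(regions)) {}

  // Runs inside the Cleanup pause; analogous to G1CleanupTask in the diff.
  void reclaim_empty_regions(unsigned n_workers) {
    std::vector<Region*> reclaimed;  // global list, like "Empty Regions After Mark List"

    auto worker = [&]() {
      std::vector<Region*> local;  // per-worker local cleanup list
      for (std::size_t i = claim_.fetch_add(1); i < regions_.size(); i = claim_.fetch_add(1)) {
        if (regions_[i]->is_empty) {
          local.push_back(regions_[i]);  // free_region()/free_humongous_region() analogue
        }
      }
      // Merge the local list into the shared one under the lock.
      std::lock_guard<std::mutex> guard(free_list_lock_);
      reclaimed.insert(reclaimed.end(), local.begin(), local.end());
    };

    std::vector<std::thread> gang;
    for (unsigned i = 0; i < n_workers; ++i) gang.emplace_back(worker);
    for (auto& t : gang) t.join();

    // Make the regions available immediately (prepend_to_freelist() analogue);
    // no secondary list and no notification of waiting allocators required.
    std::lock_guard<std::mutex> guard(free_list_lock_);
    free_list_.insert(free_list_.end(), reclaimed.begin(), reclaimed.end());
  }
};

Because the regions are freed while the world is stopped, allocators can never race with the freeing, which is why the deadlock-avoidance dance around the suspendible thread set and the SecondaryFreeList_lock wait/notify protocol removed below become unnecessary.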
@@ -60,7 +60,6 @@ STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
expander(BEFORE_REMARK,, NULL) \
expander(REMARK,, NULL) \
expander(REBUILD_REMEMBERED_SETS,, "Concurrent Rebuild Remembered Sets") \
expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup") \
expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark") \
/* */

@@ -357,86 +356,18 @@ void ConcurrentMarkThread::run_service() {

double end_time = os::elapsedVTime();
// Update the total virtual time before doing this, since it will try
// to measure it to get the vtime for this marking. We purposely
// neglect the presumably-short "completeCleanup" phase here.
// to measure it to get the vtime for this marking.
_vtime_accum = (end_time - _vtime_start);

if (!cm()->has_aborted()) {
delay_to_keep_mmu(g1_policy, false /* cleanup */);
}

if (!cm()->has_aborted()) {
CMCleanUp cl_cl(_cm);
VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
VMThread::execute(&op);
}
} else {
// We don't want to update the marking status if a GC pause
// is already underway.
SuspendibleThreadSetJoiner sts_join;
g1h->collector_state()->set_mark_in_progress(false);
}

// Check if cleanup set the free_regions_coming flag. If it
// hasn't, we can just skip the next step.
if (g1h->free_regions_coming()) {
// The following will finish freeing up any regions that we
// found to be empty during cleanup. We'll do this part
// without joining the suspendible set. If an evacuation pause
// takes place, then we would carry on freeing regions in
// case they are needed by the pause. If a Full GC takes
// place, it would wait for us to process the regions
// reclaimed by cleanup.

// Now do the concurrent cleanup operation.
G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
_cm->complete_cleanup();

// Notify anyone who's waiting that there are no more free
// regions coming. We have to do this before we join the STS
// (in fact, we should not attempt to join the STS in the
// interval between finishing the cleanup pause and clearing
// the free_regions_coming flag) otherwise we might deadlock:
// a GC worker could be blocked waiting for the notification
// whereas this thread will be blocked for the pause to finish
// while it's trying to join the STS, which is conditional on
// the GC workers finishing.
g1h->reset_free_regions_coming();
}
guarantee(cm()->cleanup_list_is_empty(),
"at this point there should be no regions on the cleanup list");

// There is a tricky race before recording that the concurrent
// cleanup has completed and a potential Full GC starting around
// the same time. We want to make sure that the Full GC calls
// abort() on concurrent mark after
// record_concurrent_mark_cleanup_completed(), since abort() is
// the method that will reset the concurrent mark state. If we
// end up calling record_concurrent_mark_cleanup_completed()
// after abort() then we might incorrectly undo some of the work
// abort() did. Checking the has_aborted() flag after joining
// the STS allows the correct ordering of the two methods. There
// are two scenarios:
//
// a) If we reach here before the Full GC, the fact that we have
// joined the STS means that the Full GC cannot start until we
// leave the STS, so record_concurrent_mark_cleanup_completed()
// will complete before abort() is called.
//
// b) If we reach here during the Full GC, we'll be held up from
// joining the STS until the Full GC is done, which means that
// abort() will have completed and has_aborted() will return
// true to prevent us from calling
// record_concurrent_mark_cleanup_completed() (and, in fact, it's
// not needed any more as the concurrent mark state has been
// already reset).
{
SuspendibleThreadSetJoiner sts_join;
if (!cm()->has_aborted()) {
g1_policy->record_concurrent_mark_cleanup_completed();
} else {
log_info(gc, marking)("Concurrent Mark Abort");
}
}

// We now want to allow clearing of the marking bitmap to be
// suspended by a collection pause.
@@ -156,63 +156,13 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,

// Private methods.

HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
while (!_secondary_free_list.is_empty() || free_regions_coming()) {
if (!_secondary_free_list.is_empty()) {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
"secondary_free_list has %u entries",
_secondary_free_list.length());
// It looks as if there are free regions available on the
// secondary_free_list. Let's move them to the free_list and try
// again to allocate from it.
append_secondary_free_list();

assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
"empty we should have moved at least one entry to the free_list");
HeapRegion* res = _hrm.allocate_free_region(is_old);
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
"allocated " HR_FORMAT " from secondary_free_list",
HR_FORMAT_PARAMS(res));
return res;
}

// Wait here until we get notified either when (a) there are no
// more free regions coming or (b) some regions have been moved on
// the secondary_free_list.
SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
}

log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
"could not allocate from secondary_free_list");
return NULL;
}

HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");

HeapRegion* res;
if (G1StressConcRegionFreeing) {
if (!_secondary_free_list.is_empty()) {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
"forced to look at the secondary_free_list");
res = new_region_try_secondary_free_list(is_old);
if (res != NULL) {
return res;
}
}
}
HeapRegion* res = _hrm.allocate_free_region(is_old);

res = _hrm.allocate_free_region(is_old);

if (res == NULL) {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
"res == NULL, trying the secondary_free_list");
res = new_region_try_secondary_free_list(is_old);
}
if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
// Currently, only attempts to allocate GC alloc regions set
// do_expand to true. So, we should only reach here during a

@@ -380,17 +330,6 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
first = hr->hrm_index();
}
} else {
// We can't allocate humongous regions spanning more than one region while
// cleanupComplete() is running, since some of the regions we find to be
// empty might not yet be added to the free list. It is not straightforward
// to know in which list they are on so that we can remove them. We only
// need to do this if we need to allocate more than one region to satisfy the
// current humongous allocation request. If we are only allocating one region
// we use the one-region region allocation code (see above), that already
// potentially waits for regions from the secondary free list.
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty_with_lock();

// Policy: Try only empty regions (i.e. already committed first). Maybe we
// are lucky enough to find some.
first = _hrm.find_contiguous_only_empty(obj_regions);

@@ -1026,11 +965,6 @@ void G1CollectedHeap::print_hrm_post_compaction() {
}

void G1CollectedHeap::abort_concurrent_cycle() {
// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
wait_while_free_regions_coming();
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until

@@ -1038,7 +972,6 @@ void G1CollectedHeap::abort_concurrent_cycle() {
// early.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
append_secondary_free_list_if_not_empty_with_lock();

// Disable discovery and empty the discovered lists
// for the CM ref processor.
@@ -1476,13 +1409,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_cr(NULL),
_g1mm(NULL),
_preserved_marks_set(true /* in_c_heap */),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false),
_archive_allocator(NULL),
_free_regions_coming(false),
_gc_time_stamp(0),
_summary_bytes_used(0),
_survivor_evac_stats("Young", YoungPLABSize, PLABWeight),

@@ -2920,16 +2851,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(&_memory_manager, gc_cause());

// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
append_secondary_free_list_if_not_empty_with_lock();
}

G1HeapTransition heap_transition(this);
size_t heap_used_bytes_before_gc = used();

@@ -4424,12 +4345,11 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
}

void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool skip_remset) {
FreeRegionList* free_list) {
assert(hr->is_humongous(), "this is only for humongous regions");
assert(free_list != NULL, "pre-condition");
hr->clear_humongous();
free_region(hr, free_list, skip_remset);
free_region(hr, free_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
}

void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,

@@ -4821,7 +4741,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
_freed_bytes += r->used();
r->set_containing_set(NULL);
_humongous_regions_reclaimed++;
g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
g1h->free_humongous_region(r, _free_region_list);
r = next;
} while (r != NULL);

@@ -4893,44 +4813,6 @@ void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
collection_set->stop_incremental_building();
}

void G1CollectedHeap::set_free_regions_coming() {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");

assert(!free_regions_coming(), "pre-condition");
_free_regions_coming = true;
}

void G1CollectedHeap::reset_free_regions_coming() {
assert(free_regions_coming(), "pre-condition");

{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_free_regions_coming = false;
SecondaryFreeList_lock->notify_all();
}

log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
}

void G1CollectedHeap::wait_while_free_regions_coming() {
// Most of the time we won't have to wait, so let's do a quick test
// first before we take the lock.
if (!free_regions_coming()) {
return;
}

log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");

{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
while (free_regions_coming()) {
SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
}
}

log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
}

bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
return _allocator->is_retained_old_region(hr);
}
@@ -163,11 +163,6 @@ private:

static size_t _humongous_object_threshold_in_words;

// The secondary free list which contains regions that have been
// freed up during the cleanup process. This will be appended to
// the master free list when appropriate.
FreeRegionList _secondary_free_list;

// It keeps track of the old regions.
HeapRegionSet _old_set;

@@ -380,13 +375,6 @@ private:

G1CollectionSet _collection_set;

// This is the second level of trying to allocate a new region. If
// new_region() didn't find a region on the free_list, this call will
// check whether there's anything available on the
// secondary_free_list and/or wait for more regions to appear on
// that list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list(bool is_old);

// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation

@@ -657,12 +645,11 @@ public:
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If skip_remset is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
// list later).
// The method assumes that only a single thread is ever calling
// this for a particular region at once.
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool skip_remset);
FreeRegionList* free_list);

// Facility for allocating in 'archive' regions in high heap memory and
// recording the allocated ranges. These should all be called from the

@@ -919,8 +906,6 @@ private:
// discovery.
G1CMIsAliveClosure _is_alive_closure_cm;

volatile bool _free_regions_coming;

public:

RefToScanQueue *task_queue(uint i) const;

@@ -1066,26 +1051,6 @@ public:
}
#endif // ASSERT

// Wrapper for the region list operations that can be called from
// methods outside this class.

void secondary_free_list_add(FreeRegionList* list) {
_secondary_free_list.add_ordered(list);
}

void append_secondary_free_list() {
_hrm.insert_list_into_free_list(&_secondary_free_list);
}

void append_secondary_free_list_if_not_empty_with_lock() {
// If the secondary free list looks empty there's no reason to
// take the lock and then try to append it.
if (!_secondary_free_list.is_empty()) {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
append_secondary_free_list();
}
}

inline void old_set_add(HeapRegion* hr);
inline void old_set_remove(HeapRegion* hr);

@@ -1093,11 +1058,6 @@ public:
return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
}

void set_free_regions_coming();
void reset_free_regions_coming();
bool free_regions_coming() { return _free_regions_coming; }
void wait_while_free_regions_coming();

// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(HeapRegion* hr);
@@ -71,9 +71,6 @@ class G1CollectorState {
bool _in_marking_window;
bool _in_marking_window_im;

// Are we going into a mixed gc phase.
bool _mixed_gc_pending;

bool _full_collection;

public:

@@ -81,7 +78,6 @@ class G1CollectorState {
_gcs_are_young(true),
_last_gc_was_young(false),
_last_young_gc(false),
_mixed_gc_pending(false),

_during_initial_mark_pause(false),
_initiate_conc_mark_if_possible(false),

@@ -95,14 +91,13 @@ class G1CollectorState {
// Setters
void set_gcs_are_young(bool v) { _gcs_are_young = v; }
void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
void set_last_young_gc(bool v) { _last_young_gc = v; _mixed_gc_pending = false;}
void set_last_young_gc(bool v) { _last_young_gc = v; }
void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
void set_during_marking(bool v) { _during_marking = v; }
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_mixed_gc_pending(bool v) { _mixed_gc_pending = v; }
void set_full_collection(bool v) { _full_collection = v; }

// Getters

@@ -115,7 +110,6 @@ class G1CollectorState {
bool mark_in_progress() const { return _mark_in_progress; }
bool in_marking_window() const { return _in_marking_window; }
bool in_marking_window_im() const { return _in_marking_window_im; }
bool mixed_gc_pending() const { return _mixed_gc_pending; }
bool full_collection() const { return _full_collection; }

// Composite booleans (clients worry about flickering)
@@ -342,7 +342,6 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_g1h(g1h),
_completed_initialization(false),

_cleanup_list("Concurrent Mark Cleanup List"),
_mark_bitmap_1(),
_mark_bitmap_2(),
_prev_mark_bitmap(&_mark_bitmap_1),

@@ -978,6 +977,7 @@ void G1ConcurrentMark::concurrent_cycle_end() {
_g1h->trace_heap_after_gc(_gc_tracer_cm);

if (has_aborted()) {
log_info(gc, marking)("Concurrent Mark Abort");
_gc_tracer_cm->report_concurrent_mode_failure();
}

@@ -1137,7 +1137,9 @@ void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
class G1CleanupTask: public AbstractGangTask {
// Per-region work during the Cleanup pause.
class G1CleanupRegionsClosure : public HeapRegionClosure {
G1CollectedHeap* _g1;
size_t _freed_bytes;
FreeRegionList* _local_cleanup_list;

@@ -1145,8 +1147,8 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
uint _humongous_regions_removed;
HRRSCleanupTask* _hrrs_cleanup_task;

public:
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
public:
G1CleanupRegionsClosure(G1CollectedHeap* g1,
FreeRegionList* local_cleanup_list,
HRRSCleanupTask* hrrs_cleanup_task) :
_g1(g1),

@@ -1169,62 +1171,48 @@ public:
hr->set_containing_set(NULL);
if (hr->is_humongous()) {
_humongous_regions_removed++;
_g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
_g1->free_humongous_region(hr, _local_cleanup_list);
} else {
_old_regions_removed++;
_g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
_g1->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
}
hr->clear_cardtable();
} else {
hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
}

return false;
}
};
};

class G1ParNoteEndTask: public AbstractGangTask {
friend class G1NoteEndOfConcMarkClosure;

protected:
G1CollectedHeap* _g1h;
FreeRegionList* _cleanup_list;
HeapRegionClaimer _hrclaimer;

public:
G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
G1CleanupTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
AbstractGangTask("G1 Cleanup"),
_g1h(g1h),
_cleanup_list(cleanup_list),
_hrclaimer(n_workers) {

HeapRegionRemSet::reset_for_cleanup_tasks();
}

void work(uint worker_id) {
FreeRegionList local_cleanup_list("Local Cleanup List");
HRRSCleanupTask hrrs_cleanup_task;
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
G1CleanupRegionsClosure cl(_g1h,
&local_cleanup_list,
&hrrs_cleanup_task);
_g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
assert(g1_note_end.is_complete(), "Shouldn't have yielded!");
_g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
assert(cl.is_complete(), "Shouldn't have aborted!");

// Now update the lists
_g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
// Now update the old/humongous region sets
_g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
{
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

// If we iterate over the global cleanup list at the end of
// cleanup to do this printing we will not guarantee to only
// generate output for the newly-reclaimed regions (the list
// might not be empty at the beginning of cleanup; we might
// still be working on its previous contents). So we do the
// printing here, before we append the new regions to the global
// cleanup list.

G1HRPrinter* hr_printer = _g1h->hr_printer();
if (hr_printer->is_active()) {
FreeRegionListIterator iter(&local_cleanup_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
hr_printer->cleanup(hr);
}
}
_g1h->decrement_summary_bytes(cl.freed_bytes());

_cleanup_list->add_ordered(&local_cleanup_list);
assert(local_cleanup_list.is_empty(), "post-condition");
@@ -1234,6 +1222,28 @@ public:
}
};

void G1ConcurrentMark::reclaim_empty_regions() {
WorkGang* workers = _g1h->workers();
FreeRegionList empty_regions_list("Empty Regions After Mark List");

G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
workers->run_task(&cl);

if (!empty_regions_list.is_empty()) {
// Now print the empty regions list.
G1HRPrinter* hrp = _g1h->hr_printer();
if (hrp->is_active()) {
FreeRegionListIterator iter(&empty_regions_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
hrp->cleanup(hr);
}
}
// And actually make them available.
_g1h->prepend_to_freelist(&empty_regions_list);
}
}

void G1ConcurrentMark::cleanup() {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),

@@ -1258,8 +1268,6 @@ void G1ConcurrentMark::cleanup() {

double start = os::elapsedTime();

HeapRegionRemSet::reset_for_cleanup_tasks();

{
GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
G1UpdateRemSetTrackingAfterRebuild cl(_g1h);

@@ -1277,29 +1285,19 @@ void G1ConcurrentMark::cleanup() {
_g1h->heap_region_iterate(&cl);
}

// Install newly created mark bitMap as "prev".
swap_mark_bitmaps();

g1h->reset_gc_time_stamp();

uint n_workers = _g1h->workers()->active_workers();

// Note end of marking in all heap regions.
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
g1h->workers()->run_task(&g1_par_note_end_task);
g1h->check_gc_time_stamps();

if (!cleanup_list_is_empty()) {
// The cleanup list is not empty, so we'll have to process it
// concurrently. Notify anyone else that might be wanting free
// regions that there will be more free regions coming soon.
g1h->set_free_regions_coming();
// Install newly created mark bitmap as "prev".
swap_mark_bitmaps();
{
GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
reclaim_empty_regions();
}

g1h->check_gc_time_stamps();

{
GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
// This will also free any regions totally full of garbage objects,
// and sort the regions.
g1h->g1_policy()->record_concurrent_mark_cleanup_end();
}

@@ -1307,7 +1305,7 @@ void G1ConcurrentMark::cleanup() {
double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0);

// Clean up will have freed any regions completely full of garbage.
// Cleanup will have freed any regions completely full of garbage.
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();

@@ -1334,57 +1332,6 @@ void G1ConcurrentMark::cleanup() {
g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::complete_cleanup() {
if (has_aborted()) return;

G1CollectedHeap* g1h = G1CollectedHeap::heap();

_cleanup_list.verify_optional();
FreeRegionList tmp_free_list("Tmp Free List");

log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
"cleanup list has %u entries",
_cleanup_list.length());

// No one else should be accessing the _cleanup_list at this point,
// so it is not necessary to take any locks
while (!_cleanup_list.is_empty()) {
HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
assert(hr != NULL, "Got NULL from a non-empty list");
hr->par_clear();
tmp_free_list.add_ordered(hr);

// Instead of adding one region at a time to the secondary_free_list,
// we accumulate them in the local list and move them a few at a
// time. This also cuts down on the number of notify_all() calls
// we do during this process. We'll also append the local list when
// _cleanup_list is empty (which means we just removed the last
// region from the _cleanup_list).
if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
_cleanup_list.is_empty()) {
log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
"appending %u entries to the secondary_free_list, "
"cleanup list still has %u entries",
tmp_free_list.length(),
_cleanup_list.length());

{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
g1h->secondary_free_list_add(&tmp_free_list);
SecondaryFreeList_lock->notify_all();
}
#ifndef PRODUCT
if (G1StressConcRegionFreeing) {
for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
os::sleep(Thread::current(), (jlong) 1, false);
}
}
#endif
}
}
assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing in during marking
@@ -291,8 +291,6 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
G1CollectedHeap* _g1h; // The heap
bool _completed_initialization; // Set to true when initialization is complete

FreeRegionList _cleanup_list;

// Concurrent marking support structures
G1CMBitMap _mark_bitmap_1;
G1CMBitMap _mark_bitmap_2;

@@ -373,6 +371,8 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {

void swap_mark_bitmaps();

void reclaim_empty_regions();

// Resets the global marking data structures, as well as the
// task local ones; should be called during initial mark.
void reset();

@@ -395,10 +395,6 @@ class G1ConcurrentMark: public CHeapObj<mtGC> {
// Prints all gathered CM-related statistics
void print_stats();

bool cleanup_list_is_empty() {
return _cleanup_list.is_empty();
}

HeapWord* finger() { return _finger; }
bool concurrent() { return _concurrent; }
uint active_tasks() { return _num_active_tasks; }

@@ -569,8 +565,6 @@ public:
void checkpoint_roots_final_work();

void cleanup();
void complete_cleanup();

// Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
// this carefully.
inline void mark_in_prev_bitmap(oop p);
@@ -103,7 +103,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(Heap
hr->set_containing_set(NULL);
_humongous_regions_removed++;

_g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
_g1h->free_humongous_region(hr, &dummy_free_list);
prepare_for_compaction(hr);
dummy_free_list.remove_all();
}

@@ -111,8 +111,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(Heap
void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
hr->reset_gc_time_stamp();
hr->rem_set()->clear();

_g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
hr->clear_cardtable();

if (_g1h->g1_hot_card_cache()->use_cache()) {
_g1h->g1_hot_card_cache()->reset_card_counts(hr);
@@ -520,32 +520,6 @@ void G1HeapVerifier::verify_region_sets() {

// First, check the explicit lists.
_g1h->_hrm.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
// verifying it.
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
_g1h->_secondary_free_list.verify_list();
}

// If a concurrent region freeing operation is in progress it will
// be difficult to correctly attributed any free regions we come
// across to the correct free list given that they might belong to
// one of several (free_list, secondary_free_list, any local lists,
// etc.). So, if that's the case we will skip the rest of the
// verification operation. Alternatively, waiting for the concurrent
// operation to complete will have a non-trivial effect on the GC's
// operation (no concurrent operation will last longer than the
// interval between two calls to verification) and it might hide
// any issues that we would like to catch during testing.
if (_g1h->free_regions_coming()) {
return;
}

// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
_g1h->append_secondary_free_list_if_not_empty_with_lock();

// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
@@ -501,11 +501,6 @@ void G1Policy::record_concurrent_mark_cleanup_start() {
_mark_cleanup_start_sec = os::elapsedTime();
}

void G1Policy::record_concurrent_mark_cleanup_completed() {
collector_state()->set_last_young_gc(collector_state()->mixed_gc_pending());
collector_state()->set_in_marking_window(false);
}

double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
return phase_times()->average_time_ms(phase);
}

@@ -533,7 +528,6 @@ CollectionSetChooser* G1Policy::cset_chooser() const {
}

bool G1Policy::about_to_start_mixed_phase() const {
guarantee(_g1->concurrent_mark()->cm_thread()->during_cycle() || !collector_state()->mixed_gc_pending(), "Pending mixed phase when CM is idle!");
return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
}

@@ -996,7 +990,8 @@ void G1Policy::record_concurrent_mark_cleanup_end() {
clear_collection_set_candidates();
abort_time_to_mixed_tracking();
}
collector_state()->set_mixed_gc_pending(mixed_gc_pending);
collector_state()->set_last_young_gc(mixed_gc_pending);
collector_state()->set_in_marking_window(false);

double end_sec = os::elapsedTime();
double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
@@ -323,7 +323,6 @@ public:
// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_completed();

void print_phases();
@@ -75,8 +75,6 @@ private:
static size_t chunk_size() { return M; }

void work(uint worker_id) {
G1CardTable* ct = _g1h->card_table();

while (_cur_dirty_regions < _num_dirty_regions) {
size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

@@ -84,7 +82,7 @@ private:
for (size_t i = next; i < max; i++) {
HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
if (!r->is_survivor()) {
ct->clear(MemRegion(r->bottom(), r->end()));
r->clear_cardtable();
}
}
}

@@ -272,9 +270,6 @@ public:
workers->run_task(&cl, num_workers);

#ifndef PRODUCT
// Need to synchronize with concurrent cleanup since it needs to
// finish its card table clearing before we can verify.
G1CollectedHeap::heap()->wait_while_free_regions_coming();
G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
}
@@ -59,7 +59,7 @@ void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
}

void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
r->rem_set()->set_state_empty();
/* nothing to do */
}

bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
@@ -210,16 +210,6 @@
"during RSet scanning.") \
range(1, max_uintx) \
\
develop(uintx, G1SecondaryFreeListAppendLength, 5, \
"The number of regions we will add to the secondary free list " \
"at every append operation") \
\
develop(bool, G1StressConcRegionFreeing, false, \
"It stresses the concurrent region freeing operation") \
\
develop(uintx, G1StressConcRegionFreeingDelayMillis, 0, \
"Artificial delay during concurrent region freeing") \
\
develop(uintx, G1DummyRegionsPerGC, 0, \
"The number of dummy regions G1 will allocate at the end of " \
"each evacuation pause in order to artificially fill up the " \
@@ -135,11 +135,7 @@ void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::par_clear() {
assert(used() == 0, "the region should have been already cleared");
assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
void HeapRegion::clear_cardtable() {
G1CardTable* ct = G1CollectedHeap::heap()->card_table();
ct->clear(MemRegion(bottom(), end()));
}
@@ -506,10 +506,11 @@ class HeapRegion: public G1ContiguousSpace {

// Reset the HeapRegion to default values.
// If skip_remset is true, do not clear the remembered set.
// If clear_space is true, clear the HeapRegion's memory.
// If locked is true, assume we are the only thread doing this operation.
void hr_clear(bool skip_remset, bool clear_space, bool locked = false);
// Clear the parts skipped by skip_remset in hr_clear() in the HeapRegion during
// a concurrent phase.
void par_clear();
// Clear the card table corresponding to this region.
void clear_cardtable();

// Get the start of the unmarked area in this region.
HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
@@ -833,8 +833,7 @@ void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
_other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
void HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -321,14 +321,6 @@ void MasterFreeRegionListMtSafeChecker::check() {
}
}

void SecondaryFreeRegionListMtSafeChecker::check() {
// Secondary Free List MT safety protocol:
// Operations on the secondary free list should always be invoked
// while holding the SecondaryFreeList_lock.

guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
}

void OldRegionSetMtSafeChecker::check() {
// Master Old Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master old set
@@ -59,7 +59,6 @@ public:
};

class MasterFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class SecondaryFreeRegionListMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class HumongousRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
class OldRegionSetMtSafeChecker : public HRSMtSafeChecker { public: void check(); };
@@ -116,7 +116,6 @@ Mutex* PerfDataManager_lock = NULL;
Mutex* OopMapCacheAlloc_lock = NULL;

Mutex* FreeList_lock = NULL;
Monitor* SecondaryFreeList_lock = NULL;
Mutex* OldSets_lock = NULL;
Monitor* RootRegionScan_lock = NULL;

@@ -194,7 +193,6 @@ void mutex_init() {
def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, Monitor::_safepoint_check_never);

def(FreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
def(SecondaryFreeList_lock , PaddedMonitor, leaf , true, Monitor::_safepoint_check_never);
def(OldSets_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
def(RootRegionScan_lock , PaddedMonitor, leaf , true, Monitor::_safepoint_check_never);
@@ -117,7 +117,6 @@ extern Mutex* ParkerFreeList_lock;
extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches

extern Mutex* FreeList_lock; // protects the free region list during safepoints
extern Monitor* SecondaryFreeList_lock; // protects the secondary free region list
extern Mutex* OldSets_lock; // protects the old region sets
extern Monitor* RootRegionScan_lock; // used to notify that the CM threads have finished scanning the IM snapshot regions
@@ -54,8 +54,6 @@ public class TestConcurrentPhaseControlG1 {
{"BEFORE_REMARK", null},
{"REMARK", "Pause Remark"},
{"REBUILD_REMEMBERED_SETS", "Concurrent Rebuild Remembered Sets"},
// "COMPLETE_CLEANUP", -- optional phase, not reached by this test
{"CLEANUP_FOR_NEXT_MARK", "Concurrent Cleanup for Next Mark"},
// Clear request
{"IDLE", null},
{"ANY", null},

@@ -54,7 +54,6 @@ public class TestConcurrentPhaseControlG1Basics {
"BEFORE_REMARK",
"REMARK",
"REBUILD_REMEMBERED_SETS",
"COMPLETE_CLEANUP",
"CLEANUP_FOR_NEXT_MARK",
};