John Coomes 2010-11-01 10:49:14 -07:00
commit f07d7731aa
27 changed files with 297 additions and 310 deletions


@@ -354,12 +354,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = gch->get_gen(0)->capacity();
-  if (HandlePromotionFailure) {
-    expected_promotion = MIN2(
-        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
-        expected_promotion);
-  }
+  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
+                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
     // for the next minor collection. Use the padded average as
@@ -865,57 +861,18 @@ size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-  // This is the most conservative test. Full promotion is
-  // guaranteed if this is used. The multiplicative factor is to
-  // account for the worst case "dilatation".
-  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
-  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
-    adjusted_max_promo_bytes = (double)max_uintx;
-  }
-  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
-  if (younger_handles_promotion_failure && !result) {
-    // Full promotion is not guaranteed because fragmentation
-    // of the cms generation can prevent the full promotion.
-    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
-    if (!result) {
-      // With promotion failure handling the test for the ability
-      // to support the promotion does not have to be guaranteed.
-      // Use an average of the amount promoted.
-      result = max_available() >= (size_t)
-        gc_stats()->avg_promoted()->padded_average();
-      if (PrintGC && Verbose && result) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " avg_promoted: " SIZE_FORMAT,
-          max_available(), (size_t)
-          gc_stats()->avg_promoted()->padded_average());
-      }
-    } else {
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr(
-          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-          " max_available: " SIZE_FORMAT
-          " adj_max_promo_bytes: " SIZE_FORMAT,
-          max_available(), (size_t)adjusted_max_promo_bytes);
-      }
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(
-        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " adj_max_promo_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
-    }
-  }
-  return result;
-}
+bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
+}
 // At a promotion failure dump information on block layout in heap
@@ -6091,23 +6048,14 @@ void CMSCollector::sweep(bool asynch) {
   assert(_collectorState == Resizing, "Change of collector state to"
     " Resizing must be done under the freelistLocks (plural)");
-  // Now that sweeping has been completed, if the GCH's
-  // incremental_collection_will_fail flag is set, clear it,
+  // Now that sweeping has been completed, we clear
+  // the incremental_collection_failed flag,
   // thus inviting a younger gen collection to promote into
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  // I think the incremental_collection_will_fail flag's use
-  // is specific to a 2 generation collection policy, so i'll
-  // assert that that's the configuration we are operating within.
-  // The use of the flag can and should be generalized appropriately
-  // in the future to deal with a general n-generation system.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_two_generation_policy(),
-    "Resetting of incremental_collection_will_fail flag"
-    " may be incorrect otherwise");
-  gch->clear_incremental_collection_will_fail();
+  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
   gch->update_full_collections_completed(_collection_count_start);
 }
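Note: the rewritten ConcurrentMarkSweepGeneration::promotion_attempt_is_safe() above boils down to a single disjunction over max_available(). A minimal standalone sketch of that predicate, with illustrative stand-in types rather than HotSpot's own classes:

    #include <cstddef>

    // Stand-ins for max_available() and the padded promotion average kept by gc_stats().
    struct OldGenStats {
      size_t max_available;        // free bytes, counting uncommitted virtual space
      size_t padded_avg_promoted;  // recent promotion volume plus a safety pad
    };

    // Safe if the available space covers either the padded historical average
    // or the worst-case promotion volume passed in by the young generation.
    bool promotion_attempt_is_safe(const OldGenStats& s, size_t max_promotion_in_bytes) {
      return s.max_available >= s.padded_avg_promoted ||
             s.max_available >= max_promotion_in_bytes;
    }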


@@ -1185,8 +1185,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
   // Inform this (non-young) generation that a promotion failure was
   // encountered during a collection of a younger generation that


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,12 +272,16 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
   }
 }
-// Wait until the next synchronous GC or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
+// Wait until the next synchronous GC, a concurrent full gc request,
+// or a timeout, whichever is earlier.
+void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   MutexLockerEx x(CGC_lock,
                   Mutex::_no_safepoint_check_flag);
+  if (_should_terminate || _collector->_full_gc_requested) {
+    return;
+  }
   set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
+  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
   clear_CMS_flag(CMS_cms_wants_token);
   assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
          "Should not be set");
@@ -289,7 +293,8 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
     icms_wait();
     return;
   } else {
-    // Wait until the next synchronous GC or a timeout, whichever is earlier
+    // Wait until the next synchronous GC, a concurrent full gc
+    // request or a timeout, whichever is earlier.
     wait_on_cms_lock(CMSWaitDuration);
   }
   // Check if we should start a CMS collection cycle
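Note: the reworked wait returns immediately when the collector is terminating or a concurrent full gc has already been requested, and (per the header comment added below) a timeout of 0 means the wait has no upper bound. A rough analogue using standard C++ primitives instead of the VM's CGC_lock monitor, with hypothetical names:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct CmsWaitState {
      std::mutex              lock;
      std::condition_variable cv;
      bool should_terminate  = false;  // set when the CMS thread must exit
      bool full_gc_requested = false;  // set by a concurrent full gc request
    };

    // Wait until a full gc request, termination, or t_millis elapses;
    // t_millis == 0 means no upper bound on the wait time.
    void wait_on_cms_lock(CmsWaitState& s, long t_millis) {
      std::unique_lock<std::mutex> x(s.lock);
      auto wake = [&] { return s.should_terminate || s.full_gc_requested; };
      if (wake()) return;                        // don't start waiting in these cases
      if (t_millis == 0) {
        s.cv.wait(x, wake);
      } else {
        s.cv.wait_for(x, std::chrono::milliseconds(t_millis), wake);
      }
    }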


@@ -120,8 +120,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
   }
   // Wait on CMS lock until the next synchronous GC
-  // or given timeout, whichever is earlier.
-  void wait_on_cms_lock(long t); // milliseconds
+  // or given timeout, whichever is earlier. A timeout value
+  // of 0 indicates that there is no upper bound on the wait time.
+  // A concurrent full gc request terminates the wait.
+  void wait_on_cms_lock(long t_millis);
   // The CMS thread will yield during the work portion of its cycle
   // only when requested to. Both synchronous and asychronous requests


@@ -2418,6 +2418,8 @@ void ConcurrentMark::clear_marking_state() {
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();
+    // Clear any partial regions from the CMTasks
+    _tasks[i]->clear_aborted_region();
   }
 }
@@ -2706,7 +2708,6 @@ void ConcurrentMark::abort() {
   clear_marking_state();
   for (int i = 0; i < (int)_max_task_num; ++i) {
     _tasks[i]->clear_region_fields();
-    _tasks[i]->clear_aborted_region();
   }
   _has_aborted = true;
@@ -2985,7 +2986,7 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) {
   _nextMarkBitMap = nextMarkBitMap;
   clear_region_fields();
-  clear_aborted_region();
+  assert(_aborted_region.is_empty(), "should have been cleared");
   _calls = 0;
   _elapsed_time_ms = 0.0;


@@ -577,6 +577,16 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
 #endif
 }
+void
+G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
+  assert(_end == new_end, "_end should have already been updated");
+  // The first BOT entry should have offset 0.
+  _array->set_offset_array(_array->index_for(_bottom), 0);
+  // The rest should point to the first one.
+  set_remainder_to_point_to_start(_bottom + N_words, new_end);
+}
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
@@ -626,3 +636,12 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
          "Precondition of call");
   _array->set_offset_array(bottom_index, 0);
 }
+void
+G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
+  G1BlockOffsetArray::set_for_starts_humongous(new_end);
+  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
+  _next_offset_threshold = new_end;
+  _next_offset_index = _array->index_for(new_end);
+}
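Note: both overrides encode the same block-offset-table shape for a humongous series: the entry covering the first card gets offset 0, and every later entry points back toward it, so a lookup anywhere in the series walks back to the single object header. A toy illustration of that shape (a plain array stands in for the real BOT, and the linear back-skip encoding is a deliberate simplification of the logarithmic one G1 uses):

    #include <cstddef>
    #include <vector>

    // Toy block-offset table: one entry per card, holding how many cards back
    // the start of the covering object is (0 means "an object starts here").
    struct ToyBOT {
      std::vector<size_t> back_skip;

      void set_for_starts_humongous(size_t first_card, size_t end_card) {
        back_skip[first_card] = 0;         // the humongous object's header is on the first card
        for (size_t c = first_card + 1; c < end_card; ++c) {
          back_skip[c] = c - first_card;   // everything else points back to the start
        }
      }

      // block_start: jump back to the card whose entry is 0.
      size_t block_start(size_t card) const { return card - back_skip[card]; }
    };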


@@ -436,6 +436,8 @@ public:
   }
   void check_all_cards(size_t left_card, size_t right_card) const;
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };
 // A subtype of BlockOffsetArray that takes advantage of the fact
@@ -484,4 +486,6 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
+  virtual void set_for_starts_humongous(HeapWord* new_end);
 };


@@ -4118,10 +4118,14 @@ void G1ParEvacuateFollowersClosure::do_void() {
   while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
     assert(pss->verify_task(stolen_task), "sanity");
     if (stolen_task.is_narrow()) {
-      pss->push_on_queue((narrowOop*) stolen_task);
+      pss->deal_with_reference((narrowOop*) stolen_task);
     } else {
-      pss->push_on_queue((oop*) stolen_task);
+      pss->deal_with_reference((oop*) stolen_task);
     }
+    // We've just processed a reference and we might have made
+    // available new entries on the queues. So we have to make sure
+    // we drain the queues as necessary.
     pss->trim_queue();
   }
 } while (!offer_termination());
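Note: deal_with_reference() can push follow-on work (for example the remaining chunks of a partially scanned array), which is why the loop now drains the local queue after every stolen task. A generic sketch of that steal-then-drain discipline using plain containers rather than the VM's task queues:

    #include <deque>
    #include <functional>

    struct Worker {
      std::deque<int> local;                               // per-thread work queue
      std::function<void(int, std::deque<int>&)> process;  // may push new tasks onto the queue

      void trim_queue() {                                  // drain local work completely
        while (!local.empty()) {
          int t = local.back();
          local.pop_back();
          process(t, local);
        }
      }

      // Each stolen task is processed and whatever it produced is drained
      // immediately, so the local queue stays bounded between steals.
      void handle_stolen(int stolen_task) {
        process(stolen_task, local);
        trim_queue();
      }
    };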


@@ -1772,7 +1772,6 @@ public:
     }
   }
-private:
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);


@@ -377,10 +377,26 @@ void HeapRegion::calc_gc_efficiency() {
 }
 // </PREDICTION>
-void HeapRegion::set_startsHumongous() {
+void HeapRegion::set_startsHumongous(HeapWord* new_end) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
   _humongous_type = StartsHumongous;
   _humongous_start_region = this;
-  assert(end() == _orig_end, "Should be normal before alloc.");
+  set_end(new_end);
+  _offsets.set_for_starts_humongous(new_end);
+}
+void HeapRegion::set_continuesHumongous(HeapRegion* start) {
+  assert(end() == _orig_end,
+         "Should be normal before the humongous object allocation");
+  assert(top() == bottom(), "should be empty");
+  assert(start->startsHumongous(), "pre-condition");
+  _humongous_type = ContinuesHumongous;
+  _humongous_start_region = start;
 }
 bool HeapRegion::claimHeapRegion(jint claimValue) {
@@ -500,23 +516,6 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
   return blk.result();
 }
-void HeapRegion::set_continuesHumongous(HeapRegion* start) {
-  // The order is important here.
-  start->add_continuingHumongousRegion(this);
-  _humongous_type = ContinuesHumongous;
-  _humongous_start_region = start;
-}
-void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
-  // Must join the blocks of the current H region seq with the block of the
-  // added region.
-  offsets()->join_blocks(bottom(), cont->bottom());
-  arrayOop obj = (arrayOop)(bottom());
-  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
-  set_end(cont->end());
-  set_top(cont->end());
-}
 void HeapRegion::save_marks() {
   set_saved_mark();
 }


@@ -395,14 +395,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // Causes the current region to represent a humongous object spanning "n"
   // regions.
-  virtual void set_startsHumongous();
+  void set_startsHumongous(HeapWord* new_end);
   // The regions that continue a humongous sequence should be added using
   // this method, in increasing address order.
   void set_continuesHumongous(HeapRegion* start);
-  void add_continuingHumongousRegion(HeapRegion* cont);
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@@ -733,13 +731,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
                                    FilterOutOfRegionClosure* cl,
                                    bool filter_young);
-  // The region "mr" is entirely in "this", and starts and ends at block
-  // boundaries. The caller declares that all the contained blocks are
-  // coalesced into one.
-  void declare_filled_region_to_BOT(MemRegion mr) {
-    _offsets.single_block(mr.start(), mr.end());
-  }
   // A version of block start that is guaranteed to find *some* block
   // boundary at or before "p", but does not object iteration, and may
   // therefore be used safely when the heap is unparseable.


@@ -1159,9 +1159,7 @@ HeapRegionRemSetIterator() :
   _hrrs(NULL),
   _g1h(G1CollectedHeap::heap()),
   _bosa(NULL),
-  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
-               >> CardTableModRefBS::card_shift)
-{}
+  _sparse_iter() { }
 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   _hrrs = hrrs;


@@ -91,34 +91,118 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
   }
   if (sumSizes >= word_size) {
     _alloc_search_start = cur;
-    // Mark the allocated regions as allocated.
+    // We need to initialize the region(s) we just discovered. This is
+    // a bit tricky given that it can happen concurrently with
+    // refinement threads refining cards on these regions and
+    // potentially wanting to refine the BOT as they are scanning
+    // those cards (this can happen shortly after a cleanup; see CR
+    // 6991377). So we have to set up the region(s) carefully and in
+    // a specific order.
+
+    // Currently, allocs_are_zero_filled() returns false. The zero
+    // filling infrastructure will be going away soon (see CR 6977804).
+    // So no need to do anything else here.
     bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
+    assert(!zf, "not supported");
+
+    // This will be the "starts humongous" region.
     HeapRegion* first_hr = _regions.at(first);
-    for (int i = first; i < cur; i++) {
-      HeapRegion* hr = _regions.at(i);
-      if (zf)
-        hr->ensure_zero_filled();
-      {
-        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
-        hr->set_zero_fill_allocated();
-      }
-      size_t sz = hr->capacity() / HeapWordSize;
-      HeapWord* tmp = hr->allocate(sz);
-      assert(tmp != NULL, "Humongous allocation failure");
-      MemRegion mr = MemRegion(tmp, sz);
-      CollectedHeap::fill_with_object(mr);
-      hr->declare_filled_region_to_BOT(mr);
-      if (i == first) {
-        first_hr->set_startsHumongous();
-      } else {
-        assert(i > first, "sanity");
-        hr->set_continuesHumongous(first_hr);
-      }
-    }
-    HeapWord* first_hr_bot = first_hr->bottom();
-    HeapWord* obj_end = first_hr_bot + word_size;
-    first_hr->set_top(obj_end);
-    return first_hr_bot;
+    {
+      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+      first_hr->set_zero_fill_allocated();
+    }
+    // The header of the new object will be placed at the bottom of
+    // the first region.
+    HeapWord* new_obj = first_hr->bottom();
+    // This will be the new end of the first region in the series that
+    // should also match the end of the last region in the series.
+    // (Note: sumSizes = "region size" x "number of regions we found").
+    HeapWord* new_end = new_obj + sumSizes;
+    // This will be the new top of the first region that will reflect
+    // this allocation.
+    HeapWord* new_top = new_obj + word_size;
+
+    // First, we need to zero the header of the space that we will be
+    // allocating. When we update top further down, some refinement
+    // threads might try to scan the region. By zeroing the header we
+    // ensure that any thread that will try to scan the region will
+    // come across the zero klass word and bail out.
+    //
+    // NOTE: It would not have been correct to have used
+    // CollectedHeap::fill_with_object() and make the space look like
+    // an int array. The thread that is doing the allocation will
+    // later update the object header to a potentially different array
+    // type and, for a very short period of time, the klass and length
+    // fields will be inconsistent. This could cause a refinement
+    // thread to calculate the object size incorrectly.
+    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+    // We will set up the first region as "starts humongous". This
+    // will also update the BOT covering all the regions to reflect
+    // that there is a single object that starts at the bottom of the
+    // first region.
+    first_hr->set_startsHumongous(new_end);
+
+    // Then, if there are any, we will set up the "continues
+    // humongous" regions.
+    HeapRegion* hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      {
+        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
+        hr->set_zero_fill_allocated();
+      }
+      hr->set_continuesHumongous(first_hr);
+    }
+    // If we have "continues humongous" regions (hr != NULL), then the
+    // end of the last one should match new_end.
+    assert(hr == NULL || hr->end() == new_end, "sanity");
+
+    // Up to this point no concurrent thread would have been able to
+    // do any scanning on any region in this series. All the top
+    // fields still point to bottom, so the intersection between
+    // [bottom,top] and [card_start,card_end] will be empty. Before we
+    // update the top fields, we'll do a storestore to make sure that
+    // no thread sees the update to top before the zeroing of the
+    // object header and the BOT initialization.
+    OrderAccess::storestore();
+
+    // Now that the BOT and the object header have been initialized,
+    // we can update top of the "starts humongous" region.
+    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+           "new_top should be in this region");
+    first_hr->set_top(new_top);
+
+    // Now, we will update the top fields of the "continues humongous"
+    // regions. The reason we need to do this is that, otherwise,
+    // these regions would look empty and this will confuse parts of
+    // G1. For example, the code that looks for a consecutive number
+    // of empty regions will consider them empty and try to
+    // re-allocate them. We can extend is_empty() to also include
+    // !continuesHumongous(), but it is easier to just update the top
+    // fields here.
+    hr = NULL;
+    for (int i = first + 1; i < cur; ++i) {
+      hr = _regions.at(i);
+      if ((i + 1) == cur) {
+        // last continues humongous region
+        assert(hr->bottom() < new_top && new_top <= hr->end(),
+               "new_top should fall on this region");
+        hr->set_top(new_top);
+      } else {
+        // not last one
+        assert(new_top > hr->end(), "new_top should be above this region");
+        hr->set_top(hr->end());
+      }
+    }
+    // If we have continues humongous regions (hr != NULL), then the
+    // end of the last one should match new_end and its top should
+    // match new_top.
+    assert(hr == NULL ||
+           (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+    return new_obj;
   } else {
     // If we started from the beginning, we want to know why we can't alloc.
     return NULL;
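Note: the ordering spelled out in the comments above is a publish pattern: fully initialize the object header and the BOT, issue a store-store barrier, and only then update top, which is what makes the region contents visible to concurrent refinement. A compact sketch of the same idea with standard atomics (illustrative names; HotSpot itself uses OrderAccess::storestore() and raw HeapWord* fields):

    #include <atomic>
    #include <cstddef>
    #include <cstring>

    struct Region {
      char*  bottom;
      size_t header_size;
      std::atomic<char*> top;   // concurrent scanners only look at [bottom, top)

      // Writer: zero the header, set up metadata, then publish the new top with
      // release ordering so no reader sees top before those earlier stores.
      void publish(char* new_top) {
        std::memset(bottom, 0, header_size);     // zeroed klass word makes scanners bail out
        // ... initialize block-offset metadata for the covered range here ...
        std::atomic_thread_fence(std::memory_order_release);  // storestore-style barrier
        top.store(new_top, std::memory_order_relaxed);
      }

      // Reader (refinement thread): pairs with the release fence above.
      size_t scannable_bytes() const {
        return static_cast<size_t>(top.load(std::memory_order_acquire) - bottom);
      }
    };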


@@ -308,7 +308,7 @@ void RSHashTable::add_entry(SparsePRTEntry* e) {
   assert(e2->num_valid_cards() > 0, "Postcondition.");
 }
-CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+CardIdx_t RSHashTableIter::find_first_card_in_list() {
   CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
@@ -322,14 +322,11 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
   return SparsePRTEntry::NullEntry;
 }
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
-  return
-    _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
-    + ci;
+size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
+  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
 }
-bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
+bool RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
   CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::cards_num() &&


@@ -169,7 +169,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   int _bl_ind;      // [-1, 0.._rsht->_capacity)
   short _card_ind;  // [0..SparsePRTEntry::cards_num())
   RSHashTable* _rsht;
-  size_t _heap_bot_card_ind;
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
@@ -183,13 +182,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   size_t compute_card_ind(CardIdx_t ci);
 public:
-  RSHashTableIter(size_t heap_bot_card_ind) :
+  RSHashTableIter() :
     _tbl_ind(RSHashTable::NullEntry),
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL),
-    _heap_bot_card_ind(heap_bot_card_ind)
-  {}
+    _rsht(NULL) {}
   void init(RSHashTable* rsht) {
     _rsht = rsht;
@@ -280,20 +277,11 @@ public:
   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
-#if 0
-  void verify_is_cleared();
-  void print();
-#endif
 };
-class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
+class SparsePRTIter: public RSHashTableIter {
 public:
-  SparsePRTIter(size_t heap_bot_card_ind) :
-    /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
-  {}
   void init(const SparsePRT* sprt) {
     RSHashTableIter::init(sprt->cur());
   }


@@ -846,7 +846,7 @@ void ParNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -935,8 +935,6 @@
     assert(to()->is_empty(), "to space should be empty now");
   } else {
-    assert(HandlePromotionFailure,
-      "Should only be here if promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -947,7 +945,7 @@
     // All the spaces are in play for mark-sweep.
     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@@ -1092,11 +1090,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
                                        old, m, sz);
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       _promotion_failed = true;
       new_obj = old;
@@ -1206,12 +1199,6 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
                                        old, m, sz);
     if (new_obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
-        // flag is incorrectly set. In any case, its seriously wrong to be
-        // here!
-        vm_exit_out_of_memory(sz*wordSize, "promotion");
-      }
       // promotion failed, forward to self
       forward_ptr = old->forward_to_atomic(old);
       new_obj = old;


@@ -659,9 +659,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
     }
     return result;   // could be null if we are out of space
   } else if (!gch->incremental_collection_will_fail()) {
-    // The gc_prologues have not executed yet. The value
-    // for incremental_collection_will_fail() is the remanent
-    // of the last collection.
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@@ -739,9 +736,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   return    (word_size > heap_word_size(gen0_capacity))
-         || (GC_locker::is_active_and_needs_gc())
-         || (   gch->last_incremental_collection_failed()
-             && gch->incremental_collection_will_fail());
+         || GC_locker::is_active_and_needs_gc()
+         || gch->incremental_collection_failed();
 }


@@ -510,7 +510,7 @@ void DefNewGeneration::collect(bool full,
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@@ -596,9 +596,8 @@
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
+    assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
-    assert(HandlePromotionFailure,
-      "Should not be here unless promotion failure handling is on");
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
     _promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -613,7 +612,7 @@
     // and from-space.
     swap_spaces();   // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
-    gch->set_incremental_collection_will_fail();
+    gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
     _next_gen->promotion_failure_occurred();
@@ -700,12 +699,6 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   if (obj == NULL) {
     obj = _next_gen->promote(old, s);
     if (obj == NULL) {
-      if (!HandlePromotionFailure) {
-        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
-        // is incorrectly set. In any case, its seriously wrong to be here!
-        vm_exit_out_of_memory(s*wordSize, "promotion");
-      }
       handle_promotion_failure(old);
       return old;
     }
@@ -812,47 +805,43 @@ bool DefNewGeneration::collection_attempt_is_safe() {
     assert(_next_gen != NULL,
            "This must be the youngest gen, and not the only gen");
   }
-  // Decide if there's enough room for a full promotion
-  // When using extremely large edens, we effectively lose a
-  // large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
-  // flag to reduce the minimum evacuation space requirements. If
-  // there is not enough space to evacuate eden during a scavenge,
-  // the VM will immediately exit with an out of memory error.
-  // This flag has not been tested
-  // with collectors other than simple mark & sweep.
-  //
-  // Note that with the addition of promotion failure handling, the
-  // VM will not immediately exit but will undo the young generation
-  // collection. The parameter is left here for compatibility.
-  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
-  // worst_case_evacuation is based on "used()". For the case where this
-  // method is called after a collection, this is still appropriate because
-  // the case that needs to be detected is one in which a full collection
-  // has been done and has overflowed into the young generation. In that
-  // case a minor collection will fail (the overflow of the full collection
-  // means there is no space in the old generation for any promotion).
-  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
-  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
-                                              HandlePromotionFailure);
+  return _next_gen->promotion_attempt_is_safe(used());
 }
 void DefNewGeneration::gc_epilogue(bool full) {
+  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
+  assert(!GC_locker::is_active(), "We should not be executing here");
   // Check if the heap is approaching full after a collection has
   // been done. Generally the young generation is empty at
   // a minimum at the end of a collection. If it is not, then
   // the heap is approaching full.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  clear_should_allocate_from_space();
-  if (collection_attempt_is_safe()) {
-    gch->clear_incremental_collection_will_fail();
+  if (full) {
+    DEBUG_ONLY(seen_incremental_collection_failed = false;)
+    if (!collection_attempt_is_safe()) {
+      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
+      set_should_allocate_from_space(); // we seem to be running out of space
+    } else {
+      gch->clear_incremental_collection_failed(); // We just did a full collection
+      clear_should_allocate_from_space(); // if set
+    }
   } else {
-    gch->set_incremental_collection_will_fail();
-    if (full) { // we seem to be running out of space
-      set_should_allocate_from_space();
-    }
+#ifdef ASSERT
+    // It is possible that incremental_collection_failed() == true
+    // here, because an attempted scavenge did not succeed. The policy
+    // is normally expected to cause a full collection which should
+    // clear that condition, so we should not be here twice in a row
+    // with incremental_collection_failed() == true without having done
+    // a full collection in between.
+    if (!seen_incremental_collection_failed &&
+        gch->incremental_collection_failed()) {
+      seen_incremental_collection_failed = true;
+    } else if (seen_incremental_collection_failed) {
+      assert(!gch->incremental_collection_failed(), "Twice in a row");
+      seen_incremental_collection_failed = false;
+    }
+#endif // ASSERT
   }
   if (ZapUnusedHeapArea) {


@@ -82,12 +82,6 @@ protected:
   Stack<oop>     _objs_with_preserved_marks;
   Stack<markOop> _preserved_marks_of_objs;
-  // Returns true if the collection can be safely attempted.
-  // If this method returns false, a collection is not
-  // guaranteed to fail but the system may not be able
-  // to recover from the failure.
-  bool collection_attempt_is_safe();
   // Promotion failure handling
   OopClosure *_promo_failure_scan_stack_closure;
   void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
@@ -304,6 +298,14 @@ protected:
   // GC support
   virtual void compute_new_size();
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost. Override superclass's implementation.
+  virtual bool collection_attempt_is_safe();
   virtual void collect(bool full,
                        bool clear_all_soft_refs,
                        size_t size,


@@ -142,8 +142,7 @@ jint GenCollectedHeap::initialize() {
   }
   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
-  clear_incremental_collection_will_fail();
-  clear_last_incremental_collection_failed();
+  clear_incremental_collection_failed();
 #ifndef SERIALGC
   // If we are running CMS, create the collector responsible
@@ -1347,17 +1346,6 @@ class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 };
 void GenCollectedHeap::gc_epilogue(bool full) {
-  // Remember if a partial collection of the heap failed, and
-  // we did a complete collection.
-  if (full && incremental_collection_will_fail()) {
-    set_last_incremental_collection_failed();
-  } else {
-    clear_last_incremental_collection_failed();
-  }
-  // Clear the flag, if set; the generation gc_epilogues will set the
-  // flag again if the condition persists despite the collection.
-  clear_incremental_collection_will_fail();
 #ifdef COMPILER2
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));


@@ -62,11 +62,10 @@
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
-  // If a generation would bail out of an incremental collection,
-  // it sets this flag. If the flag is set, satisfy_failed_allocation
-  // will attempt allocating in all generations before doing a full GC.
-  bool _incremental_collection_will_fail;
-  bool _last_incremental_collection_failed;
+  // Indicates that the most recent previous incremental collection failed.
+  // The flag is cleared when an action is taken that might clear the
+  // condition that caused that incremental collection to fail.
+  bool _incremental_collection_failed;
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
@@ -469,26 +468,26 @@
   // call to "save_marks".
   bool no_allocs_since_save_marks(int level);
-  // If a generation bails out of an incremental collection,
-  // it sets this flag.
+  // Returns true if an incremental collection is likely to fail.
   bool incremental_collection_will_fail() {
-    return _incremental_collection_will_fail;
-  }
-  void set_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = true;
-  }
-  void clear_incremental_collection_will_fail() {
-    _incremental_collection_will_fail = false;
-  }
-  bool last_incremental_collection_failed() const {
-    return _last_incremental_collection_failed;
-  }
-  void set_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = true;
-  }
-  void clear_last_incremental_collection_failed() {
-    _last_incremental_collection_failed = false;
+    // Assumes a 2-generation system; the first disjunct remembers if an
+    // incremental collection failed, even when we thought (second disjunct)
+    // that it would not.
+    assert(heap()->collector_policy()->is_two_generation_policy(),
+           "the following definition may not be suitable for an n(>2)-generation system");
+    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
+  }
+  // If a generation bails out of an incremental collection,
+  // it sets this flag.
+  bool incremental_collection_failed() const {
+    return _incremental_collection_failed;
+  }
+  void set_incremental_collection_failed() {
+    _incremental_collection_failed = true;
+  }
+  void clear_incremental_collection_failed() {
+    _incremental_collection_failed = false;
   }
   // Promotion of obj into gen failed. Try to promote obj to higher non-perm
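Note: after this renaming the heap keeps one sticky fact, "the last incremental collection failed", and answers the forward-looking question by combining that fact with a fresh estimate from the young generation. A condensed sketch of that split (the method names mirror the patch; the surrounding types are stand-ins):

    struct YoungGen {
      // Fresh estimate, e.g. whether the old gen has head-room for a full promotion.
      bool collection_attempt_is_safe() const { return true; /* placeholder body */ }
    };

    struct Heap {
      bool _incremental_collection_failed = false;
      YoungGen young;

      // Sticky record of what actually happened last time.
      bool incremental_collection_failed() const { return _incremental_collection_failed; }
      void set_incremental_collection_failed()   { _incremental_collection_failed = true; }
      void clear_incremental_collection_failed() { _incremental_collection_failed = false; }

      // Forward-looking prediction: either the previous attempt failed and nothing
      // has cleared that state, or the young gen thinks the next attempt is unsafe.
      bool incremental_collection_will_fail() {
        return incremental_collection_failed() || !young.collection_attempt_is_safe();
      }
    };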


@@ -165,15 +165,16 @@ size_t Generation::max_contiguous_available() const {
   return max;
 }
-bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
-                                           bool not_used) const {
+bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  bool   res = (available >= max_promotion_in_bytes);
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
-      " contiguous_available: " SIZE_FORMAT
-      " promotion_in_bytes: " SIZE_FORMAT,
-      max_contiguous_available(), promotion_in_bytes);
+    gclog_or_tty->print_cr(
+      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      max_promotion_in_bytes);
   }
-  return max_contiguous_available() >= promotion_in_bytes;
+  return res;
 }
 // Ignores "ref" and calls allocate().


@@ -173,15 +173,11 @@ class Generation: public CHeapObj {
   // The largest number of contiguous free bytes in this or any higher generation.
   virtual size_t max_contiguous_available() const;
-  // Returns true if promotions of the specified amount can
-  // be attempted safely (without a vm failure).
+  // Returns true if promotions of the specified amount are
+  // likely to succeed without a promotion failure.
   // Promotion of the full amount is not guaranteed but
-  // can be attempted.
-  // younger_handles_promotion_failure
-  // is true if the younger generation handles a promotion
-  // failure.
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  // might be attempted in the worst case.
+  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
   // For a non-young generation, this interface can be used to inform a
   // generation that a promotion attempt into that generation failed.
@@ -358,6 +354,16 @@ class Generation: public CHeapObj {
     return (full || should_allocate(word_size, is_tlab));
   }
+  // Returns true if the collection is likely to be safely
+  // completed. Even if this method returns true, a collection
+  // may not be guaranteed to succeed, and the system should be
+  // able to safely unwind and recover from that failure, albeit
+  // at some additional cost.
+  virtual bool collection_attempt_is_safe() {
+    guarantee(false, "Are you sure you want to call this method?");
+    return true;
+  }
   // Perform a garbage collection.
   // If full is true attempt a full garbage collection of this generation.
   // Otherwise, attempting to (at least) free enough space to support an


@@ -419,29 +419,16 @@ void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
 void TenuredGeneration::verify_alloc_buffers_clean() {}
 #endif // SERIALGC
-bool TenuredGeneration::promotion_attempt_is_safe(
-    size_t max_promotion_in_bytes,
-    bool younger_handles_promotion_failure) const {
-  bool result = max_contiguous_available() >= max_promotion_in_bytes;
-  if (younger_handles_promotion_failure && !result) {
-    result = max_contiguous_available() >=
-      (size_t) gc_stats()->avg_promoted()->padded_average();
-    if (PrintGC && Verbose && result) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " avg_promoted: " SIZE_FORMAT,
-        max_contiguous_available(),
-        gc_stats()->avg_promoted()->padded_average());
-    }
-  } else {
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
-        " contiguous_available: " SIZE_FORMAT
-        " promotion_in_bytes: " SIZE_FORMAT,
-        max_contiguous_available(), max_promotion_in_bytes);
-    }
-  }
-  return result;
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
 }


@@ -101,8 +101,7 @@ class TenuredGeneration: public OneContigSpaceCardGeneration {
   virtual void update_gc_stats(int level, bool full);
-  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
-    bool younger_handles_promotion_failure) const;
+  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
   void verify_alloc_buffers_clean();
 };


@@ -190,6 +190,10 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
                            JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   { "UseDepthFirstScavengeOrder",
                            JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
+  { "HandlePromotionFailure",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "MaxLiveObjectEvacuationRatio",
+                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
@@ -1728,8 +1732,6 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }
-  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
-                                       "MaxLiveObjectEvacuationRatio");
   status = status && verify_percentage(AdaptiveSizePolicyWeight,
                                        "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");


@@ -1588,7 +1588,7 @@ class CommandLineFlags {
           "(Temporary, subject to experimentation)"                        \
           "Nominal minimum work per abortable preclean iteration")         \
                                                                            \
-  product(intx, CMSAbortablePrecleanWaitMillis, 100,                       \
+  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                    \
           "(Temporary, subject to experimentation)"                        \
           " Time that we sleep between iterations when not given"          \
           " enough work per iteration")                                    \
@@ -1680,7 +1680,7 @@
   product(uintx, CMSWorkQueueDrainThreshold, 10,                           \
           "Don't drain below this size per parallel worker/thief")         \
                                                                            \
-  product(intx, CMSWaitDuration, 2000,                                     \
+  manageable(intx, CMSWaitDuration, 2000,                                  \
           "Time in milliseconds that CMS thread waits for young GC")       \
                                                                            \
   product(bool, CMSYield, true,                                            \
@@ -1789,10 +1789,6 @@
   notproduct(bool, GCALotAtAllSafepoints, false,                           \
           "Enforce ScavengeALot/GCALot at all potential safepoints")       \
                                                                            \
-  product(bool, HandlePromotionFailure, true,                              \
-          "The youngest generation collection does not require "           \
-          "a guarantee of full promotion of all live objects.")            \
-                                                                           \
   product(bool, PrintPromotionFailure, false,                              \
           "Print additional diagnostic information following "             \
           " promotion failure")                                            \
@@ -3006,9 +3002,6 @@
   product(intx, NewRatio, 2,                                               \
           "Ratio of new/old generation sizes")                             \
                                                                            \
-  product(uintx, MaxLiveObjectEvacuationRatio, 100,                        \
-          "Max percent of eden objects that will be live at scavenge")     \
-                                                                           \
   product_pd(uintx, NewSizeThreadIncrease,                                 \
           "Additional size added to desired new generation size per "      \
           "non-daemon thread (in bytes)")                                  \