Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-26 14:24:46 +02:00)

Commit ce2d714a66 ("Merge")
4 changed files with 17 additions and 13 deletions

@@ -3460,7 +3460,9 @@ void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
+  // Only shrink if a compaction was done so that all the free space
+  // in the generation is in a contiguous block at the end.
+  if (size > 0 && did_compact()) {
     shrink_by(size);
   }
 }

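For context, the new guard ties shrinking to a preceding compaction: giving memory back at the end of the generation only helps once all free space has been compacted into one contiguous block there. The standalone C++ sketch below illustrates the same guard pattern in isolation; ToyGeneration, kPage and mark_compacted() are hypothetical stand-ins, not HotSpot code.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for a generation that can give committed pages back.
class ToyGeneration {
  size_t committed_;
  bool   did_compact_;   // set after a full, compacting collection
  static const size_t kPage = 4096;
  static size_t page_align_down(size_t bytes) { return bytes & ~(kPage - 1); }
 public:
  explicit ToyGeneration(size_t committed)
    : committed_(committed), did_compact_(false) {}
  void mark_compacted()    { did_compact_ = true; }
  size_t committed() const { return committed_; }

  // Same shape as the guarded shrink above: only uncommit when compaction
  // has pushed all free space into a contiguous block at the end.
  void shrink(size_t bytes) {
    size_t size = page_align_down(bytes);
    if (size > 0 && did_compact_) {
      committed_ -= (size < committed_ ? size : committed_);  // ~ shrink_by(size)
    }
  }
};

int main() {
  ToyGeneration g(64 * 4096);
  g.shrink(8 * 4096);    // ignored: no compaction yet, free space may be scattered
  g.mark_compacted();
  g.shrink(8 * 4096);    // honored
  std::printf("committed = %zu bytes\n", g.committed());
  return 0;
}
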
@@ -8696,9 +8698,10 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   assert(_sp->used_region().contains(eob - 1),
-         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+                 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {

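The widened message reports eob, eob-1 and _limit alongside the bounds of _sp, so a failing assert shows directly which value walked past the used region. Below is a rough standalone rendering of that message, using snprintf and %p in place of HotSpot's err_msg and PTR_FORMAT; check_eob_in_bounds and its parameters are illustrative, not HotSpot APIs.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustrative only: format the same diagnostic that the expanded assert
// message above carries, then fail if eob-1 falls outside [bottom, end).
static void check_eob_in_bounds(const char* eob, const char* limit,
                                const char* bottom, const char* end,
                                const void* fc, size_t chunk_size) {
  if (!(eob - 1 >= bottom && eob - 1 < end)) {
    char msg[256];
    std::snprintf(msg, sizeof(msg),
                  "eob = %p eob-1 = %p _limit = %p"
                  " out of bounds wrt _sp = [%p,%p)"
                  " when examining fc = %p(%zu)",
                  (const void*)eob, (const void*)(eob - 1), (const void*)limit,
                  (const void*)bottom, (const void*)end, fc, chunk_size);
    std::fprintf(stderr, "%s\n", msg);
    assert(false && "eob out of bounds");
  }
}
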
@@ -981,7 +981,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 
     if (should_try_gc) {
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+                                   GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;

@@ -1106,7 +1107,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
     // enough space for the allocation to succeed after the pause.
 
     bool succeeded;
-    result = do_collection_pause(word_size, gc_count_before, &succeeded);
+    result = do_collection_pause(word_size, gc_count_before, &succeeded,
+                                 GCCause::_g1_humongous_allocation);
     if (result != NULL) {
       assert(succeeded, "only way to get back a non-NULL result");
       return result;

@@ -3698,14 +3700,15 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
-                                               bool* succeeded) {
+                                               bool* succeeded,
+                                               GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             GCCause::_g1_inc_collection_pause);
+                             gc_cause);
   VMThread::execute(&op);
 
   HeapWord* result = op.result();

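Together with the two call sites above, the GC cause now flows from the caller through do_collection_pause() into the VM operation instead of being fixed to GCCause::_g1_inc_collection_pause there. A minimal sketch of that plumbing, using hypothetical stand-ins (ToyHeap, ToyPauseOp, GCCauseLike) rather than the real HotSpot types:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins; only the shape of the parameter plumbing matters.
enum class GCCauseLike { g1_inc_collection_pause, g1_humongous_allocation };

struct ToyPauseOp {
  unsigned    gc_count_before;
  size_t      word_size;
  GCCauseLike cause;             // no longer hard-coded by the pause function
  void execute() {
    std::printf("pause: cause=%s, word_size=%zu\n",
                cause == GCCauseLike::g1_humongous_allocation
                    ? "g1_humongous_allocation" : "g1_inc_collection_pause",
                word_size);
  }
};

struct ToyHeap {
  // Mirrors the new do_collection_pause() signature: the caller supplies
  // the cause, and it is forwarded into the VM-operation-like object.
  void* do_pause(size_t word_size, unsigned gc_count_before,
                 bool* succeeded, GCCauseLike cause) {
    ToyPauseOp op{gc_count_before, word_size, cause};
    op.execute();
    *succeeded = true;
    return nullptr;              // a real pause may or may not return memory
  }
};

int main() {
  ToyHeap heap;
  bool ok;
  // Ordinary allocation path vs. humongous allocation path (see hunks above).
  heap.do_pause(128, 1, &ok, GCCauseLike::g1_inc_collection_pause);
  heap.do_pause(1u << 20, 2, &ok, GCCauseLike::g1_humongous_allocation);
  return 0;
}
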
@@ -776,9 +776,10 @@ protected:
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t word_size,
                                 unsigned int gc_count_before,
-                                bool* succeeded);
+                                bool* succeeded,
+                                GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due

@@ -70,9 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
-  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
-            "we can only request an allocation if the GC cause is for "
-            "an incremental GC pause");
   _gc_cause = gc_cause;
 }
 

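The removed guarantee reflects the same change: an allocation-requesting pause is no longer restricted to GCCause::_g1_inc_collection_pause, since attempt_allocation_humongous() now requests a pause with a non-zero word_size and GCCause::_g1_humongous_allocation, a combination the old check would have rejected.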