Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-27 23:04:50 +02:00)

Merge commit c5b5e2e3a7
30 changed files with 729 additions and 119 deletions

@@ -1124,8 +1124,7 @@ class BacktraceBuilder: public StackObj {
     if (_dirty && _methods != NULL) {
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
-                                    _methods->array_size()));
+      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
       _dirty = false;
     }
   }

@@ -709,7 +709,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 
   // Support for parallelizing survivor space rescan
   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
-    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
+    size_t max_plab_samples = cp->max_gen0_size()/
+                                ((SurvivorRatio+2)*MinTLABSize);
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);

@@ -155,7 +155,7 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
 }
 
 DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
+DirtyCardQueueSet::get_completed_buffer(int stop_at) {
   CompletedBufferNode* nd = NULL;
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 

@@ -175,28 +175,6 @@ DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
   return nd;
 }
 
-// We only do this in contexts where there is no concurrent enqueueing.
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_CAS() {
-  CompletedBufferNode* nd = _completed_buffers_head;
-
-  while (nd != NULL) {
-    CompletedBufferNode* next = nd->next;
-    CompletedBufferNode* result =
-      (CompletedBufferNode*)Atomic::cmpxchg_ptr(next,
-                                                &_completed_buffers_head,
-                                                nd);
-    if (result == nd) {
-      return result;
-    } else {
-      nd = _completed_buffers_head;
-    }
-  }
-  assert(_completed_buffers_head == NULL, "Loop post");
-  _completed_buffers_tail = NULL;
-  return NULL;
-}
-
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(int worker_i,
                                          CompletedBufferNode* nd) {

@@ -222,15 +200,10 @@ apply_closure_to_completed_buffer_helper(int worker_i,
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
                                                           int stop_at,
-                                                          bool with_CAS)
+                                                          bool during_pause)
 {
-  CompletedBufferNode* nd = NULL;
-  if (with_CAS) {
-    guarantee(stop_at == 0, "Precondition");
-    nd = get_completed_buffer_CAS();
-  } else {
-    nd = get_completed_buffer_lock(stop_at);
-  }
+  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
+  CompletedBufferNode* nd = get_completed_buffer(stop_at);
   bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
   if (res) Atomic::inc(&_processed_buffers_rs_thread);
   return res;

@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -120,12 +120,13 @@ public:
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(int worker_i = 0,
                                          int stop_at = 0,
-                                         bool with_CAS = false);
+                                         bool during_pause = false);
 
   bool apply_closure_to_completed_buffer_helper(int worker_i,
                                                 CompletedBufferNode* nd);
 
-  CompletedBufferNode* get_completed_buffer_CAS();
-  CompletedBufferNode* get_completed_buffer_lock(int stop_at);
+  CompletedBufferNode* get_completed_buffer(int stop_at);
+
   // Applies the current closure to all completed buffers,
   // non-consumptively.
   void apply_closure_to_all_completed_buffers();

@@ -928,6 +928,8 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
 
+    TraceMemoryManagerStats tms(true /* fullGC */);
+
     double start = os::elapsedTime();
     g1_policy()->record_full_collection_start();
 

@@ -1001,6 +1003,8 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
 
     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
+    MemoryService::track_memory_usage();
+
     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyAfterGC:");

@@ -1732,13 +1736,6 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");

@@ -1755,17 +1752,31 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   }
 }
 
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
   {
-    MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
   }
 }
 

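Note on the reworked collect() above: it follows the usual HotSpot pattern of sampling the collection count under the Heap_lock, releasing the lock, and letting the VM operation recheck the count so a GC that another thread already triggered is not repeated. A minimal standalone sketch of that pattern, using std::mutex in place of Heap_lock (all names here are illustrative, not HotSpot API):

    #include <mutex>

    struct HeapSketch {
      std::mutex heap_lock;
      int total_collections = 0;

      void collect() {
        int gc_count_before;
        {
          std::lock_guard<std::mutex> ml(heap_lock);  // sample under the lock
          gc_count_before = total_collections;
        }                                             // give the lock up again
        execute_gc_op(gc_count_before);               // stands in for VMThread::execute
      }

      void execute_gc_op(int gc_count_before) {
        std::lock_guard<std::mutex> ml(heap_lock);    // the VM op retakes the lock
        if (total_collections != gc_count_before)
          return;                                     // someone else already collected
        ++total_collections;                          // ... do the collection ...
      }
    };
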
@@ -2119,7 +2130,7 @@ size_t G1CollectedHeap::large_typearray_limit() {
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_committed.byte_size();
+  return g1_reserved_obj_bytes();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {

@@ -2638,6 +2649,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
   }
 
   {
+    ResourceMark rm;
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {

@@ -2649,8 +2662,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.

@@ -2658,7 +2669,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
 
-    ResourceMark rm;
+    TraceMemoryManagerStats tms(false /* fullGC */);
+
     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
     guarantee(!is_gc_active(), "collection is not reentrant");

@@ -2802,6 +2814,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
         _young_list->reset_auxilary_lists();
       }
     } else {
+      if (_in_cset_fast_test != NULL) {
+        assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+        // this is more for peace of mind; we're nulling them here and
+        // we're expecting them to be null at the beginning of the next GC
+        _in_cset_fast_test = NULL;
+        _in_cset_fast_test_base = NULL;
+      }
+      // This looks confusing, because the DPT should really be empty
+      // at this point -- since we have not done any collection work,
+      // there should not be any derived pointers in the table to update;
+      // however, there is some additional state in the DPT which is
+      // reset at the end of the (null) "gc" here via the following call.
+      // A better approach might be to split off that state resetting work
+      // into a separate method that asserts that the DPT is empty and call
+      // that here. That is deferred for now.
       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
     }
 

@@ -2838,6 +2866,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
 
   assert(regions_accounted_for(), "Region leakage.");
 
+  MemoryService::track_memory_usage();
+
   if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     gclog_or_tty->print(" VerifyAfterGC:");

@@ -692,7 +692,7 @@ public:
 
   // Reserved (g1 only; super method includes perm), capacity and the used
   // portion in bytes.
-  size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
+  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The

@@ -1516,8 +1516,30 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
     update_recent_gc_times(end_time_sec, elapsed_ms);
     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
-    // using 1.01 to account for floating point inaccuracies
-    assert(recent_avg_pause_time_ratio() < 1.01, "All GC?");
+    if (recent_avg_pause_time_ratio() < 0.0 ||
+        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
+#ifndef PRODUCT
+      // Dump info to allow post-facto debugging
+      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
+      gclog_or_tty->print_cr("-------------------------------------------");
+      gclog_or_tty->print_cr("Recent GC Times (ms):");
+      _recent_gc_times_ms->dump();
+      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
+      _recent_prev_end_times_for_all_gcs_sec->dump();
+      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
+                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
+      // In debug mode, terminate the JVM if the user wants to debug at this point.
+      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
+#endif  // !PRODUCT
+      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
+      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
+      if (_recent_avg_pause_time_ratio < 0.0) {
+        _recent_avg_pause_time_ratio = 0.0;
+      } else {
+        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
+        _recent_avg_pause_time_ratio = 1.0;
+      }
+    }
   }
 
   if (G1PolicyVerbose > 1) {

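Note on the clipping added above: _recent_avg_pause_time_ratio is the sum of recent pause times divided by the wall-clock interval containing them, so it should fall in [0.0, 1.0]; concurrent updates and floating-point error can push it just outside, which is what the patch now tolerates instead of asserting. A worked check with assumed figures (120 ms of recent pauses against a 100 ms measured interval):

    #include <cstdio>

    int main() {
      double recent_gc_times_ms = 120.0;  // assumed sum of recent pauses
      double interval_ms        = 100.0;  // assumed measured interval
      double ratio = recent_gc_times_ms / interval_ms;  // 1.2: out of bounds
      if (ratio < 0.0)      ratio = 0.0;  // clip, as the hunk above does
      else if (ratio > 1.0) ratio = 1.0;
      std::printf("clipped ratio = %.1f\n", ratio);     // prints 1.0
      return 0;
    }
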
@@ -2825,8 +2847,15 @@ choose_collection_set() {
   double non_young_start_time_sec;
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0,
+  guarantee(_target_pause_time_ms > -1.0
+            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
             "_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+  if (_target_pause_time_ms <= -1.0) {
+    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  }
+#endif
   assert(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);

@@ -2972,7 +3001,3 @@ record_collection_pause_end(bool abandoned) {
   G1CollectorPolicy::record_collection_pause_end(abandoned);
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***

@@ -86,12 +86,22 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
     //     the time slice than what's allowed)
-    //   concolidate the two entries with the minimum gap between them
-    //     (this mighte allow less GC time than what's allowed)
-    guarantee(0, "array full, currently we can't recover");
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    // In the case where ScavengeALot is true, such overflow is not
+    // uncommon; in such cases, we can, without much loss of precision
+    // or performance (we are GC'ing most of the time anyway!),
+    // simply overwrite the oldest entry in the tracker: this
+    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    _head_index = trim_index(_head_index + 1);
+    assert(_head_index == _tail_index, "Because we have a full circular buffer");
+    _tail_index = trim_index(_tail_index + 1);
+  } else {
+    _head_index = trim_index(_head_index + 1);
+    ++_no_entries;
   }
-  _head_index = trim_index(_head_index + 1);
-  ++_no_entries;
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
 }
 

@@ -99,7 +99,10 @@ private:
   // The array is of fixed size and I don't think we'll need more than
   // two or three entries with the current behaviour of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
-  // the shortest gap between them and concolidate them.
+  // the shortest gap between them and consolidate them.
+  // For now, we have taken the expedient alternative of forgetting
+  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
   int _head_index;

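Note on the two MMU-tracker hunks above: the "forgetful" behaviour is the standard circular-buffer overwrite, where a full buffer advances the head onto the tail and then advances the tail, dropping the oldest entry instead of failing. A minimal sketch of that move (the capacity and element type are placeholders, not the HotSpot declarations):

    const int QueueLength = 64;         // placeholder capacity
    double array_[QueueLength];         // placeholder element type
    int head_ = 0, tail_ = 0, entries_ = 0;

    int trim_index(int i) { return i % QueueLength; }

    void add_entry(double value) {
      if (entries_ == QueueLength) {
        head_ = trim_index(head_ + 1);  // head lands on the tail...
        tail_ = trim_index(tail_ + 1);  // ...and the oldest entry is dropped
      } else {
        head_ = trim_index(head_ + 1);
        ++entries_;
      }
      array_[head_] = value;            // overwrite in place
    }
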
@@ -242,6 +242,10 @@
   product(bool, G1UseSurvivorSpaces, true,                                  \
           "When true, use survivor space.")                                 \
                                                                             \
+  develop(bool, G1FailOnFPError, false,                                     \
+          "When set, G1 will fail when it encounters an FP 'error', "      \
+          "so as to allow debugging")                                       \
+                                                                            \
   develop(bool, G1FixedTenuringThreshold, false,                            \
           "When set, G1 will not adjust the tenuring threshold")            \
                                                                             \

@@ -252,6 +256,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(bool, G1ForgetfulMMUTracker, false,                               \
+          "If the MMU tracker's memory is full, forget the oldest entry")   \
+                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \

@@ -107,7 +107,7 @@ void** PtrQueueSet::allocate_buffer() {
     res[0] = NULL;
     return res;
   } else {
-    return NEW_C_HEAP_ARRAY(void*, _sz);
+    return (void**) NEW_C_HEAP_ARRAY(char, _sz);
   }
 }
 

@@ -127,7 +127,8 @@ void PtrQueueSet::reduce_free_list() {
     assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
     void** head = _buf_free_list;
     _buf_free_list = (void**)_buf_free_list[0];
-    FREE_C_HEAP_ARRAY(void*,head);
+    FREE_C_HEAP_ARRAY(char, head);
+    _buf_free_list_sz --;
     n--;
   }
 }

@@ -42,7 +42,7 @@ void VM_G1CollectFull::doit() {
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+  GCCauseSetter x(g1h, _gc_cause);
   g1h->do_collection_pause_at_safepoint();
 }
 

@@ -68,8 +68,9 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
  public:
-  VM_G1IncCollectionPause(int gc_count_before) :
-    VM_GC_Operation(gc_count_before) {}
+  VM_G1IncCollectionPause(int gc_count_before,
+                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual const char* name() const {

@@ -222,6 +222,15 @@ g1MarkSweep.hpp    oop.hpp
 g1MarkSweep.hpp    timer.hpp
 g1MarkSweep.hpp    universe.hpp
 
+g1MemoryPool.cpp   heapRegion.hpp
+g1MemoryPool.cpp   g1CollectedHeap.inline.hpp
+g1MemoryPool.cpp   g1CollectedHeap.hpp
+g1MemoryPool.cpp   g1CollectorPolicy.hpp
+g1MemoryPool.cpp   g1MemoryPool.hpp
+
+g1MemoryPool.hpp   memoryUsage.hpp
+g1MemoryPool.hpp   memoryPool.hpp
+
 g1OopClosures.inline.hpp   concurrentMark.hpp
 g1OopClosures.inline.hpp   g1OopClosures.hpp
 g1OopClosures.inline.hpp   g1CollectedHeap.hpp

@@ -303,6 +312,8 @@ heapRegionSeq.inline.hpp   heapRegionSeq.hpp
 
 klass.hpp          g1OopClosures.hpp
 
+memoryService.cpp  g1MemoryPool.hpp
+
 ptrQueue.cpp       allocation.hpp
 ptrQueue.cpp       allocation.inline.hpp
 ptrQueue.cpp       mutex.hpp

@@ -289,7 +289,7 @@ attachListener.hpp   allocation.hpp
 attachListener.hpp   debug.hpp
 attachListener.hpp   ostream.hpp
 
-barrierSet.cpp       barrierSet.hpp
+barrierSet.cpp       barrierSet.inline.hpp
 barrierSet.cpp       collectedHeap.hpp
 barrierSet.cpp       universe.hpp
 

@@ -41,11 +41,6 @@ void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
 
 // count is number of array elements being written
 void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
-  assert(count <= (size_t)max_intx, "count too large");
-  HeapWord* end = start + objArrayOopDesc::array_size((int)count);
-#if 0
-  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
-          start, count, start, end);
-#endif
-  Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
+  // simply delegate to instance method
+  Universe::heap()->barrier_set()->write_ref_array(start, count);
 }

@@ -121,17 +121,20 @@ public:
   virtual void read_ref_array(MemRegion mr) = 0;
   virtual void read_prim_array(MemRegion mr) = 0;
 
+  // Below length is the # array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
+  // Below MemRegion mr is expected to be HeapWord-aligned
   inline void write_ref_array(MemRegion mr);
+  // Below count is the # array elements being written, starting
+  // at the address "start", which may not necessarily be HeapWord-aligned
+  inline void write_ref_array(HeapWord* start, size_t count);
 
-  // Static versions, suitable for calling from generated code.
+  // Static versions, suitable for calling from generated code;
+  // count is # array elements being written, starting with "start",
+  // which may not necessarily be HeapWord-aligned.
   static void static_write_ref_array_pre(HeapWord* start, size_t count);
   static void static_write_ref_array_post(HeapWord* start, size_t count);
-  // Narrow oop versions of the above; count is # of array elements being written,
-  // starting with "start", which is HeapWord-aligned.
-  static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
-  static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
 
  protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;

@@ -43,6 +43,8 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
 }
 
 void BarrierSet::write_ref_array(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
   } else {

@@ -50,6 +52,34 @@ void BarrierSet::write_ref_array(MemRegion mr) {
   }
 }
 
+// count is number of array elements being written
+void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
+  assert(count <= (size_t)max_intx, "count too large");
+  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+  // In the case of compressed oops, start and end may potentially be misaligned;
+  // so we need to conservatively align the first downward (this is not
+  // strictly necessary for current uses, but a case of good hygiene and,
+  // if you will, aesthetics) and the second upward (this is essential for
+  // current uses) to a HeapWord boundary, so we mark all cards overlapping
+  // this write. In the event that this evolves in the future to calling a
+  // logging barrier of narrow oop granularity, like the pre-barrier for G1
+  // (mentioned here merely by way of example), we will need to change this
+  // interface, much like the pre-barrier one above, so it is "exactly precise"
+  // (if i may be allowed the adverbial redundancy for emphasis) and does not
+  // include narrow oop slots not included in the original write interval.
+  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
+  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
+  // If compressed oops were not being used, these should already be aligned
+  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+         "Expected heap word alignment of start and end");
+#if 0
+  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+          start, count, aligned_start, aligned_end);
+#endif
+  write_ref_array_work(MemRegion(aligned_start, aligned_end));
+}
+
 void BarrierSet::write_region(MemRegion mr) {
   if (kind() == CardTableModRef) {
     ((CardTableModRefBS*)this)->inline_write_region(mr);

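Note on the alignment logic in the new write_ref_array above: it only matters with compressed oops, where a 4-byte narrow-oop slot can start or end in the middle of an 8-byte HeapWord, so the written interval is rounded outward to word boundaries before card marking. A worked example under those assumed sizes (the addresses are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t HeapWordSize = 8;  // assumed 64-bit heap word
      const uintptr_t heapOopSize  = 4;  // assumed compressed (narrow) oop
      uintptr_t start = 0x1004;          // narrow-oop slot starting mid-word
      uintptr_t end   = start + 3 * heapOopSize;              // 0x1010
      uintptr_t aligned_start = start & ~(HeapWordSize - 1);  // 0x1000: round down
      uintptr_t aligned_end   = (end + HeapWordSize - 1) & ~(HeapWordSize - 1);
      // [0x1000, 0x1010) now covers every heap word the write touched.
      std::printf("[%#llx, %#llx)\n",
                  (unsigned long long)aligned_start,
                  (unsigned long long)aligned_end);
      return 0;
    }
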
@@ -511,6 +511,8 @@ void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
 }
 
 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
   jbyte* cur  = byte_for(mr.start());
   jbyte* last = byte_after(mr.last());
   while (cur < last) {

@@ -520,6 +522,8 @@ void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
   for (int i = 0; i < _cur_covered_regions; i++) {
     MemRegion mri = mr.intersection(_covered[i]);
     if (!mri.is_empty()) dirty_MemRegion(mri);

@@ -224,10 +224,6 @@ public:
                                CodeBlobClosure* code_roots,
                                OopClosure* non_root_closure);
 
-
-  // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
-  virtual void collect_locked(GCCause::Cause cause) = 0;
-
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
   // functions.

@@ -127,16 +127,14 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
           // pointer delta is scaled to number of elements (length field in
           // objArrayOop) which we assume is 32 bit.
           assert(pd == (size_t)(int)pd, "length field overflow");
-          const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
-          bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+          bs->write_ref_array((HeapWord*)dst, pd);
           THROW(vmSymbols::java_lang_ArrayStoreException());
           return;
         }
       }
     }
   }
-  const size_t word_len = objArrayOopDesc::array_size(length);
-  bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+  bs->write_ref_array((HeapWord*)dst, length);
 }
 
 void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,

@@ -37,6 +37,32 @@ class objArrayOopDesc : public arrayOopDesc {
     return &((T*)base())[index];
   }
 
+ private:
+  // Give size of objArrayOop in HeapWords minus the header
+  static int array_size(int length) {
+    const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+    assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
+           "Else the following (new) computation would be in error");
+#ifdef ASSERT
+    // The old code is left in for sanity-checking; it'll
+    // go away pretty soon. XXX
+    // Without UseCompressedOops, this is simply:
+    //   oop->length() * HeapWordsPerOop;
+    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
+    // The oop elements are aligned up to wordSize
+    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+    int old_res;
+    if (HeapWordsPerOop > 0) {
+      old_res = length * HeapWordsPerOop;
+    } else {
+      old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+    }
+#endif  // ASSERT
+    int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
+    assert(res == old_res, "Inconsistency between old and new.");
+    return res;
+  }
+
  public:
   // Returns the offset of the first element.
   static int base_offset_in_bytes() {

@@ -67,27 +93,14 @@ class objArrayOopDesc : public arrayOopDesc {
   // Sizing
   static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
   int object_size() { return object_size(length()); }
-  int array_size()  { return array_size(length()); }
 
   static int object_size(int length) {
     // This returns the object size in HeapWords.
-    return align_object_size(header_size() + array_size(length));
-  }
-
-  // Give size of objArrayOop in HeapWords minus the header
-  static int array_size(int length) {
-    // Without UseCompressedOops, this is simply:
-    //   oop->length() * HeapWordsPerOop;
-    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
-    // The oop elements are aligned up to wordSize
-    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
-    if (HeapWordsPerOop > 0) {
-      return length * HeapWordsPerOop;
-    } else {
-      const int OopsPerHeapWord = HeapWordSize/heapOopSize;
-      int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
-      return word_len;
-    }
+    uint asz = array_size(length);
+    uint osz = align_object_size(header_size() + asz);
+    assert(osz >= asz,   "no overflow");
+    assert((int)osz > 0, "no overflow");
+    return (int)osz;
   }
 
   // special iterators for index ranges, returns size of object

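Note on the two objArrayOopDesc hunks above: the new array_size() is plain round-up division of oop slots into HeapWords. With compressed oops on 64-bit, OopsPerHeapWord is 2, so 5 narrow oops need ceil(5/2) = 3 words; with full-width oops the formula degenerates to one oop per word. A worked check under those assumptions (the free function here is a standalone restatement, not the HotSpot member):

    #include <cassert>

    // Round length oop slots up to whole HeapWords, as the patch does.
    int array_size(int length, int oops_per_heap_word) {
      return ((unsigned)length + oops_per_heap_word - 1) / oops_per_heap_word;
    }

    int main() {
      assert(array_size(5, 2) == 3);  // compressed oops: 5 slots -> 3 words
      assert(array_size(4, 2) == 2);  // exact fit needs no padding
      assert(array_size(5, 1) == 5);  // full-width oops: one per word
      return 0;
    }
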
hotspot/src/share/vm/services/g1MemoryPool.cpp (new file, 162 lines):

/*
 * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_g1MemoryPool.cpp.incl"

G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
                                     const char* name,
                                     size_t init_size,
                                     size_t max_size,
                                     bool support_usage_threshold) :
  _g1h(g1h), CollectedMemoryPool(name,
                                 MemoryPool::Heap,
                                 init_size,
                                 max_size,
                                 support_usage_threshold) {
  assert(UseG1GC, "sanity");
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
  size_t young_list_length = g1h->young_list_length();
  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
  size_t survivor_used = survivor_space_used(g1h);
  eden_used = subtract_up_to_zero(eden_used, survivor_used);
  return eden_used;
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::eden_space_max(G1CollectedHeap* g1h) {
  // This should ensure that it returns a value no smaller than the
  // region size. Currently, eden_space_committed() guarantees that.
  return eden_space_committed(g1h);
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
  return survivor_used;
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::survivor_space_max(G1CollectedHeap* g1h) {
  // This should ensure that it returns a value no smaller than the
  // region size. Currently, survivor_space_committed() guarantees that.
  return survivor_space_committed(g1h);
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
  size_t committed = overall_committed(g1h);
  size_t eden_committed = eden_space_committed(g1h);
  size_t survivor_committed = survivor_space_committed(g1h);
  committed = subtract_up_to_zero(committed, eden_committed);
  committed = subtract_up_to_zero(committed, survivor_committed);
  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
  return committed;
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
  size_t used = overall_used(g1h);
  size_t eden_used = eden_space_used(g1h);
  size_t survivor_used = survivor_space_used(g1h);
  used = subtract_up_to_zero(used, eden_used);
  used = subtract_up_to_zero(used, survivor_used);
  return used;
}

// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::old_space_max(G1CollectedHeap* g1h) {
  size_t max = overall_max(g1h);
  size_t eden_max = eden_space_max(g1h);
  size_t survivor_max = survivor_space_max(g1h);
  max = subtract_up_to_zero(max, eden_max);
  max = subtract_up_to_zero(max, survivor_max);
  max = MAX2(max, (size_t) HeapRegion::GrainBytes);
  return max;
}

G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
  G1MemoryPoolSuper(g1h,
                    "G1 Eden",
                    eden_space_committed(g1h), /* init_size */
                    eden_space_max(g1h),       /* max_size */
                    false /* support_usage_threshold */) {
}

MemoryUsage G1EdenPool::get_memory_usage() {
  size_t initial_sz = initial_size();
  size_t max_sz     = max_size();
  size_t used       = used_in_bytes();
  size_t committed  = eden_space_committed(_g1h);

  return MemoryUsage(initial_sz, used, committed, max_sz);
}

G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
  G1MemoryPoolSuper(g1h,
                    "G1 Survivor",
                    survivor_space_committed(g1h), /* init_size */
                    survivor_space_max(g1h),       /* max_size */
                    false /* support_usage_threshold */) {
}

MemoryUsage G1SurvivorPool::get_memory_usage() {
  size_t initial_sz = initial_size();
  size_t max_sz     = max_size();
  size_t used       = used_in_bytes();
  size_t committed  = survivor_space_committed(_g1h);

  return MemoryUsage(initial_sz, used, committed, max_sz);
}

G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
  G1MemoryPoolSuper(g1h,
                    "G1 Old Gen",
                    old_space_committed(g1h), /* init_size */
                    old_space_max(g1h),       /* max_size */
                    true /* support_usage_threshold */) {
}

MemoryUsage G1OldGenPool::get_memory_usage() {
  size_t initial_sz = initial_size();
  size_t max_sz     = max_size();
  size_t used       = used_in_bytes();
  size_t committed  = old_space_committed(_g1h);

  return MemoryUsage(initial_sz, used, committed, max_sz);
}

197
hotspot/src/share/vm/services/g1MemoryPool.hpp
Normal file
197
hotspot/src/share/vm/services/g1MemoryPool.hpp
Normal file
|
@ -0,0 +1,197 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||||
|
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||||
|
* have any questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
class G1CollectedHeap;
|
||||||
|
|
||||||
|
// This file contains the three classes that represent the memory
|
||||||
|
// pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
|
||||||
|
// G1OldGenPool. In G1, unlike our other GCs, we do not have a
|
||||||
|
// physical space for each of those spaces. Instead, we allocate
|
||||||
|
// regions for all three spaces out of a single pool of regions (that
|
||||||
|
// pool basically covers the entire heap). As a result, the eden,
|
||||||
|
// survivor, and old gen are considered logical spaces in G1, as each
|
||||||
|
// is a set of non-contiguous regions. This is also reflected in the
|
||||||
|
// way we map them to memory pools here. The easiest way to have done
|
||||||
|
// this would have been to map the entire G1 heap to a single memory
|
||||||
|
// pool. However, it's helpful to show how large the eden and survivor
|
||||||
|
// get, as this does affect the performance and behavior of G1. Which
|
||||||
|
// is why we introduce the three memory pools implemented here.
|
||||||
|
//
|
||||||
|
// The above approach inroduces a couple of challenging issues in the
|
||||||
|
// implementation of the three memory pools:
|
||||||
|
//
|
||||||
|
// 1) The used space calculation for a pool is not necessarily
|
||||||
|
// independent of the others. We can easily get from G1 the overall
|
||||||
|
// used space in the entire heap, the number of regions in the young
|
||||||
|
// generation (includes both eden and survivors), and the number of
|
||||||
|
// survivor regions. So, from that we calculate:
|
||||||
|
//
|
||||||
|
// survivor_used = survivor_num * region_size
|
||||||
|
// eden_used = young_region_num * region_size - survivor_used
|
||||||
|
// old_gen_used = overall_used - eden_used - survivor_used
|
||||||
|
//
|
||||||
|
// Note that survivor_used and eden_used are upper bounds. To get the
|
||||||
|
// actual value we would have to iterate over the regions and add up
|
||||||
|
// ->used(). But that'd be expensive. So, we'll accept some lack of
|
||||||
|
// accuracy for those two. But, we have to be careful when calculating
|
||||||
|
// old_gen_used, in case we subtract from overall_used more then the
|
||||||
|
// actual number and our result goes negative.
|
||||||
|
//
|
||||||
|
// 2) Calculating the used space is straightforward, as described
|
||||||
|
// above. However, how do we calculate the committed space, given that
|
||||||
|
// we allocate space for the eden, survivor, and old gen out of the
|
||||||
|
// same pool of regions? One way to do this is to use the used value
|
||||||
|
// as also the committed value for the eden and survivor spaces and
|
||||||
|
// then calculate the old gen committed space as follows:
|
||||||
|
//
|
||||||
|
// old_gen_committed = overall_committed - eden_committed - survivor_committed
|
||||||
|
//
|
||||||
|
// Maybe a better way to do that would be to calculate used for eden
|
||||||
|
// and survivor as a sum of ->used() over their regions and then
|
||||||
|
// calculate committed as region_num * region_size (i.e., what we use
|
||||||
|
// to calculate the used space now). This is something to consider
|
||||||
|
// in the future.
|
||||||
|
//
|
||||||
|
// 3) Another decision that is again not straightforward is what is
|
||||||
|
// the max size that each memory pool can grow to. Right now, we set
|
||||||
|
// that the committed size for the eden and the survivors and
|
||||||
|
// calculate the old gen max as follows (basically, it's a similar
|
||||||
|
// pattern to what we use for the committed space, as described
|
||||||
|
// above):
|
||||||
|
//
|
||||||
|
// old_gen_max = overall_max - eden_max - survivor_max
|
||||||
|
//
|
||||||
|
// 4) Now, there is a very subtle issue with all the above. The
|
||||||
|
// framework will call get_memory_usage() on the three pools
|
||||||
|
// asynchronously. As a result, each call might get a different value
|
||||||
|
// for, say, survivor_num which will yield inconsistent values for
|
||||||
|
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
|
||||||
|
// in the calculation of all three). This would normally be
|
||||||
|
// ok. However, it's possible that this might cause the sum of
|
||||||
|
// eden_used, survivor_used, and old_gen_used to go over the max heap
|
||||||
|
// size and this seems to sometimes cause JConsole (and maybe other
|
||||||
|
// clients) to get confused. There's not a really an easy / clean
|
||||||
|
// solution to this problem, due to the asynchrounous nature of the
|
||||||
|
// framework.
|
||||||
|
|
||||||
|
|
||||||
|
// This class is shared by the three G1 memory pool classes
|
||||||
|
// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
|
||||||
|
// calculate used / committed bytes for these three pools is related
|
||||||
|
// (see comment above), we put the calculations in this class so that
|
||||||
|
// we can easily share them among the subclasses.
|
||||||
|
class G1MemoryPoolSuper : public CollectedMemoryPool {
|
||||||
|
private:
|
||||||
|
// It returns x - y if x > y, 0 otherwise.
|
||||||
|
// As described in the comment above, some of the inputs to the
|
||||||
|
// calculations we have to do are obtained concurrently and hence
|
||||||
|
// may be inconsistent with each other. So, this provides a
|
||||||
|
// defensive way of performing the subtraction and avoids the value
|
||||||
|
// going negative (which would mean a very large result, given that
|
||||||
|
// the parameter are size_t).
|
||||||
|
static size_t subtract_up_to_zero(size_t x, size_t y) {
|
||||||
|
if (x > y) {
|
||||||
|
return x - y;
|
||||||
|
} else {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
G1CollectedHeap* _g1h;
|
||||||
|
|
||||||
|
// Would only be called from subclasses.
|
||||||
|
G1MemoryPoolSuper(G1CollectedHeap* g1h,
|
||||||
|
const char* name,
|
||||||
|
size_t init_size,
|
||||||
|
size_t max_size,
|
||||||
|
bool support_usage_threshold);
|
||||||
|
|
||||||
|
// The reason why all the code is in static methods is so that it
|
||||||
|
// can be safely called from the constructors of the subclasses.
|
||||||
|
|
||||||
|
static size_t overall_committed(G1CollectedHeap* g1h) {
|
||||||
|
return g1h->capacity();
|
||||||
|
}
|
||||||
|
static size_t overall_used(G1CollectedHeap* g1h) {
|
||||||
|
return g1h->used_unlocked();
|
||||||
|
}
|
||||||
|
static size_t overall_max(G1CollectedHeap* g1h) {
|
||||||
|
return g1h->g1_reserved_obj_bytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t eden_space_committed(G1CollectedHeap* g1h);
|
||||||
|
static size_t eden_space_used(G1CollectedHeap* g1h);
|
||||||
|
static size_t eden_space_max(G1CollectedHeap* g1h);
|
||||||
|
|
||||||
|
static size_t survivor_space_committed(G1CollectedHeap* g1h);
|
||||||
|
static size_t survivor_space_used(G1CollectedHeap* g1h);
|
||||||
|
static size_t survivor_space_max(G1CollectedHeap* g1h);
|
||||||
|
|
||||||
|
static size_t old_space_committed(G1CollectedHeap* g1h);
|
||||||
|
static size_t old_space_used(G1CollectedHeap* g1h);
|
||||||
|
static size_t old_space_max(G1CollectedHeap* g1h);
|
||||||
|
};

// Memory pool that represents the G1 eden.
class G1EdenPool : public G1MemoryPoolSuper {
public:
  G1EdenPool(G1CollectedHeap* g1h);

  size_t used_in_bytes() {
    return eden_space_used(_g1h);
  }
  size_t max_size() const {
    return eden_space_max(_g1h);
  }
  MemoryUsage get_memory_usage();
};

// Memory pool that represents the G1 survivor.
class G1SurvivorPool : public G1MemoryPoolSuper {
public:
  G1SurvivorPool(G1CollectedHeap* g1h);

  size_t used_in_bytes() {
    return survivor_space_used(_g1h);
  }
  size_t max_size() const {
    return survivor_space_max(_g1h);
  }
  MemoryUsage get_memory_usage();
};

// Memory pool that represents the G1 old gen.
class G1OldGenPool : public G1MemoryPoolSuper {
public:
  G1OldGenPool(G1CollectedHeap* g1h);

  size_t used_in_bytes() {
    return old_space_used(_g1h);
  }
  size_t max_size() const {
    return old_space_max(_g1h);
  }
  MemoryUsage get_memory_usage();
};
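The pool constructors are declared above but their definitions are not part of this section of the diff. A plausible sketch of how G1EdenPool could forward the statically computed sizes to G1MemoryPoolSuper (the pool name and argument choices here are assumptions):

// Hypothetical sketch of the .cpp side; not shown in this hunk.
G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
  G1MemoryPoolSuper(g1h,
                    "G1 Eden",                  // assumed pool name
                    eden_space_committed(g1h),  // static helpers are safe to
                    eden_space_max(g1h),        // call before 'this' exists
                    false /* support_usage_threshold */) {
}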

@ -72,6 +72,14 @@ GCMemoryManager* MemoryManager::get_psMarkSweep_memory_manager() {
   return (GCMemoryManager*) new PSMarkSweepMemoryManager();
 }
 
+GCMemoryManager* MemoryManager::get_g1YoungGen_memory_manager() {
+  return (GCMemoryManager*) new G1YoungGenMemoryManager();
+}
+
+GCMemoryManager* MemoryManager::get_g1OldGen_memory_manager() {
+  return (GCMemoryManager*) new G1OldGenMemoryManager();
+}
+
 instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
   // Must do an acquire so as to force ordering of subsequent
   // loads from anything _memory_mgr_obj points to or implies.

@ -54,7 +54,9 @@ public:
     ParNew,
     ConcurrentMarkSweep,
     PSScavenge,
-    PSMarkSweep
+    PSMarkSweep,
+    G1YoungGen,
+    G1OldGen
   };
 
   MemoryManager();

@ -85,6 +87,8 @@ public:
   static GCMemoryManager* get_cms_memory_manager();
   static GCMemoryManager* get_psScavenge_memory_manager();
   static GCMemoryManager* get_psMarkSweep_memory_manager();
+  static GCMemoryManager* get_g1YoungGen_memory_manager();
+  static GCMemoryManager* get_g1OldGen_memory_manager();
 
 };

@ -231,3 +235,21 @@ public:
   MemoryManager::Name kind() { return MemoryManager::PSMarkSweep; }
   const char* name() { return "PS MarkSweep"; }
 };
+
+class G1YoungGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1YoungGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1YoungGen; }
+  const char* name() { return "G1 Young Generation"; }
+};
+
+class G1OldGenMemoryManager : public GCMemoryManager {
+private:
+public:
+  G1OldGenMemoryManager() : GCMemoryManager() {}
+
+  MemoryManager::Name kind() { return MemoryManager::G1OldGen; }
+  const char* name() { return "G1 Old Generation"; }
+};

@ -60,8 +60,8 @@ void MemoryService::set_universe_heap(CollectedHeap* heap) {
       break;
     }
     case CollectedHeap::G1CollectedHeap : {
-      G1CollectedHeap::g1_unimplemented();
-      return;
+      add_g1_heap_info(G1CollectedHeap::heap());
+      break;
     }
 #endif // SERIALGC
     default: {

@ -164,6 +164,19 @@ void MemoryService::add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap)
   add_psOld_memory_pool(heap->old_gen(), _major_gc_manager);
   add_psPerm_memory_pool(heap->perm_gen(), _major_gc_manager);
 }
+
+void MemoryService::add_g1_heap_info(G1CollectedHeap* g1h) {
+  assert(UseG1GC, "sanity");
+
+  _minor_gc_manager = MemoryManager::get_g1YoungGen_memory_manager();
+  _major_gc_manager = MemoryManager::get_g1OldGen_memory_manager();
+  _managers_list->append(_minor_gc_manager);
+  _managers_list->append(_major_gc_manager);
+
+  add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager);
+  add_g1OldGen_memory_pool(g1h, _major_gc_manager);
+  add_g1PermGen_memory_pool(g1h, _major_gc_manager);
+}
 #endif // SERIALGC
 
 MemoryPool* MemoryService::add_gen(Generation* gen,
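For reference, the manager-to-pool wiring that results from add_g1_heap_info() above, derived from the add_g1*_memory_pool() definitions in the next hunk (the note on why eden and survivor appear under both managers is an inference, not stated in the patch):

// "G1 Young Generation" (minor) -> { eden, survivor }
// "G1 Old Generation"   (major) -> { eden, survivor, old gen, perm gen }
//
// Eden and survivor are registered with both managers, presumably because
// a full collection also affects the young regions.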

@ -384,6 +397,64 @@ void MemoryService::add_psPerm_memory_pool(PSPermGen* gen, MemoryManager* mgr) {
   mgr->add_pool(perm_gen);
   _pools_list->append(perm_gen);
 }
+
+void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                               MemoryManager* major_mgr,
+                                               MemoryManager* minor_mgr) {
+  assert(major_mgr != NULL && minor_mgr != NULL, "should have two managers");
+
+  G1EdenPool* eden = new G1EdenPool(g1h);
+  G1SurvivorPool* survivor = new G1SurvivorPool(g1h);
+
+  major_mgr->add_pool(eden);
+  major_mgr->add_pool(survivor);
+  minor_mgr->add_pool(eden);
+  minor_mgr->add_pool(survivor);
+  _pools_list->append(eden);
+  _pools_list->append(survivor);
+}
+
+void MemoryService::add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                             MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  G1OldGenPool* old_gen = new G1OldGenPool(g1h);
+  mgr->add_pool(old_gen);
+  _pools_list->append(old_gen);
+}
+
+void MemoryService::add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                              MemoryManager* mgr) {
+  assert(mgr != NULL, "should have one manager");
+
+  CompactingPermGenGen* perm_gen = (CompactingPermGenGen*) g1h->perm_gen();
+  PermanentGenerationSpec* spec = perm_gen->spec();
+  size_t max_size = spec->max_size() - spec->read_only_size()
+                                     - spec->read_write_size();
+  MemoryPool* pool = add_space(perm_gen->unshared_space(),
+                               "G1 Perm Gen",
+                               false, /* is_heap */
+                               max_size,
+                               true   /* support_usage_threshold */);
+  mgr->add_pool(pool);
+
+  // in case we support CDS in G1
+  if (UseSharedSpaces) {
+    pool = add_space(perm_gen->ro_space(),
+                     "G1 Perm Gen [shared-ro]",
+                     false, /* is_heap */
+                     spec->read_only_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+
+    pool = add_space(perm_gen->rw_space(),
+                     "G1 Perm Gen [shared-rw]",
+                     false, /* is_heap */
+                     spec->read_write_size(),
+                     true   /* support_usage_threshold */);
+    mgr->add_pool(pool);
+  }
+}
 #endif // SERIALGC
 
 void MemoryService::add_code_heap_memory_pool(CodeHeap* heap) {

@ -40,6 +40,7 @@ class GenCollectedHeap;
 class ParallelScavengeHeap;
 class CompactingPermGenGen;
 class CMSPermGenGen;
+class G1CollectedHeap;
 
 // VM Monitoring and Management Support

@ -88,6 +89,13 @@ private:
   static void add_psPerm_memory_pool(PSPermGen* perm,
                                      MemoryManager* mgr);
 
+  static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
+                                         MemoryManager* major_mgr,
+                                         MemoryManager* minor_mgr);
+  static void add_g1OldGen_memory_pool(G1CollectedHeap* g1h,
+                                       MemoryManager* mgr);
+  static void add_g1PermGen_memory_pool(G1CollectedHeap* g1h,
+                                        MemoryManager* mgr);
 
   static MemoryPool* add_space(ContiguousSpace* space,
                                const char* name,

@ -111,6 +119,7 @@ private:
 
   static void add_gen_collected_heap_info(GenCollectedHeap* heap);
   static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap);
+  static void add_g1_heap_info(G1CollectedHeap* g1h);
 
 public:
   static void set_universe_heap(CollectedHeap* heap);

@ -241,3 +241,33 @@ double TruncatedSeq::predict_next() const {
 
   return b0 + b1 * num;
 }
+
+
+// Printing/Debugging Support
+
+void AbsSeq::dump() { dump_on(gclog_or_tty); }
+
+void AbsSeq::dump_on(outputStream* s) {
+  s->print_cr("\t _num = %d, _sum = %7.3f, _sum_of_squares = %7.3f",
+              _num, _sum, _sum_of_squares);
+  s->print_cr("\t _davg = %7.3f, _dvariance = %7.3f, _alpha = %7.3f",
+              _davg, _dvariance, _alpha);
+}
+
+void NumberSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _last = %7.3f, _maximum = %7.3f", _last, _maximum);
+}
+
+void TruncatedSeq::dump_on(outputStream* s) {
+  AbsSeq::dump_on(s);
+  s->print_cr("\t\t _length = %d, _next = %d", _length, _next);
+  for (int i = 0; i < _length; i++) {
+    if (i%5 == 0) {
+      s->cr();
+      s->print("\t");
+    }
+    s->print("\t[%d]=%7.3f", i, _sequence[i]);
+  }
+  s->print_cr("");
+}
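The _davg, _dvariance and _alpha fields printed above are exponentially decaying statistics. A standalone sketch of the usual update rule (the exact AbsSeq semantics are an assumption here; alpha is the weight given to history):

#include <cstdio>
#include <initializer_list>

// Assumed decaying-average/variance update, mirroring the fields dumped above.
struct DecayingStats {
  double alpha;      // weight of the old value, e.g. 0.7
  double davg;       // decaying average
  double dvariance;  // decaying variance

  void add(double val) {
    double diff = val - davg;
    davg      = (1.0 - alpha) * val + alpha * davg;
    dvariance = (1.0 - alpha) * diff * diff + alpha * dvariance;
  }
};

int main() {
  DecayingStats s = { 0.7, 0.0, 0.0 };
  for (double v : { 10.0, 12.0, 11.0, 50.0 }) {   // outlier last
    s.add(v);
    printf("val = %5.1f  davg = %7.3f  dvariance = %8.3f\n",
           v, s.davg, s.dvariance);
  }
  return 0;
}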

@ -74,6 +74,10 @@ public:
   double davg() const; // decaying average
   double dvariance() const; // decaying variance
   double dsd() const; // decaying "standard deviation"
+
+  // Debugging/Printing
+  virtual void dump();
+  virtual void dump_on(outputStream* s);
 };
 
 class NumberSeq: public AbsSeq {

@ -91,6 +95,9 @@ public:
   virtual void add(double val);
   virtual double maximum() const { return _maximum; }
   virtual double last() const { return _last; }
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };
 
 class TruncatedSeq: public AbsSeq {

@ -114,4 +121,7 @@ public:
 
   double oldest() const; // the oldest valid value in the sequence
   double predict_next() const; // prediction based on linear regression
+
+  // Debugging/Printing
+  virtual void dump_on(outputStream* s);
 };
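predict_next() extrapolates via a least-squares line, matching the "return b0 + b1 * num;" context earlier in this diff. A standalone sketch of that fit over n samples (only the b0/b1 names come from the patch; the rest is an assumption):

#include <cstdio>

// Fit y = b0 + b1 * x over x = 0..n-1, then predict the value at x = n.
// Requires n >= 2.
static double predict_next(const double* y, int n) {
  double sum_x = 0.0, sum_y = 0.0, sum_xx = 0.0, sum_xy = 0.0;
  for (int i = 0; i < n; i++) {
    sum_x  += i;
    sum_y  += y[i];
    sum_xx += (double)i * i;
    sum_xy += i * y[i];
  }
  double b1 = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
  double b0 = (sum_y - b1 * sum_x) / n;
  return b0 + b1 * n;
}

int main() {
  double samples[] = { 2.0, 4.0, 6.0, 8.0 };          // y = 2 + 2x
  printf("next = %7.3f\n", predict_next(samples, 4)); // prints 10.000
  return 0;
}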