Jon Masamitsu 2008-07-28 15:30:23 -07:00
commit b8633a3b01
46 changed files with 1401 additions and 256 deletions


@@ -172,15 +172,25 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
_gen_counters);
compute_space_boundaries(0);
compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
update_counters();
_next_gen = NULL;
_tenuring_threshold = MaxTenuringThreshold;
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
bool clear_space,
bool mangle_space) {
uintx alignment =
GenCollectedHeap::heap()->collector_policy()->min_alignment();
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
// Otherwise, no care is taken for used areas in the survivor spaces
// so check.
assert(clear_space || (to()->is_empty() && from()->is_empty()),
"Initialization of the survivor spaces assumes these are empty");
// Compute sizes
uintx size = _virtual_space.committed_size();
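The new flag arguments passed to compute_space_boundaries() above are named constants rather than bare booleans. A minimal sketch of how the SpaceDecorator flags could be declared, inferred purely from the call sites in this diff (the actual spaceDecorator.hpp added by this commit is not shown here):

// Sketch only: named boolean flags so that call sites such as
// compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle)
// read as self-documenting.
class SpaceDecorator {
 public:
  // Whether a space's contents should be cleared when it is (re)initialized.
  static const bool Clear      = true;
  static const bool DontClear  = false;
  // Whether the unused portion of a space should be filled with the
  // ZapUnusedHeapArea debug pattern.
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};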
@@ -214,16 +224,41 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
eden()->initialize(edenMR, (minimum_eden_size == 0));
// If minimum_eden_size != 0, we will not have cleared any
// A minimum eden size implies that there is a part of eden that
// is being used and that affects the initialization of any
// newly formed eden.
bool live_in_eden = minimum_eden_size > 0;
// If not clearing the spaces, do some checking to verify that
// the spaces are already mangled.
if (!clear_space) {
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into another
// space, and a failure of the check may not correctly indicate
// which space is not properly mangled.
if (ZapUnusedHeapArea) {
HeapWord* limit = (HeapWord*) _virtual_space.high();
eden()->check_mangled_unused_area(limit);
from()->check_mangled_unused_area(limit);
to()->check_mangled_unused_area(limit);
}
}
// Reset the spaces for their new regions.
eden()->initialize(edenMR,
clear_space && !live_in_eden,
SpaceDecorator::Mangle);
// If clear_space and live_in_eden, we will not have cleared any
// portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea.
// We explicitly do such mangling here.
if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
eden()->mangle_unused_area();
}
from()->initialize(fromMR, true);
to()->initialize(toMR , true);
from()->initialize(fromMR, clear_space, mangle_space);
to()->initialize(toMR, clear_space, mangle_space);
// Set next compaction spaces.
eden()->set_next_compaction_space(from());
// The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion
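From the call sites above, initialize() now takes separate clear and mangle decisions, and a space that is cleared has its top reset to bottom and may optionally be mangled above top. A hypothetical, simplified rendering of that behavior (ExampleSpace and its members are illustrative names, not the actual Space implementation touched by this commit):

// Illustrative only: approximates the semantics implied by calls like
//   eden()->initialize(edenMR, clear_space && !live_in_eden, SpaceDecorator::Mangle);
//   from()->initialize(fromMR, clear_space, mangle_space);
class ExampleSpace {
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _end;
 public:
  void initialize(MemRegion mr, bool clear_space, bool mangle_space) {
    _bottom = mr.start();
    _end    = mr.end();
    if (clear_space) {
      clear(mangle_space);
    }
  }
  void clear(bool mangle_space) {
    _top = _bottom;                          // empty the space
    if (ZapUnusedHeapArea && mangle_space) {
      // Fill [top, end) with the debug pattern so stale data is detectable.
      SpaceMangler::mangle_region(MemRegion(_top, _end));
    }
  }
};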
@@ -250,7 +285,16 @@ void DefNewGeneration::swap_spaces() {
bool DefNewGeneration::expand(size_t bytes) {
MutexLocker x(ExpandHeap_lock);
HeapWord* prev_high = (HeapWord*) _virtual_space.high();
bool success = _virtual_space.expand_by(bytes);
if (success && ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_high = (HeapWord*) _virtual_space.high();
MemRegion mangle_region(prev_high, new_high);
SpaceMangler::mangle_region(mangle_region);
}
// Do not attempt an expand to the reserve size. The
// request should properly observe the maximum size of
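SpaceMangler::mangle_region(), used above when the virtual space grows, is introduced elsewhere in this change set; from its use here it plausibly just fills the given region with the debug pattern used by ZapUnusedHeapArea. A minimal sketch under that assumption (badHeapWordVal and Copy::fill_to_words are existing HotSpot debug facilities, not part of this hunk):

// Sketch: fill a region of the heap with the ZapUnusedHeapArea debug
// pattern so that accidental reads of "unused" memory are easy to spot.
void SpaceMangler::mangle_region(MemRegion mr) {
  assert(ZapUnusedHeapArea, "mangling should only be done under ZapUnusedHeapArea");
  Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWordVal);
}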
@@ -262,7 +306,8 @@ bool DefNewGeneration::expand(size_t bytes) {
// value.
if (GC_locker::is_active()) {
if (PrintGC && Verbose) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
gclog_or_tty->print_cr("Garbage collection disabled, "
"expanded heap instead");
}
}
@@ -326,16 +371,24 @@ void DefNewGeneration::compute_new_size() {
changed = true;
}
if (changed) {
compute_space_boundaries(eden()->used());
MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
// The spaces have already been mangled at this point but
// may not yet have been cleared (top reset to bottom), which
// should be done now. Mangling was done when the heap was expanded.
compute_space_boundaries(eden()->used(),
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (Verbose && PrintGC) {
size_t new_size_after = _virtual_space.committed_size();
size_t eden_size_after = eden()->capacity();
size_t survivor_size_after = from()->capacity();
gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
SIZE_FORMAT "K [eden="
SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
new_size_before/K, new_size_after/K,
eden_size_after/K, survivor_size_after/K);
if (WizardMode) {
gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
thread_increase_size/K, threads_count);
@@ -480,7 +533,7 @@ void DefNewGeneration::collect(bool full,
ScanWeakRefClosure scan_weak_ref(this);
age_table()->clear();
to()->clear();
to()->clear(SpaceDecorator::Mangle);
gch->rem_set()->prepare_for_younger_refs_iterate(false);
@@ -525,8 +578,18 @@ void DefNewGeneration::collect(bool full,
soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
if (!promotion_failed()) {
// Swap the survivor spaces.
eden()->clear();
from()->clear();
eden()->clear(SpaceDecorator::Mangle);
from()->clear(SpaceDecorator::Mangle);
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// sufficient space, resizing of the young generation can occur
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
}
swap_spaces();
assert(to()->is_empty(), "to space should be empty now");
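The piece-meal scheme described in the comment above depends on being able to verify at intermediate points that everything above top still carries the zap pattern. Purely as an illustration of what such a verification amounts to (the helper below is hypothetical; it assumes the fill pattern is a replicated 32-bit value such as badHeapWordVal, as HotSpot's word-fill helpers use):

// Illustration only: assert that [start, limit) still holds the mangling
// pattern, i.e. nothing has been allocated or written there since the
// region was last mangled.
static void verify_region_is_mangled(HeapWord* start, HeapWord* limit) {
  for (juint* p = (juint*)start; p < (juint*)limit; p++) {
    assert(*p == badHeapWordVal, "unused heap word was not mangled");
  }
}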
@@ -753,6 +816,15 @@ void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* reque
}
}
void DefNewGeneration::reset_scratch() {
// If contributing scratch in to_space, mangle all of
// to_space if ZapUnusedHeapArea. This is needed because
// top is not maintained while using to-space as scratch.
if (ZapUnusedHeapArea) {
to()->mangle_unused_area_complete();
}
}
bool DefNewGeneration::collection_attempt_is_safe() {
if (!to()->is_empty()) {
return false;
@@ -806,11 +878,25 @@ void DefNewGeneration::gc_epilogue(bool full) {
}
}
if (ZapUnusedHeapArea) {
eden()->check_mangled_unused_area_complete();
from()->check_mangled_unused_area_complete();
to()->check_mangled_unused_area_complete();
}
// update the generation and space performance counters
update_counters();
gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
eden()->set_top_for_allocations();
to()->set_top_for_allocations();
from()->set_top_for_allocations();
}
void DefNewGeneration::update_counters() {
if (UsePerfData) {
_eden_counters->update_all();
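Taken together, the calls added in this file imply a per-space mangling helper with roughly the following surface. This is only a summary of the entry points visible in the diff; the class layout, the _top_for_allocations field, and the comments are assumptions, and in the diff the methods are invoked on the spaces themselves, which presumably delegate to such a helper:

// Rough interface implied by the calls above; not the actual header added
// by this commit.
class SpaceMangler {
  // High-water mark recorded by set_top_for_allocations(); allocations are
  // expected below this point, so piece-meal mangling only touches the
  // area above it.
  HeapWord* _top_for_allocations;
 public:
  void set_top_for_allocations();                   // see record_spaces_top()
  void mangle_unused_area();                        // piece-meal mangling above top
  void mangle_unused_area_complete();               // mangle the entire unused area
  void check_mangled_unused_area(HeapWord* limit);  // partial verification
  void check_mangled_unused_area_complete();        // full verification
  static void mangle_region(MemRegion mr);          // mangle an explicit region
};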