6672698: mangle_unused_area() should not remangle the entire heap at each collection

Maintain a high water mark for the allocations in a space and mangle only up to that high water mark.

Reviewed-by: ysr, apetrusenko
Jon Masamitsu 2008-07-09 15:08:55 -07:00
parent 0d9452401c
commit 18dbebd143
43 changed files with 1299 additions and 206 deletions
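For orientation, a minimal sketch of the high-water-mark idea behind this change (the method and field names follow the diff below, but the body is an illustration, not the committed implementation):

  // Illustrative sketch only. Each space remembers the highest address that
  // has held allocations since it was last fully mangled (its high water mark).
  // mangle_unused_area() then only remangles [top, high water mark) instead of
  // [top, end) at every collection.
  void SpaceManglerSketch::mangle_unused_area() {
    assert(ZapUnusedHeapArea, "mangling requested but not enabled");
    HeapWord* const mangle_end = MIN2(top_for_allocations(), end());
    if (top() < mangle_end) {
      mangle_region(MemRegion(top(), mangle_end));  // fill with the bad-heap pattern
    }
    // Everything above top() is now mangled, so lower the high water mark.
    set_top_for_allocations(top());
  }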


@ -170,9 +170,20 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
if (desired_size > orig_size) {
// Grow the generation
size_t change = desired_size - orig_size;
HeapWord* prev_low = (HeapWord*) virtual_space()->low();
if (!virtual_space()->expand_by(change)) {
return false;
}
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_low = (HeapWord*) virtual_space()->low();
assert(new_low < prev_low, "Did not grow");
MemRegion mangle_region(new_low, prev_low);
SpaceMangler::mangle_region(mangle_region);
}
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;
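SpaceMangler::mangle_region(), used above, is introduced by this change; a hedged sketch of what it amounts to (the fill helper and pattern constant are HotSpot names quoted from memory, so treat them as assumptions):

  void SpaceMangler::mangle_region(MemRegion mr) {
    assert(ZapUnusedHeapArea, "mangling should only run when ZapUnusedHeapArea is set");
    // Overwrite every word of the region with the bad-heap pattern so stale
    // references into unused heap stand out in a debugger.
    Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
  }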
@ -215,8 +226,10 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// current implementation does not allow holes between the spaces
// _young_generation_boundary has to be reset because it changes.
// so additional verification
void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) {
assert(UseAdaptiveSizePolicy, "sanity check");
assert(requested_eden_size > 0 && requested_survivor_size > 0,
"just checking");
@ -276,22 +289,42 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
const size_t alignment = heap->intra_heap_alignment();
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space
if (from_start < to_start) {
if (eden_from_to_order) {
// Eden, from, to
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:");
}
// Set eden
// Compute how big eden can be, then adjust end.
// See comment in PSYoungGen::resize_spaces() on
// calculating eden_end.
const size_t eden_size = MIN2(requested_eden_size,
pointer_delta(from_start,
eden_start,
sizeof(char)));
// "requested_eden_size" is a goal for the size of eden
// and may not be attainable. "eden_size" below is
// calculated based on the location of from-space and
// the goal for the size of eden. from-space is
// fixed in place because it contains live data.
// The calculation is done this way to avoid 32bit
// overflow (i.e., eden_start + requested_eden_size
// may be too large for representation in 32 bits).
size_t eden_size;
if (maintain_minimum) {
// Only make eden larger than the requested size if
// the minimum size of the generation has to be maintained.
// This could be done in general but policy at a higher
// level is determining a requested size for eden and that
// should be honored unless there is a fundamental reason.
eden_size = pointer_delta(from_start,
eden_start,
sizeof(char));
} else {
eden_size = MIN2(requested_eden_size,
pointer_delta(from_start, eden_start, sizeof(char)));
}
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")
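The overflow concern in the comment above is easiest to see with hypothetical 32-bit numbers:

  // Hypothetical 32-bit illustration (addresses invented for the example).
  char*  eden_start          = (char*) 0xF0000000;
  size_t requested_eden_size =         0x20000000;   // 512 MB request
  // eden_start + requested_eden_size is 0x110000000, which wraps to 0x10000000
  // in 32 bits, so an eden_end computed that way would land below eden_start.
  // pointer_delta(from_start, eden_start, sizeof(char)) is a size rather than
  // an address, so MIN2(requested_eden_size, pointer_delta(...)) cannot wrap
  // and eden_start + eden_size stays within [eden_start, from_start].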
@ -371,12 +404,14 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
to_start = MAX2(to_start, eden_start + alignment);
// Compute how big eden can be, then adjust end.
// See comment in PSYoungGen::resize_spaces() on
// calculating eden_end.
const size_t eden_size = MIN2(requested_eden_size,
pointer_delta(to_start,
eden_start,
sizeof(char)));
// See comments above on calculating eden_end.
size_t eden_size;
if (maintain_minimum) {
eden_size = pointer_delta(to_start, eden_start, sizeof(char));
} else {
eden_size = MIN2(requested_eden_size,
pointer_delta(to_start, eden_start, sizeof(char)));
}
eden_end = eden_start + eden_size;
assert(eden_end >= eden_start, "addition overflowed")
@ -423,9 +458,47 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();
eden_space()->initialize(edenMR, true);
to_space()->initialize(toMR , true);
from_space()->initialize(fromMR, false); // Note, not cleared!
if (ZapUnusedHeapArea) {
// NUMA is a special case because a NUMA space is not mangled,
// so as not to prematurely bind its pages to the wrong memory
// (i.e., we don't want the GC thread to be the first to touch
// the memory). The survivor spaces are not NUMA spaces and are
// mangled.
if (UseNUMA) {
if (eden_from_to_order) {
mangle_survivors(from_space(), fromMR, to_space(), toMR);
} else {
mangle_survivors(to_space(), toMR, from_space(), fromMR);
}
}
// The spaces should already be correctly mangled at this point,
// so do some checking here to verify that. Note that they are
// not being mangled in the calls to initialize() below.
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into an area
// covered by another space and a failure of the check may
// not correctly indicate which space is not properly mangled.
HeapWord* limit = (HeapWord*) virtual_space()->high();
eden_space()->check_mangled_unused_area(limit);
from_space()->check_mangled_unused_area(limit);
to_space()->check_mangled_unused_area(limit);
}
// When an existing space is being initialized, it is not
// mangled because the space has been previously mangled.
eden_space()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
to_space()->initialize(toMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from_space()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);
PSScavenge::set_young_generation_boundary(eden_space()->bottom());
assert(from_space()->top() == old_from_top, "from top changed!");
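The SpaceDecorator::Clear/DontClear and Mangle/DontMangle arguments above replace the old bare booleans; a hedged sketch of the new constants (they are defined in the spaceDecorator files added by this change, which are not shown in this excerpt):

  class SpaceDecorator: public AllStatic {
   public:
    // Named values so initialize() call sites read as intent rather than as
    // anonymous true/false arguments.
    static const bool Clear      = true;
    static const bool DontClear  = false;
    static const bool Mangle     = true;
    static const bool DontMangle = false;
  };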
@ -446,7 +519,6 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
}
space_invariants();
}
void ASPSYoungGen::reset_after_change() {
assert_locked_or_safepoint(Heap_lock);
@ -458,7 +530,9 @@ void ASPSYoungGen::reset_after_change() {
HeapWord* eden_bottom = eden_space()->bottom();
if (new_eden_bottom != eden_bottom) {
MemRegion eden_mr(new_eden_bottom, eden_space()->end());
eden_space()->initialize(eden_mr, true);
eden_space()->initialize(eden_mr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle);
PSScavenge::set_young_generation_boundary(eden_space()->bottom());
}
MemRegion cmr((HeapWord*)virtual_space()->low(),


@ -666,9 +666,9 @@ void CardTableExtension::resize_commit_uncommit(int changed_region,
HeapWord* new_end_for_commit =
MIN2(cur_committed.end(), _guard_region.start());
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
if(!new_committed.is_empty()) {
if(new_start_aligned < new_end_for_commit) {
MemRegion new_committed =
MemRegion(new_start_aligned, new_end_for_commit);
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size())) {
vm_exit_out_of_memory(new_committed.byte_size(),


@ -938,3 +938,23 @@ void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
// Delegate the resize to the generation.
_old_gen->resize(desired_free_space);
}
#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
if (ZapUnusedHeapArea) {
young_gen()->record_spaces_top();
old_gen()->record_spaces_top();
perm_gen()->record_spaces_top();
}
}
void ParallelScavengeHeap::gen_mangle_unused_area() {
if (ZapUnusedHeapArea) {
young_gen()->eden_space()->mangle_unused_area();
young_gen()->to_space()->mangle_unused_area();
young_gen()->from_space()->mangle_unused_area();
old_gen()->object_space()->mangle_unused_area();
perm_gen()->object_space()->mangle_unused_area();
}
}
#endif
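Taken together, the two helpers above bracket a collection roughly like this (a usage sketch; the actual call sites appear in the collector diffs below):

  // Before the collection: remember how far allocation reached in each space.
  heap->record_gen_tops_before_GC();   // per-space set_top_for_allocations()

  // ... the collection runs; tops drop as spaces are emptied or compacted ...

  // After the collection: remangle only [new top, saved high water mark) in
  // each space instead of the whole unused area.
  heap->gen_mangle_unused_area();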


@ -213,6 +213,12 @@ class ParallelScavengeHeap : public CollectedHeap {
// Resize the old generation. The reserved space for the
// generation may be expanded in preparation for the resize.
void resize_old_gen(size_t desired_free_space);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
// Mangle the unused parts of all spaces in the heap
void gen_mangle_unused_area() PRODUCT_RETURN;
};
inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
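The PRODUCT_RETURN suffix on the two declarations above keeps these debug-only helpers out of product builds; a hedged sketch of the usual HotSpot definition (quoted from memory, so treat as an assumption):

  #ifdef PRODUCT
  #define PRODUCT_RETURN  {}   // product build: the declaration gets an empty inline body
  #else
  #define PRODUCT_RETURN       // debug build: plain declaration; the body lives in an #ifndef PRODUCT block
  #endif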


@ -98,6 +98,9 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
// Increment the invocation count
heap->increment_total_collections(true /* full */);
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
// We need to track unique mark sweep invocations as well.
_total_invocations++;
@ -188,6 +191,12 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
deallocate_stacks();
if (ZapUnusedHeapArea) {
// Do a complete mangle (top to end) because the usage for
// scratch does not maintain a top pointer.
young_gen->to_space()->mangle_unused_area_complete();
}
eden_empty = young_gen->eden_space()->is_empty();
if (!eden_empty) {
eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
@ -198,7 +207,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
Universe::update_heap_info_at_gc();
survivors_empty = young_gen->from_space()->is_empty() &&
young_gen->to_space()->is_empty();
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
BarrierSet* bs = heap->barrier_set();
@ -344,6 +353,11 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
perm_gen->verify_object_start_array();
}
if (ZapUnusedHeapArea) {
old_gen->object_space()->check_mangled_unused_area_complete();
perm_gen->object_space()->check_mangled_unused_area_complete();
}
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
if (PrintHeapAtGC) {


@ -438,5 +438,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
"should point inside space");
space()->set_top(compaction_top());
if (mangle_free_space) space()->mangle_unused_area();
if (mangle_free_space) {
space()->mangle_unused_area();
}
}


@ -87,6 +87,15 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately rather than
// waiting for the initialization of the space even though
// mangling is related to spaces. Doing it here eliminates
// the need to carry along information that a complete mangling
// (bottom to end) needs to be done.
SpaceMangler::mangle_region(cmr);
}
Universe::heap()->barrier_set()->resize_covered_region(cmr);
CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
@ -112,7 +121,9 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
if (_object_space == NULL)
vm_exit_during_initialization("Could not allocate an old gen space");
object_space()->initialize(cmr, true);
object_space()->initialize(cmr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle);
_object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
@ -232,6 +243,19 @@ bool PSOldGen::expand_by(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
bool result = virtual_space()->expand_by(bytes);
if (result) {
if (ZapUnusedHeapArea) {
// We need to mangle the newly expanded area. The MemRegion spans
// end -> new_end; we assume that top -> end is already mangled.
// Do the mangling before post_resize() is called because the
// space is available for allocation after post_resize().
HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
assert(object_space()->end() < virtual_space_high,
"Should be true before post_resize()");
MemRegion mangle_region(object_space()->end(), virtual_space_high);
// Note that the object space has not yet been updated to
// coincide with the new underlying virtual space.
SpaceMangler::mangle_region(mangle_region);
}
post_resize();
if (UsePerfData) {
_space_counters->update_capacity();
@ -348,16 +372,7 @@ void PSOldGen::post_resize() {
start_array()->set_covered_region(new_memregion);
Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
// Did we expand?
HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
if (object_space()->end() < virtual_space_high) {
// We need to mangle the newly expanded area. The memregion spans
// end -> new_end, we assume that top -> end is already mangled.
// This cannot be safely tested for, as allocation may be taking
// place.
MemRegion mangle_region(object_space()->end(), virtual_space_high);
object_space()->mangle_region(mangle_region);
}
// ALWAYS do this last!!
object_space()->set_end(virtual_space_high);
@ -462,3 +477,10 @@ void PSOldGen::verify_object_start_array() {
VerifyObjectStartArrayClosure check( this, &_start_array );
object_iterate(&check);
}
#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
object_space()->set_top_for_allocations();
}
#endif


@ -185,4 +185,8 @@ class PSOldGen : public CHeapObj {
// Printing support
virtual const char* name() const { return _name; }
// Debugging support
// Save the tops of all spaces for later use during mangling.
void record_spaces_top() PRODUCT_RETURN;
};


@ -1058,6 +1058,10 @@ void PSParallelCompact::post_compact()
ref_processor()->enqueue_discovered_references(NULL);
if (ZapUnusedHeapArea) {
heap->gen_mangle_unused_area();
}
// Update time of last GC
reset_millis_since_last_gc();
}
@ -1959,6 +1963,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
PSPermGen* perm_gen = heap->perm_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
}
_print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
// Make sure data structures are sane, make the heap parsable, and do other
@ -2127,17 +2136,19 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
size_t max_eden_size = young_gen->max_size() -
young_gen->from_space()->capacity_in_bytes() -
young_gen->to_space()->capacity_in_bytes();
size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
young_gen->eden_space()->used_in_bytes(),
old_gen->used_in_bytes(),
perm_gen->used_in_bytes(),
young_gen->eden_space()->capacity_in_bytes(),
old_gen->max_gen_size(),
max_eden_size,
true /* full gc*/,
gc_cause);
size_policy->compute_generation_free_space(
young_gen->used_in_bytes(),
young_gen->eden_space()->used_in_bytes(),
old_gen->used_in_bytes(),
perm_gen->used_in_bytes(),
young_gen->eden_space()->capacity_in_bytes(),
old_gen->max_gen_size(),
max_eden_size,
true /* full gc*/,
gc_cause);
heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
heap->resize_old_gen(
size_policy->calculated_old_free_size_in_bytes());
// Don't resize the young generation at a major collection. A
// desired young generation size may have been calculated but
@ -2210,6 +2221,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
perm_gen->verify_object_start_array();
}
if (ZapUnusedHeapArea) {
old_gen->object_space()->check_mangled_unused_area_complete();
perm_gen->object_space()->check_mangled_unused_area_complete();
}
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
collection_exit.update();


@ -716,6 +716,99 @@ class BitBlockUpdateClosure: public ParMarkBitMapClosure {
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
};
// The UseParallelOldGC collector is a stop-the-world garbage
// collector that does parts of the collection using parallel threads.
// The collection includes the tenured generation and the young
// generation. The permanent generation is collected at the same
// time as the other two generations but the permanent generation
// is collected by a single GC thread. The permanent generation is
// collected serially because of the requirement that during the
// processing of a klass AAA, any objects referenced by AAA must
// already have been processed. This requirement is enforced by
// a left (lower address) to right (higher address) sliding compaction.
//
// There are four phases of the collection.
//
// - marking phase
// - summary phase
// - compacting phase
// - clean up phase
//
// Roughly speaking these phases correspond, respectively, to
// - mark all the live objects
// - calculate the destination of each object at the end of the collection
// - move the objects to their destination
// - update some references and reinitialize some variables
//
// These phases are invoked in PSParallelCompact::invoke_no_policy().
// The marking phase is implemented in PSParallelCompact::marking_phase()
// and does a complete marking of the heap.
// The summary phase is implemented in PSParallelCompact::summary_phase().
// The move and update phase is implemented in PSParallelCompact::compact().
//
// A space that is being collected is divided into chunks and with
// each chunk is associated an object of type ParallelCompactData.
// Each chunk is of a fixed size and typically will contain more than
// 1 object and may have parts of objects at the front and back of the
// chunk.
//
// chunk -----+---------------------+----------
// objects covered [ AAA )[ BBB )[ CCC )[ DDD )
//
// The marking phase does a complete marking of all live objects in the
// heap. The marking also compiles the size of the data for
// all live objects covered by the chunk. This size includes the
// part of any live object spanning onto the chunk (part of AAA
// if it is live) from the front, all live objects contained in the chunk
// (BBB and/or CCC if they are live), and the part of any live objects
// covered by the chunk that extends off the chunk (part of DDD if it is
// live). The marking phase uses multiple GC threads and marking is
// done in a bit array of type ParMarkBitMap. The marking of the
// bit map is done atomically as is the accumulation of the size of the
// live objects covered by a chunk.
//
// The summary phase calculates the total live data to the left of
// each chunk XXX. Based on that total and the bottom of the space,
// it can calculate the starting location of the live data in XXX.
// The summary phase calculates for each chunk XXX quantities such as
//
// - the amount of live data at the beginning of a chunk from an object
// entering the chunk.
// - the location of the first live data on the chunk
// - a count of the number of chunks receiving live data from XXX.
//
// See ParallelCompactData for precise details. The summary phase also
// calculates the dense prefix for the compaction. The dense prefix
// is a portion at the beginning of the space that is not moved. The
// objects in the dense prefix do need to have their object references
// updated. See method summarize_dense_prefix().
//
// The summary phase is done using 1 GC thread.
//
// The compaction phase moves objects to their new location and updates
// all references in the object.
//
// A current exception is that objects that cross a chunk boundary
// are moved but do not have their references updated. References are
// not updated because it cannot easily be determined if the klass
// pointer KKK for the object AAA has been updated. KKK likely resides
// in a chunk to the left of the chunk containing AAA. These AAA's
// have their references updated at the end in a clean up phase.
// See the method PSParallelCompact::update_deferred_objects(). An
// alternate strategy is being investigated for this deferral of updating.
//
// Compaction is done on a chunk basis. A chunk that is ready to be
// filled is put on a ready list and GC threads take chunks off the list
// and fill them. A chunk is ready to be filled if it is empty of live
// objects. Such a chunk may have been initially empty (only contained
// dead objects) or may have had all its live objects copied out already.
// A chunk that compacts into itself is also ready for filling. The
// ready list is initially filled with empty chunks and chunks compacting
// into themselves. There is always at least 1 chunk that can be put on
// the ready list. The chunks are atomically added and removed from
// the ready list.
//
class PSParallelCompact : AllStatic {
public:
// Convenient access to type names.


@ -265,6 +265,11 @@ bool PSScavenge::invoke_no_policy() {
young_gen->eden_space()->accumulate_statistics();
}
if (ZapUnusedHeapArea) {
// Save information needed to minimize mangling
heap->record_gen_tops_before_GC();
}
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
@ -315,7 +320,7 @@ bool PSScavenge::invoke_no_policy() {
if (!ScavengeWithObjectsInToSpace) {
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
young_gen->to_space()->clear();
young_gen->to_space()->clear(SpaceDecorator::Mangle);
} else if (ZapUnusedHeapArea) {
young_gen->to_space()->mangle_unused_area();
}
@ -437,8 +442,10 @@ bool PSScavenge::invoke_no_policy() {
if (!promotion_failure_occurred) {
// Swap the survivor spaces.
young_gen->eden_space()->clear();
young_gen->from_space()->clear();
young_gen->eden_space()->clear(SpaceDecorator::Mangle);
young_gen->from_space()->clear(SpaceDecorator::Mangle);
young_gen->swap_spaces();
size_t survived = young_gen->from_space()->used_in_bytes();
@ -600,6 +607,12 @@ bool PSScavenge::invoke_no_policy() {
Universe::print_heap_after_gc();
}
if (ZapUnusedHeapArea) {
young_gen->eden_space()->check_mangled_unused_area_complete();
young_gen->from_space()->check_mangled_unused_area_complete();
young_gen->to_space()->check_mangled_unused_area_complete();
}
scavenge_exit.update();
if (PrintGCTaskTimeStamps) {
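The check_mangled_unused_area_complete() calls above are debug-only verification; a hedged sketch of what such a check amounts to (plain C++; the real helper and pattern constant live in the new SpaceMangler code and are not shown here):

  #include <cassert>
  #include <cstdint>

  // Verify that every word in [from, to) still carries the mangle pattern
  // written by an earlier mangle_region() call.
  void check_region_is_mangled(const uintptr_t* from, const uintptr_t* to,
                               uintptr_t mangle_pattern) {
    for (const uintptr_t* p = from; p < to; ++p) {
      assert(*p == mangle_pattern && "unused heap word lost its mangle pattern");
    }
  }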


@ -36,7 +36,7 @@ PSYoungGen::PSYoungGen(size_t initial_size,
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
assert(_init_gen_size != 0, "Should have a finite size");
_virtual_space = new PSVirtualSpace(rs, alignment);
if (!_virtual_space->expand_by(_init_gen_size)) {
if (!virtual_space()->expand_by(_init_gen_size)) {
vm_exit_during_initialization("Could not reserve enough space for "
"object heap");
}
@ -49,13 +49,20 @@ void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
void PSYoungGen::initialize_work() {
_reserved = MemRegion((HeapWord*)_virtual_space->low_boundary(),
(HeapWord*)_virtual_space->high_boundary());
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
(HeapWord*)virtual_space()->high_boundary());
MemRegion cmr((HeapWord*)_virtual_space->low(),
(HeapWord*)_virtual_space->high());
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
SpaceMangler::mangle_region(cmr);
}
if (UseNUMA) {
_eden_space = new MutableNUMASpace();
} else {
@ -89,7 +96,7 @@ void PSYoungGen::initialize_work() {
// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->reserved_size();
size_t size = virtual_space()->reserved_size();
size_t max_survivor_size;
size_t max_eden_size;
@ -142,7 +149,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
// Compute sizes
size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->committed_size();
size_t size = virtual_space()->committed_size();
size_t survivor_size = size / InitialSurvivorRatio;
survivor_size = align_size_down(survivor_size, alignment);
@ -164,18 +171,18 @@ void PSYoungGen::compute_initial_space_boundaries() {
}
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
assert(eden_size < _virtual_space->committed_size(), "just checking");
assert(eden_size < virtual_space()->committed_size(), "just checking");
assert(eden_size > 0 && survivor_size > 0, "just checking");
// Initial layout is Eden, to, from. After swapping survivor spaces,
// that leaves us with Eden, from, to, which is step one in our two
// step resize-with-live-data procedure.
char *eden_start = _virtual_space->low();
char *eden_start = virtual_space()->low();
char *to_start = eden_start + eden_size;
char *from_start = to_start + survivor_size;
char *from_end = from_start + survivor_size;
assert(from_end == _virtual_space->high(), "just checking");
assert(from_end == virtual_space()->high(), "just checking");
assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
assert(is_object_aligned((intptr_t)to_start), "checking alignment");
assert(is_object_aligned((intptr_t)from_start), "checking alignment");
@ -184,9 +191,9 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
eden_space()->initialize(eden_mr, true);
to_space()->initialize(to_mr , true);
from_space()->initialize(from_mr, true);
eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
}
#ifndef PRODUCT
@ -207,7 +214,7 @@ void PSYoungGen::space_invariants() {
char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end();
guarantee(eden_start >= _virtual_space->low(), "eden bottom");
guarantee(eden_start >= virtual_space()->low(), "eden bottom");
guarantee(eden_start < eden_end, "eden space consistency");
guarantee(from_start < from_end, "from space consistency");
guarantee(to_start < to_end, "to space consistency");
@ -217,29 +224,29 @@ void PSYoungGen::space_invariants() {
// Eden, from, to
guarantee(eden_end <= from_start, "eden/from boundary");
guarantee(from_end <= to_start, "from/to boundary");
guarantee(to_end <= _virtual_space->high(), "to end");
guarantee(to_end <= virtual_space()->high(), "to end");
} else {
// Eden, to, from
guarantee(eden_end <= to_start, "eden/to boundary");
guarantee(to_end <= from_start, "to/from boundary");
guarantee(from_end <= _virtual_space->high(), "from end");
guarantee(from_end <= virtual_space()->high(), "from end");
}
// More checks that the virtual space is consistent with the spaces
assert(_virtual_space->committed_size() >=
assert(virtual_space()->committed_size() >=
(eden_space()->capacity_in_bytes() +
to_space()->capacity_in_bytes() +
from_space()->capacity_in_bytes()), "Committed size is inconsistent");
assert(_virtual_space->committed_size() <= _virtual_space->reserved_size(),
assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
"Space invariant");
char* eden_top = (char*)eden_space()->top();
char* from_top = (char*)from_space()->top();
char* to_top = (char*)to_space()->top();
assert(eden_top <= _virtual_space->high(), "eden top");
assert(from_top <= _virtual_space->high(), "from top");
assert(to_top <= _virtual_space->high(), "to top");
assert(eden_top <= virtual_space()->high(), "eden top");
assert(from_top <= virtual_space()->high(), "from top");
assert(to_top <= virtual_space()->high(), "to top");
_virtual_space->verify();
virtual_space()->verify();
}
#endif
@ -265,8 +272,8 @@ void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
const size_t alignment = _virtual_space->alignment();
size_t orig_size = _virtual_space->committed_size();
const size_t alignment = virtual_space()->alignment();
size_t orig_size = virtual_space()->committed_size();
bool size_changed = false;
// There used to be this guarantee there.
@ -288,10 +295,18 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// Grow the generation
size_t change = desired_size - orig_size;
assert(change % alignment == 0, "just checking");
if (!_virtual_space->expand_by(change)) {
HeapWord* prev_high = (HeapWord*) virtual_space()->high();
if (!virtual_space()->expand_by(change)) {
return false; // Error if we fail to resize!
}
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_high = (HeapWord*) virtual_space()->high();
MemRegion mangle_region(prev_high, new_high);
SpaceMangler::mangle_region(mangle_region);
}
size_changed = true;
} else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size;
@ -321,19 +336,95 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
post_resize();
if (Verbose && PrintGC) {
size_t current_size = _virtual_space->committed_size();
size_t current_size = virtual_space()->committed_size();
gclog_or_tty->print_cr("PSYoung generation size changed: "
SIZE_FORMAT "K->" SIZE_FORMAT "K",
orig_size/K, current_size/K);
}
}
guarantee(eden_plus_survivors <= _virtual_space->committed_size() ||
_virtual_space->committed_size() == max_size(), "Sanity");
guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
virtual_space()->committed_size() == max_size(), "Sanity");
return true;
}
#ifndef PRODUCT
// In the numa case eden is not mangled so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region. Also in the PS case eden,
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces). Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
MemRegion s1MR,
MutableSpace* s2,
MemRegion s2MR) {
// Check eden and gap between eden and from-space, in deciding
// what to mangle in from-space. Check the gap between from-space
// and to-space when deciding what to mangle.
//
// +--------+ +----+ +---+
// | eden | |s1 | |s2 |
// +--------+ +----+ +---+
// +-------+ +-----+
// |s1MR | |s2MR |
// +-------+ +-----+
// The current s1 region is assumed to be properly mangled already,
// so compute the upper bound (delta_end) for mangling the portion
// of the new region that lies below the current s1.
HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
MemRegion delta1_left;
if (s1MR.start() < delta_end) {
delta1_left = MemRegion(s1MR.start(), delta_end);
s1->mangle_region(delta1_left);
}
// Find any portion to the right of the current s1.
HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
MemRegion delta1_right;
if (delta_start < s1MR.end()) {
delta1_right = MemRegion(delta_start, s1MR.end());
s1->mangle_region(delta1_right);
}
// Similarly for the second survivor space except that
// any of the new region that overlaps with the current
// region of the first survivor space has already been
// mangled.
delta_end = MIN2(s2->bottom(), s2MR.end());
delta_start = MAX2(s2MR.start(), s1->end());
MemRegion delta2_left;
if (s2MR.start() < delta_end) {
delta2_left = MemRegion(s2MR.start(), delta_end);
s2->mangle_region(delta2_left);
}
delta_start = MAX2(s2->end(), s2MR.start());
MemRegion delta2_right;
if (delta_start < s2MR.end()) {
delta2_right = MemRegion(delta_start, s2MR.end());
s2->mangle_region(delta2_right);
}
if (TraceZapUnusedHeapArea) {
// s1
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta1_left.start(), delta1_left.end(), delta1_right.start(),
delta1_right.end());
// s2
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta2_left.start(), delta2_left.end(), delta2_right.start(),
delta2_right.end());
}
}
#endif // NOT PRODUCT
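A hedged worked example of the delta computation in mangle_survivors() above (addresses invented for illustration):

  // current s1: [0x1000, 0x2000)        new region s1MR: [0x0800, 0x1800)
  // delta1_left  = [s1MR.start(), MIN2(s1->bottom(), s1MR.end()))
  //              = [0x0800, 0x1000)  -> mangled: newly exposed space below the old s1
  // delta_start  = MAX2(s1->end(), s1MR.start()) = 0x2000
  // delta1_right = empty, since 0x2000 is not < s1MR.end() (0x1800)
  // Only the parts of the new region outside the old, already-mangled s1 are touched.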
void PSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) {
@ -396,9 +487,11 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space
if (from_start < to_start) {
if (eden_from_to_order) {
// Eden, from, to
eden_from_to_order = true;
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:");
}
@ -435,7 +528,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// extra calculations.
// First calculate an optimal to-space
to_end = (char*)_virtual_space->high();
to_end = (char*)virtual_space()->high();
to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
sizeof(char));
@ -491,7 +584,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// to space as if we were able to resize from space, even though from
// space is not modified.
// Giving eden priority was tried and gave poorer performance.
to_end = (char*)pointer_delta(_virtual_space->high(),
to_end = (char*)pointer_delta(virtual_space()->high(),
(char*)requested_survivor_size,
sizeof(char));
to_end = MIN2(to_end, from_start);
@ -560,9 +653,45 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes();
eden_space()->initialize(edenMR, true);
to_space()->initialize(toMR , true);
from_space()->initialize(fromMR, false); // Note, not cleared!
if (ZapUnusedHeapArea) {
// NUMA is a special case because a NUMA space is not mangled,
// so as not to prematurely bind its pages to the wrong memory
// (i.e., we don't want the GC thread to be the first to touch
// the memory). The survivor spaces are not NUMA spaces and are
// mangled.
if (UseNUMA) {
if (eden_from_to_order) {
mangle_survivors(from_space(), fromMR, to_space(), toMR);
} else {
mangle_survivors(to_space(), toMR, from_space(), fromMR);
}
}
// The spaces should already be correctly mangled at this point,
// so do some checking here to verify that. Note that they are
// not being mangled in the calls to initialize() below.
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into an area
// covered by another space and a failure of the check may
// not correctly indicate which space is not properly mangled.
HeapWord* limit = (HeapWord*) virtual_space()->high();
eden_space()->check_mangled_unused_area(limit);
from_space()->check_mangled_unused_area(limit);
to_space()->check_mangled_unused_area(limit);
}
// When an existing space is being initialized, it is not
// mangled because the space has been previously mangled.
eden_space()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
to_space()->initialize(toMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from_space()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);
assert(from_space()->top() == old_from_top, "from top changed!");
@ -671,7 +800,7 @@ void PSYoungGen::print_on(outputStream* st) const {
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity_in_bytes()/K, used_in_bytes()/K);
}
_virtual_space->print_space_boundaries_on(st);
virtual_space()->print_space_boundaries_on(st);
st->print(" eden"); eden_space()->print_on(st);
st->print(" from"); from_space()->print_on(st);
st->print(" to "); to_space()->print_on(st);
@ -774,7 +903,9 @@ void PSYoungGen::reset_survivors_after_shrink() {
// Was there a shrink of the survivor space?
if (new_end < space_shrinking->end()) {
MemRegion mr(space_shrinking->bottom(), new_end);
space_shrinking->initialize(mr, false /* clear */);
space_shrinking->initialize(mr,
SpaceDecorator::DontClear,
SpaceDecorator::Mangle);
}
}
@ -809,3 +940,12 @@ void PSYoungGen::verify(bool allow_dirty) {
from_space()->verify(allow_dirty);
to_space()->verify(allow_dirty);
}
#ifndef PRODUCT
void PSYoungGen::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
eden_space()->set_top_for_allocations();
from_space()->set_top_for_allocations();
to_space()->set_top_for_allocations();
}
#endif


@ -179,4 +179,12 @@ class PSYoungGen : public CHeapObj {
// Space boundary invariant checker
void space_invariants() PRODUCT_RETURN;
// Helper for mangling survivor spaces.
void mangle_survivors(MutableSpace* s1,
MemRegion s1MR,
MutableSpace* s2,
MemRegion s2MR) PRODUCT_RETURN;
void record_spaces_top() PRODUCT_RETURN;
};