Jon Masamitsu 2008-07-28 15:30:23 -07:00
commit b8633a3b01
46 changed files with 1401 additions and 256 deletions

View file

@@ -2278,7 +2278,7 @@ void os::Linux::libnuma_init() {
                                         dlsym(RTLD_DEFAULT, "sched_getcpu")));
   if (sched_getcpu() != -1) { // Does it work?
-    void *handle = dlopen("libnuma.so", RTLD_LAZY);
+    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
     if (handle != NULL) {
       set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                            dlsym(handle, "numa_node_to_cpus")));
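
The switch from "libnuma.so" to "libnuma.so.1" matters because the unversioned name is usually a symlink installed only by the development package, while the versioned soname ships with the runtime package. A minimal sketch of the same load-then-resolve pattern, using the v1-style numa_node_to_cpus signature this code assumes (the helper name is illustrative):

#include <dlfcn.h>
#include <cstdio>

// Resolve numa_node_to_cpus() at runtime; returns NULL when libnuma is absent.
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long* buffer, int bufferlen);

static numa_node_to_cpus_func_t load_numa_node_to_cpus() {
  // Prefer the versioned soname: present wherever the runtime library is
  // installed, whereas "libnuma.so" usually requires the -devel package.
  void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  if (handle == NULL) {
    return NULL;  // fall back to non-NUMA behavior
  }
  return (numa_node_to_cpus_func_t) dlsym(handle, "numa_node_to_cpus");
}

int main() {
  numa_node_to_cpus_func_t fn = load_numa_node_to_cpus();
  std::printf("numa_node_to_cpus %s\n", fn != NULL ? "resolved" : "not available");
  return 0;
}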

View file

@@ -2658,6 +2658,12 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
       top += r;
       cur++;
     }
+   if (bottom == 0) {
+     // Handle the situation when the OS reports no memory available.
+     // Assume UMA architecture.
+     ids[0] = 0;
+     return 1;
+   }
    return bottom;
  }
@@ -4581,7 +4587,7 @@ void os::Solaris::synchronization_init() {
 }

 void os::Solaris::liblgrp_init() {
-  void *handle = dlopen("liblgrp.so", RTLD_LAZY);
+  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
   if (handle != NULL) {
     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));

View file

@@ -71,8 +71,15 @@ TreeList* TreeList::as_TreeList(TreeChunk* tc) {
 TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
   TreeChunk* tc = (TreeChunk*) addr;
   assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
-  assert(tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL,
-    "Space should be clear");
+  // The space in the heap will have been mangled initially but
+  // is not remangled when a free chunk is returned to the free list
+  // (since it is used to maintain the chunk on the free list).
+  assert((ZapUnusedHeapArea &&
+          SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
+          SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
+          SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
+         (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
+    "Space should be clear or mangled");
   tc->setSize(size);
   tc->linkPrev(NULL);
   tc->linkNext(NULL);
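
The new assertion accepts a chunk whose header words are either zero or still carrying the debug fill pattern, since a chunk returned to the free list keeps its mangled contents except for the fields that thread it onto the list. A hypothetical sketch of an is_mangled()-style probe; the fill value and word type are illustrative, not SpaceMangler's actual internals:

#include <cstdint>

// A heap word counts as "mangled" if it still holds the debug fill pattern.
static const uintptr_t kManglePattern = 0xBAADBABE;  // illustrative value only

static bool is_mangled_word(const void* addr) {
  return *(const uintptr_t*) addr == kManglePattern;
}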

View file

@@ -54,7 +54,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   _collector(NULL)
 {
   _bt.set_space(this);
-  initialize(mr, true);
+  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   // We have all of "mr", all of which we place in the dictionary
   // as one big chunk. We'll need to decide here which of several
   // possible alternative dictionary implementations to use. For
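
Call sites throughout this change replace bare bool arguments with SpaceDecorator names, which keeps the clear-vs-mangle decision readable at a glance. A sketch of what such named flags could look like; only the names appear in the diff, so the definitions below are assumptions:

class SpaceDecorator {
 public:
  static const bool Clear      = true;
  static const bool DontClear  = false;
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};

// Before: space->initialize(mr, true);                        // opaque bool
// After:  space->initialize(mr, SpaceDecorator::Clear,
//                               SpaceDecorator::Mangle);      // self-documenting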

View file

@@ -22,7 +22,6 @@
  *
  */
-
 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
 class FreeBlockDictionary: public CHeapObj {

View file

@@ -85,6 +85,8 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
   }

   debug_only(void* prev_addr() const { return (void*)&_prev; })
+  debug_only(void* next_addr() const { return (void*)&_next; })
+  debug_only(void* size_addr() const { return (void*)&_size; })

   size_t size() const volatile {
     LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )

View file

@@ -28,6 +28,7 @@ binaryTreeDictionary.cpp allocationStats.hpp
 binaryTreeDictionary.cpp binaryTreeDictionary.hpp
 binaryTreeDictionary.cpp globals.hpp
 binaryTreeDictionary.cpp ostream.hpp
+binaryTreeDictionary.cpp spaceDecorator.hpp

 binaryTreeDictionary.hpp freeBlockDictionary.hpp
 binaryTreeDictionary.hpp freeList.hpp

@@ -114,6 +115,7 @@ compactibleFreeListSpace.cpp java.hpp
 compactibleFreeListSpace.cpp liveRange.hpp
 compactibleFreeListSpace.cpp oop.inline.hpp
 compactibleFreeListSpace.cpp resourceArea.hpp
+compactibleFreeListSpace.cpp spaceDecorator.hpp
 compactibleFreeListSpace.cpp universe.inline.hpp
 compactibleFreeListSpace.cpp vmThread.hpp

View file

@@ -22,16 +22,17 @@
 //
 //

 asParNewGeneration.hpp adaptiveSizePolicy.hpp
 asParNewGeneration.hpp parNewGeneration.hpp

 asParNewGeneration.cpp asParNewGeneration.hpp
 asParNewGeneration.cpp cmsAdaptiveSizePolicy.hpp
 asParNewGeneration.cpp cmsGCAdaptivePolicyCounters.hpp
 asParNewGeneration.cpp defNewGeneration.inline.hpp
 asParNewGeneration.cpp oop.pcgc.inline.hpp
 asParNewGeneration.cpp parNewGeneration.hpp
 asParNewGeneration.cpp referencePolicy.hpp
+asParNewGeneration.cpp spaceDecorator.hpp

 parCardTableModRefBS.cpp allocation.inline.hpp
 parCardTableModRefBS.cpp cardTableModRefBS.hpp

@@ -75,6 +76,7 @@ parNewGeneration.cpp referencePolicy.hpp
 parNewGeneration.cpp resourceArea.hpp
 parNewGeneration.cpp sharedHeap.hpp
 parNewGeneration.cpp space.hpp
+parNewGeneration.cpp spaceDecorator.hpp
 parNewGeneration.cpp workgroup.hpp

 parNewGeneration.hpp defNewGeneration.hpp

View file

@@ -53,14 +53,15 @@ asPSOldGen.cpp java.hpp
 asPSOldGen.cpp oop.inline.hpp
 asPSOldGen.cpp parallelScavengeHeap.hpp
 asPSOldGen.cpp psMarkSweepDecorator.hpp
 asPSOldGen.cpp asPSOldGen.hpp

 asPSYoungGen.hpp generationCounters.hpp
 asPSYoungGen.hpp mutableSpace.hpp
 asPSYoungGen.hpp objectStartArray.hpp
 asPSYoungGen.hpp spaceCounters.hpp
 asPSYoungGen.hpp psVirtualspace.hpp
 asPSYoungGen.hpp psYoungGen.hpp
+asPSYoungGen.hpp spaceDecorator.hpp

 asPSYoungGen.cpp gcUtil.hpp
 asPSYoungGen.cpp java.hpp

@@ -68,8 +69,9 @@ asPSYoungGen.cpp oop.inline.hpp
 asPSYoungGen.cpp parallelScavengeHeap.hpp
 asPSYoungGen.cpp psMarkSweepDecorator.hpp
 asPSYoungGen.cpp psScavenge.hpp
 asPSYoungGen.cpp asPSYoungGen.hpp
 asPSYoungGen.cpp psYoungGen.hpp
+asPSYoungGen.cpp spaceDecorator.hpp

 cardTableExtension.cpp cardTableExtension.hpp
 cardTableExtension.cpp gcTaskManager.hpp

@@ -225,6 +227,7 @@ psMarkSweep.cpp psYoungGen.hpp
 psMarkSweep.cpp referencePolicy.hpp
 psMarkSweep.cpp referenceProcessor.hpp
 psMarkSweep.cpp safepoint.hpp
+psMarkSweep.cpp spaceDecorator.hpp
 psMarkSweep.cpp symbolTable.hpp
 psMarkSweep.cpp systemDictionary.hpp
 psMarkSweep.cpp vmThread.hpp

@@ -239,6 +242,7 @@ psMarkSweepDecorator.cpp oop.inline.hpp
 psMarkSweepDecorator.cpp parallelScavengeHeap.hpp
 psMarkSweepDecorator.cpp psMarkSweep.hpp
 psMarkSweepDecorator.cpp psMarkSweepDecorator.hpp
+psMarkSweepDecorator.cpp spaceDecorator.hpp
 psMarkSweepDecorator.cpp systemDictionary.hpp

 psMarkSweepDecorator.hpp mutableSpace.hpp

@@ -290,6 +294,7 @@ psOldGen.cpp oop.inline.hpp
 psOldGen.cpp parallelScavengeHeap.hpp
 psOldGen.cpp psMarkSweepDecorator.hpp
 psOldGen.cpp psOldGen.hpp
+psOldGen.cpp spaceDecorator.hpp

 psOldGen.hpp psGenerationCounters.hpp
 psOldGen.hpp mutableSpace.hpp

@@ -351,6 +356,7 @@ psScavenge.cpp psTasks.hpp
 psScavenge.cpp referencePolicy.hpp
 psScavenge.cpp referenceProcessor.hpp
 psScavenge.cpp resourceArea.hpp
+psScavenge.cpp spaceDecorator.hpp
 psScavenge.cpp threadCritical.hpp
 psScavenge.cpp vmThread.hpp
 psScavenge.cpp vm_operations.hpp

@@ -409,8 +415,8 @@ psVirtualspace.hpp virtualspace.hpp
 psVirtualspace.cpp os.hpp
 psVirtualspace.cpp os_<os_family>.inline.hpp
 psVirtualspace.cpp psVirtualspace.hpp
 psVirtualspace.cpp virtualspace.hpp

 psYoungGen.cpp gcUtil.hpp
 psYoungGen.cpp java.hpp

@@ -419,7 +425,8 @@ psYoungGen.cpp parallelScavengeHeap.hpp
 psYoungGen.cpp psMarkSweepDecorator.hpp
 psYoungGen.cpp psScavenge.hpp
 psYoungGen.cpp psYoungGen.hpp
 psYoungGen.cpp mutableNUMASpace.hpp
+psYoungGen.cpp spaceDecorator.hpp

 psYoungGen.hpp psGenerationCounters.hpp
 psYoungGen.hpp mutableSpace.hpp

View file

@@ -56,6 +56,7 @@ markSweep.inline.hpp psParallelCompact.hpp
 mutableNUMASpace.cpp mutableNUMASpace.hpp
 mutableNUMASpace.cpp oop.inline.hpp
 mutableNUMASpace.cpp sharedHeap.hpp
+mutableNUMASpace.cpp spaceDecorator.hpp
 mutableNUMASpace.cpp thread_<os_family>.inline.hpp

 mutableNUMASpace.hpp mutableSpace.hpp

@@ -64,6 +65,7 @@ mutableNUMASpace.hpp gcUtil.hpp
 mutableSpace.cpp mutableSpace.hpp
 mutableSpace.cpp oop.inline.hpp
 mutableSpace.cpp safepoint.hpp
+mutableSpace.cpp spaceDecorator.hpp
 mutableSpace.cpp thread.hpp

 spaceCounters.cpp resourceArea.hpp

View file

@@ -162,10 +162,9 @@ bool ASParNewGeneration::resize_generation(size_t eden_size,
     // Grow the generation
     size_t change = desired_size - orig_size;
     assert(change % alignment == 0, "just checking");
-    if (!virtual_space()->expand_by(change)) {
+    if (!expand(change)) {
       return false; // Error if we fail to resize!
     }
-
     size_changed = true;
   } else if (desired_size < orig_size) {
     size_t desired_change = orig_size - desired_size;
@@ -222,7 +221,9 @@ void ASParNewGeneration::reset_survivors_after_shrink() {
     // Was there a shrink of the survivor space?
     if (new_end < to()->end()) {
       MemRegion mr(to()->bottom(), new_end);
-      to()->initialize(mr, false /* clear */);
+      to()->initialize(mr,
+                       SpaceDecorator::DontClear,
+                       SpaceDecorator::DontMangle);
     }
   }
 }

@@ -322,9 +323,7 @@ void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
                        pointer_delta(from_start, eden_start, sizeof(char)));
   }
-  // tty->print_cr("eden_size before: " SIZE_FORMAT, eden_size);
   eden_size = align_size_down(eden_size, alignment);
-  // tty->print_cr("eden_size after: " SIZE_FORMAT, eden_size);
   eden_end = eden_start + eden_size;

   assert(eden_end >= eden_start, "addition overflowed")
@@ -501,11 +500,31 @@ void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
   size_t old_from = from()->capacity();
   size_t old_to   = to()->capacity();

+  // If not clearing the spaces, do some checking to verify that
+  // the spaces are already mangled.
+  // Must check mangling before the spaces are reshaped. Otherwise,
+  // the bottom or end of one space may have moved into another and
+  // a failure of the check may not correctly indicate which space
+  // is not properly mangled.
+  if (ZapUnusedHeapArea) {
+    HeapWord* limit = (HeapWord*) virtual_space()->high();
+    eden()->check_mangled_unused_area(limit);
+    from()->check_mangled_unused_area(limit);
+    to()->check_mangled_unused_area(limit);
+  }
+
   // The call to initialize NULL's the next compaction space
-  eden()->initialize(edenMR, true);
+  eden()->initialize(edenMR,
+                     SpaceDecorator::Clear,
+                     SpaceDecorator::DontMangle);
   eden()->set_next_compaction_space(from());
-  to()->initialize(toMR, true);
-  from()->initialize(fromMR, false);     // Note, not cleared!
+  to()->initialize(toMR,
+                   SpaceDecorator::Clear,
+                   SpaceDecorator::DontMangle);
+  from()->initialize(fromMR,
+                     SpaceDecorator::DontClear,
+                     SpaceDecorator::DontMangle);

   assert(from()->top() == old_from_top, "from top changed!");

View file

@@ -727,7 +727,7 @@ void ParNewGeneration::collect(bool full,
   SpecializationStats::clear();

   age_table()->clear();
-  to()->clear();
+  to()->clear(SpaceDecorator::Mangle);

   gch->save_marks();
   assert(workers != NULL, "Need parallel worker threads.");

@@ -793,8 +793,18 @@ void ParNewGeneration::collect(bool full,
   }
   if (!promotion_failed()) {
     // Swap the survivor spaces.
-    eden()->clear();
-    from()->clear();
+    eden()->clear(SpaceDecorator::Mangle);
+    from()->clear(SpaceDecorator::Mangle);
+    if (ZapUnusedHeapArea) {
+      // This is now done here because of the piece-meal mangling which
+      // can check for valid mangling at intermediate points in the
+      // collection(s).  When a minor collection fails to collect
+      // sufficient space, resizing of the young generation can occur
+      // and redistribute the spaces in the young generation.  Mangle
+      // here so that unzapped regions don't get distributed to
+      // other spaces.
+      to()->mangle_unused_area();
+    }
     swap_spaces();

     assert(to()->is_empty(), "to space should be empty now");

View file

@@ -170,9 +170,20 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
   if (desired_size > orig_size) {
     // Grow the generation
     size_t change = desired_size - orig_size;
+    HeapWord* prev_low = (HeapWord*) virtual_space()->low();
     if (!virtual_space()->expand_by(change)) {
       return false;
     }
+    if (ZapUnusedHeapArea) {
+      // Mangle newly committed space immediately because it
+      // can be done here more simply than after the new
+      // spaces have been computed.
+      HeapWord* new_low = (HeapWord*) virtual_space()->low();
+      assert(new_low < prev_low, "Did not grow");
+      MemRegion mangle_region(new_low, prev_low);
+      SpaceMangler::mangle_region(mangle_region);
+    }
     size_changed = true;
   } else if (desired_size < orig_size) {
     size_t desired_change = orig_size - desired_size;

@@ -215,8 +226,10 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
 //  current implementation does not allow holes between the spaces
 //  _young_generation_boundary has to be reset because it changes.
 //  so additional verification

 void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                  size_t requested_survivor_size) {
+  assert(UseAdaptiveSizePolicy, "sanity check");
   assert(requested_eden_size > 0 && requested_survivor_size > 0,
          "just checking");
@@ -276,22 +289,42 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   const size_t alignment = heap->intra_heap_alignment();

+  const bool maintain_minimum =
+    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
+
+  bool eden_from_to_order = from_start < to_start;
   // Check whether from space is below to space
-  if (from_start < to_start) {
+  if (eden_from_to_order) {
     // Eden, from, to
     if (PrintAdaptiveSizePolicy && Verbose) {
       gclog_or_tty->print_cr("  Eden, from, to:");
     }

     // Set eden
-    // Compute how big eden can be, then adjust end.
-    // See comment in PSYoungGen::resize_spaces() on
-    // calculating eden_end.
-    const size_t eden_size = MIN2(requested_eden_size,
-                                  pointer_delta(from_start,
-                                                eden_start,
-                                                sizeof(char)));
+    // "requested_eden_size" is a goal for the size of eden
+    // and may not be attainable.  "eden_size" below is
+    // calculated based on the location of from-space and
+    // the goal for the size of eden.  from-space is
+    // fixed in place because it contains live data.
+    // The calculation is done this way to avoid 32bit
+    // overflow (i.e., eden_start + requested_eden_size
+    // may be too large for representation in 32 bits).
+    size_t eden_size;
+    if (maintain_minimum) {
+      // Only make eden larger than the requested size if
+      // the minimum size of the generation has to be maintained.
+      // This could be done in general but policy at a higher
+      // level is determining a requested size for eden and that
+      // should be honored unless there is a fundamental reason.
+      eden_size = pointer_delta(from_start,
+                                eden_start,
+                                sizeof(char));
+    } else {
+      eden_size = MIN2(requested_eden_size,
+                       pointer_delta(from_start, eden_start, sizeof(char)));
+    }
     eden_end = eden_start + eden_size;
     assert(eden_end >= eden_start, "addition overflowed")
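
The comment above is the key to this rework: on a 32-bit VM, eden_start + requested_eden_size can wrap, so the code sizes eden from the distance to the fixed from-space instead of testing the sum. A standalone sketch of the overflow-safe pattern (function and parameter names are illustrative):

#include <algorithm>
#include <cstddef>

char* compute_eden_end(char* eden_start, char* from_start,
                       size_t requested_eden_size, bool maintain_minimum) {
  // Distance up to the immovable from-space; a subtraction, so it cannot wrap.
  size_t available = (size_t)(from_start - eden_start);
  size_t eden_size = maintain_minimum ? available
                                      : std::min(requested_eden_size, available);
  return eden_start + eden_size;  // <= from_start by construction
}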
@@ -371,12 +404,14 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
     to_start = MAX2(to_start, eden_start + alignment);

     // Compute how big eden can be, then adjust end.
-    // See comment in PSYoungGen::resize_spaces() on
-    // calculating eden_end.
-    const size_t eden_size = MIN2(requested_eden_size,
-                                  pointer_delta(to_start,
-                                                eden_start,
-                                                sizeof(char)));
+    // See comments above on calculating eden_end.
+    size_t eden_size;
+    if (maintain_minimum) {
+      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
+    } else {
+      eden_size = MIN2(requested_eden_size,
+                       pointer_delta(to_start, eden_start, sizeof(char)));
+    }
     eden_end = eden_start + eden_size;
     assert(eden_end >= eden_start, "addition overflowed")
@@ -423,9 +458,47 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
   size_t old_from = from_space()->capacity_in_bytes();
   size_t old_to   = to_space()->capacity_in_bytes();

-  eden_space()->initialize(edenMR, true);
-  to_space()->initialize(toMR, true);
-  from_space()->initialize(fromMR, false);     // Note, not cleared!
+  if (ZapUnusedHeapArea) {
+    // NUMA is a special case because a numa space is not mangled,
+    // in order not to prematurely bind its address range to the
+    // wrong memory (i.e., we don't want the GC thread to be the
+    // first to touch the memory).  The survivor spaces are not
+    // numa spaces and are mangled.
+    if (UseNUMA) {
+      if (eden_from_to_order) {
+        mangle_survivors(from_space(), fromMR, to_space(), toMR);
+      } else {
+        mangle_survivors(to_space(), toMR, from_space(), fromMR);
+      }
+    }
+
+    // If not mangling the spaces, do some checking to verify that
+    // the spaces are already mangled.
+    // The spaces should be correctly mangled at this point so
+    // do some checking here. Note that they are not being mangled
+    // in the calls to initialize().
+    // Must check mangling before the spaces are reshaped. Otherwise,
+    // the bottom or end of one space may have moved into an area
+    // covered by another space and a failure of the check may
+    // not correctly indicate which space is not properly mangled.
+    HeapWord* limit = (HeapWord*) virtual_space()->high();
+    eden_space()->check_mangled_unused_area(limit);
+    from_space()->check_mangled_unused_area(limit);
+    to_space()->check_mangled_unused_area(limit);
+  }
+  // When an existing space is being initialized, it is not
+  // mangled because the space has been previously mangled.
+  eden_space()->initialize(edenMR,
+                           SpaceDecorator::Clear,
+                           SpaceDecorator::DontMangle);
+  to_space()->initialize(toMR,
+                         SpaceDecorator::Clear,
+                         SpaceDecorator::DontMangle);
+  from_space()->initialize(fromMR,
+                           SpaceDecorator::DontClear,
+                           SpaceDecorator::DontMangle);

   PSScavenge::set_young_generation_boundary(eden_space()->bottom());

   assert(from_space()->top() == old_from_top, "from top changed!");
@@ -446,7 +519,6 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
   }
-
   space_invariants();
 }

 void ASPSYoungGen::reset_after_change() {
   assert_locked_or_safepoint(Heap_lock);

@@ -458,7 +530,9 @@ void ASPSYoungGen::reset_after_change() {
   HeapWord* eden_bottom = eden_space()->bottom();
   if (new_eden_bottom != eden_bottom) {
     MemRegion eden_mr(new_eden_bottom, eden_space()->end());
-    eden_space()->initialize(eden_mr, true);
+    eden_space()->initialize(eden_mr,
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::Mangle);
     PSScavenge::set_young_generation_boundary(eden_space()->bottom());
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),

View file

@@ -666,9 +666,9 @@ void CardTableExtension::resize_commit_uncommit(int changed_region,
     HeapWord* new_end_for_commit =
       MIN2(cur_committed.end(), _guard_region.start());
-    MemRegion new_committed =
-      MemRegion(new_start_aligned, new_end_for_commit);
-    if(!new_committed.is_empty()) {
+    if(new_start_aligned < new_end_for_commit) {
+      MemRegion new_committed =
+        MemRegion(new_start_aligned, new_end_for_commit);
       if (!os::commit_memory((char*)new_committed.start(),
                              new_committed.byte_size())) {
         vm_exit_out_of_memory(new_committed.byte_size(),

View file

@@ -938,3 +938,23 @@ void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
   // Delegate the resize to the generation.
   _old_gen->resize(desired_free_space);
 }
+
+#ifndef PRODUCT
+void ParallelScavengeHeap::record_gen_tops_before_GC() {
+  if (ZapUnusedHeapArea) {
+    young_gen()->record_spaces_top();
+    old_gen()->record_spaces_top();
+    perm_gen()->record_spaces_top();
+  }
+}
+
+void ParallelScavengeHeap::gen_mangle_unused_area() {
+  if (ZapUnusedHeapArea) {
+    young_gen()->eden_space()->mangle_unused_area();
+    young_gen()->to_space()->mangle_unused_area();
+    young_gen()->from_space()->mangle_unused_area();
+    old_gen()->object_space()->mangle_unused_area();
+    perm_gen()->object_space()->mangle_unused_area();
+  }
+}
+#endif

View file

@@ -213,6 +213,12 @@ class ParallelScavengeHeap : public CollectedHeap {
   // Resize the old generation.  The reserved space for the
   // generation may be expanded in preparation for the resize.
   void resize_old_gen(size_t desired_free_space);
+
+  // Save the tops of the spaces in all generations
+  void record_gen_tops_before_GC() PRODUCT_RETURN;
+
+  // Mangle the unused parts of all spaces in the heap
+  void gen_mangle_unused_area() PRODUCT_RETURN;
 };

 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
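
PRODUCT_RETURN lets these debug-only hooks be declared unconditionally while costing nothing in product builds. A sketch of the idiom; the macro definition shown here is an assumption about the convention, not a copy of HotSpot's:

#ifdef PRODUCT
#define PRODUCT_RETURN {}   // product build: declaration becomes an empty inline body
#else
#define PRODUCT_RETURN      // debug build: plain prototype, defined in the .cpp under #ifndef PRODUCT
#endif

class HeapSketch {
 public:
  void record_gen_tops_before_GC() PRODUCT_RETURN;  // no-op unless debugging
};

#ifndef PRODUCT
void HeapSketch::record_gen_tops_before_GC() {
  // debug-only bookkeeping would go here
}
#endif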

View file

@@ -98,6 +98,9 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   // Increment the invocation count
   heap->increment_total_collections(true /* full */);

+  // Save information needed to minimize mangling
+  heap->record_gen_tops_before_GC();
+
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;

@@ -188,6 +191,12 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     deallocate_stacks();

+    if (ZapUnusedHeapArea) {
+      // Do a complete mangle (top to end) because the usage for
+      // scratch does not maintain a top pointer.
+      young_gen->to_space()->mangle_unused_area_complete();
+    }
+
     eden_empty = young_gen->eden_space()->is_empty();
     if (!eden_empty) {
       eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);

@@ -198,7 +207,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     Universe::update_heap_info_at_gc();

     survivors_empty = young_gen->from_space()->is_empty() &&
-      young_gen->to_space()->is_empty();
+                      young_gen->to_space()->is_empty();
     young_gen_empty = eden_empty && survivors_empty;

     BarrierSet* bs = heap->barrier_set();

@@ -344,6 +353,11 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     perm_gen->verify_object_start_array();
   }

+  if (ZapUnusedHeapArea) {
+    old_gen->object_space()->check_mangled_unused_area_complete();
+    perm_gen->object_space()->check_mangled_unused_area_complete();
+  }
+
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

   if (PrintHeapAtGC) {
View file

@@ -438,5 +438,7 @@ void PSMarkSweepDecorator::compact(bool mangle_free_space) {
          "should point inside space");
   space()->set_top(compaction_top());

-  if (mangle_free_space) space()->mangle_unused_area();
+  if (mangle_free_space) {
+    space()->mangle_unused_area();
+  }
 }

View file

@@ -87,6 +87,15 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
+  if (ZapUnusedHeapArea) {
+    // Mangle newly committed space immediately rather than
+    // waiting for the initialization of the space even though
+    // mangling is related to spaces.  Doing it here eliminates
+    // the need to carry along information that a complete mangling
+    // (bottom to end) needs to be done.
+    SpaceMangler::mangle_region(cmr);
+  }
+
   Universe::heap()->barrier_set()->resize_covered_region(cmr);

   CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();

@@ -112,7 +121,9 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
   if (_object_space == NULL)
     vm_exit_during_initialization("Could not allocate an old gen space");

-  object_space()->initialize(cmr, true);
+  object_space()->initialize(cmr,
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::Mangle);

   _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

@@ -232,6 +243,19 @@ bool PSOldGen::expand_by(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
+    if (ZapUnusedHeapArea) {
+      // We need to mangle the newly expanded area. The memregion spans
+      // end -> new_end; we assume that top -> end is already mangled.
+      // Do the mangling before post_resize() is called because
+      // the space is available for allocation after post_resize().
+      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
+      assert(object_space()->end() < virtual_space_high,
+             "Should be true before post_resize()");
+      MemRegion mangle_region(object_space()->end(), virtual_space_high);
+      // Note that the object space has not yet been updated to
+      // coincide with the new underlying virtual space.
+      SpaceMangler::mangle_region(mangle_region);
+    }
     post_resize();
     if (UsePerfData) {
       _space_counters->update_capacity();

@@ -348,16 +372,7 @@ void PSOldGen::post_resize() {
   start_array()->set_covered_region(new_memregion);
   Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

-  // Did we expand?
   HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
-  if (object_space()->end() < virtual_space_high) {
-    // We need to mangle the newly expanded area. The memregion spans
-    // end -> new_end, we assume that top -> end is already mangled.
-    // This cannot be safely tested for, as allocation may be taking
-    // place.
-    MemRegion mangle_region(object_space()->end(), virtual_space_high);
-    object_space()->mangle_region(mangle_region);
-  }

   // ALWAYS do this last!!
   object_space()->set_end(virtual_space_high);

@@ -462,3 +477,10 @@ void PSOldGen::verify_object_start_array() {
   VerifyObjectStartArrayClosure check( this, &_start_array );
   object_iterate(&check);
 }
+
+#ifndef PRODUCT
+void PSOldGen::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  object_space()->set_top_for_allocations();
+}
+#endif
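
Both this file and the young-generation code mangle newly committed virtual space right away, so later checks can assume every unused word carries the fill pattern. A minimal sketch of what a mangle-region helper does; the pattern value is illustrative:

#include <cstdint>

static const uintptr_t kBadHeapWord = 0xBAADBABE;  // illustrative fill value

static void mangle_words(uintptr_t* start, uintptr_t* end) {
  for (uintptr_t* p = start; p < end; ++p) {
    *p = kBadHeapWord;  // stale reads of supposedly-unused memory now stand out
  }
}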

View file

@@ -185,4 +185,8 @@ class PSOldGen : public CHeapObj {
   // Printing support
   virtual const char* name() const { return _name; }
+
+  // Debugging support
+  // Save the tops of all spaces for later use during mangling.
+  void record_spaces_top() PRODUCT_RETURN;
 };

View file

@@ -200,8 +200,8 @@ void PSParallelCompact::print_chunk_ranges()
   for (unsigned int id = 0; id < last_space_id; ++id) {
     const MutableSpace* space = _space_info[id].space();
     tty->print_cr("%u %s "
-                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " "
-                  SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10") " ",
+                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
+                  SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
                   id, space_names[id],
                   summary_data().addr_to_chunk_idx(space->bottom()),
                   summary_data().addr_to_chunk_idx(space->top()),

@@ -213,8 +213,8 @@ void PSParallelCompact::print_chunk_ranges()
 void
 print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
 {
-#define CHUNK_IDX_FORMAT  SIZE_FORMAT_W("7")
-#define CHUNK_DATA_FORMAT SIZE_FORMAT_W("5")
+#define CHUNK_IDX_FORMAT  SIZE_FORMAT_W(7)
+#define CHUNK_DATA_FORMAT SIZE_FORMAT_W(5)

   ParallelCompactData& sd = PSParallelCompact::summary_data();
   size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;

@@ -269,9 +269,9 @@ print_initial_summary_chunk(size_t i,
                             const ParallelCompactData::ChunkData* c,
                             bool newline = true)
 {
-  tty->print(SIZE_FORMAT_W("5") " " PTR_FORMAT " "
-             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " "
-             SIZE_FORMAT_W("5") " " SIZE_FORMAT_W("5") " %d",
+  tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
+             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
+             SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
              i, c->destination(),
              c->partial_obj_size(), c->live_obj_size(),
              c->data_size(), c->source_chunk(), c->destination_count());

@@ -326,7 +326,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
     }

     print_initial_summary_chunk(i, c, false);
-    tty->print_cr(" %12.10f " SIZE_FORMAT_W("10") " " SIZE_FORMAT_W("10"),
+    tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
                   reclaimed_ratio, dead_to_right, live_to_right);

     live_to_right -= c->data_size();

@@ -338,8 +338,8 @@ print_initial_summary_data(ParallelCompactData& summary_data,
     print_initial_summary_chunk(i, summary_data.chunk(i));
   }

-  tty->print_cr("max:  " SIZE_FORMAT_W("4") " d2r=" SIZE_FORMAT_W("10") " "
-                "l2r=" SIZE_FORMAT_W("10") " max_ratio=%14.12f",
+  tty->print_cr("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
+                "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
                 max_reclaimed_ratio_chunk, max_dead_to_right,
                 max_live_to_right, max_reclaimed_ratio);
 }
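
The call sites drop the quotes because the width macro now takes a bare token and splices it into the format string via stringization. A generic sketch of that preprocessor idiom (macro names here are illustrative, not HotSpot's SIZE_FORMAT_W definition):

#include <cstdio>

#define STRINGIZE_(x) #x
#define FMT_W(width) "%" STRINGIZE_(width) "lu"   // FMT_W(10) expands to "%10lu"

int main() {
  std::printf(FMT_W(10) "\n", 42ul);  // prints 42 right-aligned in a 10-column field
  return 0;
}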
@@ -1060,6 +1060,10 @@ void PSParallelCompact::post_compact()
   ref_processor()->enqueue_discovered_references(NULL);

+  if (ZapUnusedHeapArea) {
+    heap->gen_mangle_unused_area();
+  }
+
   // Update time of last GC
   reset_millis_since_last_gc();
 }

@@ -1119,8 +1123,8 @@ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
     HeapWord* chunk_destination = cp->destination();
     const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
     if (TraceParallelOldGCDensePrefix && Verbose) {
-      tty->print_cr("c#=" SIZE_FORMAT_W("04") " dst=" PTR_FORMAT " "
-                    "dp=" SIZE_FORMAT_W("08") " " "cdw=" SIZE_FORMAT_W("08"),
+      tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
+                    "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
                     sd.chunk(cp), chunk_destination,
                     dense_prefix, cur_deadwood);
     }

@@ -1145,7 +1149,7 @@ PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
         return dense_prefix;
       }
       if (TraceParallelOldGCDensePrefix && Verbose) {
-        tty->print_cr("backing up from c=" SIZE_FORMAT_W("4") " d2r=%10.8f "
+        tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
                       "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
                       prev_chunk_density_to_right);
       }

@@ -1182,7 +1186,7 @@ void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
   const size_t live_to_right = new_top - cp->destination();
   const size_t dead_to_right = space->top() - addr - live_to_right;

-  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W("05") " "
+  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
                 "spl=" SIZE_FORMAT " "
                 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
@@ -1522,48 +1526,53 @@ void
 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
 {
   assert(id < last_space_id, "id out of range");
+  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
+         "should have been set in summarize_spaces_quick()");

   const MutableSpace* space = _space_info[id].space();
-  HeapWord** new_top_addr = _space_info[id].new_top_addr();
-
-  HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
-  _space_info[id].set_dense_prefix(dense_prefix_end);
+  if (_space_info[id].new_top() != space->bottom()) {
+    HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
+    _space_info[id].set_dense_prefix(dense_prefix_end);

 #ifndef PRODUCT
-  if (TraceParallelOldGCDensePrefix) {
-    print_dense_prefix_stats("ratio", id, maximum_compaction, dense_prefix_end);
-    HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
-    print_dense_prefix_stats("density", id, maximum_compaction, addr);
-  }
+    if (TraceParallelOldGCDensePrefix) {
+      print_dense_prefix_stats("ratio", id, maximum_compaction,
+                               dense_prefix_end);
+      HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
+      print_dense_prefix_stats("density", id, maximum_compaction, addr);
+    }
 #endif  // #ifndef PRODUCT

-  // If dead space crosses the dense prefix boundary, it is (at least partially)
-  // filled with a dummy object, marked live and added to the summary data.
-  // This simplifies the copy/update phase and must be done before the final
-  // locations of objects are determined, to prevent leaving a fragment of dead
-  // space that is too small to fill with an object.
-  if (!maximum_compaction && dense_prefix_end != space->bottom()) {
-    fill_dense_prefix_end(id);
-  }
+    // If dead space crosses the dense prefix boundary, it is (at least
+    // partially) filled with a dummy object, marked live and added to the
+    // summary data.  This simplifies the copy/update phase and must be done
+    // before the final locations of objects are determined, to prevent leaving
+    // a fragment of dead space that is too small to fill with an object.
+    if (!maximum_compaction && dense_prefix_end != space->bottom()) {
+      fill_dense_prefix_end(id);
+    }

-  // Compute the destination of each Chunk, and thus each object.
-  _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
-  _summary_data.summarize(dense_prefix_end, space->end(),
-                          dense_prefix_end, space->top(),
-                          new_top_addr);
+    // Compute the destination of each Chunk, and thus each object.
+    _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
+    _summary_data.summarize(dense_prefix_end, space->end(),
+                            dense_prefix_end, space->top(),
+                            _space_info[id].new_top_addr());
+  }

   if (TraceParallelOldGCSummaryPhase) {
     const size_t chunk_size = ParallelCompactData::ChunkSize;
+    HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
     const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
-    const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(*new_top_addr);
+    HeapWord* const new_top = _space_info[id].new_top();
+    const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top);
     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
     tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
                   "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
                   "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
                   id, space->capacity_in_words(), dense_prefix_end,
                   dp_chunk, dp_words / chunk_size,
-                  cr_words / chunk_size, *new_top_addr);
+                  cr_words / chunk_size, new_top);
   }
 }
@@ -1632,7 +1641,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
       const size_t live = pointer_delta(_space_info[id].new_top(),
                                         space->bottom());
       const size_t available = pointer_delta(target_space_end, *new_top_addr);
-      if (live <= available) {
+      if (live > 0 && live <= available) {
         // All the live data will fit.
         if (TraceParallelOldGCSummaryPhase) {
           tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,

@@ -1642,16 +1651,18 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                space->bottom(), space->top(),
                                new_top_addr);

-        // Reset the new_top value for the space.
-        _space_info[id].set_new_top(space->bottom());
-
         // Clear the source_chunk field for each chunk in the space.
+        HeapWord* const new_top = _space_info[id].new_top();
+        HeapWord* const clear_end = _summary_data.chunk_align_up(new_top);
         ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
-        ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(space->top() - 1);
-        while (beg_chunk <= end_chunk) {
+        ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end);
+        while (beg_chunk < end_chunk) {
           beg_chunk->set_source_chunk(0);
           ++beg_chunk;
         }
+
+        // Reset the new_top value for the space.
+        _space_info[id].set_new_top(space->bottom());
       }
     }
@@ -1961,6 +1972,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();

+  if (ZapUnusedHeapArea) {
+    // Save information needed to minimize mangling
+    heap->record_gen_tops_before_GC();
+  }
+
   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;

   // Make sure data structures are sane, make the heap parsable, and do other

@@ -2129,17 +2145,19 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
       size_t max_eden_size = young_gen->max_size() -
         young_gen->from_space()->capacity_in_bytes() -
         young_gen->to_space()->capacity_in_bytes();
-      size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
-                                                 young_gen->eden_space()->used_in_bytes(),
-                                                 old_gen->used_in_bytes(),
-                                                 perm_gen->used_in_bytes(),
-                                                 young_gen->eden_space()->capacity_in_bytes(),
-                                                 old_gen->max_gen_size(),
-                                                 max_eden_size,
-                                                 true /* full gc*/,
-                                                 gc_cause);
+      size_policy->compute_generation_free_space(
+        young_gen->used_in_bytes(),
+        young_gen->eden_space()->used_in_bytes(),
+        old_gen->used_in_bytes(),
+        perm_gen->used_in_bytes(),
+        young_gen->eden_space()->capacity_in_bytes(),
+        old_gen->max_gen_size(),
+        max_eden_size,
+        true /* full gc*/,
+        gc_cause);

-      heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
+      heap->resize_old_gen(
+        size_policy->calculated_old_free_size_in_bytes());

       // Don't resize the young generation at a major collection.  A
       // desired young generation size may have been calculated but

@@ -2212,6 +2230,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     perm_gen->verify_object_start_array();
   }

+  if (ZapUnusedHeapArea) {
+    old_gen->object_space()->check_mangled_unused_area_complete();
+    perm_gen->object_space()->check_mangled_unused_area_complete();
+  }
+
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

   collection_exit.update();

@@ -2499,7 +2522,7 @@ void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
     if (TraceParallelOldGCCompactionPhase && Verbose) {
       const size_t count_mod_8 = fillable_chunks & 7;
       if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
-      gclog_or_tty->print(" " SIZE_FORMAT_W("7"), cur);
+      gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
       if (count_mod_8 == 7) gclog_or_tty->cr();
     }

View file

@@ -716,6 +716,99 @@ class BitBlockUpdateClosure: public ParMarkBitMapClosure {
   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
 };

+// The UseParallelOldGC collector is a stop-the-world garbage
+// collector that does parts of the collection using parallel threads.
+// The collection includes the tenured generation and the young
+// generation.  The permanent generation is collected at the same
+// time as the other two generations but the permanent generation
+// is collected by a single GC thread.  The permanent generation is
+// collected serially because of the requirement that during the
+// processing of a klass AAA, any objects referenced by AAA must
+// already have been processed.  This requirement is enforced by
+// a left (lower address) to right (higher address) sliding compaction.
+//
+// There are four phases of the collection.
+//
+//      - marking phase
+//      - summary phase
+//      - compacting phase
+//      - clean up phase
+//
+// Roughly speaking these phases correspond, respectively, to
+//      - mark all the live objects
+//      - calculate the destination of each object at the end of the collection
+//      - move the objects to their destination
+//      - update some references and reinitialize some variables
+//
+// These phases are invoked in PSParallelCompact::invoke_no_policy().
+// The marking phase is implemented in PSParallelCompact::marking_phase()
+// and does a complete marking of the heap.
+// The summary phase is implemented in PSParallelCompact::summary_phase().
+// The move and update phase is implemented in PSParallelCompact::compact().
+//
+// A space that is being collected is divided into chunks and with
+// each chunk is associated an object of type ParallelCompactData.
+// Each chunk is of a fixed size and typically will contain more than
+// 1 object and may have parts of objects at the front and back of the
+// chunk.
+//
+// chunk            -----+---------------------+----------
+// objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
+//
+// The marking phase does a complete marking of all live objects in the
+// heap.  The marking also compiles the size of the data for
+// all live objects covered by the chunk.  This size includes the
+// part of any live object spanning onto the chunk (part of AAA
+// if it is live) from the front, all live objects contained in the chunk
+// (BBB and/or CCC if they are live), and the part of any live objects
+// covered by the chunk that extends off the chunk (part of DDD if it is
+// live).  The marking phase uses multiple GC threads and marking is
+// done in a bit array of type ParMarkBitMap.  The marking of the
+// bit map is done atomically as is the accumulation of the size of the
+// live objects covered by a chunk.
+//
+// The summary phase calculates the total live data to the left of
+// each chunk XXX.  Based on that total and the bottom of the space,
+// it can calculate the starting location of the live data in XXX.
+// The summary phase calculates for each chunk XXX quantities such as
+//
+//      - the amount of live data at the beginning of a chunk from an object
+//        entering the chunk.
+//      - the location of the first live data on the chunk
+//      - a count of the number of chunks receiving live data from XXX.
+//
+// See ParallelCompactData for precise details.  The summary phase also
+// calculates the dense prefix for the compaction.  The dense prefix
+// is a portion at the beginning of the space that is not moved.  The
+// objects in the dense prefix do need to have their object references
+// updated.  See method summarize_dense_prefix().
+//
+// The summary phase is done using 1 GC thread.
+//
+// The compaction phase moves objects to their new location and updates
+// all references in the object.
+//
+// A current exception is that objects that cross a chunk boundary
+// are moved but do not have their references updated.  References are
+// not updated because it cannot easily be determined if the klass
+// pointer KKK for the object AAA has been updated.  KKK likely resides
+// in a chunk to the left of the chunk containing AAA.  These AAA's
+// have their references updated at the end in a clean up phase.
+// See the method PSParallelCompact::update_deferred_objects().  An
+// alternate strategy is being investigated for this deferral of updating.
+//
+// Compaction is done on a chunk basis.  A chunk that is ready to be
+// filled is put on a ready list and GC threads take chunks off the list
+// and fill them.  A chunk is ready to be filled if it is empty of live
+// objects.  Such a chunk may have been initially empty (only contained
+// dead objects) or may have had all its live objects copied out already.
+// A chunk that compacts into itself is also ready for filling.  The
+// ready list is initially filled with empty chunks and chunks compacting
+// into themselves.  There is always at least 1 chunk that can be put on
+// the ready list.  The chunks are atomically added and removed from
+// the ready list.
+//
 class PSParallelCompact : AllStatic {
  public:
   // Convenient access to type names.
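
The block comment above describes the summary phase as a running total of live data to the left of each chunk. A toy model of that core calculation, independent of HotSpot's ParallelCompactData (all names below are illustrative):

#include <cstddef>
#include <vector>

struct ChunkSummary {
  size_t live_words;   // live data attributed to this chunk by the marking phase
  size_t destination;  // word offset where this chunk's live data lands after compaction
};

// Left-to-right sliding compaction: each chunk's destination is the sum of the
// live data in all chunks to its left.
void summarize(std::vector<ChunkSummary>& chunks) {
  size_t dest = 0;
  for (ChunkSummary& c : chunks) {
    c.destination = dest;
    dest += c.live_words;
  }
}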

View file

@@ -265,6 +265,11 @@ bool PSScavenge::invoke_no_policy() {
     young_gen->eden_space()->accumulate_statistics();
   }

+  if (ZapUnusedHeapArea) {
+    // Save information needed to minimize mangling
+    heap->record_gen_tops_before_GC();
+  }
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }

@@ -315,7 +320,7 @@ bool PSScavenge::invoke_no_policy() {
   if (!ScavengeWithObjectsInToSpace) {
     assert(young_gen->to_space()->is_empty(),
            "Attempt to scavenge with live objects in to_space");
-    young_gen->to_space()->clear();
+    young_gen->to_space()->clear(SpaceDecorator::Mangle);
   } else if (ZapUnusedHeapArea) {
     young_gen->to_space()->mangle_unused_area();
   }

@@ -437,8 +442,10 @@ bool PSScavenge::invoke_no_policy() {
     if (!promotion_failure_occurred) {
       // Swap the survivor spaces.
-      young_gen->eden_space()->clear();
-      young_gen->from_space()->clear();
+      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
+      young_gen->from_space()->clear(SpaceDecorator::Mangle);
       young_gen->swap_spaces();

       size_t survived = young_gen->from_space()->used_in_bytes();

@@ -600,6 +607,12 @@ bool PSScavenge::invoke_no_policy() {
     Universe::print_heap_after_gc();
   }

+  if (ZapUnusedHeapArea) {
+    young_gen->eden_space()->check_mangled_unused_area_complete();
+    young_gen->from_space()->check_mangled_unused_area_complete();
+    young_gen->to_space()->check_mangled_unused_area_complete();
+  }
+
   scavenge_exit.update();

   if (PrintGCTaskTimeStamps) {
View file

@ -36,7 +36,7 @@ PSYoungGen::PSYoungGen(size_t initial_size,
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) { void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
assert(_init_gen_size != 0, "Should have a finite size"); assert(_init_gen_size != 0, "Should have a finite size");
_virtual_space = new PSVirtualSpace(rs, alignment); _virtual_space = new PSVirtualSpace(rs, alignment);
if (!_virtual_space->expand_by(_init_gen_size)) { if (!virtual_space()->expand_by(_init_gen_size)) {
vm_exit_during_initialization("Could not reserve enough space for " vm_exit_during_initialization("Could not reserve enough space for "
"object heap"); "object heap");
} }
@ -49,13 +49,20 @@ void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
void PSYoungGen::initialize_work() { void PSYoungGen::initialize_work() {
_reserved = MemRegion((HeapWord*)_virtual_space->low_boundary(), _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
(HeapWord*)_virtual_space->high_boundary()); (HeapWord*)virtual_space()->high_boundary());
MemRegion cmr((HeapWord*)_virtual_space->low(), MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)_virtual_space->high()); (HeapWord*)virtual_space()->high());
Universe::heap()->barrier_set()->resize_covered_region(cmr); Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
SpaceMangler::mangle_region(cmr);
}
if (UseNUMA) { if (UseNUMA) {
_eden_space = new MutableNUMASpace(); _eden_space = new MutableNUMASpace();
} else { } else {
@ -89,7 +96,7 @@ void PSYoungGen::initialize_work() {
// Compute maximum space sizes for performance counters // Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t alignment = heap->intra_heap_alignment(); size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->reserved_size(); size_t size = virtual_space()->reserved_size();
size_t max_survivor_size; size_t max_survivor_size;
size_t max_eden_size; size_t max_eden_size;
@ -142,7 +149,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
// Compute sizes // Compute sizes
size_t alignment = heap->intra_heap_alignment(); size_t alignment = heap->intra_heap_alignment();
size_t size = _virtual_space->committed_size(); size_t size = virtual_space()->committed_size();
size_t survivor_size = size / InitialSurvivorRatio; size_t survivor_size = size / InitialSurvivorRatio;
survivor_size = align_size_down(survivor_size, alignment); survivor_size = align_size_down(survivor_size, alignment);
@ -164,18 +171,18 @@ void PSYoungGen::compute_initial_space_boundaries() {
} }
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) { void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
assert(eden_size < _virtual_space->committed_size(), "just checking"); assert(eden_size < virtual_space()->committed_size(), "just checking");
assert(eden_size > 0 && survivor_size > 0, "just checking"); assert(eden_size > 0 && survivor_size > 0, "just checking");
// Initial layout is Eden, to, from. After swapping survivor spaces, // Initial layout is Eden, to, from. After swapping survivor spaces,
// that leaves us with Eden, from, to, which is step one in our two // that leaves us with Eden, from, to, which is step one in our two
// step resize-with-live-data procedure. // step resize-with-live-data procedure.
char *eden_start = _virtual_space->low(); char *eden_start = virtual_space()->low();
char *to_start = eden_start + eden_size; char *to_start = eden_start + eden_size;
char *from_start = to_start + survivor_size; char *from_start = to_start + survivor_size;
char *from_end = from_start + survivor_size; char *from_end = from_start + survivor_size;
assert(from_end == _virtual_space->high(), "just checking"); assert(from_end == virtual_space()->high(), "just checking");
assert(is_object_aligned((intptr_t)eden_start), "checking alignment"); assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
assert(is_object_aligned((intptr_t)to_start), "checking alignment"); assert(is_object_aligned((intptr_t)to_start), "checking alignment");
assert(is_object_aligned((intptr_t)from_start), "checking alignment"); assert(is_object_aligned((intptr_t)from_start), "checking alignment");
@ -184,9 +191,9 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start); MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end); MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
eden_space()->initialize(eden_mr, true); eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
to_space()->initialize(to_mr , true); to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
from_space()->initialize(from_mr, true); from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
} }
#ifndef PRODUCT #ifndef PRODUCT
@ -207,7 +214,7 @@ void PSYoungGen::space_invariants() {
char* to_start = (char*)to_space()->bottom(); char* to_start = (char*)to_space()->bottom();
char* to_end = (char*)to_space()->end(); char* to_end = (char*)to_space()->end();
guarantee(eden_start >= _virtual_space->low(), "eden bottom"); guarantee(eden_start >= virtual_space()->low(), "eden bottom");
guarantee(eden_start < eden_end, "eden space consistency"); guarantee(eden_start < eden_end, "eden space consistency");
guarantee(from_start < from_end, "from space consistency"); guarantee(from_start < from_end, "from space consistency");
guarantee(to_start < to_end, "to space consistency"); guarantee(to_start < to_end, "to space consistency");
@ -217,29 +224,29 @@ void PSYoungGen::space_invariants() {
// Eden, from, to // Eden, from, to
guarantee(eden_end <= from_start, "eden/from boundary"); guarantee(eden_end <= from_start, "eden/from boundary");
guarantee(from_end <= to_start, "from/to boundary"); guarantee(from_end <= to_start, "from/to boundary");
guarantee(to_end <= _virtual_space->high(), "to end"); guarantee(to_end <= virtual_space()->high(), "to end");
} else { } else {
// Eden, to, from // Eden, to, from
guarantee(eden_end <= to_start, "eden/to boundary"); guarantee(eden_end <= to_start, "eden/to boundary");
guarantee(to_end <= from_start, "to/from boundary"); guarantee(to_end <= from_start, "to/from boundary");
guarantee(from_end <= _virtual_space->high(), "from end"); guarantee(from_end <= virtual_space()->high(), "from end");
} }
// More checks that the virtual space is consistent with the spaces // More checks that the virtual space is consistent with the spaces
assert(_virtual_space->committed_size() >= assert(virtual_space()->committed_size() >=
(eden_space()->capacity_in_bytes() + (eden_space()->capacity_in_bytes() +
to_space()->capacity_in_bytes() + to_space()->capacity_in_bytes() +
from_space()->capacity_in_bytes()), "Committed size is inconsistent"); from_space()->capacity_in_bytes()), "Committed size is inconsistent");
assert(_virtual_space->committed_size() <= _virtual_space->reserved_size(), assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
"Space invariant"); "Space invariant");
char* eden_top = (char*)eden_space()->top(); char* eden_top = (char*)eden_space()->top();
char* from_top = (char*)from_space()->top(); char* from_top = (char*)from_space()->top();
char* to_top = (char*)to_space()->top(); char* to_top = (char*)to_space()->top();
assert(eden_top <= _virtual_space->high(), "eden top"); assert(eden_top <= virtual_space()->high(), "eden top");
assert(from_top <= _virtual_space->high(), "from top"); assert(from_top <= virtual_space()->high(), "from top");
assert(to_top <= _virtual_space->high(), "to top"); assert(to_top <= virtual_space()->high(), "to top");
_virtual_space->verify(); virtual_space()->verify();
} }
#endif #endif
@ -265,8 +272,8 @@ void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) { bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
const size_t alignment = _virtual_space->alignment(); const size_t alignment = virtual_space()->alignment();
size_t orig_size = _virtual_space->committed_size(); size_t orig_size = virtual_space()->committed_size();
bool size_changed = false; bool size_changed = false;
// There used to be this guarantee there. // There used to be this guarantee there.
@ -288,10 +295,18 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// Grow the generation // Grow the generation
size_t change = desired_size - orig_size; size_t change = desired_size - orig_size;
assert(change % alignment == 0, "just checking"); assert(change % alignment == 0, "just checking");
if (!_virtual_space->expand_by(change)) { HeapWord* prev_high = (HeapWord*) virtual_space()->high();
if (!virtual_space()->expand_by(change)) {
return false; // Error if we fail to resize! return false; // Error if we fail to resize!
} }
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_high = (HeapWord*) virtual_space()->high();
MemRegion mangle_region(prev_high, new_high);
SpaceMangler::mangle_region(mangle_region);
}
size_changed = true; size_changed = true;
} else if (desired_size < orig_size) { } else if (desired_size < orig_size) {
size_t desired_change = orig_size - desired_size; size_t desired_change = orig_size - desired_size;
@ -321,19 +336,95 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
post_resize(); post_resize();
if (Verbose && PrintGC) { if (Verbose && PrintGC) {
size_t current_size = _virtual_space->committed_size(); size_t current_size = virtual_space()->committed_size();
gclog_or_tty->print_cr("PSYoung generation size changed: " gclog_or_tty->print_cr("PSYoung generation size changed: "
SIZE_FORMAT "K->" SIZE_FORMAT "K", SIZE_FORMAT "K->" SIZE_FORMAT "K",
orig_size/K, current_size/K); orig_size/K, current_size/K);
} }
} }
guarantee(eden_plus_survivors <= _virtual_space->committed_size() || guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
_virtual_space->committed_size() == max_size(), "Sanity"); virtual_space()->committed_size() == max_size(), "Sanity");
return true; return true;
} }
#ifndef PRODUCT
// In the NUMA case eden is not mangled, so a survivor space
// moving into a region previously occupied by a survivor
// space may find an unmangled region. Also in the PS case eden,
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces). Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
MemRegion s1MR,
MutableSpace* s2,
MemRegion s2MR) {
// Check eden and gap between eden and from-space, in deciding
// what to mangle in from-space. Check the gap between from-space
// and to-space when deciding what to mangle.
//
// +--------+ +----+ +---+
// | eden | |s1 | |s2 |
// +--------+ +----+ +---+
// +-------+ +-----+
// |s1MR | |s2MR |
// +-------+ +-----+
// All of survivor-space is properly mangled so find the
// upper bound on the mangling for any portion above current s1.
HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
MemRegion delta1_left;
if (s1MR.start() < delta_end) {
delta1_left = MemRegion(s1MR.start(), delta_end);
s1->mangle_region(delta1_left);
}
// Find any portion to the right of the current s1.
HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
MemRegion delta1_right;
if (delta_start < s1MR.end()) {
delta1_right = MemRegion(delta_start, s1MR.end());
s1->mangle_region(delta1_right);
}
// Similarly for the second survivor space except that
// any of the new region that overlaps with the current
// region of the first survivor space has already been
// mangled.
delta_end = MIN2(s2->bottom(), s2MR.end());
delta_start = MAX2(s2MR.start(), s1->end());
MemRegion delta2_left;
if (s2MR.start() < delta_end) {
delta2_left = MemRegion(s2MR.start(), delta_end);
s2->mangle_region(delta2_left);
}
delta_start = MAX2(s2->end(), s2MR.start());
MemRegion delta2_right;
if (delta_start < s2MR.end()) {
delta2_right = MemRegion(delta_start, s2MR.end());
s2->mangle_region(delta2_right);
}
if (TraceZapUnusedHeapArea) {
// s1
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta1_left.start(), delta1_left.end(), delta1_right.start(),
delta1_right.end());
// s2
gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
"New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
delta2_left.start(), delta2_left.end(), delta2_right.start(),
delta2_right.end());
}
}
#endif // NOT PRODUCT
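The geometry in mangle_survivors reduces to interval arithmetic: for a survivor space currently occupying [bottom, end) and about to occupy a new region, only the parts of the new region that fall outside the current space still need mangling. The helper below is a self-contained restatement of that computation with hypothetical types (it is not the MemRegion API); for example, a survivor at [0x2000, 0x3000) moving to [0x1000, 0x4000) yields the two pieces [0x1000, 0x2000) and [0x3000, 0x4000).

#include <algorithm>
#include <cstdint>
#include <vector>

using Addr = std::uintptr_t;
struct Region { Addr start, end; };   // half-open interval [start, end)

// Return the pieces of new_region not covered by the current space; these
// are the only parts that still need to be mangled.
std::vector<Region> regions_to_mangle(Region current, Region new_region) {
  std::vector<Region> out;
  Addr left_end = std::min(current.start, new_region.end);
  if (new_region.start < left_end)
    out.push_back({new_region.start, left_end});    // below the current bottom
  Addr right_start = std::max(current.end, new_region.start);
  if (right_start < new_region.end)
    out.push_back({right_start, new_region.end});   // above the current end
  return out;
}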
void PSYoungGen::resize_spaces(size_t requested_eden_size, void PSYoungGen::resize_spaces(size_t requested_eden_size,
size_t requested_survivor_size) { size_t requested_survivor_size) {
@ -396,9 +487,11 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
const bool maintain_minimum = const bool maintain_minimum =
(requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
bool eden_from_to_order = from_start < to_start;
// Check whether from space is below to space // Check whether from space is below to space
if (from_start < to_start) { if (eden_from_to_order) {
// Eden, from, to // Eden, from, to
eden_from_to_order = true;
if (PrintAdaptiveSizePolicy && Verbose) { if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" Eden, from, to:"); gclog_or_tty->print_cr(" Eden, from, to:");
} }
@ -435,7 +528,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// extra calculations. // extra calculations.
// First calculate an optimal to-space // First calculate an optimal to-space
to_end = (char*)_virtual_space->high(); to_end = (char*)virtual_space()->high();
to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
sizeof(char)); sizeof(char));
@ -491,7 +584,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
// to space as if we were able to resize from space, even though from // to space as if we were able to resize from space, even though from
// space is not modified. // space is not modified.
// Giving eden priority was tried and gave poorer performance. // Giving eden priority was tried and gave poorer performance.
to_end = (char*)pointer_delta(_virtual_space->high(), to_end = (char*)pointer_delta(virtual_space()->high(),
(char*)requested_survivor_size, (char*)requested_survivor_size,
sizeof(char)); sizeof(char));
to_end = MIN2(to_end, from_start); to_end = MIN2(to_end, from_start);
@ -560,9 +653,45 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
size_t old_from = from_space()->capacity_in_bytes(); size_t old_from = from_space()->capacity_in_bytes();
size_t old_to = to_space()->capacity_in_bytes(); size_t old_to = to_space()->capacity_in_bytes();
eden_space()->initialize(edenMR, true); if (ZapUnusedHeapArea) {
to_space()->initialize(toMR , true); // NUMA is a special case because a numa space is not mangled
from_space()->initialize(fromMR, false); // Note, not cleared! // in order to not prematurely bind its address to memory to
// the wrong memory (i.e., don't want the GC thread to first
// touch the memory). The survivor spaces are not numa
// spaces and are mangled.
if (UseNUMA) {
if (eden_from_to_order) {
mangle_survivors(from_space(), fromMR, to_space(), toMR);
} else {
mangle_survivors(to_space(), toMR, from_space(), fromMR);
}
}
// If not mangling the spaces, do some checking to verify that
// the spaces are already mangled.
// The spaces should be correctly mangled at this point so
// do some checking here. Note that they are not being mangled
// in the calls to initialize().
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into an area
// covered by another space and a failure of the check may
// not correctly indicate which space is not properly mangled.
HeapWord* limit = (HeapWord*) virtual_space()->high();
eden_space()->check_mangled_unused_area(limit);
from_space()->check_mangled_unused_area(limit);
to_space()->check_mangled_unused_area(limit);
}
// When an existing space is being initialized, it is not
// mangled because the space has been previously mangled.
eden_space()->initialize(edenMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
to_space()->initialize(toMR,
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
from_space()->initialize(fromMR,
SpaceDecorator::DontClear,
SpaceDecorator::DontMangle);
assert(from_space()->top() == old_from_top, "from top changed!"); assert(from_space()->top() == old_from_top, "from top changed!");
@ -671,7 +800,7 @@ void PSYoungGen::print_on(outputStream* st) const {
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity_in_bytes()/K, used_in_bytes()/K); capacity_in_bytes()/K, used_in_bytes()/K);
} }
_virtual_space->print_space_boundaries_on(st); virtual_space()->print_space_boundaries_on(st);
st->print(" eden"); eden_space()->print_on(st); st->print(" eden"); eden_space()->print_on(st);
st->print(" from"); from_space()->print_on(st); st->print(" from"); from_space()->print_on(st);
st->print(" to "); to_space()->print_on(st); st->print(" to "); to_space()->print_on(st);
@ -774,7 +903,9 @@ void PSYoungGen::reset_survivors_after_shrink() {
// Was there a shrink of the survivor space? // Was there a shrink of the survivor space?
if (new_end < space_shrinking->end()) { if (new_end < space_shrinking->end()) {
MemRegion mr(space_shrinking->bottom(), new_end); MemRegion mr(space_shrinking->bottom(), new_end);
space_shrinking->initialize(mr, false /* clear */); space_shrinking->initialize(mr,
SpaceDecorator::DontClear,
SpaceDecorator::Mangle);
} }
} }
@ -809,3 +940,12 @@ void PSYoungGen::verify(bool allow_dirty) {
from_space()->verify(allow_dirty); from_space()->verify(allow_dirty);
to_space()->verify(allow_dirty); to_space()->verify(allow_dirty);
} }
#ifndef PRODUCT
void PSYoungGen::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
eden_space()->set_top_for_allocations();
from_space()->set_top_for_allocations();
to_space()->set_top_for_allocations();
}
#endif

View file

@ -179,4 +179,12 @@ class PSYoungGen : public CHeapObj {
// Space boundary invariant checker // Space boundary invariant checker
void space_invariants() PRODUCT_RETURN; void space_invariants() PRODUCT_RETURN;
// Helper for mangling survivor spaces.
void mangle_survivors(MutableSpace* s1,
MemRegion s1MR,
MutableSpace* s2,
MemRegion s2MR) PRODUCT_RETURN;
void record_spaces_top() PRODUCT_RETURN;
}; };

View file

@ -58,6 +58,12 @@ class AdaptiveWeightedAverage : public CHeapObj {
_average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) { _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
} }
void clear() {
_average = 0;
_sample_count = 0;
_last_sample = 0;
}
// Accessors // Accessors
float average() const { return _average; } float average() const { return _average; }
unsigned weight() const { return _weight; } unsigned weight() const { return _weight; }
@ -115,6 +121,12 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
float deviation() const { return _deviation; } float deviation() const { return _deviation; }
unsigned padding() const { return _padding; } unsigned padding() const { return _padding; }
void clear() {
AdaptiveWeightedAverage::clear();
_padded_avg = 0;
_deviation = 0;
}
// Override // Override
void sample(float new_sample); void sample(float new_sample);
}; };
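The clear() methods added above reset the adaptive averages when the lgroup topology changes, so stale allocation rates do not feed the next chunk-size decision. As a rough model only (the actual update formula lives in gcUtil.cpp, which this diff does not touch), a weighted average and its reset look like:

// Minimal sketch of a weighted average with a reset, analogous in spirit to
// AdaptiveWeightedAverage::clear(); the real class also tracks the sample
// count and, in the padded variant, a deviation and padded average.
class WeightedAverage {
  float _average = 0.0f;
  unsigned _weight;                 // percentage given to each new sample
public:
  explicit WeightedAverage(unsigned weight) : _weight(weight) {}
  void sample(float v) {
    _average = ((100.0f - _weight) * _average + _weight * v) / 100.0f;
  }
  float average() const { return _average; }
  void clear() { _average = 0.0f; }  // forget history after a topology change
};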

View file

@ -42,19 +42,31 @@ MutableNUMASpace::~MutableNUMASpace() {
delete lgrp_spaces(); delete lgrp_spaces();
} }
#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() { void MutableNUMASpace::mangle_unused_area() {
for (int i = 0; i < lgrp_spaces()->length(); i++) { // This method should do nothing.
LGRPSpace *ls = lgrp_spaces()->at(i); // It can be called on a numa space during a full compaction.
MutableSpace *s = ls->space();
if (!os::numa_has_static_binding()) {
HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
if (top < s->end()) {
ls->add_invalid_region(MemRegion(top, s->end()));
}
}
s->mangle_unused_area();
}
} }
void MutableNUMASpace::mangle_unused_area_complete() {
// This method should do nothing.
// It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
// This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
// This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
// This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
// This method should do nothing.
}
#endif // NOT_PRODUCT
// There may be unallocated holes in the middle chunks // There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability. // that should be filled with dead objects to ensure parseability.
@ -129,7 +141,20 @@ size_t MutableNUMASpace::free_in_words() const {
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
guarantee(thr != NULL, "No thread"); guarantee(thr != NULL, "No thread");
int lgrp_id = thr->lgrp_id(); int lgrp_id = thr->lgrp_id();
assert(lgrp_id != -1, "No lgrp_id set"); if (lgrp_id == -1) {
// This case can occur after the topology of the system has
// changed. Threads can change their location; the new home
// group will be determined during the first allocation
// attempt. For now we can safely assume that all spaces
// have equal size because the whole space will be reinitialized.
if (lgrp_spaces()->length() > 0) {
return capacity_in_bytes() / lgrp_spaces()->length();
} else {
assert(false, "There should be at least one locality group");
return 0;
}
}
// That's the normal case, where we know the locality group of the thread.
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
if (i == -1) { if (i == -1) {
return 0; return 0;
@ -138,9 +163,17 @@ size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
} }
size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
// Please see the comments for tlab_capacity().
guarantee(thr != NULL, "No thread"); guarantee(thr != NULL, "No thread");
int lgrp_id = thr->lgrp_id(); int lgrp_id = thr->lgrp_id();
assert(lgrp_id != -1, "No lgrp_id set"); if (lgrp_id == -1) {
if (lgrp_spaces()->length() > 0) {
return free_in_bytes() / lgrp_spaces()->length();
} else {
assert(false, "There should be at least one locality group");
return 0;
}
}
int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
if (i == -1) { if (i == -1) {
return 0; return 0;
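The fallback in both methods above is an even split: a thread whose locality group is not known yet is quoted 1/Nth of the whole space, so with, say, a 512M eden and four lgroups it sees 128M. A trivial restatement of that rule (hypothetical helper, not the HotSpot method):

#include <cassert>
#include <cstddef>

// Even split used while a thread's home locality group is still unknown.
std::size_t share_for_thread(int lgrp_id, std::size_t total_bytes,
                             int lgrp_count, std::size_t own_chunk_bytes) {
  if (lgrp_id == -1) {
    assert(lgrp_count > 0 && "There should be at least one locality group");
    return total_bytes / static_cast<std::size_t>(lgrp_count);
  }
  return own_chunk_bytes;   // normal case: the thread's own chunk
}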
@ -238,12 +271,20 @@ void MutableNUMASpace::free_region(MemRegion mr) {
void MutableNUMASpace::update() { void MutableNUMASpace::update() {
if (update_layout(false)) { if (update_layout(false)) {
// If the topology has changed, make all chunks zero-sized. // If the topology has changed, make all chunks zero-sized.
// And clear the alloc-rate statistics.
// In future we may want to handle this more gracefully in order
// to avoid the reallocation of the pages as much as possible.
for (int i = 0; i < lgrp_spaces()->length(); i++) { for (int i = 0; i < lgrp_spaces()->length(); i++) {
MutableSpace *s = lgrp_spaces()->at(i)->space(); LGRPSpace *ls = lgrp_spaces()->at(i);
MutableSpace *s = ls->space();
s->set_end(s->bottom()); s->set_end(s->bottom());
s->set_top(s->bottom()); s->set_top(s->bottom());
ls->clear_alloc_rate();
} }
initialize(region(), true); // A NUMA space is never mangled
initialize(region(),
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
} else { } else {
bool should_initialize = false; bool should_initialize = false;
if (!os::numa_has_static_binding()) { if (!os::numa_has_static_binding()) {
@ -257,7 +298,10 @@ void MutableNUMASpace::update() {
if (should_initialize || if (should_initialize ||
(UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) { (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
initialize(region(), true); // A NUMA space is never mangled
initialize(region(),
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
} }
} }
@ -448,14 +492,17 @@ void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersecti
} }
} }
void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) { void MutableNUMASpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
assert(clear_space, "Reallocation will destroy data!"); assert(clear_space, "Reallocation will destroy data!");
assert(lgrp_spaces()->length() > 0, "There should be at least one space"); assert(lgrp_spaces()->length() > 0, "There should be at least one space");
MemRegion old_region = region(), new_region; MemRegion old_region = region(), new_region;
set_bottom(mr.start()); set_bottom(mr.start());
set_end(mr.end()); set_end(mr.end());
MutableSpace::set_top(bottom()); // Must always clear the space
clear(SpaceDecorator::DontMangle);
// Compute chunk sizes // Compute chunk sizes
size_t prev_page_size = page_size(); size_t prev_page_size = page_size();
@ -586,10 +633,8 @@ void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
bias_region(top_region, ls->lgrp_id()); bias_region(top_region, ls->lgrp_id());
} }
// If we clear the region, we would mangle it in debug. That would cause page // Clear space (set top = bottom) but never mangle.
// allocation in a different place. Hence setting the top directly. s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle);
s->initialize(new_region, false);
s->set_top(s->bottom());
set_adaptation_cycles(samples_count()); set_adaptation_cycles(samples_count());
} }
@ -641,10 +686,12 @@ void MutableNUMASpace::set_top(HeapWord* value) {
MutableSpace::set_top(value); MutableSpace::set_top(value);
} }
void MutableNUMASpace::clear() { void MutableNUMASpace::clear(bool mangle_space) {
MutableSpace::set_top(bottom()); MutableSpace::set_top(bottom());
for (int i = 0; i < lgrp_spaces()->length(); i++) { for (int i = 0; i < lgrp_spaces()->length(); i++) {
lgrp_spaces()->at(i)->space()->clear(); // Never mangle NUMA spaces because the mangling will
// bind the memory to a possibly unwanted lgroup.
lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
} }
} }

View file

@ -112,6 +112,7 @@ class MutableNUMASpace : public MutableSpace {
int lgrp_id() const { return _lgrp_id; } int lgrp_id() const { return _lgrp_id; }
MutableSpace* space() const { return _space; } MutableSpace* space() const { return _space; }
AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; } AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; }
void clear_alloc_rate() { _alloc_rate->clear(); }
SpaceStats* space_stats() { return &_space_stats; } SpaceStats* space_stats() { return &_space_stats; }
void clear_space_stats() { _space_stats = SpaceStats(); } void clear_space_stats() { _space_stats = SpaceStats(); }
@ -171,14 +172,21 @@ class MutableNUMASpace : public MutableSpace {
MutableNUMASpace(); MutableNUMASpace();
virtual ~MutableNUMASpace(); virtual ~MutableNUMASpace();
// Space initialization. // Space initialization.
virtual void initialize(MemRegion mr, bool clear_space); virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Update space layout if necessary. Do all adaptive resizing job. // Update space layout if necessary. Do all adaptive resizing job.
virtual void update(); virtual void update();
// Update allocation rate averages. // Update allocation rate averages.
virtual void accumulate_statistics(); virtual void accumulate_statistics();
virtual void clear(); virtual void clear(bool mangle_space);
virtual void mangle_unused_area(); virtual void mangle_unused_area() PRODUCT_RETURN;
virtual void mangle_unused_area_complete() PRODUCT_RETURN;
virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
virtual void set_top_for_allocations() PRODUCT_RETURN;
virtual void ensure_parsability(); virtual void ensure_parsability();
virtual size_t used_in_words() const; virtual size_t used_in_words() const;
virtual size_t free_in_words() const; virtual size_t free_in_words() const;

View file

@ -25,7 +25,17 @@
# include "incls/_precompiled.incl" # include "incls/_precompiled.incl"
# include "incls/_mutableSpace.cpp.incl" # include "incls/_mutableSpace.cpp.incl"
void MutableSpace::initialize(MemRegion mr, bool clear_space) { MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) {
_mangler = new MutableSpaceMangler(this);
}
MutableSpace::~MutableSpace() {
delete _mangler;
}
void MutableSpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
HeapWord* bottom = mr.start(); HeapWord* bottom = mr.start();
HeapWord* end = mr.end(); HeapWord* end = mr.end();
@ -34,14 +44,51 @@ void MutableSpace::initialize(MemRegion mr, bool clear_space) {
set_bottom(bottom); set_bottom(bottom);
set_end(end); set_end(end);
if (clear_space) clear(); if (clear_space) {
clear(mangle_space);
}
} }
void MutableSpace::clear() { void MutableSpace::clear(bool mangle_space) {
set_top(bottom()); set_top(bottom());
if (ZapUnusedHeapArea) mangle_unused_area(); if (ZapUnusedHeapArea && mangle_space) {
mangle_unused_area();
}
} }
#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
mangler()->check_mangled_unused_area(limit);
}
void MutableSpace::check_mangled_unused_area_complete() {
mangler()->check_mangled_unused_area_complete();
}
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
mangler()->mangle_unused_area();
}
void MutableSpace::mangle_unused_area_complete() {
mangler()->mangle_unused_area_complete();
}
void MutableSpace::mangle_region(MemRegion mr) {
SpaceMangler::mangle_region(mr);
}
void MutableSpace::set_top_for_allocations(HeapWord* v) {
mangler()->set_top_for_allocations(v);
}
void MutableSpace::set_top_for_allocations() {
mangler()->set_top_for_allocations(top());
}
#endif
// This version requires locking. */ // This version requires locking. */
HeapWord* MutableSpace::allocate(size_t size) { HeapWord* MutableSpace::allocate(size_t size) {
assert(Heap_lock->owned_by_self() || assert(Heap_lock->owned_by_self() ||

View file

@ -30,14 +30,23 @@
// Invariant: (ImmutableSpace +) bottom() <= top() <= end() // Invariant: (ImmutableSpace +) bottom() <= top() <= end()
// top() is inclusive and end() is exclusive. // top() is inclusive and end() is exclusive.
class MutableSpaceMangler;
class MutableSpace: public ImmutableSpace { class MutableSpace: public ImmutableSpace {
friend class VMStructs; friend class VMStructs;
// Helper for mangling unused space in debug builds
MutableSpaceMangler* _mangler;
protected: protected:
HeapWord* _top; HeapWord* _top;
MutableSpaceMangler* mangler() { return _mangler; }
public: public:
virtual ~MutableSpace() {} virtual ~MutableSpace();
MutableSpace() { _top = NULL; } MutableSpace();
// Accessors // Accessors
HeapWord* top() const { return _top; } HeapWord* top() const { return _top; }
virtual void set_top(HeapWord* value) { _top = value; } virtual void set_top(HeapWord* value) { _top = value; }
@ -52,21 +61,30 @@ class MutableSpace: public ImmutableSpace {
MemRegion used_region() { return MemRegion(bottom(), top()); } MemRegion used_region() { return MemRegion(bottom(), top()); }
// Initialization // Initialization
virtual void initialize(MemRegion mr, bool clear_space); virtual void initialize(MemRegion mr,
virtual void clear(); bool clear_space,
bool mangle_space);
virtual void clear(bool mangle_space);
// Does the usual initialization but optionally resets top to bottom.
#if 0 // MANGLE_SPACE
void initialize(MemRegion mr, bool clear_space, bool reset_top);
#endif
virtual void update() { } virtual void update() { }
virtual void accumulate_statistics() { } virtual void accumulate_statistics() { }
// Overwrites the unused portion of this space. Note that some collectors // Methods used in mangling. See descriptions under SpaceMangler.
// may use this "scratch" space during collections. virtual void mangle_unused_area() PRODUCT_RETURN;
virtual void mangle_unused_area() { virtual void mangle_unused_area_complete() PRODUCT_RETURN;
mangle_region(MemRegion(_top, _end)); virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
} virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
// Used to save the space's current top for later use during mangling.
virtual void set_top_for_allocations() PRODUCT_RETURN;
virtual void ensure_parsability() { } virtual void ensure_parsability() { }
void mangle_region(MemRegion mr) { virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord));
}
// Boolean querries. // Boolean querries.
bool is_empty() const { return used_in_words() == 0; } bool is_empty() const { return used_in_words() == 0; }

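Most of the new hooks above are declared with PRODUCT_RETURN, HotSpot's idiom for debug-only methods: in product builds the macro supplies an empty inline body, while in other builds it expands to nothing so the out-of-line definition (guarded by #ifndef PRODUCT in the .cpp file) is used. A simplified stand-in for the idiom, for readers without the HotSpot headers:

// Simplified stand-in for the PRODUCT_RETURN idiom (sketch, not the real macro).
#ifdef PRODUCT
#define PRODUCT_RETURN {}          // debug-only hook collapses to a no-op
#else
#define PRODUCT_RETURN             // plain declaration; body is in the .cpp
#endif

class ExampleSpace {
public:
  void mangle_unused_area() PRODUCT_RETURN;
  void check_mangled_unused_area_complete() PRODUCT_RETURN;
};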
View file

@ -0,0 +1,140 @@
/*
* Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_spaceDecorator.cpp.incl"
// Catch-all file for utility classes
#ifndef PRODUCT
// Returns true if the location q matches the mangling
// pattern.
bool SpaceMangler::is_mangled(HeapWord* q) {
// This test loses precision but is good enough
return badHeapWord == (max_juint & (uintptr_t) q->value());
}
void SpaceMangler::set_top_for_allocations(HeapWord* v) {
if (v < end()) {
assert(is_mangled(v), "The high water mark is not mangled");
}
_top_for_allocations = v;
}
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void SpaceMangler::mangle_unused_area() {
assert(ZapUnusedHeapArea, "Mangling should not be in use");
// Mangle between top and the high water mark. Safeguard
// against the space changing since top_for_allocations was
// set.
HeapWord* mangled_end = MIN2(top_for_allocations(), end());
if (top() < mangled_end) {
MemRegion mangle_mr(top(), mangled_end);
SpaceMangler::mangle_region(mangle_mr);
// Light weight check of mangling.
check_mangled_unused_area(end());
}
// Complete check of unused area which is functional when
// DEBUG_MANGLING is defined.
check_mangled_unused_area_complete();
}
// A complete mangle is expected in the
// exceptional case where top_for_allocations is not
// properly tracking the high water mark for mangling.
// This can be the case when to-space is being used for
// scratch space during a mark-sweep-compact. See
// contribute_scratch() and PSMarkSweep::allocate_stacks().
void SpaceMangler::mangle_unused_area_complete() {
assert(ZapUnusedHeapArea, "Mangling should not be in use");
MemRegion mangle_mr(top(), end());
SpaceMangler::mangle_region(mangle_mr);
}
// Simply mangle the MemRegion mr.
void SpaceMangler::mangle_region(MemRegion mr) {
assert(ZapUnusedHeapArea, "Mangling should not be in use");
#ifdef ASSERT
if(TraceZapUnusedHeapArea) {
gclog_or_tty->print("Mangling [0x%x to 0x%x)", mr.start(), mr.end());
}
Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
if(TraceZapUnusedHeapArea) {
gclog_or_tty->print_cr(" done");
}
#endif
}
// Check that top, top_for_allocations and the last
// word of the space are mangled. In a tight memory
// situation even this light weight mangling could
// cause paging by touching the end of the space.
void SpaceMangler::check_mangled_unused_area(HeapWord* limit) {
if (CheckZapUnusedHeapArea) {
// This method can be called while the spaces are
// being reshaped so skip the test if the end of the
// space is beyond the specified limit.
if (end() > limit) return;
assert(top() == end() ||
(is_mangled(top())), "Top not mangled");
assert((top_for_allocations() < top()) ||
(top_for_allocations() >= end()) ||
(is_mangled(top_for_allocations())),
"Older unused not mangled");
assert(top() == end() ||
(is_mangled(end() - 1)), "End not properly mangled");
// Only does checking when DEBUG_MANGLING is defined.
check_mangled_unused_area_complete();
}
}
#undef DEBUG_MANGLING
// This should only be used while debugging the mangling
// because of the high cost of checking the completeness.
void SpaceMangler::check_mangled_unused_area_complete() {
if (CheckZapUnusedHeapArea) {
assert(ZapUnusedHeapArea, "Not mangling unused area");
#ifdef DEBUG_MANGLING
HeapWord* q = top();
HeapWord* limit = end();
bool passed = true;
while (q < limit) {
if (!is_mangled(q)) {
passed = false;
break;
}
q++;
}
assert(passed, "Mangling is not complete");
#endif
}
}
#undef DEBUG_MANGLING
#endif // not PRODUCT
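The high-water-mark logic above can be read as: everything in [top, end) was mangled at some earlier point except what has been allocated since, so only [top, top_for_allocations) needs remangling. For example, if top_for_allocations was recorded at 0x9000 before a collection and top drops back to 0x7000 afterwards, mangle_unused_area rewrites just [0x7000, 0x9000). The function below restates that decision on plain addresses (illustrative only; it omits the light-weight checks the real code performs):

#include <algorithm>
#include <cstdint>

using Addr = std::uintptr_t;
struct Slice { Addr start, end; };   // half-open [start, end); empty if start >= end

// The slice mangle_unused_area would overwrite: from the current top up to
// the saved high-water mark, clamped to the end of the space.
Slice slice_to_remangle(Addr top, Addr top_for_allocations, Addr end) {
  Addr mangled_end = std::min(top_for_allocations, end);
  return top < mangled_end ? Slice{top, mangled_end} : Slice{0, 0};
}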

View file

@ -0,0 +1,141 @@
/*
* Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
class SpaceDecorator: public AllStatic {
public:
// Initialization flags.
static const bool Clear = true;
static const bool DontClear = false;
static const bool Mangle = true;
static const bool DontMangle = false;
};
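The two pairs of named booleans exist to make call sites self-describing; compare the bare initialize(mr, true) calls being removed elsewhere in this change with their replacements. A hypothetical call site in the same style (sketch only; DemoSpace is not a HotSpot type):

class SpaceDecoratorSketch {            // mirrors the flags defined above
public:
  static const bool Clear      = true;
  static const bool DontClear  = false;
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};

struct DemoSpace {
  void initialize(bool clear_space, bool mangle_space) { /* ... */ }
};

void demo(DemoSpace& s) {
  // Reads as intent instead of initialize(true, false):
  s.initialize(SpaceDecoratorSketch::Clear, SpaceDecoratorSketch::DontMangle);
}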
// Functionality for use with class Space and class MutableSpace.
// The approach taken with the mangling is to mangle all
// the space initially and then to mangle areas that have
// been allocated since the last collection. Mangling is
// done in the context of a generation and in the context
// of a space.
// The space in a generation is mangled when it is first
// initialized and when the generation grows. The spaces
// are not necessarily up-to-date when this mangling occurs
// and the method mangle_region() is used.
// After allocations have been done in a space, the space generally
// needs to be remangled. Remangling is only done on the
// recently allocated regions in the space. Typically, that is
// the region between the new top and the top just before a
// garbage collection.
// An exception to the usual mangling in a space is done when the
// space is used for an extraordinary purpose. Specifically, when
// to-space is used as scratch space for a mark-sweep-compact
// collection.
// Spaces are mangled after a collection. If the generation
// grows after a collection, the added space is mangled as part of
// the growth of the generation. No additional mangling is needed when the
// spaces are resized after an expansion.
// The class SpaceMangler keeps a pointer to the top of the allocated
// area and provides the methods for doing the piece meal mangling.
// Methods for doing sparces and full checking of the mangling are
// included. The full checking is done if DEBUG_MANGLING is defined.
// GenSpaceMangler is used with the GenCollectedHeap collectors and
// MutableSpaceMangler is used with the ParallelScavengeHeap collectors.
// These subclasses abstract the differences in the types of spaces used
// by each heap.
class SpaceMangler: public CHeapObj {
friend class VMStructs;
// High water mark for allocations. Typically, the space above
// this point has been mangled previously and does not need to be
// touched again. Space below this point has been allocated
// and remangling is needed between the current top and this
// high water mark.
HeapWord* _top_for_allocations;
HeapWord* top_for_allocations() { return _top_for_allocations; }
public:
// Setting _top_for_allocations to NULL at initialization
// makes it always below top so that mangling done as part
// of the initialize() call of a space does nothing (as it
// should since the mangling is done as part of the constructor
// for the space).
SpaceMangler() : _top_for_allocations(NULL) {}
// Methods for top and end that delegate to the specific
// space type.
virtual HeapWord* top() const = 0;
virtual HeapWord* end() const = 0;
// Return true if q matches the mangled pattern.
static bool is_mangled(HeapWord* q) PRODUCT_RETURN0;
// Used to save an address in a space for later use during mangling.
void set_top_for_allocations(HeapWord* v);
// Overwrites the unused portion of this space.
// Mangle only the region not previously mangled [top, top_previously_mangled)
void mangle_unused_area();
// Mangle all the unused region [top, end)
void mangle_unused_area_complete();
// Do some sparse checking on the area that should have been mangled.
void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
// Do a complete check of the area that should be mangled.
void check_mangled_unused_area_complete() PRODUCT_RETURN;
// Mangle the MemRegion. This is a non-space specific mangler. It
// is used during the initial mangling of a space before the space
// is fully constructed. Also is used when a generation is expanded
// and possibly before the spaces have been reshaped to the new
// size of the generation.
static void mangle_region(MemRegion mr);
};
class ContiguousSpace;
// For use with GenCollectedHeap's
class GenSpaceMangler: public SpaceMangler {
ContiguousSpace* _sp;
ContiguousSpace* sp() { return _sp; }
HeapWord* top() const { return _sp->top(); }
HeapWord* end() const { return _sp->end(); }
public:
GenSpaceMangler(ContiguousSpace* sp) : SpaceMangler(), _sp(sp) {}
};
// For use with ParallelScavengeHeap's.
class MutableSpaceMangler: public SpaceMangler {
MutableSpace* _sp;
MutableSpace* sp() { return _sp; }
HeapWord* top() const { return _sp->top(); }
HeapWord* end() const { return _sp->end(); }
public:
MutableSpaceMangler(MutableSpace* sp) : SpaceMangler(), _sp(sp) {}
};

View file

@ -1405,6 +1405,7 @@ defNewGeneration.cpp java.hpp
defNewGeneration.cpp oop.inline.hpp defNewGeneration.cpp oop.inline.hpp
defNewGeneration.cpp referencePolicy.hpp defNewGeneration.cpp referencePolicy.hpp
defNewGeneration.cpp space.inline.hpp defNewGeneration.cpp space.inline.hpp
defNewGeneration.cpp spaceDecorator.hpp
defNewGeneration.cpp thread_<os_family>.inline.hpp defNewGeneration.cpp thread_<os_family>.inline.hpp
defNewGeneration.hpp ageTable.hpp defNewGeneration.hpp ageTable.hpp
@ -1789,6 +1790,7 @@ generation.cpp generation.inline.hpp
generation.cpp java.hpp generation.cpp java.hpp
generation.cpp oop.hpp generation.cpp oop.hpp
generation.cpp oop.inline.hpp generation.cpp oop.inline.hpp
generation.cpp spaceDecorator.hpp
generation.cpp space.inline.hpp generation.cpp space.inline.hpp
generation.hpp allocation.hpp generation.hpp allocation.hpp
@ -3722,6 +3724,7 @@ space.cpp oop.inline2.hpp
space.cpp safepoint.hpp space.cpp safepoint.hpp
space.cpp space.hpp space.cpp space.hpp
space.cpp space.inline.hpp space.cpp space.inline.hpp
space.cpp spaceDecorator.hpp
space.cpp systemDictionary.hpp space.cpp systemDictionary.hpp
space.cpp universe.inline.hpp space.cpp universe.inline.hpp
space.cpp vmSymbols.hpp space.cpp vmSymbols.hpp
@ -3744,6 +3747,13 @@ space.inline.hpp safepoint.hpp
space.inline.hpp space.hpp space.inline.hpp space.hpp
space.inline.hpp universe.hpp space.inline.hpp universe.hpp
spaceDecorator.hpp globalDefinitions.hpp
spaceDecorator.hpp mutableSpace.hpp
spaceDecorator.hpp space.hpp
spaceDecorator.cpp copy.hpp
spaceDecorator.cpp spaceDecorator.hpp
specialized_oop_closures.cpp ostream.hpp specialized_oop_closures.cpp ostream.hpp
specialized_oop_closures.cpp specialized_oop_closures.hpp specialized_oop_closures.cpp specialized_oop_closures.hpp

View file

@ -51,6 +51,7 @@ dump.cpp oop.hpp
dump.cpp oopFactory.hpp dump.cpp oopFactory.hpp
dump.cpp resourceArea.hpp dump.cpp resourceArea.hpp
dump.cpp signature.hpp dump.cpp signature.hpp
dump.cpp spaceDecorator.hpp
dump.cpp symbolTable.hpp dump.cpp symbolTable.hpp
dump.cpp systemDictionary.hpp dump.cpp systemDictionary.hpp
dump.cpp vmThread.hpp dump.cpp vmThread.hpp

View file

@ -172,15 +172,25 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
_gen_counters); _gen_counters);
compute_space_boundaries(0); compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
update_counters(); update_counters();
_next_gen = NULL; _next_gen = NULL;
_tenuring_threshold = MaxTenuringThreshold; _tenuring_threshold = MaxTenuringThreshold;
_pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
} }
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) { void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); bool clear_space,
bool mangle_space) {
uintx alignment =
GenCollectedHeap::heap()->collector_policy()->min_alignment();
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
// Otherwise, no care is taken for used areas in the survivor spaces
// so check.
assert(clear_space || (to()->is_empty() && from()->is_empty()),
"Initialization of the survivor spaces assumes these are empty");
// Compute sizes // Compute sizes
uintx size = _virtual_space.committed_size(); uintx size = _virtual_space.committed_size();
@ -214,16 +224,41 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
eden()->initialize(edenMR, (minimum_eden_size == 0)); // A minimum eden size implies that there is a part of eden that
// If minumum_eden_size != 0, we will not have cleared any // is being used and that affects the initialization of any
// newly formed eden.
bool live_in_eden = minimum_eden_size > 0;
// If not clearing the spaces, do some checking to verify that
// the spaces are already mangled.
if (!clear_space) {
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into another space
// and a failure of the check may not correctly indicate which space
// is not properly mangled.
if (ZapUnusedHeapArea) {
HeapWord* limit = (HeapWord*) _virtual_space.high();
eden()->check_mangled_unused_area(limit);
from()->check_mangled_unused_area(limit);
to()->check_mangled_unused_area(limit);
}
}
// Reset the spaces for their new regions.
eden()->initialize(edenMR,
clear_space && !live_in_eden,
SpaceDecorator::Mangle);
// If clear_space and live_in_eden, we will not have cleared any
// portion of eden above its top. This can cause newly // portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea. // expanded space not to be mangled if using ZapUnusedHeapArea.
// We explicitly do such mangling here. // We explicitly do such mangling here.
if (ZapUnusedHeapArea && (minimum_eden_size != 0)) { if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
eden()->mangle_unused_area(); eden()->mangle_unused_area();
} }
from()->initialize(fromMR, true); from()->initialize(fromMR, clear_space, mangle_space);
to()->initialize(toMR , true); to()->initialize(toMR, clear_space, mangle_space);
// Set next compaction spaces.
eden()->set_next_compaction_space(from()); eden()->set_next_compaction_space(from());
// The to-space is normally empty before a compaction so need // The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion // not be considered. The exception is during promotion
@ -250,7 +285,16 @@ void DefNewGeneration::swap_spaces() {
bool DefNewGeneration::expand(size_t bytes) { bool DefNewGeneration::expand(size_t bytes) {
MutexLocker x(ExpandHeap_lock); MutexLocker x(ExpandHeap_lock);
HeapWord* prev_high = (HeapWord*) _virtual_space.high();
bool success = _virtual_space.expand_by(bytes); bool success = _virtual_space.expand_by(bytes);
if (success && ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_high = (HeapWord*) _virtual_space.high();
MemRegion mangle_region(prev_high, new_high);
SpaceMangler::mangle_region(mangle_region);
}
// Do not attempt an expand-to-the reserve size. The // Do not attempt an expand-to-the reserve size. The
// request should properly observe the maximum size of // request should properly observe the maximum size of
@ -262,7 +306,8 @@ bool DefNewGeneration::expand(size_t bytes) {
// value. // value.
if (GC_locker::is_active()) { if (GC_locker::is_active()) {
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); gclog_or_tty->print_cr("Garbage collection disabled, "
"expanded heap instead");
} }
} }
@ -326,16 +371,24 @@ void DefNewGeneration::compute_new_size() {
changed = true; changed = true;
} }
if (changed) { if (changed) {
compute_space_boundaries(eden()->used()); // The spaces have already been mangled at this point but
MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high()); // may not have been cleared (set top = bottom) and should be.
// Mangling was done when the heap was being expanded.
compute_space_boundaries(eden()->used(),
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr); Universe::heap()->barrier_set()->resize_covered_region(cmr);
if (Verbose && PrintGC) { if (Verbose && PrintGC) {
size_t new_size_after = _virtual_space.committed_size(); size_t new_size_after = _virtual_space.committed_size();
size_t eden_size_after = eden()->capacity(); size_t eden_size_after = eden()->capacity();
size_t survivor_size_after = from()->capacity(); size_t survivor_size_after = from()->capacity();
gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
SIZE_FORMAT "K [eden="
SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K); new_size_before/K, new_size_after/K,
eden_size_after/K, survivor_size_after/K);
if (WizardMode) { if (WizardMode) {
gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
thread_increase_size/K, threads_count); thread_increase_size/K, threads_count);
@ -480,7 +533,7 @@ void DefNewGeneration::collect(bool full,
ScanWeakRefClosure scan_weak_ref(this); ScanWeakRefClosure scan_weak_ref(this);
age_table()->clear(); age_table()->clear();
to()->clear(); to()->clear(SpaceDecorator::Mangle);
gch->rem_set()->prepare_for_younger_refs_iterate(false); gch->rem_set()->prepare_for_younger_refs_iterate(false);
@ -525,8 +578,18 @@ void DefNewGeneration::collect(bool full,
soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL); soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
if (!promotion_failed()) { if (!promotion_failed()) {
// Swap the survivor spaces. // Swap the survivor spaces.
eden()->clear(); eden()->clear(SpaceDecorator::Mangle);
from()->clear(); from()->clear(SpaceDecorator::Mangle);
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a minor collection fails to collect
// sufficient space resizing of the young generation can occur
// an redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
}
swap_spaces(); swap_spaces();
assert(to()->is_empty(), "to space should be empty now"); assert(to()->is_empty(), "to space should be empty now");
@ -753,6 +816,15 @@ void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* reque
} }
} }
void DefNewGeneration::reset_scratch() {
// If contributing scratch in to_space, mangle all of
// to_space if ZapUnusedHeapArea. This is needed because
// top is not maintained while using to-space as scratch.
if (ZapUnusedHeapArea) {
to()->mangle_unused_area_complete();
}
}
bool DefNewGeneration::collection_attempt_is_safe() { bool DefNewGeneration::collection_attempt_is_safe() {
if (!to()->is_empty()) { if (!to()->is_empty()) {
return false; return false;
@ -806,11 +878,25 @@ void DefNewGeneration::gc_epilogue(bool full) {
} }
} }
if (ZapUnusedHeapArea) {
eden()->check_mangled_unused_area_complete();
from()->check_mangled_unused_area_complete();
to()->check_mangled_unused_area_complete();
}
// update the generation and space performance counters // update the generation and space performance counters
update_counters(); update_counters();
gch->collector_policy()->counters()->update_counters(); gch->collector_policy()->counters()->update_counters();
} }
void DefNewGeneration::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
eden()->set_top_for_allocations();
to()->set_top_for_allocations();
from()->set_top_for_allocations();
}
void DefNewGeneration::update_counters() { void DefNewGeneration::update_counters() {
if (UsePerfData) { if (UsePerfData) {
_eden_counters->update_all(); _eden_counters->update_all();

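Both PSYoungGen::resize_generation and DefNewGeneration::expand now mangle only the delta that expand_by just committed, by snapshotting the old high boundary first. Reduced to a sketch with hypothetical stand-in types:

#include <cstddef>
#include <cstdint>

using Addr = std::uintptr_t;

struct VirtualSpaceStandIn {
  Addr _high = 0;
  bool expand_by(std::size_t bytes) { _high += bytes; return true; }
  Addr high() const { return _high; }
};

void mangle_range(Addr lo, Addr hi) { /* fill [lo, hi) with the bad-heap pattern */ }

bool expand_and_mangle(VirtualSpaceStandIn& vs, std::size_t bytes,
                       bool zap_unused_heap_area) {
  Addr prev_high = vs.high();               // snapshot before growing
  if (!vs.expand_by(bytes)) return false;
  if (zap_unused_heap_area) {
    mangle_range(prev_high, vs.high());     // only the newly committed region
  }
  return true;
}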
View file

@ -279,6 +279,9 @@ protected:
virtual void gc_prologue(bool full); virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full); virtual void gc_epilogue(bool full);
// Save the tops for eden, from, and to
virtual void record_spaces_top();
// Doesn't require additional work during GC prologue and epilogue // Doesn't require additional work during GC prologue and epilogue
virtual bool performs_in_place_marking() const { return false; } virtual bool performs_in_place_marking() const { return false; }
@ -299,9 +302,12 @@ protected:
// For non-youngest collection, the DefNewGeneration can contribute // For non-youngest collection, the DefNewGeneration can contribute
// "to-space". // "to-space".
void contribute_scratch(ScratchBlock*& list, Generation* requestor, virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words); size_t max_alloc_words);
// Reset for contribution of "to-space".
virtual void reset_scratch();
// GC support // GC support
virtual void compute_new_size(); virtual void compute_new_size();
virtual void collect(bool full, virtual void collect(bool full,
@ -331,7 +337,12 @@ protected:
void verify(bool allow_dirty); void verify(bool allow_dirty);
protected: protected:
void compute_space_boundaries(uintx minimum_eden_size); // If clear_space is true, clear the survivor spaces. Eden is
// cleared if the minimum size of eden is 0. If mangle_space
// is true, also mangle the space in debug mode.
void compute_space_boundaries(uintx minimum_eden_size,
bool clear_space,
bool mangle_space);
// Scavenge support // Scavenge support
void swap_spaces(); void swap_spaces();
}; };

View file

@ -645,7 +645,7 @@ public:
class ClearSpaceClosure : public SpaceClosure { class ClearSpaceClosure : public SpaceClosure {
public: public:
void do_space(Space* s) { void do_space(Space* s) {
s->clear(); s->clear(SpaceDecorator::Mangle);
} }
}; };

View file

@ -465,6 +465,11 @@ void GenCollectedHeap::do_collection(bool full,
_gens[i]->stat_record()->invocations++; _gens[i]->stat_record()->invocations++;
_gens[i]->stat_record()->accumulated_time.start(); _gens[i]->stat_record()->accumulated_time.start();
// Must be done anew before each collection because
// a previous collection will do mangling and will
// change the tops of some spaces.
record_gen_tops_before_GC();
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
i, i,
@ -1058,6 +1063,12 @@ ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
return res; return res;
} }
void GenCollectedHeap::release_scratch() {
for (int i = 0; i < _n_gens; i++) {
_gens[i]->reset_scratch();
}
}
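release_scratch() is the counterpart of gather_scratch(): whoever borrowed scratch space for a collection calls it afterwards so each generation can undo whatever its contribution disturbed (for DefNewGeneration, re-mangling to-space as above). A rough sketch of the intended ordering; collect_with_scratch is not a real HotSpot function, only the two heap calls come from this code:

    // Illustrative only.
    void collect_with_scratch(GenCollectedHeap* gch, Generation* requestor,
                              size_t max_alloc_words) {
      ScratchBlock* scratch = gch->gather_scratch(requestor, max_alloc_words);
      // ... the collection uses 'scratch' (typically to-space) as temporary storage ...
      (void) scratch;
      gch->release_scratch();   // each generation resets / re-mangles what it lent out
    }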
size_t GenCollectedHeap::large_typearray_limit() { size_t GenCollectedHeap::large_typearray_limit() {
return gen_policy()->large_typearray_limit(); return gen_policy()->large_typearray_limit();
} }
@ -1285,6 +1296,24 @@ void GenCollectedHeap::gc_epilogue(bool full) {
always_do_update_barrier = UseConcMarkSweepGC; always_do_update_barrier = UseConcMarkSweepGC;
}; };
#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
private:
public:
void do_generation(Generation* gen) {
gen->record_spaces_top();
}
};
void GenCollectedHeap::record_gen_tops_before_GC() {
if (ZapUnusedHeapArea) {
GenGCSaveTopsBeforeGCClosure blk;
generation_iterate(&blk, false); // not old-to-young.
perm_gen()->record_spaces_top();
}
}
#endif // not PRODUCT
class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
public: public:
void do_generation(Generation* gen) { void do_generation(Generation* gen) {

View file

@ -259,6 +259,9 @@ public:
// be provided are returned as a list of ScratchBlocks, sorted by // be provided are returned as a list of ScratchBlocks, sorted by
// decreasing size. // decreasing size.
ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words); ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
// Allow each generation to reset any scratch space that it has
// contributed as it needs.
void release_scratch();
size_t large_typearray_limit(); size_t large_typearray_limit();
@ -482,6 +485,9 @@ private:
bool should_do_concurrent_full_gc(GCCause::Cause cause); bool should_do_concurrent_full_gc(GCCause::Cause cause);
void collect_mostly_concurrent(GCCause::Cause cause); void collect_mostly_concurrent(GCCause::Cause cause);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected: protected:
virtual void gc_prologue(bool full); virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full); virtual void gc_epilogue(bool full);

View file

@ -190,6 +190,10 @@ void GenMarkSweep::allocate_stacks() {
void GenMarkSweep::deallocate_stacks() { void GenMarkSweep::deallocate_stacks() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->release_scratch();
if (_preserved_oop_stack) { if (_preserved_oop_stack) {
delete _preserved_mark_stack; delete _preserved_mark_stack;
_preserved_mark_stack = NULL; _preserved_mark_stack = NULL;

View file

@ -32,6 +32,12 @@ Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
vm_exit_during_initialization("Could not reserve enough space for " vm_exit_during_initialization("Could not reserve enough space for "
"object heap"); "object heap");
} }
// Mangle all of the initial generation.
if (ZapUnusedHeapArea) {
MemRegion mangle_region((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
_reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(), _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
(HeapWord*)_virtual_space.high_boundary()); (HeapWord*)_virtual_space.high_boundary());
} }
@ -505,8 +511,11 @@ bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
_bts->resize(new_word_size); _bts->resize(new_word_size);
// Fix for bug #4668531 // Fix for bug #4668531
MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high()); if (ZapUnusedHeapArea) {
_the_space->mangle_region(mangle_region); MemRegion mangle_region(_the_space->end(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
// Expand space -- also expands space's BOT // Expand space -- also expands space's BOT
// (which uses (part of) shared array above) // (which uses (part of) shared array above)
@ -622,6 +631,14 @@ void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
// update the generation and space performance counters // update the generation and space performance counters
update_counters(); update_counters();
if (ZapUnusedHeapArea) {
the_space()->check_mangled_unused_area_complete();
}
}
void OneContigSpaceCardGeneration::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
the_space()->set_top_for_allocations();
} }
void OneContigSpaceCardGeneration::verify(bool allow_dirty) { void OneContigSpaceCardGeneration::verify(bool allow_dirty) {

View file

@ -376,6 +376,9 @@ class Generation: public CHeapObj {
// The default is to do nothing. // The default is to do nothing.
virtual void gc_epilogue(bool full) {}; virtual void gc_epilogue(bool full) {};
// Save the high water marks for the used space in a generation.
virtual void record_spaces_top() {};
// Some generations may need to be "fixed-up" after some allocation // Some generations may need to be "fixed-up" after some allocation
// activity to make them parsable again. The default is to do nothing. // activity to make them parsable again. The default is to do nothing.
virtual void ensure_parsability() {}; virtual void ensure_parsability() {};
@ -476,6 +479,10 @@ class Generation: public CHeapObj {
virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor, virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words) {} size_t max_alloc_words) {}
// Give each generation an opportunity to clean up any
// contributed scratch.
virtual void reset_scratch() {};
// When an older generation has been collected, and perhaps resized, // When an older generation has been collected, and perhaps resized,
// this method will be invoked on all younger generations (from older to // this method will be invoked on all younger generations (from older to
// younger), allowing them to resize themselves as appropriate. // younger), allowing them to resize themselves as appropriate.
@ -699,6 +706,8 @@ class OneContigSpaceCardGeneration: public CardGeneration {
virtual void gc_epilogue(bool full); virtual void gc_epilogue(bool full);
virtual void record_spaces_top();
virtual void verify(bool allow_dirty); virtual void verify(bool allow_dirty);
virtual void print_on(outputStream* st) const; virtual void print_on(outputStream* st) const;
}; };

View file

@ -232,30 +232,44 @@ ContiguousSpace::new_dcto_cl(OopClosure* cl,
return new ContiguousSpaceDCTOC(this, cl, precision, boundary); return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
} }
void Space::initialize(MemRegion mr, bool clear_space) { void Space::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
HeapWord* bottom = mr.start(); HeapWord* bottom = mr.start();
HeapWord* end = mr.end(); HeapWord* end = mr.end();
assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
"invalid space boundaries"); "invalid space boundaries");
set_bottom(bottom); set_bottom(bottom);
set_end(end); set_end(end);
if (clear_space) clear(); if (clear_space) clear(mangle_space);
} }
void Space::clear() { void Space::clear(bool mangle_space) {
if (ZapUnusedHeapArea) mangle_unused_area(); if (ZapUnusedHeapArea && mangle_space) {
mangle_unused_area();
}
} }
void ContiguousSpace::initialize(MemRegion mr, bool clear_space) ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
_mangler = new GenSpaceMangler(this);
}
ContiguousSpace::~ContiguousSpace() {
delete _mangler;
}
void ContiguousSpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space)
{ {
CompactibleSpace::initialize(mr, clear_space); CompactibleSpace::initialize(mr, clear_space, mangle_space);
_concurrent_iteration_safe_limit = top(); _concurrent_iteration_safe_limit = top();
} }
void ContiguousSpace::clear() { void ContiguousSpace::clear(bool mangle_space) {
set_top(bottom()); set_top(bottom());
set_saved_mark(); set_saved_mark();
Space::clear(); Space::clear(mangle_space);
} }
bool Space::is_in(const void* p) const { bool Space::is_in(const void* p) const {
@ -271,8 +285,8 @@ bool ContiguousSpace::is_free_block(const HeapWord* p) const {
return p >= _top; return p >= _top;
} }
void OffsetTableContigSpace::clear() { void OffsetTableContigSpace::clear(bool mangle_space) {
ContiguousSpace::clear(); ContiguousSpace::clear(mangle_space);
_offsets.initialize_threshold(); _offsets.initialize_threshold();
} }
@ -288,17 +302,46 @@ void OffsetTableContigSpace::set_end(HeapWord* new_end) {
Space::set_end(new_end); Space::set_end(new_end);
} }
#ifndef PRODUCT
void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
mangler()->check_mangled_unused_area(limit);
}
void ContiguousSpace::check_mangled_unused_area_complete() {
mangler()->check_mangled_unused_area_complete();
}
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() { void ContiguousSpace::mangle_unused_area() {
// to-space is used for storing marks during mark-sweep mangler()->mangle_unused_area();
mangle_region(MemRegion(top(), end())); }
void ContiguousSpace::mangle_unused_area_complete() {
mangler()->mangle_unused_area_complete();
} }
void ContiguousSpace::mangle_region(MemRegion mr) { void ContiguousSpace::mangle_region(MemRegion mr) {
debug_only(Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord)); // Although this method uses SpaceMangler::mangle_region() which
// is not specific to a space, when the ContiguousSpace version
// is called it is always with regard to a space, so this
// bounds checking is appropriate.
MemRegion space_mr(bottom(), end());
assert(space_mr.contains(mr), "Mangling outside space");
SpaceMangler::mangle_region(mr);
} }
#endif // NOT_PRODUCT
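All of the entry points above delegate to the space's GenSpaceMangler, whose own implementation is outside this hunk. As a self-contained model of the bookkeeping it is expected to do (illustrative only -- plain pointers instead of HeapWord*/MemRegion, and a simplified interface):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static const uintptr_t kZapWord = 0xBAADBABE;    // stand-in for the badHeapWord fill pattern

    class MiniMangler {
      uintptr_t* _end;                   // end of the space
      uintptr_t* _top_for_allocations;   // top recorded before the last collection
    public:
      explicit MiniMangler(uintptr_t* end) : _end(end), _top_for_allocations(end) {}

      void set_top_for_allocations(uintptr_t* top) { _top_for_allocations = top; }

      // Mangle everything in [top, end), regardless of history.
      void mangle_unused_area_complete(uintptr_t* top) {
        for (uintptr_t* p = top; p < _end; ++p) *p = kZapWord;
      }

      // Mangle only what may have been dirtied since the last mangling: the
      // region between the current top and the recorded high-water mark.
      // Everything at or above the high-water mark is still mangled.
      void mangle_unused_area(uintptr_t* top) {
        uintptr_t* limit = std::min(_top_for_allocations, _end);
        for (uintptr_t* p = top; p < limit; ++p) *p = kZapWord;
      }

      // Verify that [top, limit) still carries the fill pattern.
      void check_mangled_unused_area(uintptr_t* top, uintptr_t* limit) {
        for (uintptr_t* p = top; p < std::min(limit, _end); ++p) {
          assert(*p == kZapWord);
        }
      }
    };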
void CompactibleSpace::initialize(MemRegion mr, bool clear_space) { void CompactibleSpace::initialize(MemRegion mr,
Space::initialize(mr, clear_space); bool clear_space,
bool mangle_space) {
Space::initialize(mr, clear_space, mangle_space);
_compaction_top = bottom(); _compaction_top = bottom();
_next_compaction_space = NULL; _next_compaction_space = NULL;
} }
@ -820,8 +863,8 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
} }
} }
void EdenSpace::clear() { void EdenSpace::clear(bool mangle_space) {
ContiguousSpace::clear(); ContiguousSpace::clear(mangle_space);
set_soft_end(end()); set_soft_end(end());
} }
@ -878,7 +921,7 @@ OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOff
_par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true) _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{ {
_offsets.set_contig_space(this); _offsets.set_contig_space(this);
initialize(mr, true); initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
} }

View file

@ -131,15 +131,17 @@ class Space: public CHeapObj {
return MemRegion(bottom(), saved_mark_word()); return MemRegion(bottom(), saved_mark_word());
} }
// Initialization // Initialization. These may be run to reset an existing
virtual void initialize(MemRegion mr, bool clear_space); // Space.
virtual void clear(); virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
virtual void clear(bool mangle_space);
// For detecting GC bugs. Should only be called at GC boundaries, since // For detecting GC bugs. Should only be called at GC boundaries, since
// some unused space may be used as scratch space during GC's. // some unused space may be used as scratch space during GC's.
// Default implementation does nothing. We also call this when expanding // Default implementation does nothing. We also call this when expanding
// a space to satisfy an allocation request. See bug #4668531 // a space to satisfy an allocation request. See bug #4668531
virtual void mangle_unused_area() {} virtual void mangle_unused_area() {}
virtual void mangle_unused_area_complete() {}
virtual void mangle_region(MemRegion mr) {} virtual void mangle_region(MemRegion mr) {}
// Testers // Testers
@ -354,7 +356,7 @@ private:
CompactibleSpace* _next_compaction_space; CompactibleSpace* _next_compaction_space;
public: public:
virtual void initialize(MemRegion mr, bool clear_space); virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Used temporarily during a compaction phase to hold the value // Used temporarily during a compaction phase to hold the value
// top should have when compaction is complete. // top should have when compaction is complete.
@ -724,12 +726,14 @@ protected:
/* continuously, but those that weren't need to have their thresholds */ \ /* continuously, but those that weren't need to have their thresholds */ \
/* re-initialized. Also mangles unused area for debugging. */ \ /* re-initialized. Also mangles unused area for debugging. */ \
if (is_empty()) { \ if (is_empty()) { \
clear(); \ clear(SpaceDecorator::Mangle); \
} else { \ } else { \
if (ZapUnusedHeapArea) mangle_unused_area(); \ if (ZapUnusedHeapArea) mangle_unused_area(); \
} \ } \
} }
class GenSpaceMangler;
// A space in which the free area is contiguous. It therefore supports // A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction. // faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace { class ContiguousSpace: public CompactibleSpace {
@ -738,13 +742,21 @@ class ContiguousSpace: public CompactibleSpace {
protected: protected:
HeapWord* _top; HeapWord* _top;
HeapWord* _concurrent_iteration_safe_limit; HeapWord* _concurrent_iteration_safe_limit;
// A helper for mangling the unused area of the space in debug builds.
GenSpaceMangler* _mangler;
GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return NULL if full). // Allocation helpers (return NULL if full).
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
public: public:
virtual void initialize(MemRegion mr, bool clear_space);
ContiguousSpace();
~ContiguousSpace();
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// Accessors // Accessors
HeapWord* top() const { return _top; } HeapWord* top() const { return _top; }
@ -753,15 +765,34 @@ class ContiguousSpace: public CompactibleSpace {
void set_saved_mark() { _saved_mark_word = top(); } void set_saved_mark() { _saved_mark_word = top(); }
void reset_saved_mark() { _saved_mark_word = bottom(); } void reset_saved_mark() { _saved_mark_word = bottom(); }
virtual void clear(); virtual void clear(bool mangle_space);
WaterMark bottom_mark() { return WaterMark(this, bottom()); } WaterMark bottom_mark() { return WaterMark(this, bottom()); }
WaterMark top_mark() { return WaterMark(this, top()); } WaterMark top_mark() { return WaterMark(this, top()); }
WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); } WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
bool saved_mark_at_top() const { return saved_mark_word() == top(); } bool saved_mark_at_top() const { return saved_mark_word() == top(); }
void mangle_unused_area(); // In debug mode mangle (write it with a particular bit
void mangle_region(MemRegion mr); // pattern) the unused part of a space.
// Used to save an address in a space for later use during mangling.
void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
// Used to save the space's current top for later use during mangling.
void set_top_for_allocations() PRODUCT_RETURN;
// Mangle regions in the space from the current top up to the
// previously mangled part of the space.
void mangle_unused_area() PRODUCT_RETURN;
// Mangle [top, end)
void mangle_unused_area_complete() PRODUCT_RETURN;
// Mangle the given MemRegion.
void mangle_region(MemRegion mr) PRODUCT_RETURN;
// Do some sparse checking on the area that should have been mangled.
void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
// Check the complete area that should have been mangled.
// This check may be a no-op, depending on the macro DEBUG_MANGLING.
void check_mangled_unused_area_complete() PRODUCT_RETURN;
// Size computations: sizes in bytes. // Size computations: sizes in bytes.
size_t capacity() const { return byte_size(bottom(), end()); } size_t capacity() const { return byte_size(bottom(), end()); }
@ -956,7 +987,7 @@ class EdenSpace : public ContiguousSpace {
void set_soft_end(HeapWord* value) { _soft_end = value; } void set_soft_end(HeapWord* value) { _soft_end = value; }
// Override. // Override.
void clear(); void clear(bool mangle_space);
// Set both the 'hard' and 'soft' limits (_end and _soft_end). // Set both the 'hard' and 'soft' limits (_end and _soft_end).
void set_end(HeapWord* value) { void set_end(HeapWord* value) {
@ -1000,7 +1031,7 @@ class OffsetTableContigSpace: public ContiguousSpace {
void set_bottom(HeapWord* value); void set_bottom(HeapWord* value);
void set_end(HeapWord* value); void set_end(HeapWord* value);
void clear(); void clear(bool mangle_space);
inline HeapWord* block_start(const void* p) const; inline HeapWord* block_start(const void* p) const;
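Taken together, the intended order of operations for one space over a debug-build collection looks roughly like this; the driver function is hypothetical, while the member calls and the ZapUnusedHeapArea flag are the ones used in this change:

    // Illustrative only.
    void debug_mangle_cycle(ContiguousSpace* s) {
      if (ZapUnusedHeapArea) {
        s->set_top_for_allocations();              // remember top before the collection
      }
      // ... the collection runs and may move top downward ...
      if (ZapUnusedHeapArea) {
        s->mangle_unused_area();                   // re-mangle [top(), remembered top)
        s->check_mangled_unused_area_complete();   // verify the fill pattern in the epilogue
      }
    }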

View file

@ -589,9 +589,15 @@ class CommandLineFlags {
develop(bool, ZapJNIHandleArea, trueInDebug, \ develop(bool, ZapJNIHandleArea, trueInDebug, \
"Zap freed JNI handle space with 0xFEFEFEFE") \ "Zap freed JNI handle space with 0xFEFEFEFE") \
\ \
develop(bool, ZapUnusedHeapArea, false, \ develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \ "Zap unused heap space with 0xBAADBABE") \
\ \
develop(bool, TraceZapUnusedHeapArea, false, \
"Trace zapping of unused heap space") \
\
develop(bool, CheckZapUnusedHeapArea, false, \
"Check zapping of unused heap space") \
\
develop(bool, PrintVMMessages, true, \ develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \ "Print vm messages on console") \
\ \
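All three are develop flags, so they can only be changed in non-product builds, where ZapUnusedHeapArea now defaults to true (trueInDebug). A hypothetical debug-build invocation exercising the new checking and tracing (only the flag names come from this change):

    java -XX:+ZapUnusedHeapArea -XX:+CheckZapUnusedHeapArea -XX:+TraceZapUnusedHeapArea -version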

View file

@ -97,8 +97,12 @@ const int SerializePageShiftCount = 3;
// object size. // object size.
class HeapWord { class HeapWord {
friend class VMStructs; friend class VMStructs;
private: private:
char* i; char* i;
#ifdef ASSERT
public:
char* value() { return i; }
#endif
}; };
// HeapWordSize must be 2^LogHeapWordSize. // HeapWordSize must be 2^LogHeapWordSize.