Jon Masamitsu 2008-12-12 15:37:46 -08:00
commit 31308ae8e4
27 changed files with 1010 additions and 398 deletions

View file

@@ -2954,7 +2954,7 @@ public:
      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr((HeapWord*)obj, obj->size());
-     SharedHeap::fill_region_with_object(mr);
      CollectedHeap::fill_with_object(mr);
      _cm->clearRangeBothMaps(mr);
    }
  }
@@ -3225,7 +3225,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
    // Otherwise, try to claim it.
    block = r->par_allocate(free_words);
  } while (block == NULL);
-  SharedHeap::fill_region_with_object(MemRegion(block, free_words));
  fill_with_object(block, free_words);
}

#define use_local_bitmaps 1
@@ -3619,9 +3619,8 @@ public:
      guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
                "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    }
-    else {
-      SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }

View file

@@ -102,7 +102,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
    HeapWord* tmp = hr->allocate(sz);
    assert(tmp != NULL, "Humongous allocation failure");
    MemRegion mr = MemRegion(tmp, sz);
-   SharedHeap::fill_region_with_object(mr);
    CollectedHeap::fill_with_object(mr);
    hr->declare_filled_region_to_BOT(mr);
    if (i == first) {
      first_hr->set_startsHumongous();

View file

@@ -51,14 +51,14 @@ void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  if (_retained) {
    // If the buffer had been retained shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
-   SharedHeap::fill_region_with_object(_retained_filler);
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
-   SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
@@ -155,7 +155,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
- SharedHeap::fill_region_with_object(mr);
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
@@ -171,7 +171,7 @@ HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
         "or else _true_end should be equal to _hard_end");
  assert(_retained, "or else _true_end should be equal to _hard_end");
  assert(_retained_filler.end() <= _top, "INVARIANT");
- SharedHeap::fill_region_with_object(_retained_filler);
  CollectedHeap::fill_with_object(_retained_filler);
  if (_top < _hard_end) {
    fill_region_with_block(MemRegion(_top, _hard_end), true);
  }
@@ -316,11 +316,9 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
    while (_top <= chunk_boundary) {
      assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");
-     MemRegion chunk_portion(chunk_boundary, _hard_end);
-     _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
-                                       chunk_portion.end());
-     SharedHeap::fill_region_with_object(chunk_portion);
-     _hard_end = chunk_portion.start();
      _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
      CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
      _hard_end = chunk_boundary;
      chunk_boundary -= ChunkSizeInWords;
    }
    _end = _hard_end - AlignmentReserve;

View file

@@ -201,7 +201,7 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
-   SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

View file

@@ -389,7 +389,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
- const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
@@ -416,16 +416,14 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
- MemRegion old_gen_unused(old_space->top(), old_space->end());
-
- // If the unused part of the old gen cannot be filled, skip
- // absorbing eden.
- if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
-   return false;
- }
-
- if (!old_gen_unused.is_empty()) {
-   SharedHeap::fill_region_with_object(old_gen_unused);
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
@@ -441,9 +439,8 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
- HeapWord* const start = old_gen_unused.start();
- for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-   start_array->allocate_block(addr);
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
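Aside, not part of the changeset: the rewritten absorb_live_data_from_eden() above boils down to a small control-flow pattern: give up if the unused tail of the old gen is too small to format as a filler object, otherwise fill it and then register every object start with the start array. A minimal standalone sketch of that pattern follows; ToyStartArray, min_fill_size and the fixed toy object size are illustrative stand-ins, not HotSpot types or values.

#include <cstddef>
#include <vector>

// Toy stand-in for ObjectStartArray; indices play the role of HeapWord*.
struct ToyStartArray {
  std::vector<size_t> block_starts;
  void allocate_block(size_t addr) { block_starts.push_back(addr); }
};

const size_t min_fill_size = 2;     // illustrative smallest formattable filler
const size_t toy_obj_words = 2;     // fixed object size for the toy walk

// Mirrors the new control flow: fill the unused tail [top, end) of the old gen,
// then walk from the old top() to new_top registering each object start (the
// real loop advances by oop(p)->size() instead of a fixed size).
bool absorb_sketch(size_t top, size_t end, size_t new_top, ToyStartArray& sa) {
  const size_t unused_words = end - top;
  if (unused_words > 0) {
    if (unused_words < min_fill_size) {
      return false;                 // the old gen cannot be filled; give up
    }
    // CollectedHeap::fill_with_objects(top, unused_words) would go here.
  }
  for (size_t p = top; p < new_top; p += toy_obj_words) {
    sa.allocate_block(p);
  }
  return true;
}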

View file

@@ -275,22 +275,9 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                            HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
-   oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-   const size_t aligned_min_int_array_size =
-     align_object_size(typeArrayOopDesc::header_size(T_INT));
-   if (deadlength >= aligned_min_int_array_size) {
-     oop(q)->set_klass(Universe::intArrayKlassObj());
-     assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
-            "deadspace too big for Arrayoop");
-     typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
-                                       * (HeapWordSize/sizeof(jint))));
-   } else {
-     assert((int) deadlength == instanceOopDesc::header_size(),
-            "size for smallest fake dead object doesn't match");
-     oop(q)->set_klass(SystemDictionary::object_klass());
-   }
-   assert((int) deadlength == oop(q)->size(),
-          "make sure size for fake dead object match");
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
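Aside, not part of the changeset: the removed code above is a reminder of what a filler object actually is. A gap at least as large as an int-array header is formatted as an int[] whose length covers the rest of the gap, and anything smaller must be exactly one bare object header. A standalone sketch of that decision (the word counts are illustrative, not the real header sizes):

#include <cstddef>
#include <cstdio>

// Illustrative word counts standing in for typeArrayOopDesc::header_size(T_INT)
// and instanceOopDesc::header_size(); the real values are platform dependent.
const size_t int_array_header_words = 4;
const size_t plain_object_words     = 2;

// Decide how a gap of 'deadlength' words would be formatted as a filler,
// mirroring the branch that fill_with_object() now hides from this caller.
const char* filler_kind(size_t deadlength) {
  if (deadlength >= int_array_header_words) {
    return "int[] filler (array length covers the remaining words)";
  }
  return deadlength == plain_object_words ? "plain java.lang.Object filler"
                                          : "gap too small to fill";
}

int main() {
  for (size_t words = 1; words <= 6; ++words) {
    std::printf("%zu words -> %s\n", words, filler_kind(words));
  }
  return 0;
}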

View file

@@ -88,6 +88,72 @@ GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
GrowableArray<size_t> * PSParallelCompact::_last_gc_live_oops_size = NULL;
#endif
void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
HeapWord* destination)
{
assert(src_region_idx != 0, "invalid src_region_idx");
assert(partial_obj_size != 0, "invalid partial_obj_size argument");
assert(destination != NULL, "invalid destination argument");
_src_region_idx = src_region_idx;
_partial_obj_size = partial_obj_size;
_destination = destination;
// These fields may not be updated below, so make sure they're clear.
assert(_dest_region_addr == NULL, "should have been cleared");
assert(_first_src_addr == NULL, "should have been cleared");
// Determine the number of destination regions for the partial object.
HeapWord* const last_word = destination + partial_obj_size - 1;
const ParallelCompactData& sd = PSParallelCompact::summary_data();
HeapWord* const beg_region_addr = sd.region_align_down(destination);
HeapWord* const end_region_addr = sd.region_align_down(last_word);
if (beg_region_addr == end_region_addr) {
// One destination region.
_destination_count = 1;
if (end_region_addr == destination) {
// The destination falls on a region boundary, thus the first word of the
// partial object will be the first word copied to the destination region.
_dest_region_addr = end_region_addr;
_first_src_addr = sd.region_to_addr(src_region_idx);
}
} else {
// Two destination regions. When copied, the partial object will cross a
// destination region boundary, so a word somewhere within the partial
// object will be the first word copied to the second destination region.
_destination_count = 2;
_dest_region_addr = end_region_addr;
const size_t ofs = pointer_delta(end_region_addr, destination);
assert(ofs < _partial_obj_size, "sanity");
_first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
}
}
void SplitInfo::clear()
{
_src_region_idx = 0;
_partial_obj_size = 0;
_destination = NULL;
_destination_count = 0;
_dest_region_addr = NULL;
_first_src_addr = NULL;
assert(!is_valid(), "sanity");
}
#ifdef ASSERT
void SplitInfo::verify_clear()
{
assert(_src_region_idx == 0, "not clear");
assert(_partial_obj_size == 0, "not clear");
assert(_destination == NULL, "not clear");
assert(_destination_count == 0, "not clear");
assert(_dest_region_addr == NULL, "not clear");
assert(_first_src_addr == NULL, "not clear");
}
#endif // #ifdef ASSERT
#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "perm", "old ", "eden", "from", "to "
@@ -416,21 +482,134 @@ ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
  }
}

-bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
-                                    HeapWord* source_beg, HeapWord* source_end,
-                                    HeapWord** target_next,
-                                    HeapWord** source_next) {
- // This is too strict.
- // assert(region_offset(source_beg) == 0, "not RegionSize aligned");

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region). If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field). With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region. If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
SplitInfo& split_info,
HeapWord* destination,
HeapWord* target_end,
HeapWord** target_next)
{
assert(destination <= target_end, "sanity");
assert(destination + _region_data[src_region].data_size() > target_end,
"region should not fit into target space");
size_t split_region = src_region;
HeapWord* split_destination = destination;
size_t partial_obj_size = _region_data[src_region].partial_obj_size();
if (destination + partial_obj_size > target_end) {
// The split point is just after the partial object (if any) in the
// src_region that contains the start of the object that overflowed the
// destination space.
//
// Find the start of the "overflow" object and set split_region to the
// region containing it.
HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
split_region = addr_to_region_idx(overflow_obj);
// Clear the source_region field of all destination regions whose first word
// came from data after the split point (a non-null source_region field
// implies a region must be filled).
//
// An alternative to the simple loop below: clear during post_compact(),
// which uses memcpy instead of individual stores, and is easy to
// parallelize. (The downside is that it clears the entire RegionData
// object as opposed to just one field.)
//
// post_compact() would have to clear the summary data up to the highest
// address that was written during the summary phase, which would be
//
// max(top, max(new_top, clear_top))
//
// where clear_top is a new field in SpaceInfo. Would have to set clear_top
// to destination + partial_obj_size, where both have the values passed to
// this routine.
const RegionData* const sr = region(split_region);
const size_t beg_idx =
addr_to_region_idx(region_align_up(sr->destination() +
sr->partial_obj_size()));
const size_t end_idx =
addr_to_region_idx(region_align_up(destination + partial_obj_size));
-   if (TraceParallelOldGCSummaryPhase) {
-     tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
-                   "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
-                   "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
-                   target_beg, target_end,
-                   source_beg, source_end,
-                   target_next != 0 ? *target_next : (HeapWord*) 0,
-                   source_next != 0 ? *source_next : (HeapWord*) 0);
    if (TraceParallelOldGCSummaryPhase) {
      gclog_or_tty->print_cr("split: clearing source_region field in ["
                             SIZE_FORMAT ", " SIZE_FORMAT ")",
                             beg_idx, end_idx);
    }
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }
// Set split_destination and partial_obj_size to reflect the split region.
split_destination = sr->destination();
partial_obj_size = sr->partial_obj_size();
}
// The split is recorded only if a partial object extends onto the region.
if (partial_obj_size != 0) {
_region_data[split_region].set_partial_obj_size(0);
split_info.record(split_region, partial_obj_size, split_destination);
}
// Setup the continuation addresses.
*target_next = split_destination + partial_obj_size;
HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
if (TraceParallelOldGCSummaryPhase) {
const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
gclog_or_tty->print_cr("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT
" pos=" SIZE_FORMAT,
split_type, source_next, split_region,
partial_obj_size);
gclog_or_tty->print_cr("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
" tn=" PTR_FORMAT,
split_type, split_destination,
addr_to_region_idx(split_destination),
*target_next);
if (partial_obj_size != 0) {
HeapWord* const po_beg = split_info.destination();
HeapWord* const po_end = po_beg + split_info.partial_obj_size();
gclog_or_tty->print_cr("%s split: "
"po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
"po_end=" PTR_FORMAT " " SIZE_FORMAT,
split_type,
po_beg, addr_to_region_idx(po_beg),
po_end, addr_to_region_idx(po_end));
}
}
return source_next;
}
bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord* source_beg, HeapWord* source_end,
HeapWord** source_next,
HeapWord* target_beg, HeapWord* target_end,
HeapWord** target_next)
{
if (TraceParallelOldGCSummaryPhase) {
HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
"tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
source_beg, source_end, source_next_val,
target_beg, target_end, *target_next);
  }

  size_t cur_region = addr_to_region_idx(source_beg);
@@ -438,27 +617,22 @@ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
-   size_t words = _region_data[cur_region].data_size();
-#if 1
-   assert(pointer_delta(target_end, dest_addr) >= words,
-          "source region does not fit into target region");
-#else
-   // XXX - need some work on the corner cases here. If the region does not
-   // fit, then must either make sure any partial_obj from the region fits, or
-   // "undo" the initial part of the partial_obj that is in the previous
-   // region.
-   if (dest_addr + words >= target_end) {
-     // Let the caller know where to continue.
-     *target_next = dest_addr;
-     *source_next = region_to_addr(cur_region);
-     return false;
-   }
-#endif // #if 1
-   // Set the destination_count for cur_region, and if necessary, update

    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
if (words > 0) {
// If cur_region does not fit entirely into the target space, find a point
// at which the source space can be 'split' so that part is copied to the
// target space and the rest is copied elsewhere.
if (dest_addr + words > target_end) {
assert(source_next != NULL, "source_next is NULL when splitting");
*source_next = summarize_split_space(cur_region, split_info, dest_addr,
target_end, target_next);
return false;
}
// Compute the destination_count for cur_region, and if necessary, update
    // source_region for a destination region. The source_region field is
    // updated if cur_region is the first (left-most) region to be copied to a
    // destination region.
@@ -467,16 +641,29 @@ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
    // data that compacts into itself does not count itself as a destination.
    // This maintains the invariant that a zero count means the region is
    // available and can be claimed and then filled.
-   if (words > 0) {
    uint destination_count = 0;
if (split_info.is_split(cur_region)) {
// The current region has been split: the partial object will be copied
// to one destination space and the remaining data will be copied to
// another destination space. Adjust the initial destination_count and,
// if necessary, set the source_region field if the partial object will
// cross a destination region boundary.
destination_count = split_info.destination_count();
if (destination_count == 2) {
size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
_region_data[dest_idx].set_source_region(cur_region);
}
}
      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);
-#if 0
      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary. Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
-     uint destination_count = cur_region == dest_region_2 ? 0 : 1;
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
@@ -487,25 +674,6 @@ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }
-#else
-     // Initially assume that the destination regions will be different and
-     // adjust the value below if necessary. Under this assumption, if
-     // cur_region == dest_region2, then cur_region will be compacted partially
-     // into dest_region_1 and partially into itself.
-     uint destination_count = cur_region == dest_region_2 ? 1 : 2;
-     if (dest_region_1 != dest_region_2) {
-       // Data from cur_region will be copied to the start of dest_region_2.
-       _region_data[dest_region_2].set_source_region(cur_region);
-     } else {
-       // Destination regions are the same; adjust destination_count.
-       destination_count -= 1;
-       if (region_offset(dest_addr) == 0) {
-         // Data from cur_region will be copied to the start of the destination
-         // region.
-         _region_data[dest_region_1].set_source_region(cur_region);
-       }
-     }
-#endif // #if 0
      _region_data[cur_region].set_destination_count(destination_count);
      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
@@ -749,6 +917,13 @@ PSParallelCompact::clear_data_covering_space(SpaceId id)
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);
// Clear the data used to 'split' regions.
SplitInfo& split_info = _space_info[id].split_info();
if (split_info.is_valid()) {
split_info.clear();
}
DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
@@ -807,10 +982,11 @@ void PSParallelCompact::post_compact()
{
  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

- // Clear the marking bitmap and summary data and update top() in each space.
  for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
-   _space_info[id].space()->set_top(_space_info[id].new_top());
    // Update top(). Must be done after clearing the bitmap and summary data.
    _space_info[id].publish_new_top();
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
@@ -1151,6 +1327,13 @@ HeapWord*
PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction)
{
if (ParallelOldGCSplitALot) {
if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
// The value was chosen to provoke splitting a young gen space; use it.
return _space_info[id].dense_prefix();
}
}
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();
@@ -1239,14 +1422,169 @@ PSParallelCompact::compute_dense_prefix(const SpaceId id,
  return sd.region_to_addr(best_cp);
}
#ifndef PRODUCT
void
PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
size_t words)
{
if (TraceParallelOldGCSummaryPhase) {
tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
SIZE_FORMAT, start, start + words, words);
}
ObjectStartArray* const start_array = _space_info[id].start_array();
CollectedHeap::fill_with_objects(start, words);
for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
_mark_bitmap.mark_obj(p, words);
_summary_data.add_obj(p, words);
start_array->allocate_block(p);
}
}
void
PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
{
ParallelCompactData& sd = summary_data();
MutableSpace* space = _space_info[id].space();
// Find the source and destination start addresses.
HeapWord* const src_addr = sd.region_align_down(start);
HeapWord* dst_addr;
if (src_addr < start) {
dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
} else if (src_addr > space->bottom()) {
// The start (the original top() value) is aligned to a region boundary so
// the associated region does not have a destination. Compute the
// destination from the previous region.
RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
dst_addr = cp->destination() + cp->data_size();
} else {
// Filling the entire space.
dst_addr = space->bottom();
}
assert(dst_addr != NULL, "sanity");
// Update the summary data.
bool result = _summary_data.summarize(_space_info[id].split_info(),
src_addr, space->top(), NULL,
dst_addr, space->end(),
_space_info[id].new_top_addr());
assert(result, "should not fail: bad filler object size");
}
void
PSParallelCompact::provoke_split(bool & max_compaction)
{
const size_t region_size = ParallelCompactData::RegionSize;
ParallelCompactData& sd = summary_data();
MutableSpace* const eden_space = _space_info[eden_space_id].space();
MutableSpace* const from_space = _space_info[from_space_id].space();
const size_t eden_live = pointer_delta(eden_space->top(),
_space_info[eden_space_id].new_top());
const size_t from_live = pointer_delta(from_space->top(),
_space_info[from_space_id].new_top());
const size_t min_fill_size = CollectedHeap::min_fill_size();
const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
const size_t from_free = pointer_delta(from_space->end(), from_space->top());
const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
// Choose the space to split; need at least 2 regions live (or fillable).
SpaceId id;
MutableSpace* space;
size_t live_words;
size_t fill_words;
if (eden_live + eden_fillable >= region_size * 2) {
id = eden_space_id;
space = eden_space;
live_words = eden_live;
fill_words = eden_fillable;
} else if (from_live + from_fillable >= region_size * 2) {
id = from_space_id;
space = from_space;
live_words = from_live;
fill_words = from_fillable;
} else {
return; // Give up.
}
assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
if (live_words < region_size * 2) {
// Fill from top() to end() w/live objects of mixed sizes.
HeapWord* const fill_start = space->top();
live_words += fill_words;
space->set_top(fill_start + fill_words);
if (ZapUnusedHeapArea) {
space->set_top_for_allocations();
}
HeapWord* cur_addr = fill_start;
while (fill_words > 0) {
const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
size_t cur_size = MIN2(align_object_size_(r), fill_words);
if (fill_words - cur_size < min_fill_size) {
cur_size = fill_words; // Avoid leaving a fragment too small to fill.
}
CollectedHeap::fill_with_object(cur_addr, cur_size);
mark_bitmap()->mark_obj(cur_addr, cur_size);
sd.add_obj(cur_addr, cur_size);
cur_addr += cur_size;
fill_words -= cur_size;
}
summarize_new_objects(id, fill_start);
}
max_compaction = false;
// Manipulate the old gen so that it has room for about half of the live data
// in the target young gen space (live_words / 2).
id = old_space_id;
space = _space_info[id].space();
const size_t free_at_end = space->free_in_words();
const size_t free_target = align_object_size(live_words / 2);
const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
if (free_at_end >= free_target + min_fill_size) {
// Fill space above top() and set the dense prefix so everything survives.
HeapWord* const fill_start = space->top();
const size_t fill_size = free_at_end - free_target;
space->set_top(space->top() + fill_size);
if (ZapUnusedHeapArea) {
space->set_top_for_allocations();
}
fill_with_live_objects(id, fill_start, fill_size);
summarize_new_objects(id, fill_start);
_space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
} else if (dead + free_at_end > free_target) {
// Find a dense prefix that makes the right amount of space available.
HeapWord* cur = sd.region_align_down(space->top());
HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
size_t dead_to_right = pointer_delta(space->end(), cur_destination);
while (dead_to_right < free_target) {
cur -= region_size;
cur_destination = sd.addr_to_region_ptr(cur)->destination();
dead_to_right = pointer_delta(space->end(), cur_destination);
}
_space_info[id].set_dense_prefix(cur);
}
}
#endif // #ifndef PRODUCT
void PSParallelCompact::summarize_spaces_quick()
{
  for (unsigned int i = 0; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
-   bool result = _summary_data.summarize(space->bottom(), space->end(),
-                                         space->bottom(), space->top(),
-                                         _space_info[i].new_top_addr());
-   assert(result, "should never fail");
    HeapWord** nta = _space_info[i].new_top_addr();
    bool result = _summary_data.summarize(_space_info[i].split_info(),
                                          space->bottom(), space->top(), NULL,
                                          space->bottom(), space->end(), nta);
    assert(result, "space must fit into itself");
    _space_info[i].set_dense_prefix(space->bottom());
  }
}
@@ -1308,8 +1646,7 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
    }
#endif // #ifdef _LP64

-   MemRegion region(obj_beg, obj_len);
-   SharedHeap::fill_region_with_object(region);
    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg, obj_len);
    _summary_data.add_obj(obj_beg, obj_len);
    assert(start_array(id) != NULL, "sanity");
@@ -1317,12 +1654,24 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
  }
}
void
PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
{
RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
cur->set_source_region(0);
}
}
void
PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
{
  assert(id < last_space_id, "id out of range");
- assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
-        "should have been set in summarize_spaces_quick()");
  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
         ParallelOldGCSplitALot && id == old_space_id,
         "should have been reset in summarize_spaces_quick()");

  const MutableSpace* space = _space_info[id].space();
  if (_space_info[id].new_top() != space->bottom()) {
@@ -1338,21 +1687,25 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
    }
#endif // #ifndef PRODUCT

    // Recompute the summary data, taking into account the dense prefix. If
    // every last byte will be reclaimed, then the existing summary data which
    // compacts everything can be left in place.
    if (!maximum_compaction && dense_prefix_end != space->bottom()) {
      // If dead space crosses the dense prefix boundary, it is (at least
      // partially) filled with a dummy object, marked live and added to the
      // summary data. This simplifies the copy/update phase and must be done
-     // before the final locations of objects are determined, to prevent leaving
-     // a fragment of dead space that is too small to fill with an object.
-     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
      // before the final locations of objects are determined, to prevent
      // leaving a fragment of dead space that is too small to fill.
      fill_dense_prefix_end(id);
-     }

      // Compute the destination of each Region, and thus each object.
      _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
-     _summary_data.summarize(dense_prefix_end, space->end(),
-                             dense_prefix_end, space->top(),
      _summary_data.summarize(_space_info[id].split_info(),
                              dense_prefix_end, space->top(), NULL,
                              dense_prefix_end, space->end(),
                              _space_info[id].new_top_addr());
    }
  }

  if (TraceParallelOldGCSummaryPhase) {
    const size_t region_size = ParallelCompactData::RegionSize;
@@ -1371,6 +1724,30 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
  }
}
#ifndef PRODUCT
void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
HeapWord* dst_beg, HeapWord* dst_end,
SpaceId src_space_id,
HeapWord* src_beg, HeapWord* src_end)
{
if (TraceParallelOldGCSummaryPhase) {
tty->print_cr("summarizing %d [%s] into %d [%s]: "
"src=" PTR_FORMAT "-" PTR_FORMAT " "
SIZE_FORMAT "-" SIZE_FORMAT " "
"dst=" PTR_FORMAT "-" PTR_FORMAT " "
SIZE_FORMAT "-" SIZE_FORMAT,
src_space_id, space_names[src_space_id],
dst_space_id, space_names[dst_space_id],
src_beg, src_end,
_summary_data.addr_to_region_idx(src_beg),
_summary_data.addr_to_region_idx(src_end),
dst_beg, dst_end,
_summary_data.addr_to_region_idx(dst_beg),
_summary_data.addr_to_region_idx(dst_end));
}
}
#endif // #ifndef PRODUCT
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                      bool maximum_compaction)
{
@@ -1403,57 +1780,98 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
  // The amount of live data that will end up in old space (assuming it fits).
  size_t old_space_total_live = 0;
- unsigned int id;
- for (id = old_space_id; id < last_space_id; ++id) {
  assert(perm_space_id < old_space_id, "should not count perm data here");
  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                          _space_info[id].space()->bottom());
  }

- const MutableSpace* old_space = _space_info[old_space_id].space();
- if (old_space_total_live > old_space->capacity_in_words()) {
  MutableSpace* const old_space = _space_info[old_space_id].space();
  const size_t old_capacity = old_space->capacity_in_words();
  if (old_space_total_live > old_capacity) {
    // XXX - should also try to expand
    maximum_compaction = true;
- } else if (!UseParallelOldGCDensePrefix) {
-   maximum_compaction = true;
  }
#ifndef PRODUCT
  if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
    if (total_invocations() % ParallelOldGCSplitInterval == 0) {
      provoke_split(maximum_compaction);
    }
  }
#endif // #ifndef PRODUCT

  // Permanent and Old generations.
  summarize_space(perm_space_id, maximum_compaction);
  summarize_space(old_space_id, maximum_compaction);

- // Summarize the remaining spaces (those in the young gen) into old space. If
- // the live data from a space doesn't fit, the existing summarization is left
- // intact, so the data is compacted down within the space itself.
- HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
- HeapWord* const target_space_end = old_space->end();
- for (id = eden_space_id; id < last_space_id; ++id) {
  // Summarize the remaining spaces in the young gen. The initial target space
  // is the old gen. If a space does not fit entirely into the target, then the
  // remainder is compacted into the space itself and that space becomes the new
  // target.
  SpaceId dst_space_id = old_space_id;
  HeapWord* dst_space_end = old_space->end();
  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    const size_t live = pointer_delta(_space_info[id].new_top(),
                                      space->bottom());
-   const size_t available = pointer_delta(target_space_end, *new_top_addr);
    const size_t available = pointer_delta(dst_space_end, *new_top_addr);

    NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
                                  SpaceId(id), space->bottom(), space->top());)
    if (live > 0 && live <= available) {
      // All the live data will fit.
-     if (TraceParallelOldGCSummaryPhase) {
-       tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
-                     id, *new_top_addr);
-     }
-     _summary_data.summarize(*new_top_addr, target_space_end,
      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
                                          NULL,
                                          *new_top_addr, dst_space_end,
                                          new_top_addr);
      assert(done, "space must fit into old gen");

      // XXX - this is necessary because decrement_destination_counts() tests
      // source_region() to determine if a region will be filled. Probably
      // better to pass src_space->new_top() into decrement_destination_counts
      // and test that instead.
      //
      // Clear the source_region field for each region in the space.
-     HeapWord* const new_top = _space_info[id].new_top();
-     HeapWord* const clear_end = _summary_data.region_align_up(new_top);
-     RegionData* beg_region =
-       _summary_data.addr_to_region_ptr(space->bottom());
-     RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
-     while (beg_region < end_region) {
-       beg_region->set_source_region(0);
-       ++beg_region;
-     }
      clear_source_region(space->bottom(), _space_info[id].new_top());

      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
-   }
    } else if (live > 0) {
// Attempt to fit part of the source space into the target space.
HeapWord* next_src_addr = NULL;
bool done = _summary_data.summarize(_space_info[id].split_info(),
space->bottom(), space->top(),
&next_src_addr,
*new_top_addr, dst_space_end,
new_top_addr);
assert(!done, "space should not fit into old gen");
assert(next_src_addr != NULL, "sanity");
// The source space becomes the new target, so the remainder is compacted
// within the space itself.
dst_space_id = SpaceId(id);
dst_space_end = space->end();
new_top_addr = _space_info[id].new_top_addr();
HeapWord* const clear_end = _space_info[id].new_top();
NOT_PRODUCT(summary_phase_msg(dst_space_id,
space->bottom(), dst_space_end,
SpaceId(id), next_src_addr, space->top());)
done = _summary_data.summarize(_space_info[id].split_info(),
next_src_addr, space->top(),
NULL,
space->bottom(), dst_space_end,
new_top_addr);
assert(done, "space must fit when compacted into itself");
assert(*new_top_addr <= space->top(), "usage should not grow");
// XXX - this should go away. See comments above.
//
// Clear the source_region field in regions at the end of the space that
// will not be filled.
HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
clear_source_region(clear_beg, clear_end);
    }
  }
@@ -1807,9 +2225,14 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
- MemRegion old_gen_unused(old_space->top(), old_space->end());
- if (!old_gen_unused.is_empty()) {
-   SharedHeap::fill_region_with_object(old_gen_unused);
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
@@ -1825,9 +2248,8 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
- HeapWord* const start = old_gen_unused.start();
- for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-   start_array->allocate_block(addr);
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
@@ -2048,14 +2470,13 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
  // regions in the dense prefix. Assume that 1 gc thread
  // will work on opening the gaps and the remaining gc threads
  // will work on the dense prefix.
- SpaceId space_id = old_space_id;
- while (space_id != last_space_id) {
  unsigned int space_id;
  for (space_id = old_space_id; space_id < last_space_id; ++ space_id) {
    HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
    const MutableSpace* const space = _space_info[space_id].space();
    if (dense_prefix_end == space->bottom()) {
      // There is no dense prefix for this space.
-     space_id = next_compaction_space_id(space_id);
      continue;
    }
@@ -2105,8 +2526,7 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
      // region_index_end is not processed
      size_t region_index_end = MIN2(region_index_start + regions_per_thread,
                                     region_index_end_dense_prefix);
-     q->enqueue(new UpdateDensePrefixTask(
-                                          space_id,
      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                           region_index_start,
                                           region_index_end));
      region_index_start = region_index_end;
@@ -2115,13 +2535,11 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
    // This gets any part of the dense prefix that did not
    // fit evenly.
    if (region_index_start < region_index_end_dense_prefix) {
-     q->enqueue(new UpdateDensePrefixTask(
-                                          space_id,
      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                           region_index_start,
                                           region_index_end_dense_prefix));
    }
-   space_id = next_compaction_space_id(space_id);
- } // End tasks for dense prefix
  }
}
void PSParallelCompact::enqueue_region_stealing_tasks(
@@ -2567,16 +2985,24 @@ PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
  return m->bit_to_addr(cur_beg);
}

-HeapWord*
-PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
-                                  size_t src_region_idx)
HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
                                            SpaceId src_space_id,
                                            size_t src_region_idx)
{
- ParMarkBitMap* const bitmap = mark_bitmap();
  assert(summary_data().is_region_aligned(dest_addr), "not aligned");

  const SplitInfo& split_info = _space_info[src_space_id].split_info();
  if (split_info.dest_region_addr() == dest_addr) {
    // The partial object ending at the split point contains the first word to
    // be copied to dest_addr.
    return split_info.first_src_addr();
  }

  const ParallelCompactData& sd = summary_data();
  ParMarkBitMap* const bitmap = mark_bitmap();
  const size_t RegionSize = ParallelCompactData::RegionSize;
- assert(sd.is_region_aligned(dest_addr), "not aligned");

  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  HeapWord* const src_region_destination = src_region_ptr->destination();
@@ -2737,7 +3163,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
  HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
- closure.set_source(first_src_addr(dest_addr, src_region_idx));
  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

  // Adjust src_region_idx to prepare for decrementing destination counts (the
  // destination count is not decremented when a region is copied to itself).
@@ -3008,34 +3434,3 @@ void PSParallelCompact::compact_prologue() {
  summary_data().calc_new_pointer(Universe::intArrayKlassObj());
}

-// The initial implementation of this method created a field
-// _next_compaction_space_id in SpaceInfo and initialized
-// that field in SpaceInfo::initialize_space_info(). That
-// required that _next_compaction_space_id be declared a
-// SpaceId in SpaceInfo and that would have required that
-// either SpaceId be declared in a separate class or that
-// it be declared in SpaceInfo. It didn't seem consistent
-// to declare it in SpaceInfo (didn't really fit logically).
-// Alternatively, defining a separate class to define SpaceId
-// seem excessive. This implementation is simple and localizes
-// the knowledge.
-PSParallelCompact::SpaceId
-PSParallelCompact::next_compaction_space_id(SpaceId id) {
-  assert(id < last_space_id, "id out of range");
-  switch (id) {
-    case perm_space_id :
-      return last_space_id;
-    case old_space_id :
-      return eden_space_id;
-    case eden_space_id :
-      return from_space_id;
-    case from_space_id :
-      return to_space_id;
-    case to_space_id :
-      return last_space_id;
-    default:
-      assert(false, "Bad space id");
-      return last_space_id;
-  }
-}
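Aside, not part of the changeset: the heart of the new summarize() loop is "assign destinations sequentially and stop with a continuation point when a region no longer fits". The standalone sketch below models only that "easy split" case, with plain indices instead of HeapWord* and no partial objects:

#include <cstddef>
#include <vector>

// Each source region carries data_size live words; destinations are handed out
// sequentially in a target space of target_words.
struct ToyRegion { size_t data_size; size_t destination; };

// Returns true if everything fit; otherwise *next_src is the first region that
// must be compacted into some other destination space and *new_top is where the
// target space ends up.
bool toy_summarize(std::vector<ToyRegion>& regions, size_t target_words,
                   size_t* new_top, size_t* next_src) {
  size_t dest = 0;
  for (size_t i = 0; i < regions.size(); ++i) {
    regions[i].destination = dest;        // set even if the region has no data
    const size_t words = regions[i].data_size;
    if (dest + words > target_words) {    // region would overflow the target
      *next_src = i;
      *new_top = dest;
      return false;
    }
    dest += words;
  }
  *new_top = dest;
  return true;
}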

View file

@@ -36,6 +36,123 @@ class PreGCValues;
class MoveAndUpdateClosure;
class RefProcTaskExecutor;
// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. Basic
// idea would be to use a fake value for the top of the source space. It is
// doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.
class SplitInfo
{
public:
// Return true if this split info is valid (i.e., if a split has been
// recorded). The very first region cannot have a partial object and thus is
// never split, so 0 is the 'invalid' value.
bool is_valid() const { return _src_region_idx > 0; }
// Return true if this split holds data for the specified source region.
inline bool is_split(size_t source_region) const;
// The index of the split region, the size of the partial object on that
// region and the destination of the partial object.
size_t src_region_idx() const { return _src_region_idx; }
size_t partial_obj_size() const { return _partial_obj_size; }
HeapWord* destination() const { return _destination; }
// The destination count of the partial object referenced by this split
// (either 1 or 2). This must be added to the destination count of the
// remainder of the source region.
unsigned int destination_count() const { return _destination_count; }
// If a word within the partial object will be written to the first word of a
// destination region, this is the address of the destination region;
// otherwise this is NULL.
HeapWord* dest_region_addr() const { return _dest_region_addr; }
// If a word within the partial object will be written to the first word of a
// destination region, this is the address of that word within the partial
// object; otherwise this is NULL.
HeapWord* first_src_addr() const { return _first_src_addr; }
// Record the data necessary to split the region src_region_idx.
void record(size_t src_region_idx, size_t partial_obj_size,
HeapWord* destination);
void clear();
DEBUG_ONLY(void verify_clear();)
private:
size_t _src_region_idx;
size_t _partial_obj_size;
HeapWord* _destination;
unsigned int _destination_count;
HeapWord* _dest_region_addr;
HeapWord* _first_src_addr;
};
inline bool SplitInfo::is_split(size_t region_idx) const
{
return _src_region_idx == region_idx && is_valid();
}
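Aside, not part of the header: the destination_count recorded by SplitInfo::record() is just "how many destination regions does the partial object touch". A standalone toy version of that calculation, using word indices in place of addresses:

#include <cassert>
#include <cstddef>

// Mirrors the destination-count logic in SplitInfo::record(): a partial object
// of partial_obj_size words copied to 'destination' lands in either one or two
// destination regions of region_size words each.
unsigned int toy_destination_count(size_t destination, size_t partial_obj_size,
                                   size_t region_size) {
  assert(partial_obj_size > 0 && "invalid partial_obj_size");
  const size_t last_word  = destination + partial_obj_size - 1;
  const size_t beg_region = destination / region_size;  // region_align_down
  const size_t end_region = last_word / region_size;
  return (beg_region == end_region) ? 1u : 2u;
}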
class SpaceInfo
{
 public:
@@ -58,18 +175,23 @@ class SpaceInfo
  // is no start array.
  ObjectStartArray* start_array() const { return _start_array; }

  SplitInfo& split_info() { return _split_info; }

  void set_space(MutableSpace* s)           { _space = s; }
  void set_new_top(HeapWord* addr)          { _new_top = addr; }
  void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
  void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
  void set_start_array(ObjectStartArray* s) { _start_array = s; }

  void publish_new_top() const { _space->set_top(_new_top); }

 private:
  MutableSpace* _space;
  HeapWord* _new_top;
  HeapWord* _min_dense_prefix;
  HeapWord* _dense_prefix;
  ObjectStartArray* _start_array;
  SplitInfo _split_info;
};

class ParallelCompactData
@ -230,9 +352,14 @@ public:
// must be region-aligned; end need not be. // must be region-aligned; end need not be.
void summarize_dense_prefix(HeapWord* beg, HeapWord* end); void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
bool summarize(HeapWord* target_beg, HeapWord* target_end, HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
HeapWord* destination, HeapWord* target_end,
HeapWord** target_next);
bool summarize(SplitInfo& split_info,
HeapWord* source_beg, HeapWord* source_end, HeapWord* source_beg, HeapWord* source_end,
HeapWord** target_next, HeapWord** source_next = 0); HeapWord** source_next,
HeapWord* target_beg, HeapWord* target_end,
HeapWord** target_next);
void clear(); void clear();
void clear_range(size_t beg_region, size_t end_region); void clear_range(size_t beg_region, size_t end_region);
@ -838,13 +965,27 @@ class PSParallelCompact : AllStatic {
// non-empty. // non-empty.
static void fill_dense_prefix_end(SpaceId id); static void fill_dense_prefix_end(SpaceId id);
// Clear the summary data source_region field for the specified addresses.
static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
#ifndef PRODUCT
// Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
// Fill the region [start, start + words) with live object(s). Only usable
// for the old and permanent generations.
static void fill_with_live_objects(SpaceId id, HeapWord* const start,
size_t words);
// Include the new objects in the summary data.
static void summarize_new_objects(SpaceId id, HeapWord* start);
// Add live objects and/or choose the dense prefix to provoke splitting.
static void provoke_split(bool & maximum_compaction);
#endif
static void summarize_spaces_quick(); static void summarize_spaces_quick();
static void summarize_space(SpaceId id, bool maximum_compaction); static void summarize_space(SpaceId id, bool maximum_compaction);
static void summary_phase(ParCompactionManager* cm, bool maximum_compaction); static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
// The space that is compacted after space_id.
static SpaceId next_compaction_space_id(SpaceId space_id);
// Adjust addresses in roots. Does not adjust addresses in heap. // Adjust addresses in roots. Does not adjust addresses in heap.
static void adjust_roots(); static void adjust_roots();
@ -999,6 +1140,7 @@ class PSParallelCompact : AllStatic {
// Return the address of the word to be copied to dest_addr, which must be // Return the address of the word to be copied to dest_addr, which must be
// aligned to a region boundary. // aligned to a region boundary.
static HeapWord* first_src_addr(HeapWord* const dest_addr, static HeapWord* first_src_addr(HeapWord* const dest_addr,
SpaceId src_space_id,
size_t src_region_idx); size_t src_region_idx);
// Determine the next source region, set closure.source() to the start of the // Determine the next source region, set closure.source() to the start of the
@ -1081,6 +1223,10 @@ class PSParallelCompact : AllStatic {
const SpaceId id, const SpaceId id,
const bool maximum_compaction, const bool maximum_compaction,
HeapWord* const addr); HeapWord* const addr);
static void summary_phase_msg(SpaceId dst_space_id,
HeapWord* dst_beg, HeapWord* dst_end,
SpaceId src_space_id,
HeapWord* src_beg, HeapWord* src_end);
#endif // #ifndef PRODUCT #endif // #ifndef PRODUCT
#ifdef ASSERT #ifdef ASSERT
@ -1324,31 +1470,28 @@ inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
oop(addr)->update_contents(compaction_manager()); oop(addr)->update_contents(compaction_manager());
} }
class FillClosure: public ParMarkBitMapClosure { class FillClosure: public ParMarkBitMapClosure
{
public: public:
FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) : FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
_space_id(space_id), _start_array(PSParallelCompact::start_array(space_id))
_start_array(PSParallelCompact::start_array(space_id)) { {
assert(_space_id == PSParallelCompact::perm_space_id || assert(space_id == PSParallelCompact::perm_space_id ||
_space_id == PSParallelCompact::old_space_id, space_id == PSParallelCompact::old_space_id,
"cannot use FillClosure in the young gen"); "cannot use FillClosure in the young gen");
assert(bitmap() != NULL, "need a bitmap");
assert(_start_array != NULL, "need a start array");
}
void fill_region(HeapWord* addr, size_t size) {
MemRegion region(addr, size);
SharedHeap::fill_region_with_object(region);
_start_array->allocate_block(addr);
} }
virtual IterationStatus do_addr(HeapWord* addr, size_t size) { virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
fill_region(addr, size); CollectedHeap::fill_with_objects(addr, size);
HeapWord* const end = addr + size;
do {
_start_array->allocate_block(addr);
addr += oop(addr)->size();
} while (addr < end);
return ParMarkBitMap::incomplete; return ParMarkBitMap::incomplete;
} }
private: private:
const PSParallelCompact::SpaceId _space_id;
ObjectStartArray* const _start_array; ObjectStartArray* const _start_array;
}; };
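The loop added to do_addr() above walks the freshly created filler objects by their sizes so that each one can be registered in the start array. With made-up object sizes, that walk is just a running offset, as the small standalone sketch below shows (the printf stands in for _start_array->allocate_block()).

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Made-up filler object sizes (in words) covering a 1200-word region.
  const std::vector<size_t> object_sizes = { 1000, 198, 2 };
  size_t addr = 0;                                       // offset of the next object
  for (size_t size : object_sizes) {
    std::printf("allocate_block at offset %zu\n", addr); // _start_array->allocate_block(addr)
    addr += size;                                        // addr += oop(addr)->size()
  }
  std::printf("walked %zu words\n", addr);
  return 0;
}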
View file
@ -499,26 +499,15 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
// We lost, someone else "owns" this object // We lost, someone else "owns" this object
guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
// Unallocate the space used. NOTE! We may have directly allocated // Try to deallocate the space. If it was directly allocated we cannot
// the object. If so, we cannot deallocate it, so we have to test! // deallocate it, so we have to test. If the deallocation fails,
// overwrite with a filler object.
if (new_obj_is_tenured) { if (new_obj_is_tenured) {
if (!_old_lab.unallocate_object(new_obj)) { if (!_old_lab.unallocate_object(new_obj)) {
// The promotion lab failed to unallocate the object. CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
// We need to overwrite the object with a filler that
// contains no interior pointers.
MemRegion mr((HeapWord*)new_obj, new_obj_size);
// Clean this up and move to oopFactory (see bug 4718422)
SharedHeap::fill_region_with_object(mr);
}
} else {
if (!_young_lab.unallocate_object(new_obj)) {
// The promotion lab failed to unallocate the object.
// We need to overwrite the object with a filler that
// contains no interior pointers.
MemRegion mr((HeapWord*)new_obj, new_obj_size);
// Clean this up and move to oopFactory (see bug 4718422)
SharedHeap::fill_region_with_object(mr);
} }
} else if (!_young_lab.unallocate_object(new_obj)) {
CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
} }
// don't update this before the unallocation! // don't update this before the unallocation!
View file
@ -76,8 +76,8 @@ void MutableNUMASpace::ensure_parsability() {
MutableSpace *s = ls->space(); MutableSpace *s = ls->space();
if (s->top() < top()) { // For all spaces preceding the one containing top() if (s->top() < top()) { // For all spaces preceding the one containing top()
if (s->free_in_words() > 0) { if (s->free_in_words() > 0) {
SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
size_t area_touched_words = pointer_delta(s->end(), s->top()); size_t area_touched_words = pointer_delta(s->end(), s->top());
CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT #ifndef ASSERT
if (!ZapUnusedHeapArea) { if (!ZapUnusedHeapArea) {
area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
@ -686,11 +686,11 @@ void MutableNUMASpace::set_top(HeapWord* value) {
// a minimal object; assuming that's not the last chunk in which case we don't care. // a minimal object; assuming that's not the last chunk in which case we don't care.
if (i < lgrp_spaces()->length() - 1) { if (i < lgrp_spaces()->length() - 1) {
size_t remainder = pointer_delta(s->end(), value); size_t remainder = pointer_delta(s->end(), value);
const size_t minimal_object_size = oopDesc::header_size(); const size_t min_fill_size = CollectedHeap::min_fill_size();
if (remainder < minimal_object_size && remainder > 0) { if (remainder < min_fill_size && remainder > 0) {
// Add a filler object of a minimal size, it will cross the chunk boundary. // Add a minimum size filler object; it will cross the chunk boundary.
SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size)); CollectedHeap::fill_with_object(value, min_fill_size);
value += minimal_object_size; value += min_fill_size;
assert(!s->contains(value), "Should be in the next chunk"); assert(!s->contains(value), "Should be in the next chunk");
// Restart the loop from the same chunk, since the value has moved // Restart the loop from the same chunk, since the value has moved
// to the next one. // to the next one.
View file
@ -30,12 +30,21 @@
int CollectedHeap::_fire_out_of_memory_count = 0; int CollectedHeap::_fire_out_of_memory_count = 0;
#endif #endif
size_t CollectedHeap::_filler_array_max_size = 0;
// Memory state functions. // Memory state functions.
CollectedHeap::CollectedHeap() : CollectedHeap::CollectedHeap()
_reserved(), _barrier_set(NULL), _is_gc_active(false), {
_total_collections(0), _total_full_collections(0), const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
_gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) { const size_t elements_per_word = HeapWordSize / sizeof(jint);
_filler_array_max_size = align_object_size(filler_array_hdr_size() +
max_len * elements_per_word);
_barrier_set = NULL;
_is_gc_active = false;
_total_collections = _total_full_collections = 0;
_gc_cause = _gc_lastcause = GCCause::_no_gc;
NOT_PRODUCT(_promotion_failure_alot_count = 0;) NOT_PRODUCT(_promotion_failure_alot_count = 0;)
NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;) NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
@ -128,6 +137,95 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
return obj; return obj;
} }
size_t CollectedHeap::filler_array_hdr_size() {
return size_t(arrayOopDesc::header_size(T_INT));
}
size_t CollectedHeap::filler_array_min_size() {
return align_object_size(filler_array_hdr_size());
}
size_t CollectedHeap::filler_array_max_size() {
return _filler_array_max_size;
}
#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
assert(words >= min_fill_size(), "too small to fill");
assert(words % MinObjAlignment == 0, "unaligned size");
assert(Universe::heap()->is_in_reserved(start), "not in heap");
assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
if (ZapFillerObjects) {
Copy::fill_to_words(start + filler_array_hdr_size(),
words - filler_array_hdr_size(), 0XDEAFBABE);
}
}
#endif // ASSERT
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
assert(words >= filler_array_min_size(), "too small for an array");
assert(words <= filler_array_max_size(), "too big for a single object");
const size_t payload_size = words - filler_array_hdr_size();
const size_t len = payload_size * HeapWordSize / sizeof(jint);
// Set the length first for concurrent GC.
((arrayOop)start)->set_length((int)len);
post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
words);
DEBUG_ONLY(zap_filler_array(start, words);)
}
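As a concrete check of the length arithmetic in fill_with_array(), the standalone program below converts a word count into a jint element count and back, using assumed 64-bit sizes (8-byte heap words, 4-byte jints, a 4-word array header); none of these constants are taken from a real VM.

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t heap_word_bytes = 8;    // assumed 64-bit heap word
  const size_t jint_bytes      = 4;    // sizeof(jint)
  const size_t hdr_words       = 4;    // stands in for filler_array_hdr_size()

  const size_t region_words  = 100;                       // region to fill
  const size_t payload_words = region_words - hdr_words;  // 96 words
  const size_t len = payload_words * heap_word_bytes / jint_bytes;  // 192 ints

  // Round trip: the header plus the words needed for 'len' ints is the region.
  assert(hdr_words + len * jint_bytes / heap_word_bytes == region_words);
  std::printf("int[%zu] fills %zu words\n", len, region_words);
  return 0;
}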
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
assert(words <= filler_array_max_size(), "too big for a single object");
if (words >= filler_array_min_size()) {
fill_with_array(start, words);
} else if (words > 0) {
assert(words == min_fill_size(), "unaligned size");
post_allocation_setup_common(SystemDictionary::object_klass(), start,
words);
}
}
void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
fill_with_object_impl(start, words);
}
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
#ifdef LP64
// A single array can fill ~8G, so multiple objects are needed only on 64-bit platforms.
// First fill with arrays, ensuring that any remaining space is big enough to
// fill. The remainder is filled with a single object.
const size_t min = min_fill_size();
const size_t max = filler_array_max_size();
while (words > max) {
const size_t cur = words - max >= min ? max : max - min;
fill_with_array(start, cur);
start += cur;
words -= cur;
}
#endif
fill_with_object_impl(start, words);
}
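The chunking loop above is easy to exercise in isolation. In the standalone sketch below, plan_fill() and all of the word counts are invented for illustration; it reproduces the same arithmetic and checks the invariant the code relies on, namely that every chunk, including the final one handed to fill_with_object_impl(), is at least the minimum fill size.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Return the chunk sizes the fill loop would lay down for 'words' words.
static std::vector<size_t> plan_fill(size_t words, size_t max, size_t min) {
  std::vector<size_t> chunks;
  while (words > max) {
    const size_t cur = (words - max >= min) ? max : max - min;
    chunks.push_back(cur);
    words -= cur;
  }
  chunks.push_back(words);             // the single object that fills the rest
  return chunks;
}

int main() {
  const size_t min = 2;                // stands in for min_fill_size()
  const size_t max = 1000;             // stands in for filler_array_max_size()
  const size_t samples[] = { 5000, 1001, 999 };
  for (size_t words : samples) {
    std::vector<size_t> chunks = plan_fill(words, max, min);
    size_t total = 0;
    for (size_t c : chunks) { assert(c >= min); total += c; }
    assert(total == words);
    std::printf("%zu words -> %zu filler objects\n", words, chunks.size());
  }
  return 0;
}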
oop CollectedHeap::new_store_barrier(oop new_obj) { oop CollectedHeap::new_store_barrier(oop new_obj) {
// %%% This needs refactoring. (It was imported from the server compiler.) // %%% This needs refactoring. (It was imported from the server compiler.)
guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported"); guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
View file
@ -47,6 +47,9 @@ class CollectedHeap : public CHeapObj {
static int _fire_out_of_memory_count; static int _fire_out_of_memory_count;
#endif #endif
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
protected: protected:
MemRegion _reserved; MemRegion _reserved;
BarrierSet* _barrier_set; BarrierSet* _barrier_set;
@ -119,6 +122,21 @@ class CollectedHeap : public CHeapObj {
// Clears an allocated object. // Clears an allocated object.
inline static void init_obj(HeapWord* obj, size_t size); inline static void init_obj(HeapWord* obj, size_t size);
// Filler object utilities.
static inline size_t filler_array_hdr_size();
static inline size_t filler_array_min_size();
static inline size_t filler_array_max_size();
DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
// Fill with a single array; caller must ensure filler_array_min_size() <=
// words <= filler_array_max_size().
static inline void fill_with_array(HeapWord* start, size_t words);
// Fill with a single object (either an int array or a java.lang.Object).
static inline void fill_with_object_impl(HeapWord* start, size_t words);
// Verification functions // Verification functions
virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
PRODUCT_RETURN; PRODUCT_RETURN;
@ -294,6 +312,27 @@ class CollectedHeap : public CHeapObj {
// The boundary between a "large" and "small" array of primitives, in words. // The boundary between a "large" and "small" array of primitives, in words.
virtual size_t large_typearray_limit() = 0; virtual size_t large_typearray_limit() = 0;
// Utilities for turning raw memory into filler objects.
//
// min_fill_size() is the smallest region that can be filled.
// fill_with_objects() can fill arbitrary-sized regions of the heap using
// multiple objects. fill_with_object() is for regions known to be smaller
// than the largest array of integers; it uses a single object to fill the
// region and has slightly less overhead.
static size_t min_fill_size() {
return size_t(align_object_size(oopDesc::header_size()));
}
static void fill_with_objects(HeapWord* start, size_t words);
static void fill_with_object(HeapWord* start, size_t words);
static void fill_with_object(MemRegion region) {
fill_with_object(region.start(), region.word_size());
}
static void fill_with_object(HeapWord* start, HeapWord* end) {
fill_with_object(start, pointer_delta(end, start));
}
// Some heaps may offer a contiguous region for shared non-blocking // Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and // allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.) // end fields defining the extent of the contiguous allocation region.)
View file
@ -34,7 +34,6 @@ void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass, void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
HeapWord* objPtr, HeapWord* objPtr,
size_t size) { size_t size) {
oop obj = (oop)objPtr; oop obj = (oop)objPtr;
assert(obj != NULL, "NULL object pointer"); assert(obj != NULL, "NULL object pointer");
@ -44,9 +43,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
// May be bootstrapping // May be bootstrapping
obj->set_mark(markOopDesc::prototype()); obj->set_mark(markOopDesc::prototype());
} }
// support low memory notifications (no-op if not enabled)
LowMemoryDetector::detect_low_memory_for_collected_pools();
} }
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
@ -65,6 +61,9 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
// Support for jvmti and dtrace // Support for jvmti and dtrace
inline void post_allocation_notify(KlassHandle klass, oop obj) { inline void post_allocation_notify(KlassHandle klass, oop obj) {
// support low memory notifications (no-op if not enabled)
LowMemoryDetector::detect_low_memory_for_collected_pools();
// support for JVMTI VMObjectAlloc event (no-op if not enabled) // support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj); JvmtiExport::vm_object_alloc_event_collector(obj);
View file
@ -28,6 +28,7 @@ collectedHeap.cpp collectedHeap.hpp
collectedHeap.cpp collectedHeap.inline.hpp collectedHeap.cpp collectedHeap.inline.hpp
collectedHeap.cpp init.hpp collectedHeap.cpp init.hpp
collectedHeap.cpp oop.inline.hpp collectedHeap.cpp oop.inline.hpp
collectedHeap.cpp systemDictionary.hpp
collectedHeap.cpp thread_<os_family>.inline.hpp collectedHeap.cpp thread_<os_family>.inline.hpp
collectedHeap.hpp allocation.hpp collectedHeap.hpp allocation.hpp
View file
@ -26,20 +26,24 @@
#include "incls/_permGen.cpp.incl" #include "incls/_permGen.cpp.incl"
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
MutexLocker ml(Heap_lock);
GCCause::Cause next_cause = GCCause::_permanent_generation_full; GCCause::Cause next_cause = GCCause::_permanent_generation_full;
GCCause::Cause prev_cause = GCCause::_no_gc; GCCause::Cause prev_cause = GCCause::_no_gc;
unsigned int gc_count_before, full_gc_count_before;
HeapWord* obj;
for (;;) { for (;;) {
HeapWord* obj = gen->allocate(size, false); {
if (obj != NULL) { MutexLocker ml(Heap_lock);
if ((obj = gen->allocate(size, false)) != NULL) {
return obj; return obj;
} }
if (gen->capacity() < _capacity_expansion_limit || if (gen->capacity() < _capacity_expansion_limit ||
prev_cause != GCCause::_no_gc) { prev_cause != GCCause::_no_gc) {
obj = gen->expand_and_allocate(size, false); obj = gen->expand_and_allocate(size, false);
} }
if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) { if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
return obj;
}
if (GC_locker::is_active_and_needs_gc()) { if (GC_locker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall // If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and // the requestor until the critical section has cleared and
@ -61,12 +65,12 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
return NULL; return NULL;
} }
} }
// Read the GC count while holding the Heap_lock // Read the GC count while holding the Heap_lock
unsigned int gc_count_before = SharedHeap::heap()->total_collections(); gc_count_before = SharedHeap::heap()->total_collections();
unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections(); full_gc_count_before = SharedHeap::heap()->total_full_collections();
{ }
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
// Give up heap lock above, VMThread::execute below gets it back
VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before, VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
next_cause); next_cause);
VMThread::execute(&op); VMThread::execute(&op);
@ -80,12 +84,8 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
if (obj != NULL) { if (obj != NULL) {
return obj; return obj;
} }
}
prev_cause = next_cause; prev_cause = next_cause;
next_cause = GCCause::_last_ditch_collection; next_cause = GCCause::_last_ditch_collection;
} else {
return obj;
}
} }
} }
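The reworked loop above boils down to: allocate while holding the lock, otherwise record the GC count under the lock, release it, run a collection, and retry. The standalone model below captures only that lock scoping; std::mutex stands in for Heap_lock, and the toy heap, its capacity and the collection counter are invented.

#include <cstddef>
#include <cstdio>
#include <mutex>

static std::mutex heap_lock;            // stands in for Heap_lock
static size_t heap_used = 0;
static const size_t heap_capacity = 100;
static unsigned gc_count = 0;

static bool try_allocate(size_t words) {             // false when "full"
  if (heap_used + words > heap_capacity) return false;
  heap_used += words;
  return true;
}

static void run_full_collection(unsigned count_before) {
  std::lock_guard<std::mutex> ml(heap_lock);         // the collection retakes the lock
  if (gc_count == count_before) {                    // skip if someone else already collected
    heap_used = 0;
    ++gc_count;
  }
}

static bool allocate_with_retry(size_t words, unsigned max_attempts = 3) {
  for (unsigned attempt = 0; attempt < max_attempts; ++attempt) {
    unsigned count_before;
    {
      std::lock_guard<std::mutex> ml(heap_lock);
      if (try_allocate(words)) return true;
      count_before = gc_count;                       // read while holding the lock
    }
    run_full_collection(count_before);               // lock released before the "VM op"
  }
  return false;
}

int main() {
  heap_used = 90;
  std::printf("allocated: %s\n", allocate_with_retry(20) ? "yes" : "no");
  return 0;
}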
View file
@ -248,46 +248,6 @@ void SharedHeap::ref_processing_init() {
perm_gen()->ref_processor_init(); perm_gen()->ref_processor_init();
} }
void SharedHeap::fill_region_with_object(MemRegion mr) {
// Disable the posting of JVMTI VMObjectAlloc events as we
// don't want the filling of tlabs with filler arrays to be
// reported to the profiler.
NoJvmtiVMObjectAllocMark njm;
// Disable low memory detector because there is no real allocation.
LowMemoryDetectorDisabler lmd_dis;
// It turns out that post_allocation_setup_array takes a handle, so the
// call below contains an implicit conversion. Best to free that handle
// as soon as possible.
HandleMark hm;
size_t word_size = mr.word_size();
size_t aligned_array_header_size =
align_object_size(typeArrayOopDesc::header_size(T_INT));
if (word_size >= aligned_array_header_size) {
const size_t array_length =
pointer_delta(mr.end(), mr.start()) -
typeArrayOopDesc::header_size(T_INT);
const size_t array_length_words =
array_length * (HeapWordSize/sizeof(jint));
post_allocation_setup_array(Universe::intArrayKlassObj(),
mr.start(),
mr.word_size(),
(int)array_length_words);
#ifdef ASSERT
HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
#endif
} else {
assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
post_allocation_setup_obj(SystemDictionary::object_klass(),
mr.start(),
mr.word_size());
}
}
// Some utilities. // Some utilities.
void SharedHeap::print_size_transition(outputStream* out, void SharedHeap::print_size_transition(outputStream* out,
size_t bytes_before, size_t bytes_before,
View file
@ -108,14 +108,6 @@ public:
void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; } void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; }
// A helper function that fills a region of the heap with
// with a single object.
static void fill_region_with_object(MemRegion mr);
// Minimum garbage fill object size
static size_t min_fill_size() { return (size_t)align_object_size(oopDesc::header_size()); }
static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; }
// This function returns the "GenRemSet" object that allows us to scan // This function returns the "GenRemSet" object that allows us to scan
// generations; at least the perm gen, possibly more in a fully // generations; at least the perm gen, possibly more in a fully
// generational heap. // generational heap.
View file
@ -409,19 +409,9 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
HeapWord* q, size_t deadlength) { HeapWord* q, size_t deadlength) {
if (allowed_deadspace_words >= deadlength) { if (allowed_deadspace_words >= deadlength) {
allowed_deadspace_words -= deadlength; allowed_deadspace_words -= deadlength;
oop(q)->set_mark(markOopDesc::prototype()->set_marked()); CollectedHeap::fill_with_object(q, deadlength);
const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT); oop(q)->set_mark(oop(q)->mark()->set_marked());
if (deadlength >= min_int_array_size) { assert((int) deadlength == oop(q)->size(), "bad filler object size");
oop(q)->set_klass(Universe::intArrayKlassObj());
typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
* (HeapWordSize/sizeof(jint))));
} else {
assert((int) deadlength == instanceOopDesc::header_size(),
"size for smallest fake dead object doesn't match");
oop(q)->set_klass(SystemDictionary::object_klass());
}
assert((int) deadlength == oop(q)->size(),
"make sure size for fake dead object match");
// Recall that we required "q == compaction_top". // Recall that we required "q == compaction_top".
return true; return true;
} else { } else {
View file
@ -387,7 +387,7 @@ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
"should contain whole object"); "should contain whole object");
buf->undo_allocation(obj, word_sz); buf->undo_allocation(obj, word_sz);
} else { } else {
SharedHeap::fill_region_with_object(MemRegion(obj, word_sz)); CollectedHeap::fill_with_object(obj, word_sz);
} }
} }
View file
@ -100,8 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
void ThreadLocalAllocBuffer::make_parsable(bool retire) { void ThreadLocalAllocBuffer::make_parsable(bool retire) {
if (end() != NULL) { if (end() != NULL) {
invariants(); invariants();
MemRegion mr(top(), hard_end()); CollectedHeap::fill_with_object(top(), hard_end());
SharedHeap::fill_region_with_object(mr);
if (retire || ZeroTLAB) { // "Reset" the TLAB if (retire || ZeroTLAB) { // "Reset" the TLAB
set_start(NULL); set_start(NULL);
View file
@ -49,6 +49,7 @@ klassOop Universe::_constantPoolKlassObj = NULL;
klassOop Universe::_constantPoolCacheKlassObj = NULL; klassOop Universe::_constantPoolCacheKlassObj = NULL;
klassOop Universe::_compiledICHolderKlassObj = NULL; klassOop Universe::_compiledICHolderKlassObj = NULL;
klassOop Universe::_systemObjArrayKlassObj = NULL; klassOop Universe::_systemObjArrayKlassObj = NULL;
klassOop Universe::_fillerArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL; oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL; oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL; oop Universe::_double_mirror = NULL;
@ -126,6 +127,7 @@ void Universe::system_classes_do(void f(klassOop)) {
f(instanceKlassKlassObj()); f(instanceKlassKlassObj());
f(constantPoolKlassObj()); f(constantPoolKlassObj());
f(systemObjArrayKlassObj()); f(systemObjArrayKlassObj());
f(fillerArrayKlassObj());
} }
void Universe::oops_do(OopClosure* f, bool do_all) { void Universe::oops_do(OopClosure* f, bool do_all) {
@ -180,6 +182,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_constantPoolCacheKlassObj); f->do_oop((oop*)&_constantPoolCacheKlassObj);
f->do_oop((oop*)&_compiledICHolderKlassObj); f->do_oop((oop*)&_compiledICHolderKlassObj);
f->do_oop((oop*)&_systemObjArrayKlassObj); f->do_oop((oop*)&_systemObjArrayKlassObj);
f->do_oop((oop*)&_fillerArrayKlassObj);
f->do_oop((oop*)&_the_empty_byte_array); f->do_oop((oop*)&_the_empty_byte_array);
f->do_oop((oop*)&_the_empty_short_array); f->do_oop((oop*)&_the_empty_short_array);
f->do_oop((oop*)&_the_empty_int_array); f->do_oop((oop*)&_the_empty_int_array);
@ -265,6 +268,7 @@ void Universe::genesis(TRAPS) {
_compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK); _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
_systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK); _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
_fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
_the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK); _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
_the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK); _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
@ -274,7 +278,6 @@ void Universe::genesis(TRAPS) {
_the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK); _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
_vm_exception = oopFactory::new_symbol("vm exception holder", CHECK); _vm_exception = oopFactory::new_symbol("vm exception holder", CHECK);
} else { } else {
FileMapInfo *mapinfo = FileMapInfo::current_info(); FileMapInfo *mapinfo = FileMapInfo::current_info();
char* buffer = mapinfo->region_base(CompactingPermGenGen::md); char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
void** vtbl_list = (void**)buffer; void** vtbl_list = (void**)buffer;

View file
class Universe: AllStatic { class Universe: AllStatic {
// Ugh. Universe is much too friendly.
friend class MarkSweep; friend class MarkSweep;
friend class oopDesc; friend class oopDesc;
friend class ClassLoader; friend class ClassLoader;
@ -132,6 +133,7 @@ class Universe: AllStatic {
static klassOop _constantPoolCacheKlassObj; static klassOop _constantPoolCacheKlassObj;
static klassOop _compiledICHolderKlassObj; static klassOop _compiledICHolderKlassObj;
static klassOop _systemObjArrayKlassObj; static klassOop _systemObjArrayKlassObj;
static klassOop _fillerArrayKlassObj;
// Known objects in the VM // Known objects in the VM
@ -264,6 +266,7 @@ class Universe: AllStatic {
static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; } static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
static klassOop compiledICHolderKlassObj() { return _compiledICHolderKlassObj; } static klassOop compiledICHolderKlassObj() { return _compiledICHolderKlassObj; }
static klassOop systemObjArrayKlassObj() { return _systemObjArrayKlassObj; } static klassOop systemObjArrayKlassObj() { return _systemObjArrayKlassObj; }
static klassOop fillerArrayKlassObj() { return _fillerArrayKlassObj; }
// Known objects in the VM // Known objects in the VM
static oop int_mirror() { return check_mirror(_int_mirror); static oop int_mirror() { return check_mirror(_int_mirror);
View file
@ -96,19 +96,20 @@ class arrayOopDesc : public oopDesc {
: typesize_in_bytes/HeapWordSize); : typesize_in_bytes/HeapWordSize);
} }
// This method returns the maximum length that can be passed into // Return the maximum length of an array of BasicType. The length can be passed
// typeArrayOop::object_size(scale, length, header_size) without causing an // to typeArrayOop::object_size(scale, length, header_size) without causing an
// overflow. We subtract an extra 2*wordSize to guard against double word // overflow.
// alignments. It gets the scale from the type2aelembytes array.
static int32_t max_array_length(BasicType type) { static int32_t max_array_length(BasicType type) {
assert(type >= 0 && type < T_CONFLICT, "wrong type"); assert(type >= 0 && type < T_CONFLICT, "wrong type");
assert(type2aelembytes(type) != 0, "wrong type"); assert(type2aelembytes(type) != 0, "wrong type");
// We use max_jint, since object_size is internally represented by an 'int' const int bytes_per_element = type2aelembytes(type);
// This gives us an upper bound of max_jint words for the size of the oop. if (bytes_per_element < HeapWordSize) {
int32_t max_words = (max_jint - header_size(type) - 2); return max_jint;
int elembytes = type2aelembytes(type);
jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
return (len > max_jint) ? max_jint : (int32_t)len;
} }
const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
const int32_t max_element_words = max_words - header_size(type);
const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
return max_element_words / words_per_element;
}
}; };
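The new bound is easier to see with concrete numbers. The standalone function below restates the same arithmetic with assumed 64-bit parameters (8-byte heap words, a 2-word array header, 1-word object alignment); it is a model of the calculation, not HotSpot code.

#include <cstdint>
#include <cstdio>

// Model of max_array_length(): bytes_per_element, header_words and
// min_obj_alignment_words are passed in rather than taken from the VM.
static int32_t max_array_length_model(int32_t bytes_per_element,
                                      int32_t header_words,
                                      int32_t min_obj_alignment_words) {
  const int32_t heap_word_size = 8;                  // assumed 64-bit heap words
  if (bytes_per_element < heap_word_size) {
    return INT32_MAX;                                // small elements: max_jint
  }
  // align_size_down(max_jint, MinObjAlignment)
  const int32_t max_words = INT32_MAX - INT32_MAX % min_obj_alignment_words;
  const int32_t max_element_words = max_words - header_words;
  const int32_t words_per_element = bytes_per_element / heap_word_size;
  return max_element_words / words_per_element;
}

int main() {
  // 4-byte elements (e.g. T_INT): capped only by max_jint.
  std::printf("4-byte elements: %d\n", (int)max_array_length_model(4, 2, 1));
  // 8-byte elements (e.g. T_LONG): capped by the object size in words.
  std::printf("8-byte elements: %d\n", (int)max_array_length_model(8, 2, 1));
  return 0;
}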
View file
@ -36,13 +36,14 @@ bool typeArrayKlass::compute_is_subtype_of(klassOop k) {
return element_type() == tak->element_type(); return element_type() == tak->element_type();
} }
klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) { klassOop typeArrayKlass::create_klass(BasicType type, int scale,
const char* name_str, TRAPS) {
typeArrayKlass o; typeArrayKlass o;
symbolHandle sym(symbolOop(NULL)); symbolHandle sym(symbolOop(NULL));
// bootstrapping: don't create sym if symbolKlass not created yet // bootstrapping: don't create sym if symbolKlass not created yet
if (Universe::symbolKlassObj() != NULL) { if (Universe::symbolKlassObj() != NULL && name_str != NULL) {
sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL); sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL);
} }
KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj()); KlassHandle klassklass (THREAD, Universe::typeArrayKlassKlassObj());
View file
@ -39,7 +39,11 @@ class typeArrayKlass : public arrayKlass {
// klass allocation // klass allocation
DEFINE_ALLOCATE_PERMANENT(typeArrayKlass); DEFINE_ALLOCATE_PERMANENT(typeArrayKlass);
static klassOop create_klass(BasicType type, int scale, TRAPS); static klassOop create_klass(BasicType type, int scale, const char* name_str,
TRAPS);
static inline klassOop create_klass(BasicType type, int scale, TRAPS) {
return create_klass(type, scale, external_name(type), CHECK_NULL);
}
int oop_size(oop obj) const; int oop_size(oop obj) const;
int klass_oop_size() const { return object_size(); } int klass_oop_size() const { return object_size(); }
View file
@ -1517,6 +1517,16 @@ bool Arguments::check_vm_args_consistency() {
MarkSweepAlwaysCompactCount = 1; // Move objects every gc. MarkSweepAlwaysCompactCount = 1; // Move objects every gc.
} }
if (UseParallelOldGC && ParallelOldGCSplitALot) {
// Settings to encourage splitting.
if (!FLAG_IS_CMDLINE(NewRatio)) {
FLAG_SET_CMDLINE(intx, NewRatio, 2);
}
if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
}
}
status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
status = status && verify_percentage(GCTimeLimit, "GCTimeLimit"); status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
if (GCTimeLimit == 100) { if (GCTimeLimit == 100) {
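With the flags added to the CommandLineFlags table below, the split-stress mode could be exercised on a debug build (ParallelOldGCSplitALot and ParallelOldGCSplitInterval are develop flags) with an invocation along these lines, where MyApp is only a placeholder:

  java -XX:+UseParallelOldGC -XX:+ParallelOldGCSplitALot -XX:ParallelOldGCSplitInterval=3 MyApp

Unless NewRatio or ScavengeBeforeFullGC are also given explicitly on the command line, the defaults chosen above (NewRatio of 2, ScavengeBeforeFullGC disabled) then take effect.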
View file
@ -625,6 +625,9 @@ class CommandLineFlags {
develop(bool, CheckZapUnusedHeapArea, false, \ develop(bool, CheckZapUnusedHeapArea, false, \
"Check zapping of unused heap space") \ "Check zapping of unused heap space") \
\ \
develop(bool, ZapFillerObjects, trueInDebug, \
"Zap filler objects with 0xDEAFBABE") \
\
develop(bool, PrintVMMessages, true, \ develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \ "Print vm messages on console") \
\ \
@ -1200,11 +1203,12 @@ class CommandLineFlags {
product(uintx, ParallelCMSThreads, 0, \ product(uintx, ParallelCMSThreads, 0, \
"Max number of threads CMS will use for concurrent work") \ "Max number of threads CMS will use for concurrent work") \
\ \
develop(bool, ParallelOldMTUnsafeMarkBitMap, false, \ develop(bool, ParallelOldGCSplitALot, false, \
"Use the Parallel Old MT unsafe in marking the bitmap") \ "Provoke splitting (copying data from a young gen space to" \
"multiple destination spaces)") \
\ \
develop(bool, ParallelOldMTUnsafeUpdateLiveData, false, \ develop(uintx, ParallelOldGCSplitInterval, 3, \
"Use the Parallel Old MT unsafe in update of live size") \ "How often to provoke splitting a young gen space") \
\ \
develop(bool, TraceRegionTasksQueuing, false, \ develop(bool, TraceRegionTasksQueuing, false, \
"Trace the queuing of the region tasks") \ "Trace the queuing of the region tasks") \