8139867: Change how startsHumongous and continuesHumongous regions work in G1

Reviewed-by: tschatzl, tbenson
David Lindholm 2015-11-09 09:19:39 +01:00
parent fbac3a147e
commit 6bda88594d
26 changed files with 192 additions and 463 deletions
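This patch drops the old scheme in which a "starts humongous" region's end() was stretched over the whole object (with orig_end() keeping the real boundary). Every region now keeps its fixed, GrainBytes-sized end; the start region records the object's extent through top(); and code that needs the whole object walks its regions with the new next_region_in_humongous() helper. A minimal sketch of that walk, using only APIs visible in the diffs below — the summing function itself is a hypothetical example, not part of the patch:

    // Sketch of the per-region walk this patch introduces. The do/while shape
    // mirrors count_object() and the eager-reclaim closure further down.
    size_t sum_humongous_used(G1CollectedHeap* g1h, HeapRegion* start) {
      assert(start->is_starts_humongous(), "walk begins at the object's first region");
      size_t bytes = 0;
      HeapRegion* hr = start;
      do {
        bytes += hr->used();                    // each region now accounts for its own slice
        hr = g1h->next_region_in_humongous(hr); // NULL after the object's last region
      } while (hr != NULL);
      return bytes;
    }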


@@ -802,12 +802,8 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
     // This closure can be called concurrently to the mutator, so we must make sure
     // that the result of the getNextMarkedWordAddress() call is compared to the
     // value passed to it as limit to detect any found bits.
-    // We can use the region's orig_end() for the limit and the comparison value
-    // as it always contains the "real" end of the region that never changes and
-    // has no side effects.
-    // Due to the latter, there can also be no problem with the compiler generating
-    // reloads of the orig_end() call.
-    HeapWord* end = r->orig_end();
+    // end never changes in G1.
+    HeapWord* end = r->end();
     return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
   }
 };
@@ -821,9 +817,7 @@ bool ConcurrentMark::nextMarkBitmapIsClear() {
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      r->note_start_of_marking();
-    }
+    r->note_start_of_marking();
     return false;
   }
 };
@@ -1282,22 +1276,10 @@ protected:
   // Takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
-  // bitmap to 1. If the region is "starts humongous" it will also set
-  // to 1 the bits on the region bitmap that correspond to its
-  // associated "continues humongous" regions.
+  // bitmap to 1.
   void set_bit_for_region(HeapRegion* hr) {
-    assert(!hr->is_continues_humongous(), "should have filtered those out");
     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
-    if (!hr->is_starts_humongous()) {
-      // Normal (non-humongous) case: just set the bit.
-      _region_bm->par_at_put(index, true);
-    } else {
-      // Starts humongous case: calculate how many regions are part of
-      // this humongous region and then set the bit range.
-      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
-      _region_bm->par_at_put_range(index, end_index, true);
-    }
+    _region_bm->par_at_put(index, true);
   }
 
 public:
@@ -1321,18 +1303,6 @@ public:
     _bm(bm), _region_marked_bytes(0) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* ntams = hr->next_top_at_mark_start();
     HeapWord* start = hr->bottom();
@@ -1370,6 +1340,11 @@ public:
         // Add the size of this object to the number of marked bytes.
         marked_bytes += (size_t)obj_sz * HeapWordSize;
 
+        // This will happen if we are handling a humongous object that spans
+        // several heap regions.
+        if (obj_end > hr->end()) {
+          break;
+        }
         // Find the next marked object after this one.
         start = _bm->getNextMarkedWordAddress(obj_end, ntams);
       }
@@ -1442,17 +1417,6 @@ public:
   int failures() const { return _failures; }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     int failures = 0;
 
     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
@@ -1465,11 +1429,26 @@ public:
     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
     size_t act_marked_bytes = hr->next_marked_bytes();
 
-    // We're not OK if expected marked bytes > actual marked bytes. It means
-    // we have missed accounting some objects during the actual marking.
-    if (exp_marked_bytes > act_marked_bytes) {
-      failures += 1;
+    if (hr->is_starts_humongous()) {
+      // For start_humongous regions, the size of the whole object will be
+      // in exp_marked_bytes.
+      HeapRegion* region = hr;
+      int num_regions;
+      for (num_regions = 0; region != NULL; num_regions++) {
+        region = _g1h->next_region_in_humongous(region);
+      }
+      if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
+        failures += 1;
+      } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
+        failures += 1;
+      }
+    } else {
+      // We're not OK if expected marked bytes > actual marked bytes. It means
+      // we have missed accounting some objects during the actual marking.
+      if (exp_marked_bytes > act_marked_bytes) {
+        failures += 1;
+      }
     }
 
     // Verify the bit, for this region, in the actual and expected
     // (which was just calculated) region bit maps.
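For a humongous object, exp_marked_bytes holds the size of the whole object, so the hunk above can only bound-check it against the number of regions the object spans: it must exceed (num_regions - 1) * GrainBytes and fit within num_regions * GrainBytes. A worked instance of that arithmetic, with an assumed 1 MB GrainBytes (the real value depends on platform and flags):

    // Hypothetical numbers only. A 2.5 MB humongous object spans
    // num_regions = 3 regions, so any exp_marked_bytes in (2 MB, 3 MB] passes:
    //   (3 - 1) * 1 MB >= 2.5 MB  -> false, no failure
    //    3      * 1 MB <  2.5 MB  -> false, no failure
    // An exp_marked_bytes of 1.8 MB would trip the first test (2 MB >= 1.8 MB).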
@@ -1569,18 +1548,6 @@ class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed (see
-      // set_bit_for_heap_region()). Note that we cannot rely on their
-      // associated "starts humongous" region to have their bit set to
-      // 1 since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* ntams = hr->next_top_at_mark_start();
     HeapWord* top   = hr->top();
@@ -1677,7 +1644,7 @@ public:
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->is_continues_humongous() || hr->is_archive()) {
+    if (hr->is_archive()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@@ -1689,7 +1656,6 @@ public:
       _freed_bytes += hr->used();
       hr->set_containing_set(NULL);
       if (hr->is_humongous()) {
-        assert(hr->is_starts_humongous(), "we should only see starts humongous");
         _humongous_regions_removed.increment(1u, hr->capacity());
         _g1->free_humongous_region(hr, _local_cleanup_list, true);
       } else {
@@ -2338,7 +2304,7 @@ private:
   // circumspect about treating the argument as an object.
   void do_entry(void* entry) const {
     _task->increment_refs_reached();
-    HeapRegion* hr = _g1h->heap_region_containing_raw(entry);
+    HeapRegion* hr = _g1h->heap_region_containing(entry);
     if (entry < hr->next_top_at_mark_start()) {
       // Until we get here, we don't know whether entry refers to a valid
       // object; it could instead have been a stale reference.
@@ -2488,32 +2454,9 @@ ConcurrentMark::claim_region(uint worker_id) {
   while (finger < _heap_end) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
-    // Note on how this code handles humongous regions. In the
-    // normal case the finger will reach the start of a "starts
-    // humongous" (SH) region. Its end will either be the end of the
-    // last "continues humongous" (CH) region in the sequence, or the
-    // standard end of the SH region (if the SH is the only region in
-    // the sequence). That way claim_region() will skip over the CH
-    // regions. However, there is a subtle race between a CM thread
-    // executing this method and a mutator thread doing a humongous
-    // object allocation. The two are not mutually exclusive as the CM
-    // thread does not need to hold the Heap_lock when it gets
-    // here. So there is a chance that claim_region() will come across
-    // a free region that's in the progress of becoming a SH or a CH
-    // region. In the former case, it will either
-    //   a) Miss the update to the region's end, in which case it will
-    //      visit every subsequent CH region, will find their bitmaps
-    //      empty, and do nothing, or
-    //   b) Will observe the update of the region's end (in which case
-    //      it will skip the subsequent CH regions).
-    // If it comes across a region that suddenly becomes CH, the
-    // scenario will be similar to b). So, the race between
-    // claim_region() and a humongous object allocation might force us
-    // to do a bit of unnecessary work (due to some unnecessary bitmap
-    // iterations) but it should not introduce and correctness issues.
-    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
+    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
 
-    // Above heap_region_containing_raw may return NULL as we always scan claim
+    // Above heap_region_containing may return NULL as we always scan claim
     // until the end of the heap. In this case, just jump to the next region.
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
@@ -2589,16 +2532,9 @@ void ConcurrentMark::verify_no_cset_oops() {
   // Verify the global finger
   HeapWord* global_finger = finger();
   if (global_finger != NULL && global_finger < _heap_end) {
-    // The global finger always points to a heap region boundary. We
-    // use heap_region_containing_raw() to get the containing region
-    // given that the global finger could be pointing to a free region
-    // which subsequently becomes continues humongous. If that
-    // happens, heap_region_containing() will return the bottom of the
-    // corresponding starts humongous region and the check below will
-    // not hold any more.
     // Since we always iterate over all regions, we might get a NULL HeapRegion
     // here.
-    HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
+    HeapRegion* global_hr = _g1h->heap_region_containing(global_finger);
     guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
               "global finger: " PTR_FORMAT " region: " HR_FORMAT,
               p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
@@ -2611,7 +2547,7 @@ void ConcurrentMark::verify_no_cset_oops() {
     HeapWord* task_finger = task->finger();
     if (task_finger != NULL && task_finger < _heap_end) {
       // See above note on the global finger verification.
-      HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
+      HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                 !task_hr->in_collection_set(),
                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
@@ -2639,17 +2575,6 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      // We will ignore these here and process them when their
-      // associated "starts humongous" region is processed.
-      // Note that we cannot rely on their associated
-      // "starts humongous" region to have their bit set to 1
-      // since, due to the region chunking in the parallel region
-      // iteration, a "continues humongous" region might be visited
-      // before its associated "starts humongous".
-      return false;
-    }
-
     HeapWord* start = hr->bottom();
     HeapWord* limit = hr->next_top_at_mark_start();
     HeapWord* end = hr->end();
@@ -2957,8 +2882,6 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
          "claim_region() should have filtered out NULL regions");
-  assert(!hr->is_continues_humongous(),
-         "claim_region() should have filtered out continues humongous regions");
   _curr_region = hr;
   _finger = hr->bottom();
   update_region_limit();


@@ -772,16 +772,13 @@ public:
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);
 
-  // Counts the given memory region in the task/worker counting
-  // data structures for the given worker id.
-  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
-
   // Counts the given object in the given task/worker counting
   // data structures.
   inline void count_object(oop obj,
                            HeapRegion* hr,
                            size_t* marked_bytes_array,
-                           BitMap* task_card_bm);
+                           BitMap* task_card_bm,
+                           size_t word_size);
 
   // Attempts to mark the given object and, if successful, counts
   // the object in the given task/worker counting structures.


@@ -89,9 +89,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
   size_t region_size_bytes = mr.byte_size();
   uint index = hr->hrm_index();
 
-  assert(!hr->is_continues_humongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
-  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
   assert(marked_bytes_array != NULL, "pre-condition");
   assert(task_card_bm != NULL, "pre-condition");
@@ -116,23 +114,23 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
   set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
 }
 
-// Counts the given memory region in the task/worker counting
-// data structures for the given worker id.
-inline void ConcurrentMark::count_region(MemRegion mr,
-                                         HeapRegion* hr,
-                                         uint worker_id) {
-  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
-  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
-  count_region(mr, hr, marked_bytes_array, task_card_bm);
-}
-
 // Counts the given object in the given task/worker counting data structures.
 inline void ConcurrentMark::count_object(oop obj,
                                          HeapRegion* hr,
                                          size_t* marked_bytes_array,
-                                         BitMap* task_card_bm) {
-  MemRegion mr((HeapWord*)obj, obj->size());
-  count_region(mr, hr, marked_bytes_array, task_card_bm);
+                                         BitMap* task_card_bm,
+                                         size_t word_size) {
+  assert(!hr->is_continues_humongous(), "Cannot enter count_object with continues humongous");
+  if (!hr->is_starts_humongous()) {
+    MemRegion mr((HeapWord*)obj, word_size);
+    count_region(mr, hr, marked_bytes_array, task_card_bm);
+  } else {
+    do {
+      MemRegion mr(hr->bottom(), hr->top());
+      count_region(mr, hr, marked_bytes_array, task_card_bm);
+      hr = _g1h->next_region_in_humongous(hr);
+    } while (hr != NULL);
+  }
 }
 
 // Attempts to mark the given object and, if successful, counts
@@ -141,10 +139,9 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                                HeapRegion* hr,
                                                size_t* marked_bytes_array,
                                                BitMap* task_card_bm) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
+  if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
     // Update the task specific count data for the object.
-    count_object(obj, hr, marked_bytes_array, task_card_bm);
+    count_object(obj, hr, marked_bytes_array, task_card_bm, obj->size());
     return true;
   }
   return false;
@@ -157,10 +154,10 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                                size_t word_size,
                                                HeapRegion* hr,
                                                uint worker_id) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
-    MemRegion mr(addr, word_size);
-    count_region(mr, hr, worker_id);
+  if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
+    size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
+    BitMap* task_card_bm = count_card_bitmap_for(worker_id);
+    count_object(obj, hr, marked_bytes_array, task_card_bm, word_size);
     return true;
   }
   return false;
@@ -351,7 +348,7 @@ inline void CMTask::deal_with_reference(oop obj) {
     // Only get the containing region if the object is not marked on the
     // bitmap (otherwise, it's a waste of time since we won't do
    // anything with it).
-    HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+    HeapRegion* hr = _g1h->heap_region_containing(obj);
     if (!hr->obj_allocated_since_next_marking(obj)) {
       make_reference_grey(obj, hr);
     }
@@ -371,7 +368,7 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
   assert(obj != NULL, "pre-condition");
   HeapWord* addr = (HeapWord*) obj;
   if (hr == NULL) {
-    hr = _g1h->heap_region_containing_raw(addr);
+    hr = _g1h->heap_region_containing(addr);
   } else {
     assert(hr->is_in(addr), "pre-condition");
   }
@@ -380,16 +377,6 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
   // header it's impossible to get back a HC region.
   assert(!hr->is_continues_humongous(), "sanity");
 
-  // We cannot assert that word_size == obj->size() given that obj
-  // might not be in a consistent state (another thread might be in
-  // the process of copying it). So the best thing we can do is to
-  // assert that word_size is under an upper bound which is its
-  // containing region's capacity.
-  assert(word_size * HeapWordSize <= hr->capacity(),
-         "size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
-         word_size * HeapWordSize, hr->capacity(),
-         HR_FORMAT_PARAMS(hr));
-
   if (addr < hr->next_top_at_mark_start()) {
     if (!_nextMarkBitMap->isMarked(addr)) {
       par_mark_and_count(obj, word_size, hr, worker_id);


@@ -499,18 +499,14 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   return _next_offset_threshold;
 }
 
-void
-G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
-  assert(new_top <= _end, "_end should have already been updated");
-
+void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) {
   // The first BOT entry should have offset 0.
   reset_bot();
-  alloc_block(_bottom, new_top);
+  alloc_block(_bottom, obj_top);
 }
 
 #ifndef PRODUCT
-void
-G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
+void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
   G1BlockOffsetArray::print_on(out);
   out->print_cr("  next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
   out->print_cr("  next offset index: " SIZE_FORMAT, _next_offset_index);


@@ -361,9 +361,10 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
   // implementation, that's true because NULL is represented as 0, and thus
   // never exceeds the "_next_offset_threshold".
   void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
-    if (blk_end > _next_offset_threshold)
+    if (blk_end > _next_offset_threshold) {
       alloc_block_work1(blk_start, blk_end);
+    }
   }
   void alloc_block(HeapWord* blk, size_t size) {
     alloc_block(blk, blk+size);
   }
@@ -371,7 +372,7 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
   HeapWord* block_start_unsafe(const void* addr);
   HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  void set_for_starts_humongous(HeapWord* new_top);
+  void set_for_starts_humongous(HeapWord* obj_top);
 
   virtual void print_on(outputStream* out) PRODUCT_RETURN;
 };


@@ -123,7 +123,6 @@ G1BlockOffsetArray::block_at_or_preceding(const void* addr,
     // to go back by.
     size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
     q -= (N_words * n_cards_back);
-    assert(q >= gsp()->bottom(), "Went below bottom!");
     index -= n_cards_back;
     offset = _array->offset_array(index);
   }


@@ -36,7 +36,7 @@ void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   T oop_or_narrowoop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(oop_or_narrowoop)) {
     oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
-    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+    HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
   }


@@ -320,12 +320,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
-  // This will be the new end of the first region in the series that
-  // should also match the end of the last region in the series.
-  HeapWord* new_end = new_obj + word_size_sum;
-  // This will be the new top of the first region that will reflect
-  // this allocation.
-  HeapWord* new_top = new_obj + word_size;
+  // This will be the new top of the new object.
+  HeapWord* obj_top = new_obj + word_size;
 
   // First, we need to zero the header of the space that we will be
   // allocating. When we update top further down, some refinement
@@ -346,7 +342,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
-  first_hr->set_starts_humongous(new_top, new_end);
+  first_hr->set_starts_humongous(obj_top);
   first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
@@ -356,9 +352,6 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
     hr->set_continues_humongous(first_hr);
     hr->set_allocation_context(context);
   }
-  // If we have "continues humongous" regions (hr != NULL), then the
-  // end of the last one should match new_end.
-  assert(hr == NULL || hr->end() == new_end, "sanity");
 
   // Up to this point no concurrent thread would have been able to
   // do any scanning on any region in this series. All the top
@@ -371,58 +364,39 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   // Now that the BOT and the object header have been initialized,
   // we can update top of the "starts humongous" region.
-  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
-         "new_top should be in this region");
-  first_hr->set_top(new_top);
+  first_hr->set_top(MIN2(first_hr->end(), obj_top));
   if (_hr_printer.is_active()) {
-    HeapWord* bottom = first_hr->bottom();
-    HeapWord* end = first_hr->orig_end();
-    if ((first + 1) == last) {
-      // the series has a single humongous region
-      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
-    } else {
-      // the series has more than one humongous regions
-      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
-    }
+    _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
   }
 
   // Now, we will update the top fields of the "continues humongous"
-  // regions. The reason we need to do this is that, otherwise,
-  // these regions would look empty and this will confuse parts of
-  // G1. For example, the code that looks for a consecutive number
-  // of empty regions will consider them empty and try to
-  // re-allocate them. We can extend is_empty() to also include
-  // !is_continues_humongous(), but it is easier to just update the top
-  // fields here. The way we set top for all regions (i.e., top ==
-  // end for all regions but the last one, top == new_top for the
-  // last one) is actually used when we will free up the humongous
-  // region in free_humongous_region().
+  // regions.
   hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
-      assert(hr->bottom() < new_top && new_top <= hr->end(),
+      assert(hr->bottom() < obj_top && obj_top <= hr->end(),
              "new_top should fall on this region");
-      hr->set_top(new_top);
-      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
+      hr->set_top(obj_top);
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top);
     } else {
       // not last one
-      assert(new_top > hr->end(), "new_top should be above this region");
+      assert(obj_top > hr->end(), "obj_top should be above this region");
       hr->set_top(hr->end());
       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
     }
   }
-  // If we have continues humongous regions (hr != NULL), then the
-  // end of the last one should match new_end and its top should
-  // match new_top.
-  assert(hr == NULL ||
-         (hr->end() == new_end && hr->top() == new_top), "sanity");
+  // If we have continues humongous regions (hr != NULL), its top should
+  // match obj_top.
+  assert(hr == NULL || (hr->top() == obj_top), "sanity");
 
   check_bitmaps("Humongous Region Allocation", first_hr);
 
-  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  increase_used(first_hr->used());
-  _humongous_set.add(first_hr);
+  increase_used(word_size * HeapWordSize);
+
+  for (uint i = first; i < last; ++i) {
+    _humongous_set.add(region_at(i));
+  }
 
   return new_obj;
 }
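After this hunk the object's extent is encoded entirely in the regions' top() values rather than in a stretched end(): the first region gets top = MIN2(end, obj_top), every full middle region gets top == end, and only the last region's top is obj_top. Schematically, for a three-region object (illustrative layout only):

    // region:   first           middle          last
    // top():    end()           end()           obj_top
    // end():    bottom+Grain    bottom+Grain    bottom+Grain   (end() is now fixed)

Summing used() (top minus bottom) over the series therefore yields exactly word_size * HeapWordSize, which is what increase_used() records above.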
@@ -1139,15 +1113,15 @@ public:
   bool doHeapRegion(HeapRegion* r) {
     HeapRegionRemSet* hrrs = r->rem_set();
+    _g1h->reset_gc_time_stamps(r);
     if (r->is_continues_humongous()) {
       // We'll assert that the strong code root list and RSet is empty
       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
       assert(hrrs->occupied() == 0, "RSet should be empty");
-      return false;
-    }
-
-    _g1h->reset_gc_time_stamps(r);
-    hrrs->clear();
+    } else {
+      hrrs->clear();
+    }
     // You might think here that we could clear just the cards
     // corresponding to the used region. But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
@@ -1205,12 +1179,7 @@ public:
     if (hr->is_free()) {
       // We only generate output for non-empty regions.
     } else if (hr->is_starts_humongous()) {
-      if (hr->region_num() == 1) {
-        // single humongous region
-        _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
-      } else {
-        _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
-      }
+      _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
     } else if (hr->is_archive()) {
@@ -2217,17 +2186,7 @@ size_t G1CollectedHeap::capacity() const {
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  assert(!hr->is_continues_humongous(), "pre-condition");
   hr->reset_gc_time_stamp();
-  if (hr->is_starts_humongous()) {
-    uint first_index = hr->hrm_index() + 1;
-    uint last_index = hr->last_hc_index();
-    for (uint i = first_index; i < last_index; i += 1) {
-      HeapRegion* chr = region_at(i);
-      assert(chr->is_continues_humongous(), "sanity");
-      chr->reset_gc_time_stamp();
-    }
-  }
 }
 
 #ifndef PRODUCT
@@ -2295,9 +2254,7 @@ class SumUsedClosure: public HeapRegionClosure {
 public:
   SumUsedClosure() : _used(0) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      _used += r->used();
-    }
+    _used += r->used();
     return false;
   }
   size_t result() { return _used; }
@@ -2518,9 +2475,9 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
 bool G1CollectedHeap::is_in(const void* p) const {
   if (_hrm.reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
-    // heap_region_containing_raw() should successfully
+    // heap_region_containing() should successfully
     // return the containing region.
-    HeapRegion* hr = heap_region_containing_raw(p);
+    HeapRegion* hr = heap_region_containing(p);
     return hr->is_in(p);
   } else {
     return false;
@@ -3057,7 +3014,7 @@ public:
       r->verify(_vo, &failures);
       if (failures) {
         _failures = true;
-      } else {
+      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
         r->object_iterate(&not_dead_yet_cl);
         if (_vo != VerifyOption_G1UseNextMarking) {
@@ -5311,24 +5268,10 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
                                             FreeRegionList* free_list,
                                             bool par) {
-  assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
+  assert(hr->is_humongous(), "this is only for humongous regions");
   assert(free_list != NULL, "pre-condition");
-
-  size_t hr_capacity = hr->capacity();
-  // We need to read this before we make the region non-humongous,
-  // otherwise the information will be gone.
-  uint last_index = hr->last_hc_index();
   hr->clear_humongous();
   free_region(hr, free_list, par);
-
-  uint i = hr->hrm_index() + 1;
-  while (i < last_index) {
-    HeapRegion* curr_hr = region_at(i);
-    assert(curr_hr->is_continues_humongous(), "invariant");
-    curr_hr->clear_humongous();
-    free_region(curr_hr, free_list, par);
-    i += 1;
-  }
 }
 
 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
@@ -5492,8 +5435,6 @@ public:
   bool failures() { return _failures; }
 
   virtual bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) return false;
-
     bool result = _g1h->verify_bitmaps(_caller, hr);
     if (!result) {
       _failures = true;
@@ -5767,11 +5708,10 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
         !r->rem_set()->is_empty()) {
 
       if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+        gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
-                               r->region_num(),
                                r->rem_set()->occupied(),
                                r->rem_set()->strong_code_roots_list_length(),
                                next_bitmap->isMarked(r->bottom()),
@@ -5788,11 +5728,10 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
                PTR_FORMAT " is not.", p2i(r->bottom()));
 
       if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+        gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
-                               r->region_num(),
                                r->rem_set()->occupied(),
                                r->rem_set()->strong_code_roots_list_length(),
                                next_bitmap->isMarked(r->bottom()),
@@ -5804,10 +5743,14 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
     if (next_bitmap->isMarked(r->bottom())) {
       next_bitmap->clear(r->bottom());
     }
-    _freed_bytes += r->used();
-    r->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, r->capacity());
-    g1h->free_humongous_region(r, _free_region_list, false);
+    do {
+      HeapRegion* next = g1h->next_region_in_humongous(r);
+      _freed_bytes += r->used();
+      r->set_containing_set(NULL);
+      _humongous_regions_removed.increment(1u, r->capacity());
+      g1h->free_humongous_region(r, _free_region_list, false);
+      r = next;
+    } while (r != NULL);
 
     return false;
   }
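Note the ordering inside the new loop: the successor is fetched with next_region_in_humongous(r) before free_humongous_region(r) runs, since freeing calls clear_humongous() and so wipes the region's humongous metadata, which is presumably what the successor lookup relies on.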
@@ -6042,10 +5985,6 @@ public:
   }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->is_continues_humongous()) {
-      return false;
-    }
-
     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
@@ -6233,14 +6172,10 @@ public:
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_continues_humongous()) {
-      return false;
-    }
-
     if (hr->is_young()) {
       // TODO
-    } else if (hr->is_starts_humongous()) {
-      assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
+    } else if (hr->is_humongous()) {
+      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());


@@ -1178,7 +1178,6 @@ public:
   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
 
-  // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
 #ifdef ASSERT
   // Returns whether p is in one of the available areas of the heap. Slow but
@@ -1243,6 +1242,10 @@ public:
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
+
   // Calculate the region index of the given address. Given address must be
   // within the heap.
   inline uint addr_to_region(HeapWord* addr) const;
@@ -1280,11 +1283,6 @@ public:
   // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
-
-  // Returns the HeapRegion that contains addr. addr must not be NULL.
-  // If addr is within a humongous continues region, it returns its humongous start region.
-  template <class T>
   inline HeapRegion* heap_region_containing(const T addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,


@@ -65,6 +65,10 @@ inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
 
+inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
+  return _hrm.next_region_in_humongous(hr);
+}
+
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
@@ -77,7 +81,7 @@ inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
 }
 
 template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   assert(addr != NULL, "invariant");
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
@@ -85,15 +89,6 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
   return _hrm.addr_to_region((HeapWord*) addr);
 }
 
-template <class T>
-inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = heap_region_containing_raw(addr);
-  if (hr->is_continues_humongous()) {
-    return hr->humongous_start_region();
-  }
-  return hr;
-}
-
 inline void G1CollectedHeap::reset_gc_time_stamp() {
   _gc_time_stamp = 0;
   OrderAccess::fence();
@@ -124,9 +119,9 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   assert_heap_not_locked();
 
   // Assign the containing region to containing_hr so that we don't
-  // have to keep calling heap_region_containing_raw() in the
+  // have to keep calling heap_region_containing() in the
   // asserts below.
-  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
   assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
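With the raw variant folded away, heap_region_containing() now returns the region that literally contains the address, including a "continues humongous" region; callers that want the owning object's first region must ask for it explicitly. A sketch of that explicit step, under the assumption such a caller exists:

    HeapRegion* hr = g1h->heap_region_containing(addr);
    if (hr->is_continues_humongous()) {
      hr = hr->humongous_start_region();  // explicit, only where actually needed
    }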


@@ -51,7 +51,6 @@ const char* G1HRPrinter::region_type_name(RegionType type) {
     case Eden:               return "Eden";
     case Survivor:           return "Survivor";
     case Old:                return "Old";
-    case SingleHumongous:    return "SingleH";
     case StartsHumongous:    return "StartsH";
     case ContinuesHumongous: return "ContinuesH";
     case Archive:            return "Archive";


@@ -50,7 +50,6 @@ public:
     Eden,
     Survivor,
     Old,
-    SingleHumongous,
    StartsHumongous,
     ContinuesHumongous,
     Archive


@@ -279,8 +279,8 @@ public:
       } else {
         assert(hr->is_empty(), "Should have been cleared in phase 2.");
       }
+      hr->reset_during_compaction();
     }
-    hr->reset_during_compaction();
   } else if (!hr->is_pinned()) {
     hr->compact();
   }
@@ -334,9 +334,6 @@ void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
   HeapWord* end = hr->end();
   FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
 
-  assert(hr->is_starts_humongous(),
-         "Only the start of a humongous region should be freed.");
-
   hr->set_containing_set(NULL);
   _humongous_regions_removed.increment(1u, hr->capacity());
@@ -373,15 +370,12 @@ void G1PrepareCompactClosure::update_sets() {
 bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
   if (hr->is_humongous()) {
-    if (hr->is_starts_humongous()) {
-      oop obj = oop(hr->bottom());
-      if (obj->is_gc_marked()) {
-        obj->forward_to(obj);
-      } else {
-        free_humongous_region(hr);
-      }
-    } else {
-      assert(hr->is_continues_humongous(), "Invalid humongous.");
+    oop obj = oop(hr->humongous_start_region()->bottom());
+    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
+      obj->forward_to(obj);
+    }
+    if (!obj->is_gc_marked()) {
+      free_humongous_region(hr);
     }
   } else if (!hr->is_pinned()) {
     prepare_for_compaction(hr, hr->end());


@@ -222,7 +222,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
 
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
-  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+  if (_g1->heap_region_containing(new_obj)->is_young()) {
     _scanned_klass->record_modified_oops();
   }
 }


@@ -216,7 +216,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                  oop const old,
                                                  markOop const old_mark) {
   const size_t word_sz = old->size();
-  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
+  HeapRegion* const from_region = _g1h->heap_region_containing(old);
   // +1 to make the -1 indexes valid...
   const int young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index > 0) ||
@@ -294,9 +294,9 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
     if (G1StringDedup::is_enabled()) {
       const bool is_from_young = state.is_young();
       const bool is_to_young = dest_state.is_young();
-      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
              "sanity");
-      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
       G1StringDedup::enqueue_from_evacuation(is_from_young,
                                              is_to_young,
@@ -314,7 +314,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
         oop* old_p = set_partial_array_mask(old);
         push_on_queue(old_p);
       } else {
-        HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+        HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
         _scanner.set_region(to_region);
         obj->oop_iterate_backwards(&_scanner);
       }


@@ -101,7 +101,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
     // so that the heap remains parsable in case of evacuation failure.
     to_obj_array->set_length(end);
   }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  _scanner.set_region(_g1h->heap_region_containing(to_obj));
   // Process indexes [start,end). It will also process the header
   // along with the first chunk (i.e., the chunk with start == 0).
   // Note that at this point the length field of to_obj_array is not
@@ -115,10 +115,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
 
 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
   if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    HeapRegion* r = _g1h->heap_region_containing(ref_to_scan);
     do_oop_evac(ref_to_scan, r);
   } else {
     do_oop_partial_array((oop*)ref_to_scan);


@@ -60,7 +60,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
   assert(_g1->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
-  assert(from == NULL || from->is_in_reserved(p), "p is not in from");
+  assert(from->is_in_reserved(p) || from->is_starts_humongous(), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (from != to) {


@@ -52,7 +52,7 @@ void G1StringDedup::stop() {
 
 bool G1StringDedup::is_candidate_from_mark(oop obj) {
   if (java_lang_String::is_instance_inlined(obj)) {
-    bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
+    bool from_young = G1CollectedHeap::heap()->heap_region_containing(obj)->is_young();
     if (from_young && obj->age() < StringDeduplicationAgeThreshold) {
       // Candidate found. String is being evacuated from young to old but has not
       // reached the deduplication age threshold, i.e. has not previously been a


@ -67,7 +67,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
// not considered dead, either because it is marked (in the mark bitmap) // not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise // or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object. // we can safely ignore the object.
if (!g1h->is_obj_dead(oop(cur), _hr)) { if (!g1h->is_obj_dead(oop(cur))) {
oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr); oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
} else { } else {
oop_size = _hr->block_size(cur); oop_size = _hr->block_size(cur);
@ -81,7 +81,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* next_obj = cur + oop_size; HeapWord* next_obj = cur + oop_size;
while (next_obj < top) { while (next_obj < top) {
// Keep filtering the remembered set. // Keep filtering the remembered set.
if (!g1h->is_obj_dead(cur_oop, _hr)) { if (!g1h->is_obj_dead(cur_oop)) {
// Bottom lies entirely below top, so we can call the // Bottom lies entirely below top, so we can call the
// non-memRegion version of oop_iterate below. // non-memRegion version of oop_iterate below.
cur_oop->oop_iterate(_rs_scan); cur_oop->oop_iterate(_rs_scan);
@ -93,7 +93,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
} }
// Last object. Need to do dead-obj filtering here too. // Last object. Need to do dead-obj filtering here too.
if (!g1h->is_obj_dead(oop(cur), _hr)) { if (!g1h->is_obj_dead(oop(cur))) {
oop(cur)->oop_iterate(_rs_scan, mr); oop(cur)->oop_iterate(_rs_scan, mr);
} }
} }
@ -162,8 +162,6 @@ void HeapRegion::reset_after_compaction() {
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) { void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
assert(_humongous_start_region == NULL, assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions"); "we should have already filtered out humongous regions");
assert(_end == orig_end(),
"we should have already filtered out humongous regions");
assert(!in_collection_set(), assert(!in_collection_set(),
"Should not clear heap region %u in the collection set", hrm_index()); "Should not clear heap region %u in the collection set", hrm_index());
@@ -213,24 +211,18 @@ void HeapRegion::calc_gc_efficiency() {
   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }
 
-void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
+void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
   assert(!is_humongous(), "sanity / pre-condition");
-  assert(end() == orig_end(),
-         "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
   _type.set_starts_humongous();
   _humongous_start_region = this;
 
-  set_end(new_end);
-  _offsets.set_for_starts_humongous(new_top);
+  _offsets.set_for_starts_humongous(obj_top);
 }
 
 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
   assert(!is_humongous(), "sanity / pre-condition");
-  assert(end() == orig_end(),
-         "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(first_hr->is_starts_humongous(), "pre-condition");
@@ -241,18 +233,6 @@ void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
 
 void HeapRegion::clear_humongous() {
   assert(is_humongous(), "pre-condition");
 
-  if (is_starts_humongous()) {
-    assert(top() <= end(), "pre-condition");
-
-    set_end(orig_end());
-    if (top() > end()) {
-      // at least one "continues humongous" region after it
-      set_top(end());
-    }
-  } else {
-    // continues humongous
-    assert(end() == orig_end(), "sanity");
-  }
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
   _humongous_start_region = NULL;
 }
@@ -290,11 +270,6 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_timestamp();
-
-  assert(mr.end() == orig_end(),
-         "Given region end address " PTR_FORMAT " should match exactly "
-         "bottom plus one region size, i.e. " PTR_FORMAT,
-         p2i(mr.end()), p2i(orig_end()));
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -832,7 +807,14 @@ void HeapRegion::verify(VerifyOption vo,
     _offsets.verify();
   }
 
-  if (p != top()) {
+  if (is_region_humongous) {
+    oop obj = oop(this->humongous_start_region()->bottom());
+    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
+      gclog_or_tty->print_cr("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
+    }
+  }
+
+  if (!is_region_humongous && p != top()) {
     gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
                            "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
     *failures = true;
@@ -840,7 +822,6 @@ void HeapRegion::verify(VerifyOption vo,
   }
 
   HeapWord* the_end = end();
-  assert(p == top(), "it should still hold");
   // Do some extra BOT consistency checking for addresses in the
   // range [top, end). BOT look-ups in this range should yield
   // top. No point in doing that if top == end (there's nothing there).
@@ -931,6 +912,7 @@ void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
 }
 
 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
+  assert(new_end == _bottom + HeapRegion::GrainWords, "new_end should always be _bottom + HeapRegion::GrainWords");
   Space::set_end(new_end);
   _offsets.resize(new_end - bottom());
 }

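The hunks above replace the old scheme, where a starts-humongous region's end() was stretched over the whole object, with a fixed per-region layout that the new set_end() assert enforces. A self-contained sketch of that layout (illustrative only: word-indexed addresses, a stand-in Region type, and an arbitrary grain size instead of HotSpot's HeapRegion and GrainWords):

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

constexpr std::size_t kGrainWords = 512;  // stand-in region size in words

struct Region {
  std::size_t bottom;  // word index where the region starts
  std::size_t top;     // allocation high-water mark, in words
  std::size_t end() const { return bottom + kGrainWords; }  // immutable
};

// Lay out one humongous object of obj_words words starting at regions[first]:
// fill every region up to its own end(), except the last, whose top becomes
// the object's end.
void set_humongous(std::vector<Region>& regions, std::size_t first, std::size_t obj_words) {
  std::size_t obj_top = regions[first].bottom + obj_words;
  std::size_t i = first;
  while (regions[i].end() < obj_top) {
    regions[i].top = regions[i].end();
    ++i;
  }
  regions[i].top = obj_top;
}

int main() {
  std::vector<Region> rs;
  for (std::size_t r = 0; r < 4; ++r) {
    rs.push_back({r * kGrainWords, r * kGrainWords});  // empty: top == bottom
  }
  set_humongous(rs, 0, 2 * kGrainWords + 100);  // spans three regions
  for (const Region& r : rs) {
    assert(r.top <= r.end());  // the invariant the new set_end() assert protects
    std::cout << "top offset in region: " << (r.top - r.bottom) << '\n';
  }
}

Running it prints top offsets of 512, 512, 100 and 0: every humongous region except the last is filled up to its own end(), which is the invariant the rewritten HeapRegion::verify() and the header comment in the next file describe.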

@@ -43,6 +43,15 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
+// Each heap region is self-contained. top() and end() can never
+// be set beyond the end of the region. For humongous objects,
+// the first region is a StartsHumongous region. If the humongous
+// object is larger than a heap region, the following regions will
+// be of type ContinuesHumongous. In this case the top() of the
+// StartsHumongous region and all ContinuesHumongous regions except
+// the last will point to their own end. For the last ContinuesHumongous
+// region, top() will equal the object's top.
+
 class G1CollectedHeap;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
@@ -389,8 +398,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   size_t garbage_bytes() {
     size_t used_at_mark_start_bytes =
       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
-    assert(used_at_mark_start_bytes >= marked_bytes(),
-           "Can't mark more than we have.");
     return used_at_mark_start_bytes - marked_bytes();
   }
@@ -409,7 +416,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   void add_to_marked_bytes(size_t incr_bytes) {
     _next_marked_bytes = _next_marked_bytes + incr_bytes;
-    assert(_next_marked_bytes <= used(), "invariant" );
   }
 
   void zero_marked_bytes() {
@@ -445,57 +451,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
     return _humongous_start_region;
   }
 
-  // Return the number of distinct regions that are covered by this region:
-  // 1 if the region is not humongous, >= 1 if the region is humongous.
-  uint region_num() const {
-    if (!is_humongous()) {
-      return 1U;
-    } else {
-      assert(is_starts_humongous(), "doesn't make sense on HC regions");
-      assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
-      return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
-    }
-  }
-
-  // Return the index + 1 of the last HC regions that's associated
-  // with this HS region.
-  uint last_hc_index() const {
-    assert(is_starts_humongous(), "don't call this otherwise");
-    return hrm_index() + region_num();
-  }
-
-  // Same as Space::is_in_reserved, but will use the original size of the region.
-  // The original size is different only for start humongous regions. They get
-  // their _end set up to be the end of the last continues region of the
-  // corresponding humongous object.
-  bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < orig_end();
-  }
-
   // Makes the current region be a "starts humongous" region, i.e.,
   // the first region in a series of one or more contiguous regions
-  // that will contain a single "humongous" object. The two parameters
-  // are as follows:
+  // that will contain a single "humongous" object.
   //
-  // new_top : The new value of the top field of this region which
-  // points to the end of the humongous object that's being
-  // allocated. If there is more than one region in the series, top
-  // will lie beyond this region's original end field and on the last
-  // region in the series.
-  //
-  // new_end : The new value of the end field of this region which
-  // points to the end of the last region in the series. If there is
-  // one region in the series (namely: this one) end will be the same
-  // as the original end of this region.
-  //
-  // Updating top and end as described above makes this region look as
-  // if it spans the entire space taken up by all the regions in the
-  // series and a single allocation moved its top to new_top. This
-  // ensures that the space (capacity / allocated) taken up by all
-  // humongous regions can be calculated by just looking at the
-  // "starts humongous" regions and by ignoring the "continues
-  // humongous" regions.
-  void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
+  // obj_top : points to the end of the humongous object that's being
+  // allocated.
+  void set_starts_humongous(HeapWord* obj_top);
 
   // Makes the current region be a "continues humongous"
   // region. first_hr is the "start humongous" region of the series
@@ -566,9 +528,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* orig_end() const { return _bottom + GrainWords; }
-
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
   void par_clear();
@@ -614,8 +573,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
   void reset_during_compaction() {
-    assert(is_starts_humongous(),
-           "should only be called for starts humongous regions");
+    assert(is_humongous(),
+           "should only be called for humongous regions");
 
     zero_marked_bytes();
     init_top_at_mark_start();


@@ -115,6 +115,11 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  if (!this->is_in(p)) {
+    assert(is_continues_humongous(), "This case can only happen for humongous regions");
+    return (p == humongous_start_region()->bottom());
+  }
+
   if (ClassUnloadingWithConcurrentMark) {
     return !g1h->is_obj_dead(oop(p), this);
   }
@@ -176,10 +181,6 @@ inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
-
-  assert(_prev_marked_bytes <=
-         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
-         HeapWordSize, "invariant");
 }
 
 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {

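The new early-out in block_is_obj() handles a subtle case: a continues-humongous region can be asked about an address below its own bottom(), and the only such address it should report as an object start is the bottom of its starts-humongous region. A standalone model of just that rule (stand-in Region type with raw char* addresses; the is_obj_dead() liveness filtering is elided):

#include <cassert>

struct Region {
  const char* bottom;
  const char* end;
  const Region* humongous_start;  // == this unless continues-humongous

  bool is_in(const char* p) const { return bottom <= p && p < end; }

  bool block_is_obj(const char* p) const {
    if (!is_in(p)) {
      // Only continues-humongous regions are queried with outside
      // addresses; the lone humongous object's header is the only
      // address that counts as an object start.
      return p == humongous_start->bottom;
    }
    return true;  // in-region case: liveness filtering elided here
  }
};

int main() {
  char heap[128] = {};
  Region start{heap, heap + 64, nullptr};
  start.humongous_start = &start;
  Region cont{heap + 64, heap + 128, &start};

  assert(cont.block_is_obj(heap));       // the humongous object's start
  assert(!cont.block_is_obj(heap + 1));  // any other outside address
  assert(start.block_is_obj(heap));      // normal in-region query
  return 0;
}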

@@ -343,63 +343,18 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
       continue;
     }
     HeapRegion* r = _regions.get_by_index(index);
-    // We'll ignore "continues humongous" regions (we'll process them
-    // when we come across their corresponding "start humongous"
-    // region) and regions already claimed.
+    // We'll ignore regions already claimed.
     // However, if the iteration is specified as concurrent, the values for
     // is_starts_humongous and is_continues_humongous can not be trusted,
     // and we should just blindly iterate over regions regardless of their
     // humongous status.
-    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
+    if (hrclaimer->is_region_claimed(index)) {
       continue;
     }
     // OK, try to claim it
     if (!hrclaimer->claim_region(index)) {
       continue;
     }
-    // Success!
-    // As mentioned above, special treatment of humongous regions can only be
-    // done if we are iterating non-concurrently.
-    if (!concurrent && r->is_starts_humongous()) {
-      // If the region is "starts humongous" we'll iterate over its
-      // "continues humongous" first; in fact we'll do them
-      // first. The order is important. In one case, calling the
-      // closure on the "starts humongous" region might de-allocate
-      // and clear all its "continues humongous" regions and, as a
-      // result, we might end up processing them twice. So, we'll do
-      // them first (note: most closures will ignore them anyway) and
-      // then we'll do the "starts humongous" region.
-      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
-        HeapRegion* chr = _regions.get_by_index(ch_index);
-        assert(chr->is_continues_humongous(), "Must be humongous region");
-        assert(chr->humongous_start_region() == r,
-               "Must work on humongous continuation of the original start region "
-               PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr));
-        assert(!hrclaimer->is_region_claimed(ch_index),
-               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
-        // Claim the region so no other worker tries to process the region. When a worker processes a
-        // starts_humongous region it may also process the associated continues_humongous regions.
-        // The continues_humongous regions can be changed to free regions. Unless this worker claims
-        // all of these regions, other workers might try claim and process these newly free regions.
-        bool claim_result = hrclaimer->claim_region(ch_index);
-        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
-        bool res2 = blk->doHeapRegion(chr);
-        if (res2) {
-          return;
-        }
-        // Right now, this holds (i.e., no closure that actually
-        // does something with "continues humongous" regions
-        // clears them). We might have to weaken it in the future,
-        // but let's leave these two asserts here for extra safety.
-        assert(chr->is_continues_humongous(), "should still be the case");
-        assert(chr->humongous_start_region() == r, "sanity");
-      }
-    }
     bool res = blk->doHeapRegion(r);
     if (res) {
       return;
@@ -508,12 +463,8 @@ void HeapRegionManager::verify() {
       // this method may be called, we have only completed allocation of the regions,
       // but not put into a region set.
       prev_committed = true;
-      if (hr->is_starts_humongous()) {
-        prev_end = hr->orig_end();
-      } else {
-        prev_end = hr->end();
-      }
+      prev_end = hr->end();
     }
   for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
     guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
   }

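With the humongous special case deleted, par_iterate() collapses to a plain pattern: cheap is-claimed pre-check, atomic claim, process. A runnable sketch of that claiming discipline using plain C++ threads (HotSpot's HeapRegionClaimer, worker start offsets, and the closure protocol are simplified into stand-ins):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

struct Claimer {
  std::vector<std::atomic<bool>> claimed;  // one flag per region slot
  explicit Claimer(std::size_t n) : claimed(n) {}

  bool claim(std::size_t i) {
    bool expected = false;  // succeeds for exactly one worker per slot
    return claimed[i].compare_exchange_strong(expected, true);
  }
};

// One worker: start at an offset, walk the whole table once, and for each
// slot do the same pre-check / claim / process sequence as the loop above.
void par_iterate(Claimer& c, std::size_t start, std::size_t n,
                 std::atomic<int>& processed) {
  for (std::size_t k = 0; k < n; ++k) {
    std::size_t index = (start + k) % n;
    if (c.claimed[index].load()) {  // already claimed: skip cheaply
      continue;
    }
    if (!c.claim(index)) {          // lost the race to another worker
      continue;
    }
    processed.fetch_add(1);         // stand-in for blk->doHeapRegion(r)
  }
}

int main() {
  const std::size_t n = 1024;
  Claimer c(n);
  std::atomic<int> processed{0};
  std::vector<std::thread> workers;
  for (std::size_t w = 0; w < 4; ++w) {
    workers.emplace_back(par_iterate, std::ref(c), w * (n / 4), n, std::ref(processed));
  }
  for (std::thread& t : workers) {
    t.join();
  }
  std::printf("processed %d of %zu regions\n", processed.load(), n);  // always 1024
}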

@@ -150,6 +150,10 @@ public:
   // is valid.
   inline HeapRegion* at(uint index) const;
 
+  // Return the next region (by index) that is part of the same
+  // humongous object that hr is part of.
+  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
+
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
   inline HeapRegion* addr_to_region(HeapWord* addr) const;


@@ -47,6 +47,18 @@ inline HeapRegion* HeapRegionManager::at(uint index) const {
   return hr;
 }
 
+inline HeapRegion* HeapRegionManager::next_region_in_humongous(HeapRegion* hr) const {
+  uint index = hr->hrm_index();
+  assert(is_available(index), "pre-condition");
+  assert(hr->is_humongous(), "next_region_in_humongous should only be called for a humongous region.");
+  index++;
+  if (index < max_length() && is_available(index) && at(index)->is_continues_humongous()) {
+    return at(index);
+  } else {
+    return NULL;
+  }
+}
+
 inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
   _free_list.add_ordered(hr);
 }

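For callers, next_region_in_humongous() turns "visit all regions of this humongous object" into a simple walk that ends on NULL. A toy standalone version (stand-in Region and Manager types; the real helper also checks is_available() and the committed bound, which are omitted here):

#include <cstddef>
#include <iostream>
#include <vector>

struct Region {
  int index;
  bool continues_humongous;
};

struct Manager {
  std::vector<Region> regions;

  // Next region of the same humongous object, or nullptr at its end.
  Region* next_region_in_humongous(Region* hr) {
    std::size_t next = static_cast<std::size_t>(hr->index) + 1;
    if (next < regions.size() && regions[next].continues_humongous) {
      return &regions[next];
    }
    return nullptr;
  }
};

int main() {
  // Region 0 starts a humongous object that also covers regions 1 and 2.
  Manager m{{{0, false}, {1, true}, {2, true}, {3, false}}};
  for (Region* r = &m.regions[0]; r != nullptr; r = m.next_region_in_humongous(r)) {
    std::cout << "region " << r->index << " belongs to the object\n";
  }
}

The real helper additionally guards with is_available(index), since the region slot after a humongous object may not even be committed.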

@@ -105,7 +105,7 @@ protected:
     // now reused for the corresponding start humongous region, we need to
     // make sure that we detect this. Thus, we call is_in_reserved_raw()
     // instead of just is_in_reserved() here.
-    if (loc_hr->is_in_reserved_raw(from)) {
+    if (loc_hr->is_in_reserved(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
@@ -433,7 +433,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   }
 
   // Note that this may be a continued H region.
-  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
+  HeapRegion* from_hr = _g1h->heap_region_containing(from);
   RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
 
   // If the region is already coarsened, return.
@@ -765,7 +765,7 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
 }
 
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
-  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
+  HeapRegion* hr = _g1h->heap_region_containing(from);
   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;

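The from_card computation in the first hunk above deserves unpacking. Assuming HotSpot's usual constants of 512-byte cards (card_shift == 9) and 8-byte heap words (LogHeapWordSize == 3), the shift amount is 6, so each card covers 64 words. A standalone worked example with those assumed constants:

#include <cstddef>
#include <iostream>

constexpr std::size_t kCardShift = 9;        // 512-byte cards (assumed)
constexpr std::size_t kLogHeapWordSize = 3;  // 8-byte heap words (assumed)

// Word offset within the region -> card index: shift out 9 - 3 = 6 bits,
// because one 512-byte card holds 64 eight-byte words.
std::size_t card_for_offset(std::size_t hw_offset) {
  return hw_offset >> (kCardShift - kLogHeapWordSize);
}

int main() {
  std::cout << card_for_offset(0)   << '\n';  // 0: first card of the region
  std::cout << card_for_offset(63)  << '\n';  // 0: last word still in card 0
  std::cout << card_for_offset(64)  << '\n';  // 1: first word of card 1
  std::cout << card_for_offset(640) << '\n';  // 10
}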

@@ -88,7 +88,7 @@ inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
   assert(heap->is_in_reserved(entry),
          "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry));
 
-  HeapRegion* region = heap->heap_region_containing_raw(entry);
+  HeapRegion* region = heap->heap_region_containing(entry);
   assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry));
   if (entry >= region->next_top_at_mark_start()) {
     return false;