8054818: Refactor HeapRegionSeq to manage heap region and auxiliary data

Let HeapRegionSeq manage the heap region and auxiliary data to reduce the responsibilities of G1CollectedHeap, and to encapsulate this work from other code.

Reviewed-by: jwilhelm, jmasa, mgerdin, brutisso
Thomas Schatzl 2014-08-18 16:10:44 +02:00
parent 71775c28e2
commit 2ee5ee89d4
21 changed files with 832 additions and 953 deletions
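
For orientation before the diff hunks: after this change G1CollectedHeap no longer consults its own _g1_reserved field; queries such as g1_reserved() and is_in_g1_reserved() forward to the HeapRegionSeq (_hrs), which now owns the reserved range and the auxiliary data. Below is a minimal sketch of that delegation pattern, with simplified stand-in types rather than the real HotSpot declarations:

  // Hedged sketch: simplified stand-ins for MemRegion/HeapRegionSeq that show
  // only the delegation pattern; the real HotSpot classes carry far more state.
  #include <cstddef>
  #include <cstdint>

  typedef uintptr_t* HeapWord;   // word-sized heap cell, as in HotSpot

  struct MemRegion {
    HeapWord _start;
    size_t   _word_size;
    HeapWord start() const { return _start; }
    HeapWord end()   const { return _start + _word_size; }
    bool contains(const void* p) const {
      return p >= (const void*)_start && p < (const void*)end();
    }
  };

  // After the change, the region sequence owns the reserved heap range.
  class HeapRegionSeq {
    MemRegion _reserved;
   public:
    explicit HeapRegionSeq(MemRegion reserved) : _reserved(reserved) {}
    MemRegion reserved() const { return _reserved; }
  };

  // G1CollectedHeap only forwards; callers never touch a _g1_reserved field.
  class G1CollectedHeap {
    HeapRegionSeq _hrs;
   public:
    explicit G1CollectedHeap(MemRegion reserved) : _hrs(reserved) {}
    MemRegion g1_reserved() const { return _hrs.reserved(); }
    bool is_in_g1_reserved(const void* p) const { return g1_reserved().contains(p); }
  };

  int main() {
    uintptr_t backing[1024];                       // stand-in for the reserved space
    MemRegion reserved = { (HeapWord)backing, 1024 };
    G1CollectedHeap g1(reserved);
    return g1.is_in_g1_reserved(&backing[10]) ? 0 : 1;
  }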

@@ -47,19 +47,21 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
+}
+
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   assert(addr != NULL, "invariant");
-  assert(_g1_reserved.contains((const void*) addr),
+  assert(is_in_g1_reserved((const void*) addr),
       err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
-             p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
+             p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
   return _hrs.addr_to_region((HeapWord*) addr);
 }
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
   if (hr->continuesHumongous()) {
     return hr->humongous_start_region();
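
The two helpers in the hunk above are inverses of each other: addr_to_region() turns an address into a region index by shifting the byte offset from the reserved base right by the region-size log, and the newly added bottom_addr_for_region() turns an index back into the region's bottom address by multiplying with the region size in words. A self-contained illustration of that arithmetic follows; the 1 MB grain size and the standalone helper signatures are assumptions made for the example, not values from the diff:

  // Illustrative sketch of the index <-> address mapping used above.
  // The grain-size constants are assumptions for this example only; the real
  // values are computed by HeapRegion at VM startup.
  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  static const size_t LogOfGrainBytes = 20;                       // assume 1 MB regions
  static const size_t GrainBytes      = (size_t)1 << LogOfGrainBytes;
  static const size_t GrainWords      = GrainBytes / sizeof(uintptr_t);

  typedef uintptr_t* HeapWord;

  static unsigned addr_to_region(HeapWord reserved_start, HeapWord addr) {
    // Byte offset from the start of the reserved space, divided by the region size.
    size_t byte_offset = (size_t)((char*)addr - (char*)reserved_start);
    return (unsigned)(byte_offset >> LogOfGrainBytes);
  }

  static HeapWord bottom_addr_for_region(HeapWord reserved_start, unsigned index) {
    // Inverse mapping: region index -> first word of that region.
    return reserved_start + index * GrainWords;
  }

  int main() {
    HeapWord base = (HeapWord)0x80000000;        // pretend reserved base, never dereferenced
    HeapWord addr = base + 3 * GrainWords + 17;  // an address inside region 3
    unsigned idx  = addr_to_region(base, addr);
    printf("region index = %u\n", idx);                                   // prints 3
    printf("region bottom = %p\n", (void*)bottom_addr_for_region(base, idx));
    return 0;
  }
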
@@ -89,10 +91,9 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -252,8 +253,7 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
   }
 }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }