8042668: GC Support for shared heap ranges in CDS

Added "Archive Region" support to the G1 GC.

Reviewed-by: tschatzl, brutisso

commit df2efa1c37 (parent edcd4cb94b)
24 changed files with 786 additions and 51 deletions
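The change introduces a dump-time allocator for "archive" heap ranges and a restore-time path that re-creates those ranges as pinned, unmarked G1 regions. The following sketch is illustrative only: it strings together the new G1CollectedHeap entry points declared later in this diff (begin_archive_alloc_range, archive_mem_allocate, end_archive_alloc_range, check_archive_addresses, alloc_archive_regions, fill_archive_regions) in the order their header comments describe; the surrounding caller names, sizes, and the alignment value are hypothetical, and the real CDS callers are not part of this commit.

// Dump time: at a safepoint, on the VM thread, without the Heap_lock held.
void dump_heap_ranges(G1CollectedHeap* g1h, GrowableArray<MemRegion>* ranges) {
  g1h->begin_archive_alloc_range();
  size_t word_size = 128;                          // hypothetical shareable-object size
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* mem = g1h->archive_mem_allocate(word_size);
    // ... copy/initialize the shared object at 'mem' ...
  }
  // Align the end of the last range and collect the MemRegions that were used.
  g1h->end_archive_alloc_range(ranges, 4096 /* illustrative end alignment */);
}

// Restore time, at JVM init: re-create the same ranges at the same addresses.
bool restore_heap_ranges(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
  if (!g1h->check_archive_addresses(ranges, count)) {
    return false;                                  // a range falls outside the reserved heap
  }
  if (!g1h->alloc_archive_regions(ranges, count)) {
    return false;                                  // a containing region is already in use
  }
  // ... mmap the archived heap data into 'ranges' here (caller's responsibility) ...
  g1h->fill_archive_regions(ranges, count);        // pad around the ranges so regions stay parseable
  return true;
}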
@@ -107,7 +107,8 @@ void CollectionSetChooser::verify() {
     HeapRegion *curr = regions_at(index++);
     guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
-    guarantee(!curr->is_humongous(), "should not be humongous!");
+    guarantee(!curr->is_pinned(),
+              err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
     if (prev != NULL) {
       guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@@ -149,8 +150,8 @@ void CollectionSetChooser::sort_regions() {
 
 
 void CollectionSetChooser::add_region(HeapRegion* hr) {
-  assert(!hr->is_humongous(),
-         "Humongous regions shouldn't be added to the collection set");
+  assert(!hr->is_pinned(),
+         err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
   assert(!hr->is_young(), "should not be young!");
   _regions.append(hr);
   _length++;
@@ -103,13 +103,12 @@ public:
   void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
-  // not. Currently, we skip humongous regions (we never add them to
-  // the CSet, we only reclaim them during cleanup) and regions whose
-  // live bytes are over the threshold.
+  // not. Currently, we skip pinned regions and regions whose live
+  // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
   bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
-    return !hr->is_humongous() &&
+    return !hr->is_pinned() &&
            hr->live_bytes() < _region_live_threshold_bytes;
   }
 
@@ -1784,7 +1784,7 @@ public:
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->is_continues_humongous()) {
+    if (hr->is_continues_humongous() || hr->is_archive()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@@ -26,6 +26,7 @@
 #include "gc/g1/g1Allocator.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 
@@ -44,6 +45,8 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                             HeapRegion** retained_old) {
   HeapRegion* retained_region = *retained_old;
   *retained_old = NULL;
+  assert(retained_region == NULL || !retained_region->is_archive(),
+         err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
 
   // We will discard the current GC alloc region if:
   // a) it's in the collection set (it can happen!),
@@ -168,3 +171,153 @@ void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
     }
   }
 }
+
+G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
+  // Create the archive allocator, and also enable archive object checking
+  // in mark-sweep, since we will be creating archive regions.
+  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
+  G1MarkSweep::enable_archive_object_check();
+  return result;
+}
+
+bool G1ArchiveAllocator::alloc_new_region() {
+  // Allocate the highest free region in the reserved heap,
+  // and add it to our list of allocated regions. It is marked
+  // archive and added to the old set.
+  HeapRegion* hr = _g1h->alloc_highest_free_region();
+  if (hr == NULL) {
+    return false;
+  }
+  assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
+  hr->set_archive();
+  _g1h->_old_set.add(hr);
+  _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
+  _allocated_regions.append(hr);
+  _allocation_region = hr;
+
+  // Set up _bottom and _max to begin allocating in the lowest
+  // min_region_size'd chunk of the allocated G1 region.
+  _bottom = hr->bottom();
+  _max = _bottom + HeapRegion::min_region_size_in_words();
+
+  // Tell mark-sweep that objects in this region are not to be marked.
+  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+
+  // Since we've modified the old set, call update_sizes.
+  _g1h->g1mm()->update_sizes();
+  return true;
+}
+
+HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
+  assert(word_size != 0, "size must not be zero");
+  if (_allocation_region == NULL) {
+    if (!alloc_new_region()) {
+      return NULL;
+    }
+  }
+  HeapWord* old_top = _allocation_region->top();
+  assert(_bottom >= _allocation_region->bottom(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
+                 p2i(_bottom), p2i(_allocation_region->bottom())));
+  assert(_max <= _allocation_region->end(),
+         err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
+                 p2i(_max), p2i(_allocation_region->end())));
+  assert(_bottom <= old_top && old_top <= _max,
+         err_msg("inconsistent allocation state: expected "
+                 PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
+                 p2i(_bottom), p2i(old_top), p2i(_max)));
+
+  // Allocate the next word_size words in the current allocation chunk.
+  // If allocation would cross the _max boundary, insert a filler and begin
+  // at the base of the next min_region_size'd chunk. Also advance to the next
+  // chunk if we don't yet cross the boundary, but the remainder would be too
+  // small to fill.
+  HeapWord* new_top = old_top + word_size;
+  size_t remainder = pointer_delta(_max, new_top);
+  if ((new_top > _max) ||
+      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
+    if (old_top != _max) {
+      size_t fill_size = pointer_delta(_max, old_top);
+      CollectedHeap::fill_with_object(old_top, fill_size);
+      _summary_bytes_used += fill_size * HeapWordSize;
+    }
+    _allocation_region->set_top(_max);
+    old_top = _bottom = _max;
+
+    // Check if we've just used up the last min_region_size'd chunk
+    // in the current region, and if so, allocate a new one.
+    if (_bottom != _allocation_region->end()) {
+      _max = _bottom + HeapRegion::min_region_size_in_words();
+    } else {
+      if (!alloc_new_region()) {
+        return NULL;
+      }
+      old_top = _allocation_region->bottom();
+    }
+  }
+  _allocation_region->set_top(old_top + word_size);
+  _summary_bytes_used += word_size * HeapWordSize;
+
+  return old_top;
+}
+
+void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
+                                          size_t end_alignment_in_bytes) {
+  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
+         err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
+  assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
+         err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
+
+  // If we've allocated nothing, simply return.
+  if (_allocation_region == NULL) {
+    return;
+  }
+
+  // If an end alignment was requested, insert filler objects.
+  if (end_alignment_in_bytes != 0) {
+    HeapWord* currtop = _allocation_region->top();
+    HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
+    size_t fill_size = pointer_delta(newtop, currtop);
+    if (fill_size != 0) {
+      if (fill_size < CollectedHeap::min_fill_size()) {
+        // If the required fill is smaller than we can represent,
+        // bump up to the next aligned address. We know we won't exceed the current
+        // region boundary because the max supported alignment is smaller than the min
+        // region size, and because the allocation code never leaves space smaller than
+        // the min_fill_size at the top of the current allocation region.
+        newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
+                                             end_alignment_in_bytes);
+        fill_size = pointer_delta(newtop, currtop);
+      }
+      HeapWord* fill = archive_mem_allocate(fill_size);
+      CollectedHeap::fill_with_objects(fill, fill_size);
+    }
+  }
+
+  // Loop through the allocated regions, and create MemRegions summarizing
+  // the allocated address range, combining contiguous ranges. Add the
+  // MemRegions to the GrowableArray provided by the caller.
+  int index = _allocated_regions.length() - 1;
+  assert(_allocated_regions.at(index) == _allocation_region,
+         err_msg("expected region %u at end of array, found %u",
+                 _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
+  HeapWord* base_address = _allocation_region->bottom();
+  HeapWord* top = base_address;
+
+  while (index >= 0) {
+    HeapRegion* next = _allocated_regions.at(index);
+    HeapWord* new_base = next->bottom();
+    HeapWord* new_top = next->top();
+    if (new_base != top) {
+      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+      base_address = new_base;
+    }
+    top = new_top;
+    index = index - 1;
+  }
+
+  assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
+  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
+  _allocated_regions.clear();
+  _allocation_region = NULL;
+};
@@ -269,4 +269,72 @@ public:
   virtual void waste(size_t& wasted, size_t& undo_wasted);
 };
 
+// G1ArchiveAllocator is used to allocate memory in archive
+// regions. Such regions are not modifiable by GC, being neither
+// scavenged nor compacted, or even marked in the object header.
+// They can contain no pointers to non-archive heap regions,
+class G1ArchiveAllocator : public CHeapObj<mtGC> {
+
+ protected:
+  G1CollectedHeap* _g1h;
+
+  // The current allocation region
+  HeapRegion* _allocation_region;
+
+  // Regions allocated for the current archive range.
+  GrowableArray<HeapRegion*> _allocated_regions;
+
+  // The number of bytes used in the current range.
+  size_t _summary_bytes_used;
+
+  // Current allocation window within the current region.
+  HeapWord* _bottom;
+  HeapWord* _top;
+  HeapWord* _max;
+
+  // Allocate a new region for this archive allocator.
+  // Allocation is from the top of the reserved heap downward.
+  bool alloc_new_region();
+
+ public:
+  G1ArchiveAllocator(G1CollectedHeap* g1h) :
+    _g1h(g1h),
+    _allocation_region(NULL),
+    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
+                                                         ResourceObj::C_HEAP),
+                        2), true /* C_Heap */),
+    _summary_bytes_used(0),
+    _bottom(NULL),
+    _top(NULL),
+    _max(NULL) { }
+
+  virtual ~G1ArchiveAllocator() {
+    assert(_allocation_region == NULL, "_allocation_region not NULL");
+  }
+
+  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);
+
+  // Allocate memory for an individual object.
+  HeapWord* archive_mem_allocate(size_t word_size);
+
+  // Return the memory ranges used in the current archive, after
+  // aligning to the requested alignment.
+  void complete_archive(GrowableArray<MemRegion>* ranges,
+                        size_t end_alignment_in_bytes);
+
+  // The number of bytes allocated by this allocator.
+  size_t used() {
+    return _summary_bytes_used;
+  }
+
+  // Clear the count of bytes allocated in prior G1 regions. This
+  // must be done when recalculate_use is used to reset the counter
+  // for the generic allocator, since it counts bytes in all G1
+  // regions, including those still associated with this allocator.
+  void clear_used() {
+    _summary_bytes_used = 0;
+  }
+
+};
+
 #endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP
@@ -128,6 +128,14 @@ public:
     return biased_base()[biased_index];
   }
 
+  // Return the index of the element of the given array that covers the given
+  // word in the heap.
+  idx_t get_index_by_address(HeapWord* value) const {
+    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    return biased_index - _bias;
+  }
+
   // Set the value of the array entry that corresponds to the given array.
   void set_by_address(HeapWord * address, T value) {
     idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
@@ -135,6 +143,18 @@ public:
     biased_base()[biased_index] = value;
   }
 
+  // Set the value of all array entries that correspond to addresses
+  // in the specified MemRegion.
+  void set_by_address(MemRegion range, T value) {
+    idx_t biased_start = ((uintptr_t)range.start()) >> this->shift_by();
+    idx_t biased_last = ((uintptr_t)range.last()) >> this->shift_by();
+    this->verify_biased_index(biased_start);
+    this->verify_biased_index(biased_last);
+    for (idx_t i = biased_start; i <= biased_last; i++) {
+      biased_base()[i] = value;
+    }
+  }
+
 protected:
   // Returns the address of the element the given address maps to
   T* address_mapped_to(HeapWord* address) {
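Both new G1BiasedMappedArray accessors above reuse the existing biased-index arithmetic: an address maps to entry (addr >> shift) - bias, and a MemRegion therefore covers the inclusive index span from its start word to its last word. The following standalone sketch (not the actual G1BiasedMappedArray code; the type and field names are hypothetical) just restates that arithmetic.

// Minimal sketch of the address-to-index mapping used by the accessors above.
#include <cstddef>
#include <cstdint>

struct BiasedRangeSketch {
  size_t shift;  // log2 of the mapping granularity (e.g. the G1 region size)
  size_t bias;   // biased index of the first mapped address

  size_t index_for(uintptr_t addr) const {
    return (addr >> shift) - bias;           // same form as biased_index - _bias
  }

  // Inclusive index range covered by the addresses [start, last].
  void range_for(uintptr_t start, uintptr_t last,
                 size_t* first_idx, size_t* last_idx) const {
    *first_idx = index_for(start);
    *last_idx  = index_for(last);
  }
};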
@@ -405,7 +405,7 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_humongous();
+  return !hr->is_pinned();
 }
 
 // Private methods.
@@ -908,6 +908,207 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   return NULL;
 }
 
+void G1CollectedHeap::begin_archive_alloc_range() {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  if (_archive_allocator == NULL) {
+    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
+  }
+}
+
+bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
+  // Allocations in archive regions cannot be of a size that would be considered
+  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
+  // may be different at archive-restore time.
+  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
+}
+
+HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+  if (is_archive_alloc_too_large(word_size)) {
+    return NULL;
+  }
+  return _archive_allocator->archive_mem_allocate(word_size);
+}
+
+void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
+                                              size_t end_alignment_in_bytes) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
+
+  // Call complete_archive to do the real work, filling in the MemRegion
+  // array with the archive regions.
+  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
+  delete _archive_allocator;
+  _archive_allocator = NULL;
+}
+
+bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  for (size_t i = 0; i < count; i++) {
+    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MutexLockerEx x(Heap_lock);
+
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // Temporarily disable pretouching of heap pages. This interface is used
+  // when mmap'ing archived heap data in, so pre-touching is wasted.
+  FlagSetting fs(AlwaysPreTouch, false);
+
+  // Enable archive object checking in G1MarkSweep. We have to let it know
+  // about each archive range, so that objects in those ranges aren't marked.
+  G1MarkSweep::enable_archive_object_check();
+
+  // For each specified MemRegion range, allocate the corresponding G1
+  // regions and mark them as archive regions. We expect the ranges in
+  // ascending starting address order, without overlap.
+  for (size_t i = 0; i < count; i++) {
+    MemRegion curr_range = ranges[i];
+    HeapWord* start_address = curr_range.start();
+    size_t word_size = curr_range.word_size();
+    HeapWord* last_address = curr_range.last();
+    size_t commits = 0;
+
+    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
+              err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                      p2i(start_address), p2i(last_address)));
+    guarantee(start_address > prev_last_addr,
+              err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                      p2i(start_address), p2i(prev_last_addr)));
+    prev_last_addr = last_address;
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to allocate
+    // the same region again. If the current range is entirely within that
+    // region, skip it, just adjusting the recorded top.
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        _allocator->increase_used(word_size * HeapWordSize);
+        start_region->set_top(last_address + 1);
+        continue;
+      }
+      start_region->set_top(start_address);
+      curr_range = MemRegion(start_address, last_address + 1);
+      start_region = _hrm.addr_to_region(start_address);
+    }
+
+    // Perform the actual region allocation, exiting if it fails.
+    // Then note how much new space we have allocated.
+    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
+      return false;
+    }
+    _allocator->increase_used(word_size * HeapWordSize);
+    if (commits != 0) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("allocate archive regions")
+                    ergo_format_byte("total size"),
+                    HeapRegion::GrainWords * HeapWordSize * commits);
+    }
+
+    // Mark each G1 region touched by the range as archive, add it to the old set,
+    // and set the allocation context and top.
+    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    prev_last_region = last_region;
+
+    while (curr_region != NULL) {
+      assert(curr_region->is_empty() && !curr_region->is_pinned(),
+             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
+      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
+      curr_region->set_allocation_context(AllocationContext::system());
+      curr_region->set_archive();
+      _old_set.add(curr_region);
+      if (curr_region != last_region) {
+        curr_region->set_top(curr_region->end());
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region->set_top(last_address + 1);
+        curr_region = NULL;
+      }
+    }
+
+    // Notify mark-sweep of the archive range.
+    G1MarkSweep::mark_range_archive(curr_range);
+  }
+  return true;
+}
+
+void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord *prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+
+  // For each MemRegion, create filler objects, if needed, in the G1 regions
+  // that contain the address range. The address range actually within the
+  // MemRegion will not be modified. That is assumed to have been initialized
+  // elsewhere, probably via an mmap of archived heap data.
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapWord* bottom_address = start_region->bottom();
+
+    // Check for a range beginning in the same region in which the
+    // previous one ended.
+    if (start_region == prev_last_region) {
+      bottom_address = prev_last_addr + 1;
+    }
+
+    // Verify that the regions were all marked as archive regions by
+    // alloc_archive_regions.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+    }
+
+    prev_last_addr = last_address;
+    prev_last_region = last_region;
+
+    // Fill the memory below the allocated range with dummy object(s),
+    // if the region bottom does not match the range start, or if the previous
+    // range ended within the same G1 region, and there is a gap.
+    if (start_address != bottom_address) {
+      size_t fill_size = pointer_delta(start_address, bottom_address);
+      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
+      _allocator->increase_used(fill_size * HeapWordSize);
+    }
+  }
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -1132,6 +1333,8 @@ public:
       }
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_archive()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
     } else if (hr->is_old()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
     } else {
@@ -1723,6 +1926,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
+  _archive_allocator(NULL),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
@@ -1748,7 +1952,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _workers->initialize_workers();
 
   _allocator = G1Allocator::create_allocator(this);
-  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
+  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
 
+  // Override the default _filler_array_max_size so that no humongous filler
+  // objects are created.
+  _filler_array_max_size = _humongous_object_threshold_in_words;
+
   uint n_queues = ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
@@ -2163,7 +2371,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
 
 // Computes the sum of the storage used by the various regions.
 size_t G1CollectedHeap::used() const {
-  return _allocator->used();
+  size_t result = _allocator->used();
+  if (_archive_allocator != NULL) {
+    result += _archive_allocator->used();
+  }
+  return result;
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
@@ -2576,7 +2788,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->is_humongous()) {
+  while (result != NULL && result->is_pinned()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
@@ -2884,6 +3096,31 @@ public:
   size_t live_bytes() { return _live_bytes; }
 };
 
+class VerifyArchiveOopClosure: public OopClosure {
+public:
+  VerifyArchiveOopClosure(HeapRegion *hr) { }
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
+              err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
+                      p2i(p), p2i(obj)));
+  }
+};
+
+class VerifyArchiveRegionClosure: public ObjectClosure {
+public:
+  VerifyArchiveRegionClosure(HeapRegion *hr) { }
+  // Verify that all object pointers are to archive regions.
+  void do_object(oop o) {
+    VerifyArchiveOopClosure checkOop(NULL);
+    assert(o != NULL, "Should not be here for NULL oops");
+    o->oop_iterate_no_header(&checkOop);
+  }
+};
+
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
   bool _par;
@@ -2903,6 +3140,13 @@ public:
   }
 
   bool doHeapRegion(HeapRegion* r) {
+    // For archive regions, verify there are no heap pointers to
+    // non-pinned regions. For all others, verify liveness info.
+    if (r->is_archive()) {
+      VerifyArchiveRegionClosure verify_oop_pointers(r);
+      r->object_iterate(&verify_oop_pointers);
+      return true;
+    }
     if (!r->is_continues_humongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
@@ -3087,7 +3331,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -3098,7 +3342,10 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord: {
+    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
+    return !obj->is_gc_marked() && !hr->is_archive();
+  }
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -3131,7 +3378,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
   st->cr();
   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
                "PTAMS=previous top-at-mark-start, "
                "NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
@@ -3852,6 +4099,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
 
       if (evacuation_failed()) {
         _allocator->set_used(recalculate_used());
+        if (_archive_allocator != NULL) {
+          _archive_allocator->clear_used();
+        }
         for (uint i = 0; i < ParallelGCThreads; i++) {
           if (_evacuation_failed_info_array[i].has_failed()) {
             _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
@@ -6173,13 +6423,18 @@ public:
     assert(!r->is_young(), "we should not come across young regions");
 
     if (r->is_humongous()) {
-      // We ignore humongous regions, we left the humongous set unchanged
+      // We ignore humongous regions. We left the humongous set unchanged.
     } else {
       // Objects that were compacted would have ended up on regions
-      // that were previously old or free.
+      // that were previously old or free. Archive regions (which are
+      // old) will not have been touched.
       assert(r->is_free() || r->is_old(), "invariant");
-      // We now consider them old, so register as such.
+      // We now consider them old, so register as such. Leave
+      // archive regions set that way, however, while still adding
+      // them to the old set.
+      if (!r->is_archive()) {
         r->set_old();
+      }
       _old_set->add(r);
     }
     _total_used += r->used();
@@ -6205,6 +6460,9 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
 
   if (!free_list_only) {
     _allocator->set_used(cl.total_used());
+    if (_archive_allocator != NULL) {
+      _archive_allocator->clear_used();
+    }
   }
   assert(_allocator->used_unlocked() == recalculate_used(),
          err_msg("inconsistent _allocator->used_unlocked(), "
@@ -6305,6 +6563,25 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
   _hr_printer.retire(alloc_region);
 }
 
+HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
+  bool expanded = false;
+  uint index = _hrm.find_highest_free(&expanded);
+
+  if (index != G1_NO_HRM_INDEX) {
+    if (expanded) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("requested address range outside heap bounds")
+                    ergo_format_byte("region size"),
+                    HeapRegion::GrainWords * HeapWordSize);
+    }
+    _hrm.allocate_free_regions_starting_at(index, 1);
+    return region_at(index);
+  }
+  return NULL;
+}
+
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -188,6 +188,7 @@ class G1CollectedHeap : public CollectedHeap {
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
   friend class G1Allocator;
+  friend class G1ArchiveAllocator;
 
   // Closures used in implementation.
   friend class G1ParScanThreadState;
@@ -250,6 +251,9 @@ private:
   // Class that handles the different kinds of allocations.
   G1Allocator* _allocator;
 
+  // Class that handles archive allocation ranges.
+  G1ArchiveAllocator* _archive_allocator;
+
   // Statistics for each allocation context
   AllocationContextStats _allocation_context_stats;
 
@@ -576,6 +580,10 @@ protected:
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, InCSetState dest);
 
+  // Allocate the highest free region in the reserved heap. This will commit
+  // regions as necessary.
+  HeapRegion* alloc_highest_free_region();
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -731,6 +739,44 @@ public:
   void free_humongous_region(HeapRegion* hr,
                              FreeRegionList* free_list,
                              bool par);
+
+  // Facility for allocating in 'archive' regions in high heap memory and
+  // recording the allocated ranges. These should all be called from the
+  // VM thread at safepoints, without the heap lock held. They can be used
+  // to create and archive a set of heap regions which can be mapped at the
+  // same fixed addresses in a subsequent JVM invocation.
+  void begin_archive_alloc_range();
+
+  // Check if the requested size would be too large for an archive allocation.
+  bool is_archive_alloc_too_large(size_t word_size);
+
+  // Allocate memory of the requested size from the archive region. This will
+  // return NULL if the size is too large or if no memory is available. It
+  // does not trigger a garbage collection.
+  HeapWord* archive_mem_allocate(size_t word_size);
+
+  // Optionally aligns the end address and returns the allocated ranges in
+  // an array of MemRegions in order of ascending addresses.
+  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
+                               size_t end_alignment_in_bytes = 0);
+
+  // Facility for allocating a fixed range within the heap and marking
+  // the containing regions as 'archive'. For use at JVM init time, when the
+  // caller may mmap archived heap data at the specified range(s).
+  // Verify that the MemRegions specified in the argument array are within the
+  // reserved heap.
+  bool check_archive_addresses(MemRegion* range, size_t count);
+
+  // Commit the appropriate G1 regions containing the specified MemRegions
+  // and mark them as 'archive' regions. The regions in the array must be
+  // non-overlapping and in order of ascending address.
+  bool alloc_archive_regions(MemRegion* range, size_t count);
+
+  // Insert any required filler objects in the G1 regions around the specified
+  // ranges to make the regions parseable. This must be called after
+  // alloc_archive_regions, and after class loading has occurred.
+  void fill_archive_regions(MemRegion* range, size_t count);
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -1395,6 +1441,11 @@ public:
     return word_size > _humongous_object_threshold_in_words;
   }
 
+  // Returns the humongous threshold for a specific region size
+  static size_t humongous_threshold_for(size_t region_size) {
+    return (region_size / 2);
+  }
+
   // Update mod union table with the set of dirty cards.
   void updateModUnion();
 
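The reason is_archive_alloc_too_large (earlier in this diff) compares against humongous_threshold_for(HeapRegion::min_region_size_in_words()) rather than the current region size is that an archived object must stay non-humongous even if the restoring JVM runs with the smallest possible region size. A worked example, with hypothetical numbers (an 8-byte HeapWord and a 1 MB minimum region size), just to make the arithmetic concrete:

// Illustrative only; mirrors the threshold rule from the header above.
#include <cassert>
#include <cstddef>

static size_t humongous_threshold_for(size_t region_size_in_words) {
  return region_size_in_words / 2;                 // same rule as in G1CollectedHeap
}

int main() {
  const size_t min_region_words = (1 * 1024 * 1024) / 8;  // 1 MB region, 8-byte words: 131072 words
  // Half the minimum region: 65536 words (512 KB). Archive allocations at or
  // above this size are rejected at dump time.
  assert(humongous_threshold_for(min_region_words) == 65536);
  return 0;
}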
|
@ -1441,21 +1492,23 @@ public:
|
||||||
|
|
||||||
// Determine if an object is dead, given the object and also
|
// Determine if an object is dead, given the object and also
|
||||||
// the region to which the object belongs. An object is dead
|
// the region to which the object belongs. An object is dead
|
||||||
// iff a) it was not allocated since the last mark and b) it
|
// iff a) it was not allocated since the last mark, b) it
|
||||||
// is not marked.
|
// is not marked, and c) it is not in an archive region.
|
||||||
bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
|
bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
|
||||||
return
|
return
|
||||||
!hr->obj_allocated_since_prev_marking(obj) &&
|
!hr->obj_allocated_since_prev_marking(obj) &&
|
||||||
!isMarkedPrev(obj);
|
!isMarkedPrev(obj) &&
|
||||||
|
!hr->is_archive();
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function returns true when an object has been
|
// This function returns true when an object has been
|
||||||
// around since the previous marking and hasn't yet
|
// around since the previous marking and hasn't yet
|
||||||
// been marked during this marking.
|
// been marked during this marking, and is not in an archive region.
|
||||||
bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
|
bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
|
||||||
return
|
return
|
||||||
!hr->obj_allocated_since_next_marking(obj) &&
|
!hr->obj_allocated_since_next_marking(obj) &&
|
||||||
!isMarkedNext(obj);
|
!isMarkedNext(obj) &&
|
||||||
|
!hr->is_archive();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine if an object is dead, given only the object itself.
|
// Determine if an object is dead, given only the object itself.
|
||||||
|
|
|
@@ -190,7 +190,7 @@ public:
     bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
     bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
 
-    assert(!hr->is_humongous(), "sanity");
+    assert(!hr->is_pinned(), err_msg("Unexpected pinned region at index %u", hr->hrm_index()));
     assert(hr->in_collection_set(), "bad CS");
 
     if (_hrclaimer->claim_region(hr->hrm_index())) {
@@ -54,6 +54,7 @@ const char* G1HRPrinter::region_type_name(RegionType type) {
     case SingleHumongous:    return "SingleH";
     case StartsHumongous:    return "StartsH";
     case ContinuesHumongous: return "ContinuesH";
+    case Archive:            return "Archive";
     default:                 ShouldNotReachHere();
   }
   // trying to keep the Windows compiler happy
@@ -52,7 +52,8 @@ public:
     Old,
     SingleHumongous,
     StartsHumongous,
-    ContinuesHumongous
+    ContinuesHumongous,
+    Archive
   } RegionType;
 
   typedef enum {
@@ -57,6 +57,9 @@
 
 class HeapRegion;
 
+bool G1MarkSweep::_archive_check_enabled = false;
+G1ArchiveRegionMap G1MarkSweep::_archive_region_map;
+
 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
|
||||||
// point all the oops to the new location
|
// point all the oops to the new location
|
||||||
MarkSweep::adjust_pointers(obj);
|
MarkSweep::adjust_pointers(obj);
|
||||||
}
|
}
|
||||||
} else {
|
} else if (!r->is_pinned()) {
|
||||||
// This really ought to be "as_CompactibleSpace"...
|
// This really ought to be "as_CompactibleSpace"...
|
||||||
r->adjust_pointers();
|
r->adjust_pointers();
|
||||||
}
|
}
|
||||||
|
@@ -275,7 +278,7 @@ public:
       }
       hr->reset_during_compaction();
     }
-  } else {
+  } else if (!hr->is_pinned()) {
     hr->compact();
   }
   return false;
@@ -298,6 +301,26 @@ void G1MarkSweep::mark_sweep_phase4() {
 
 }
 
+void G1MarkSweep::enable_archive_object_check() {
+  assert(!_archive_check_enabled, "archive range check already enabled");
+  _archive_check_enabled = true;
+  size_t length = Universe::heap()->max_capacity();
+  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
+                                 (HeapWord*)Universe::heap()->base() + length,
+                                 HeapRegion::GrainBytes);
+}
+
+void G1MarkSweep::mark_range_archive(MemRegion range) {
+  assert(_archive_check_enabled, "archive range check not enabled");
+  _archive_region_map.set_by_address(range, true);
+}
+
+bool G1MarkSweep::in_archive_range(oop object) {
+  // This is the out-of-line part of is_archive_object test, done separately
+  // to avoid additional performance impact when the check is not enabled.
+  return _archive_region_map.get_by_address((HeapWord*)object);
+}
+
 void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   g1h->heap_region_iterate(blk);
@@ -357,7 +380,7 @@ bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
     } else {
       assert(hr->is_continues_humongous(), "Invalid humongous.");
     }
-  } else {
+  } else if (!hr->is_pinned()) {
     prepare_for_compaction(hr, hr->end());
   }
   return false;
|
@ -44,6 +44,7 @@ class ReferenceProcessor;
|
||||||
//
|
//
|
||||||
// Class unloading will only occur when a full gc is invoked.
|
// Class unloading will only occur when a full gc is invoked.
|
||||||
class G1PrepareCompactClosure;
|
class G1PrepareCompactClosure;
|
||||||
|
class G1ArchiveRegionMap;
|
||||||
|
|
||||||
class G1MarkSweep : AllStatic {
|
class G1MarkSweep : AllStatic {
|
||||||
public:
|
public:
|
||||||
|
@ -54,7 +55,22 @@ class G1MarkSweep : AllStatic {
|
||||||
static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
|
static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
|
||||||
static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
|
static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
|
||||||
|
|
||||||
|
// Create the _archive_region_map which is used to identify archive objects.
|
||||||
|
static void enable_archive_object_check();
|
||||||
|
|
||||||
|
// Mark the regions containing the specified address range as archive regions.
|
||||||
|
static void mark_range_archive(MemRegion range);
|
||||||
|
|
||||||
|
// Check if an object is in an archive region using the _archive_region_map.
|
||||||
|
static bool in_archive_range(oop object);
|
||||||
|
|
||||||
|
// Check if archive object checking is enabled, to avoid calling in_archive_range
|
||||||
|
// unnecessarily.
|
||||||
|
static bool archive_check_enabled() { return G1MarkSweep::_archive_check_enabled; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
static bool _archive_check_enabled;
|
||||||
|
static G1ArchiveRegionMap _archive_region_map;
|
||||||
|
|
||||||
// Mark live objects
|
// Mark live objects
|
||||||
static void mark_sweep_phase1(bool& marked_for_deopt,
|
static void mark_sweep_phase1(bool& marked_for_deopt,
|
||||||
|
@@ -93,4 +109,12 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
   bool doHeapRegion(HeapRegion* hr);
 };
 
+// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
+// archive regions. This allows a quick check for whether an object
+// should not be marked because it is in an archive region.
+class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
+ protected:
+  bool default_value() const { return false; }
+};
+
 #endif // SHARE_VM_GC_G1_G1MARKSWEEP_HPP
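Put together, G1ArchiveRegionMap, archive_check_enabled() and in_archive_range() give marking code a cheap way to skip archive objects: test the static flag first, and only then do the per-region map lookup. The call site in the mark-sweep marking path is not part of this excerpt, so the wrapper below is a hedged sketch of that usage pattern, with a hypothetical helper name.

// Illustrative sketch only: how a marking routine could consult the new map.
inline bool is_archive_object_sketch(oop obj) {
  return G1MarkSweep::archive_check_enabled() &&   // cheap static flag, false when no archive exists
         G1MarkSweep::in_archive_range(obj);       // out-of-line region-map lookup
}

// e.g. near the top of a mark routine:
//   if (is_archive_object_sketch(obj)) return;    // never mark or move archive objects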
|
@ -103,6 +103,10 @@ size_t HeapRegion::max_region_size() {
|
||||||
return HeapRegionBounds::max_size();
|
return HeapRegionBounds::max_size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
size_t HeapRegion::min_region_size_in_words() {
|
||||||
|
return HeapRegionBounds::min_size() >> LogHeapWordSize;
|
||||||
|
}
|
||||||
|
|
||||||
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
|
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
|
||||||
size_t region_size = G1HeapRegionSize;
|
size_t region_size = G1HeapRegionSize;
|
||||||
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
|
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
|
||||||
|
@@ -716,7 +720,7 @@ public:
       HeapRegion* to = _g1h->heap_region_containing(obj);
       if (from != NULL && to != NULL &&
           from != to &&
-          !to->is_humongous()) {
+          !to->is_pinned()) {
         jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
         jbyte cv_field = *_bs->byte_for_const(p);
         const jbyte dirty = CardTableModRefBS::dirty_card_val();
|
@ -331,6 +331,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t max_region_size();
|
static size_t max_region_size();
|
||||||
|
static size_t min_region_size_in_words();
|
||||||
|
|
||||||
// It sets up the heap region size (GrainBytes / GrainWords), as
|
// It sets up the heap region size (GrainBytes / GrainWords), as
|
||||||
// well as other related fields that are based on the heap region
|
// well as other related fields that are based on the heap region
|
||||||
|
@@ -417,6 +418,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   bool is_old() const { return _type.is_old(); }
 
+  // A pinned region contains objects which are not moved by garbage collections.
+  // Humongous regions and archive regions are pinned.
+  bool is_pinned() const { return _type.is_pinned(); }
+
+  // An archive region is a pinned region, also tagged as old, which
+  // should not be marked during mark/sweep. This allows the address
+  // space to be shared by JVM instances.
+  bool is_archive() const { return _type.is_archive(); }
+
   // For a humongous region, region in which it starts.
   HeapRegion* humongous_start_region() const {
     return _humongous_start_region;

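To illustrate how the new predicates compose, every humongous or archive region reports is_pinned(), while is_archive() singles out the shared ranges. The closure below is hypothetical, not part of this change, and assumes the surrounding HotSpot declarations (HeapRegion, HeapRegionClosure) are in scope:

// Hypothetical closure: tallies pinned and archive regions while iterating
// the heap, using only the predicates introduced above.
class CountPinnedRegionsClosure : public HeapRegionClosure {
  size_t _pinned;
  size_t _archive;
 public:
  CountPinnedRegionsClosure() : _pinned(0), _archive(0) {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_pinned())  { _pinned++;  }
    if (hr->is_archive()) { _archive++; }
    return false; // keep iterating
  }

  size_t pinned()  const { return _pinned;  }
  size_t archive() const { return _archive; }
};
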
@@ -670,6 +680,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
 
   void set_old() { _type.set_old(); }
 
+  void set_archive() { _type.set_archive(); }
+
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
   // is within the unmarked area of the region.

@@ -278,6 +278,55 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
   return num_regions;
 }
 
+uint HeapRegionManager::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest region index, looking for an
+  // entry which is either free or not yet committed. If not yet
+  // committed, expand_at that index.
+  uint curr = max_length() - 1;
+  while (true) {
+    HeapRegion *hr = _regions.get_by_index(curr);
+    if (hr == NULL) {
+      uint res = expand_at(curr, 1);
+      if (res == 1) {
+        *expanded = true;
+        return curr;
+      }
+    } else {
+      if (hr->is_free()) {
+        *expanded = false;
+        return curr;
+      }
+    }
+    if (curr == 0) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
+  size_t commits = 0;
+  uint start_index = (uint)_regions.get_index_by_address(range.start());
+  uint last_index = (uint)_regions.get_index_by_address(range.last());
+
+  // Ensure that each G1 region in the range is free, returning false if not.
+  // Commit those that are not yet available, and keep count.
+  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
+    if (!is_available(curr_index)) {
+      commits++;
+      expand_at(curr_index, 1);
+    }
+    HeapRegion* curr_region = _regions.get_by_index(curr_index);
+    if (!curr_region->is_free()) {
+      return false;
+    }
+  }
+
+  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
+  *commit_count = commits;
+  return true;
+}
+
 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);

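A hedged sketch of a caller for allocate_containing_regions, for example code that maps a shared archive range into the heap; the function below is hypothetical, and only the HeapRegionManager entry point and the MemRegion type come from the sources:

// Hypothetical caller: commit and claim the G1 regions backing an archive range.
// Assumes the HotSpot MemRegion and HeapRegionManager declarations are in scope.
bool reserve_archive_range(HeapRegionManager* hrm, MemRegion archive_range) {
  size_t newly_committed = 0;
  if (!hrm->allocate_containing_regions(archive_range, &newly_committed)) {
    return false; // some region in the range is already committed and in use
  }
  // newly_committed regions were committed on our behalf; a real caller would
  // account for them against the heap's committed size here.
  return true;
}
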
@@ -221,6 +221,16 @@ public:
 
   HeapRegion* next_region_in_heap(const HeapRegion* r) const;
 
+  // Find the highest free or uncommitted region in the reserved heap,
+  // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
+  // Set the 'expanded' boolean true if a new region was committed.
+  uint find_highest_free(bool* expanded);
+
+  // Allocate the regions that contain the address range specified, committing the
+  // regions if necessary. Return false if any of the regions is already committed
+  // and not free, and return the number of regions newly committed in commit_count.
+  bool allocate_containing_regions(MemRegion range, size_t* commit_count);
+
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;

@@ -42,7 +42,8 @@ void HeapRegionSetBase::verify_region(HeapRegion* hr) {
   assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
   assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
   assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
-  assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
+  assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
+         err_msg("Empty region %u is not free or archive for set %s", hr->hrm_index(), name()));
   assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
 }
 #endif

@@ -33,6 +33,7 @@ bool HeapRegionType::is_valid(Tag tag) {
     case StartsHumongousTag:
     case ContinuesHumongousTag:
     case OldTag:
+    case ArchiveTag:
       return true;
   }
   return false;

@@ -47,6 +48,7 @@ const char* HeapRegionType::get_str() const {
     case StartsHumongousTag: return "HUMS";
     case ContinuesHumongousTag: return "HUMC";
     case OldTag: return "OLD";
+    case ArchiveTag: return "ARC";
   }
   ShouldNotReachHere();
   // keep some compilers happy

@@ -62,6 +64,7 @@ const char* HeapRegionType::get_short_str() const {
     case StartsHumongousTag: return "HS";
     case ContinuesHumongousTag: return "HC";
    case OldTag: return "O";
+    case ArchiveTag: return "A";
   }
   ShouldNotReachHere();
   // keep some compilers happy

@@ -44,15 +44,18 @@ private:
   //
   // 0000 0 [ 0] Free
   //
-  // 0001 0      Young Mask
+  // 0001 0 [ 2] Young Mask
   // 0001 0 [ 2] Eden
   // 0001 1 [ 3] Survivor
   //
-  // 0010 0      Humongous Mask
-  // 0010 0 [ 4] Starts Humongous
-  // 0010 1 [ 5] Continues Humongous
+  // 0010 0 [ 4] Humongous Mask
+  // 0100 0 [ 8] Pinned Mask
+  // 0110 0 [12] Starts Humongous
+  // 0110 1 [13] Continues Humongous
   //
-  // 01000 [ 8] Old
+  // 1000 0 [16] Old Mask
+  //
+  // 1100 0 [24] Archive
   typedef enum {
     FreeTag = 0,

@@ -61,10 +64,14 @@ private:
     SurvTag = YoungMask + 1,
 
     HumongousMask = 4,
-    StartsHumongousTag = HumongousMask,
-    ContinuesHumongousTag = HumongousMask + 1,
+    PinnedMask = 8,
+    StartsHumongousTag = HumongousMask | PinnedMask,
+    ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
 
-    OldTag = 8
+    OldMask = 16,
+    OldTag = OldMask,
+
+    ArchiveTag = PinnedMask | OldMask
   } Tag;
 
   volatile Tag _tag;

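The encoding now packs two orthogonal attribute bits into the tag: PinnedMask (8) and OldMask (16), and ArchiveTag sets both. Note that ContinuesHumongousTag is written as HumongousMask | PinnedMask + 1, which by C++ precedence parses as HumongousMask | (PinnedMask + 1), i.e. 4 | 9 == 13, the same value as (HumongousMask | PinnedMask) + 1. A standalone sketch that copies the values above and checks them against the layout comment (an illustrative copy, not the HotSpot header):

#include <cstdint>

enum Tag : uint32_t {
  FreeTag               = 0,
  HumongousMask         = 4,
  PinnedMask            = 8,
  StartsHumongousTag    = HumongousMask | PinnedMask,      // 12
  ContinuesHumongousTag = HumongousMask | PinnedMask + 1,  // 4 | 9 == 13
  OldMask               = 16,
  OldTag                = OldMask,
  ArchiveTag            = PinnedMask | OldMask             // 24
};

static_assert(StartsHumongousTag == 12,    "matches [12] in the layout comment");
static_assert(ContinuesHumongousTag == 13, "matches [13] in the layout comment");
static_assert(ArchiveTag == 24,            "matches [24] in the layout comment");
static_assert((ArchiveTag & PinnedMask) != 0,         "archive regions are pinned");
static_assert((ArchiveTag & OldMask) != 0,            "archive regions are old");
static_assert((StartsHumongousTag & PinnedMask) != 0, "humongous regions are pinned");

int main() { return 0; }
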
@@ -108,7 +115,13 @@ public:
   bool is_starts_humongous() const { return get() == StartsHumongousTag; }
   bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
 
-  bool is_old() const { return get() == OldTag; }
+  bool is_archive() const { return get() == ArchiveTag; }
+
+  // is_old regions may or may not also be pinned
+  bool is_old() const { return (get() & OldMask) != 0; }
+
+  // is_pinned regions may be archive or humongous
+  bool is_pinned() const { return (get() & PinnedMask) != 0; }
 
   // Setters

@@ -123,6 +136,8 @@ public:
 
   void set_old() { set(OldTag); }
 
+  void set_archive() { set_from(ArchiveTag, FreeTag); }
+
   // Misc
 
   const char* get_str() const;

@@ -313,7 +313,7 @@ void MarkSweep::restore_marks() {
 
 MarkSweep::IsAliveClosure MarkSweep::is_alive;
 
-bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked() || is_archive_object(p); }
 
 MarkSweep::KeepAliveClosure MarkSweep::keep_alive;

@@ -147,6 +147,9 @@ class MarkSweep : AllStatic {
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
+  // Archive Object handling
+  static inline bool is_archive_object(oop object);
+
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }

@@ -37,6 +37,7 @@
 #include "utilities/stack.inline.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/g1MarkSweep.hpp"
 #endif // INCLUDE_ALL_GCS
 
 inline void MarkSweep::mark_object(oop obj) {

@@ -57,6 +58,15 @@ inline void MarkSweep::mark_object(oop obj) {
   }
 }
 
+inline bool MarkSweep::is_archive_object(oop object) {
+#if INCLUDE_ALL_GCS
+  return (G1MarkSweep::archive_check_enabled() &&
+          G1MarkSweep::in_archive_range(object));
+#else
+  return false;
+#endif
+}
+
 inline void MarkSweep::follow_klass(Klass* klass) {
   oop op = klass->klass_holder();
   MarkSweep::mark_and_push(&op);

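Taken together with the G1MarkSweep declarations earlier in this change, the intended sequence is to enable the check once, record each shared range, and then query per object during full-GC marking. A hedged sketch of that sequence; the setup function and its argument are hypothetical, and only the G1MarkSweep and MarkSweep entry points come from the patch:

// Hypothetical CDS-side setup, assuming the G1MarkSweep API added above.
void setup_archive_object_checking(MemRegion shared_range) {
  G1MarkSweep::enable_archive_object_check();    // creates _archive_region_map
  G1MarkSweep::mark_range_archive(shared_range); // flags the regions covering the range
}

// During a full GC the marking code then asks MarkSweep::is_archive_object(obj);
// it returns true only when checking is enabled and obj lies in a flagged
// region, so archive objects are neither marked nor forwarded.
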
@@ -74,7 +84,8 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked()) {
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
       mark_object(obj);
       follow_object(obj);
     }

@@ -87,7 +98,8 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked()) {
+    if (!obj->mark()->is_marked() &&
+        !is_archive_object(obj)) {
       mark_object(obj);
       _marking_stack.push(obj);
     }

@@ -111,18 +123,21 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
     assert(Universe::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = oop(obj->mark()->decode_pointer());
-    assert(new_obj != NULL ||                          // is forwarding ptr?
+    assert(is_archive_object(obj) ||                   // no forwarding of archive objects
+           new_obj != NULL ||                          // is forwarding ptr?
            obj->mark() == markOopDesc::prototype() ||  // not gc marked?
            (UseBiasedLocking && obj->mark()->has_bias_pattern()),
            // not gc marked?
            "should be forwarded");
     if (new_obj != NULL) {
+      if (!is_archive_object(obj)) {
       assert(Universe::heap()->is_in_reserved(new_obj),
              "should be in object space");
       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      }
     }
   }
 }
 
 template <class T> inline void MarkSweep::KeepAliveClosure::do_oop_work(T* p) {
   mark_and_push(p);

@@ -88,9 +88,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   static int _fire_out_of_memory_count;
 #endif
 
-  // Used for filler objects (static, but initialized in ctor).
-  static size_t _filler_array_max_size;
-
   GCHeapLog* _gc_heap_log;
 
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used

@@ -102,6 +99,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   BarrierSet* _barrier_set;
   bool _is_gc_active;
 
+  // Used for filler objects (static, but initialized in ctor).
+  static size_t _filler_array_max_size;
+
   unsigned int _total_collections;      // ... started
   unsigned int _total_full_collections; // ... started
   NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
