8200169: Flatten G1Allocator class hierarchy

Reviewed-by: tschatzl, sangheki
Stefan Johansson 2018-04-06 11:41:21 +02:00
parent db903e5748
commit b758cec4ad
5 changed files with 153 additions and 191 deletions
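The change folds each single-implementation abstract base into its concrete class: G1DefaultAllocator is merged into G1Allocator and G1DefaultPLABAllocator into G1PLABAllocator, with the former pure-virtual hooks becoming plain, mostly inline, members. A minimal sketch of that flattening pattern, using hypothetical names (Allocator, DefaultAllocator, Region) rather than the actual HotSpot declarations:

// Before: abstract base whose only subclass provides the one real implementation.
class Region;

class Allocator {
protected:
  virtual Region* mutator_region() = 0;      // every access is a virtual call
public:
  virtual ~Allocator() { }
  virtual void init_mutator_region() = 0;
};

class DefaultAllocator : public Allocator {
  Region* _mutator_region;
protected:
  virtual Region* mutator_region() { return _mutator_region; }
public:
  virtual void init_mutator_region() { /* set up _mutator_region */ }
};

// After: one concrete class; the accessor is non-virtual and can be inlined.
class FlattenedAllocator {
  Region* _mutator_region;
  Region* mutator_region() { return _mutator_region; }
public:
  void init_mutator_region() { /* set up _mutator_region */ }
};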

src/hotspot/share/gc/g1/g1Allocator.cpp

@@ -33,8 +33,8 @@
#include "gc/g1/heapRegionType.hpp"
#include "utilities/align.hpp"
G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
G1Allocator(heap),
G1Allocator::G1Allocator(G1CollectedHeap* heap) :
_g1h(heap),
_survivor_is_full(false),
_old_is_full(false),
_retained_old_gc_alloc_region(NULL),
@@ -42,16 +42,20 @@ G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
_old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
}
void G1DefaultAllocator::init_mutator_alloc_region() {
void G1Allocator::init_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
_mutator_alloc_region.init();
}
void G1DefaultAllocator::release_mutator_alloc_region() {
void G1Allocator::release_mutator_alloc_region() {
_mutator_alloc_region.release();
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
return _retained_old_gc_alloc_region == hr;
}
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
@@ -87,7 +91,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
}
}
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
void G1Allocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint_on_vm_thread();
_survivor_is_full = false;
@@ -100,7 +104,7 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info)
&_retained_old_gc_alloc_region);
}
void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
void G1Allocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
old_gc_alloc_region()->count());
survivor_gc_alloc_region()->release();
@@ -112,25 +116,25 @@ void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info)
_retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
void G1Allocator::abandon_gc_alloc_regions() {
assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
_retained_old_gc_alloc_region = NULL;
}
bool G1DefaultAllocator::survivor_is_full() const {
bool G1Allocator::survivor_is_full() const {
return _survivor_is_full;
}
bool G1DefaultAllocator::old_is_full() const {
bool G1Allocator::old_is_full() const {
return _old_is_full;
}
void G1DefaultAllocator::set_survivor_full() {
void G1Allocator::set_survivor_full() {
_survivor_is_full = true;
}
void G1DefaultAllocator::set_old_full() {
void G1Allocator::set_old_full() {
_old_is_full = true;
}
@@ -151,6 +155,19 @@ size_t G1Allocator::unsafe_max_tlab_alloc() {
}
}
size_t G1Allocator::used_in_alloc_regions() {
assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
size_t result = 0;
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region()->get();
if (hr != NULL) {
result += hr->used();
}
return result;
}
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t word_size) {
size_t temp = 0;
@@ -221,13 +238,30 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
return result;
}
uint G1PLABAllocator::calc_survivor_alignment_bytes() {
assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
// No need to align objects in the survivors differently, return 0
// which means "survivor alignment is not used".
return 0;
} else {
assert(SurvivorAlignmentInBytes > 0, "sanity");
return SurvivorAlignmentInBytes;
}
}
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
for (size_t i = 0; i < ARRAY_SIZE(_direct_allocated); i++) {
_direct_allocated[i] = 0;
for (uint state = 0; state < InCSetState::Num; state++) {
_direct_allocated[state] = 0;
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
@@ -282,18 +316,7 @@ void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
alloc_buffer(dest)->undo_allocation(obj, word_sz);
}
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
G1PLABAllocator(allocator),
_surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultPLABAllocator::flush_and_retire_stats() {
void G1PLABAllocator::flush_and_retire_stats() {
for (uint state = 0; state < InCSetState::Num; state++) {
PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
@@ -305,7 +328,7 @@ void G1DefaultPLABAllocator::flush_and_retire_stats() {
}
}
void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
void G1PLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
wasted = 0;
undo_wasted = 0;
for (uint state = 0; state < InCSetState::Num; state++) {

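Worth noting in g1Allocator.cpp above: calc_survivor_alignment_bytes() moves out of the header but keeps its logic, returning 0 when survivor objects need no alignment beyond the regular object alignment, and the configured survivor alignment otherwise. A small stand-alone sketch with assumed flag values (the real code reads the SurvivorAlignmentInBytes and ObjectAlignmentInBytes globals):

#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for the HotSpot alignment flags.
static unsigned survivor_alignment_in_bytes = 64;
static unsigned object_alignment_in_bytes   = 8;

static unsigned calc_survivor_alignment_bytes() {
  assert(survivor_alignment_in_bytes >= object_alignment_in_bytes);
  if (survivor_alignment_in_bytes == object_alignment_in_bytes) {
    // No extra alignment needed in the survivor space; 0 means "not used".
    return 0;
  }
  return survivor_alignment_in_bytes;
}

int main() {
  printf("%u\n", calc_survivor_alignment_bytes());   // 64 with the values above
  survivor_alignment_in_bytes = object_alignment_in_bytes;
  printf("%u\n", calc_survivor_alignment_bytes());   // 0 once the alignments match
  return 0;
}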
src/hotspot/share/gc/g1/g1Allocator.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,82 +37,13 @@ class EvacuationInfo;
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
private:
G1CollectedHeap* _g1h;
virtual MutatorAllocRegion* mutator_alloc_region() = 0;
virtual bool survivor_is_full() const = 0;
virtual bool old_is_full() const = 0;
virtual void set_survivor_full() = 0;
virtual void set_old_full() = 0;
// Accessors to the allocation regions.
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() = 0;
virtual OldGCAllocRegion* old_gc_alloc_region() = 0;
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
public:
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
virtual ~G1Allocator() { }
#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
bool has_mutator_alloc_region() { return mutator_alloc_region()->get() != NULL; }
#endif
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
// Management of retained regions.
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
// Allocate blocks of memory during mutator time.
inline HeapWord* attempt_allocation(size_t word_size);
inline HeapWord* attempt_allocation_locked(size_t word_size);
inline HeapWord* attempt_allocation_force(size_t word_size);
size_t unsafe_max_tlab_alloc();
// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size);
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
virtual size_t used_in_alloc_regions() = 0;
};
// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
private:
bool _survivor_is_full;
bool _old_is_full;
protected:
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
@@ -125,50 +56,67 @@ protected:
OldGCAllocRegion _old_gc_alloc_region;
HeapRegion* _retained_old_gc_alloc_region;
bool survivor_is_full() const;
bool old_is_full() const;
void set_survivor_full();
void set_old_full();
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
// Accessors to the allocation regions.
inline MutatorAllocRegion* mutator_alloc_region();
inline SurvivorGCAllocRegion* survivor_gc_alloc_region();
inline OldGCAllocRegion* old_gc_alloc_region();
// Allocation attempt during GC for a survivor object / PLAB.
HeapWord* survivor_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
// Allocation attempt during GC for an old object / PLAB.
HeapWord* old_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
public:
G1DefaultAllocator(G1CollectedHeap* heap);
G1Allocator(G1CollectedHeap* heap);
virtual bool survivor_is_full() const;
virtual bool old_is_full() const ;
#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
bool has_mutator_alloc_region() { return mutator_alloc_region()->get() != NULL; }
#endif
virtual void set_survivor_full();
virtual void set_old_full();
void init_mutator_alloc_region();
void release_mutator_alloc_region();
virtual void init_mutator_alloc_region();
virtual void release_mutator_alloc_region();
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
void abandon_gc_alloc_regions();
bool is_retained_old_region(HeapRegion* hr);
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
// Allocate blocks of memory during mutator time.
virtual bool is_retained_old_region(HeapRegion* hr) {
return _retained_old_gc_alloc_region == hr;
}
inline HeapWord* attempt_allocation(size_t word_size);
inline HeapWord* attempt_allocation_locked(size_t word_size);
inline HeapWord* attempt_allocation_force(size_t word_size);
virtual MutatorAllocRegion* mutator_alloc_region() {
return &_mutator_alloc_region;
}
size_t unsafe_max_tlab_alloc();
size_t used_in_alloc_regions();
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() {
return &_survivor_gc_alloc_region;
}
// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size);
virtual OldGCAllocRegion* old_gc_alloc_region() {
return &_old_gc_alloc_region;
}
virtual size_t used_in_alloc_regions() {
assert(Heap_lock->owner() != NULL,
"Should be owned on this thread's behalf.");
size_t result = 0;
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region()->get();
if (hr != NULL) {
result += hr->used();
}
return result;
}
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
};
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
@@ -176,10 +124,14 @@ public:
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
private:
G1CollectedHeap* _g1h;
G1Allocator* _allocator;
PLAB _surviving_alloc_buffer;
PLAB _tenured_alloc_buffer;
PLAB* _alloc_buffers[InCSetState::Num];
// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
// != 0 : align survivors to that alignment
@@ -190,32 +142,18 @@ protected:
// Number of words allocated directly (not counting PLAB allocation).
size_t _direct_allocated[InCSetState::Num];
virtual void flush_and_retire_stats() = 0;
virtual PLAB* alloc_buffer(InCSetState dest) = 0;
void flush_and_retire_stats();
inline PLAB* alloc_buffer(InCSetState dest);
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
static uint calc_survivor_alignment_bytes() {
assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
// No need to align objects in the survivors differently, return 0
// which means "survivor alignment is not used".
return 0;
} else {
assert(SurvivorAlignmentInBytes > 0, "sanity");
return SurvivorAlignmentInBytes;
}
}
HeapWord* allocate_new_plab(InCSetState dest,
size_t word_sz);
static uint calc_survivor_alignment_bytes();
bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
public:
G1PLABAllocator(G1Allocator* allocator);
virtual ~G1PLABAllocator() { }
virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
void waste(size_t& wasted, size_t& undo_wasted);
// Allocate word_sz words in dest, either directly into the regions or by
// allocating a new PLAB. Returns the address of the allocated memory, NULL if
@@ -230,42 +168,13 @@ public:
inline HeapWord* plab_allocate(InCSetState dest,
size_t word_sz);
HeapWord* allocate(InCSetState dest,
size_t word_sz,
bool* refill_failed) {
HeapWord* const obj = plab_allocate(dest, word_sz);
if (obj != NULL) {
return obj;
}
return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
}
inline HeapWord* allocate(InCSetState dest,
size_t word_sz,
bool* refill_failed);
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz);
};
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
PLAB _surviving_alloc_buffer;
PLAB _tenured_alloc_buffer;
PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultPLABAllocator(G1Allocator* _allocator);
virtual PLAB* alloc_buffer(InCSetState dest) {
assert(dest.is_valid(),
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,
"Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
return _alloc_buffers[dest.value()];
}
virtual void flush_and_retire_stats();
virtual void waste(size_t& wasted, size_t& undo_wasted);
};
// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
// archive regions. This allows a quick check for whether an object
// should not be marked because it is in an archive region.

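The header above also shows the usual HotSpot split being applied to the new non-virtual accessors: only declarations stay in g1Allocator.hpp, while the bodies move to g1Allocator.inline.hpp (shown next), so only the .cpp files that actually call them include the definitions. A simplified sketch of that layout with hypothetical file and class names:

// foo.hpp (hypothetical): declaration only, cheap to include widely.
class Foo {
  int _count;
public:
  inline int count() const;     // body lives in foo.inline.hpp
  void reset() { _count = 0; }
};

// foo.inline.hpp (hypothetical): included only where count() is called.
inline int Foo::count() const {
  return _count;
}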
src/hotspot/share/gc/g1/g1Allocator.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,21 +29,41 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/shared/plab.inline.hpp"
HeapWord* G1Allocator::attempt_allocation(size_t word_size) {
inline MutatorAllocRegion* G1Allocator::mutator_alloc_region() {
return &_mutator_alloc_region;
}
inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region() {
return &_survivor_gc_alloc_region;
}
inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
return &_old_gc_alloc_region;
}
inline HeapWord* G1Allocator::attempt_allocation(size_t word_size) {
return mutator_alloc_region()->attempt_allocation(word_size);
}
HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
HeapWord* result = mutator_alloc_region()->attempt_allocation_locked(word_size);
assert(result != NULL || mutator_alloc_region()->get() == NULL,
"Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region()->get()));
return result;
}
HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
return mutator_alloc_region()->attempt_allocation_force(word_size);
}
inline PLAB* G1PLABAllocator::alloc_buffer(InCSetState dest) {
assert(dest.is_valid(),
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,
"Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
return _alloc_buffers[dest.value()];
}
inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
size_t word_sz) {
PLAB* buffer = alloc_buffer(dest);
@@ -54,6 +74,16 @@ inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
}
}
inline HeapWord* G1PLABAllocator::allocate(InCSetState dest,
size_t word_sz,
bool* refill_failed) {
HeapWord* const obj = plab_allocate(dest, word_sz);
if (obj != NULL) {
return obj;
}
return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
}
// Create the maps which is used to identify archive objects.
inline void G1ArchiveAllocator::enable_archive_object_check() {
if (_archive_check_enabled) {

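The allocate() body that lands in g1Allocator.inline.hpp above is a thin fast path: try the current PLAB first, and fall back to the direct-or-new-PLAB slow path only when the bump-pointer allocation fails. A self-contained sketch of that two-level shape with made-up types (SimplePlab, plain size_t words), not the real PLAB API:

#include <cstddef>

// Hypothetical bump-pointer buffer standing in for a PLAB.
struct SimplePlab {
  size_t* _top;
  size_t* _end;
  size_t* allocate(size_t word_sz) {
    if (_top + word_sz <= _end) {    // fits in the current buffer
      size_t* obj = _top;
      _top += word_sz;
      return obj;
    }
    return nullptr;                  // buffer exhausted
  }
};

// Placeholder slow path; the real allocator retires the PLAB, requests a new
// one, or allocates directly in a region.
static size_t* allocate_direct_or_new_plab(SimplePlab&, size_t, bool* refill_failed) {
  *refill_failed = true;
  return nullptr;
}

// Fast path first, slow path only on failure - the same shape as
// G1PLABAllocator::allocate(), without the HotSpot types.
static size_t* allocate(SimplePlab& plab, size_t word_sz, bool* refill_failed) {
  size_t* const obj = plab.allocate(word_sz);
  if (obj != nullptr) {
    return obj;
  }
  return allocate_direct_or_new_plab(plab, word_sz, refill_failed);
}

int main() {
  size_t storage[16];
  SimplePlab plab = { storage, storage + 16 };
  bool refill_failed = false;
  size_t* p = allocate(plab, 4, &refill_failed);   // succeeds from the buffer
  return p != nullptr ? 0 : 1;
}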
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -1426,7 +1426,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_workers->initialize_workers();
_verifier = new G1HeapVerifier(this);
_allocator = new G1DefaultAllocator(this);
_allocator = new G1Allocator(this);
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());

src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

@@ -66,7 +66,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, real_length * sizeof(size_t));
_plab_allocator = new G1DefaultPLABAllocator(_g1h->allocator());
_plab_allocator = new G1PLABAllocator(_g1h->allocator());
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
// The dest for Young is used when the objects are aged enough to