Mirror of https://github.com/openjdk/jdk.git
8220311: Implementation: NUMA-Aware Memory Allocation for G1, Survivor (2/3)
Reviewed-by: kbarrett, sjohanss, tschatzl
parent 52116d808c
commit eaa6355cb0

9 changed files with 172 additions and 92 deletions
src/hotspot/share/gc/g1/g1AllocRegion.cpp

@@ -349,7 +349,7 @@ HeapRegion* MutatorAllocRegion::release() {
 HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                  bool force) {
   assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, _purpose);
+  return _g1h->new_gc_alloc_region(word_size, _purpose, _node_index);
 }
 
 void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
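A pattern worth noting before the rest of the diff: each allocation region now remembers the NUMA node it serves (_node_index above) and hands that index back to the heap whenever it requests a replacement region. A minimal stand-alone sketch of the idea, using toy types rather than HotSpot code:

    #include <cstddef>
    #include <vector>

    struct Region { unsigned node; };

    // Toy heap keeping one free list per NUMA node.
    struct Heap {
      std::vector<std::vector<Region*>> free_lists;

      Region* new_gc_alloc_region(std::size_t /* word_size */, unsigned node_index) {
        std::vector<Region*>& list = free_lists.at(node_index);
        if (list.empty()) {
          return nullptr;  // the real code may fall back to regions from other nodes
        }
        Region* r = list.back();
        list.pop_back();
        return r;
      }
    };

    // Toy alloc region: created for one node, it always requests regions for that node.
    struct GCAllocRegion {
      Heap* heap;
      unsigned node_index;

      Region* allocate_new_region(std::size_t word_size) {
        return heap->new_gc_alloc_region(word_size, node_index);
      }
    };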
src/hotspot/share/gc/g1/g1AllocRegion.hpp

@@ -270,8 +270,8 @@ protected:
 
 class SurvivorGCAllocRegion : public G1GCAllocRegion {
 public:
-  SurvivorGCAllocRegion(G1EvacStats* stats)
-  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young) { }
+  SurvivorGCAllocRegion(G1EvacStats* stats, uint node_index)
+  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young, node_index) { }
 };
 
 class OldGCAllocRegion : public G1GCAllocRegion {
src/hotspot/share/gc/g1/g1Allocator.cpp

@@ -42,21 +42,27 @@ G1Allocator::G1Allocator(G1CollectedHeap* heap) :
   _old_is_full(false),
   _num_alloc_regions(_numa->num_active_nodes()),
   _mutator_alloc_regions(NULL),
-  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
+  _survivor_gc_alloc_regions(NULL),
   _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
   _retained_old_gc_alloc_region(NULL) {
 
   _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
+  _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC);
+  G1EvacStats* stat = heap->alloc_buffer_stats(G1HeapRegionAttr::Young);
+
   for (uint i = 0; i < _num_alloc_regions; i++) {
     ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
+    ::new(_survivor_gc_alloc_regions + i) SurvivorGCAllocRegion(stat, i);
   }
 }
 
 G1Allocator::~G1Allocator() {
   for (uint i = 0; i < _num_alloc_regions; i++) {
     _mutator_alloc_regions[i].~MutatorAllocRegion();
+    _survivor_gc_alloc_regions[i].~SurvivorGCAllocRegion();
   }
   FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
+  FREE_C_HEAP_ARRAY(SurvivorGCAllocRegion, _survivor_gc_alloc_regions);
 }
 
 #ifdef ASSERT
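SurvivorGCAllocRegion has no default constructor, so the constructor above cannot simply value-initialize an array; it allocates raw C-heap storage, placement-constructs each element, and the destructor later runs each element's destructor by hand before freeing the storage. A self-contained sketch of that pattern, with malloc/free standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY:

    #include <cstdlib>
    #include <new>

    struct SurvivorRegion {
      unsigned node;
      explicit SurvivorRegion(unsigned n) : node(n) {}  // no default constructor
    };

    int main() {
      const unsigned num = 4;  // e.g. number of active NUMA nodes
      SurvivorRegion* regions =
          static_cast<SurvivorRegion*>(std::malloc(num * sizeof(SurvivorRegion)));
      for (unsigned i = 0; i < num; i++) {
        ::new (regions + i) SurvivorRegion(i);  // placement new, one element per node
      }
      // ... use regions ...
      for (unsigned i = 0; i < num; i++) {
        regions[i].~SurvivorRegion();           // explicit destructor calls
      }
      std::free(regions);
      return 0;
    }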
@@ -123,7 +129,10 @@ void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
   _survivor_is_full = false;
   _old_is_full = false;
 
-  _survivor_gc_alloc_region.init();
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    survivor_gc_alloc_region(i)->init();
+  }
+
   _old_gc_alloc_region.init();
   reuse_retained_old_region(evacuation_info,
                             &_old_gc_alloc_region,
@@ -131,9 +140,14 @@ void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
 }
 
 void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
+  uint survivor_region_count = 0;
+  for (uint node_index = 0; node_index < _num_alloc_regions; node_index++) {
+    survivor_region_count += survivor_gc_alloc_region(node_index)->count();
+    survivor_gc_alloc_region(node_index)->release();
+  }
+  evacuation_info.set_allocation_regions(survivor_region_count +
                                          old_gc_alloc_region()->count());
-  survivor_gc_alloc_region()->release();
 
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't
   // _retained_old_gc_alloc_region will become NULL. This is what we
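Bookkeeping that used to read a single survivor region is now summed across the per-node regions before being reported once. A toy version of that aggregation (illustrative names only, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct NodeSurvivorState { unsigned region_count; };

    int main() {
      std::vector<NodeSurvivorState> nodes = {{2}, {1}, {3}};  // e.g. three NUMA nodes
      unsigned survivor_region_count = 0;
      for (NodeSurvivorState& n : nodes) {
        survivor_region_count += n.region_count;  // gather per-node counts
        n.region_count = 0;                       // "release" that node's current region
      }
      std::printf("survivor allocation regions: %u\n", survivor_region_count);
      return 0;
    }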
@@ -143,7 +157,9 @@ void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
 }
 
 void G1Allocator::abandon_gc_alloc_regions() {
-  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition");
+  }
   assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
   _retained_old_gc_alloc_region = NULL;
 }
@@ -193,9 +209,10 @@ size_t G1Allocator::used_in_alloc_regions() {
 }
 
 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
-                                              size_t word_size) {
+                                              size_t word_size,
+                                              uint node_index) {
   size_t temp = 0;
-  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
+  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
   assert(result == NULL || temp == word_size,
          "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
          word_size, temp, p2i(result));
@@ -205,10 +222,11 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                               size_t min_word_size,
                                               size_t desired_word_size,
-                                              size_t* actual_word_size) {
+                                              size_t* actual_word_size,
+                                              uint node_index) {
   switch (dest.type()) {
     case G1HeapRegionAttr::Young:
-      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
+      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, node_index);
     case G1HeapRegionAttr::Old:
       return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
     default:
@@ -219,18 +237,19 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
 
 HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                    size_t desired_word_size,
-                                                   size_t* actual_word_size) {
+                                                   size_t* actual_word_size,
+                                                   uint node_index) {
   assert(!_g1h->is_humongous(desired_word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
-                                                                    desired_word_size,
-                                                                    actual_word_size);
+  HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size,
+                                                                              desired_word_size,
+                                                                              actual_word_size);
   if (result == NULL && !survivor_is_full()) {
     MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
-                                                                   desired_word_size,
-                                                                   actual_word_size);
+    result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
+                                                                             desired_word_size,
+                                                                             actual_word_size);
     if (result == NULL) {
       set_survivor_full();
     }
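The allocation above keeps the usual fast-path/slow-path split, just per node: first try the node's current region without taking FreeList_lock, and only lock when the region is exhausted and may need replacing. A simplified model of that flow, where a plain mutex and bump pointer stand in for HotSpot's FreeList_lock and CAS-based region allocation:

    #include <cstddef>
    #include <mutex>

    struct AllocRegion {
      char* top = nullptr;
      char* end = nullptr;

      // Bump-pointer allocation; the real attempt_allocation() is CAS-based.
      void* attempt_allocation(std::size_t bytes) {
        if (top != nullptr && bytes <= static_cast<std::size_t>(end - top)) {
          void* result = top;
          top += bytes;
          return result;
        }
        return nullptr;
      }
    };

    std::mutex free_list_lock;  // stand-in for FreeList_lock

    void* survivor_attempt_allocation(AllocRegion* region, std::size_t bytes, bool* is_full) {
      if (void* p = region->attempt_allocation(bytes)) {
        return p;                                  // unlocked fast path
      }
      std::lock_guard<std::mutex> x(free_list_lock);
      // Under the lock the real code can retire the region and install a fresh
      // one from the free list (attempt_allocation_locked).
      if (void* p = region->attempt_allocation(bytes)) {
        return p;
      }
      *is_full = true;                             // remember the failure for later callers
      return nullptr;
    }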
@@ -277,15 +296,25 @@ uint G1PLABAllocator::calc_survivor_alignment_bytes() {
 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
   _g1h(G1CollectedHeap::heap()),
   _allocator(allocator),
-  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
-  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
     _direct_allocated[state] = 0;
-    _alloc_buffers[state] = NULL;
+    uint length = alloc_buffers_length(state);
+    _alloc_buffers[state] = NEW_C_HEAP_ARRAY(PLAB*, length, mtGC);
+    for (uint node_index = 0; node_index < length; node_index++) {
+      _alloc_buffers[state][node_index] = new PLAB(_g1h->desired_plab_sz(state));
+    }
   }
-  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
-  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
+}
+
+G1PLABAllocator::~G1PLABAllocator() {
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    uint length = alloc_buffers_length(state);
+    for (uint node_index = 0; node_index < length; node_index++) {
+      delete _alloc_buffers[state][node_index];
+    }
+    FREE_C_HEAP_ARRAY(PLAB*, _alloc_buffers[state]);
+  }
 }
 
 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
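After this constructor, _alloc_buffers is no longer one PLAB per region type but a small table: one row per region type, whose row length is the number of active NUMA nodes for Young and 1 for Old (see alloc_buffers_length() in the header below). A toy illustration of that shape:

    #include <cassert>
    #include <vector>

    enum RegionType { Young = 0, Old = 1, Num = 2 };

    // Row lengths mirror alloc_buffers_length(): survivor PLABs are per node,
    // the tenured PLAB is shared.
    std::vector<std::vector<int>> make_plab_table(unsigned num_nodes) {
      std::vector<std::vector<int>> table(Num);
      table[Young].resize(num_nodes);
      table[Old].resize(1);
      return table;
    }

    int main() {
      std::vector<std::vector<int>> table = make_plab_table(4);  // four active nodes
      assert(table[Young].size() == 4);
      assert(table[Old].size() == 1);
      return 0;
    }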
@@ -294,7 +323,8 @@ bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
 
 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                        size_t word_sz,
-                                                       bool* plab_refill_failed) {
+                                                       bool* plab_refill_failed,
+                                                       uint node_index) {
   size_t plab_word_size = _g1h->desired_plab_sz(dest);
   size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
 
@@ -303,14 +333,15 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
   if ((required_in_plab <= plab_word_size) &&
     may_throw_away_buffer(required_in_plab, plab_word_size)) {
 
-    PLAB* alloc_buf = alloc_buffer(dest);
+    PLAB* alloc_buf = alloc_buffer(dest, node_index);
    alloc_buf->retire();
 
     size_t actual_plab_size = 0;
     HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                        required_in_plab,
                                                        plab_word_size,
-                                                       &actual_plab_size);
+                                                       &actual_plab_size,
+                                                       node_index);
 
     assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
            "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
@@ -329,35 +360,39 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
     *plab_refill_failed = true;
   }
   // Try direct allocation.
-  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
+  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
   if (result != NULL) {
     _direct_allocated[dest.type()] += word_sz;
   }
   return result;
 }
 
-void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
-  alloc_buffer(dest)->undo_allocation(obj, word_sz);
+void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index) {
+  alloc_buffer(dest, node_index)->undo_allocation(obj, word_sz);
 }
 
 void G1PLABAllocator::flush_and_retire_stats() {
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB* const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
-      buf->flush_and_retire_stats(stats);
-      stats->add_direct_allocated(_direct_allocated[state]);
-      _direct_allocated[state] = 0;
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        buf->flush_and_retire_stats(stats);
+      }
     }
+    stats->add_direct_allocated(_direct_allocated[state]);
+    _direct_allocated[state] = 0;
   }
 }
 
 size_t G1PLABAllocator::waste() const {
   size_t result = 0;
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB * const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      result += buf->waste();
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        result += buf->waste();
+      }
     }
   }
   return result;
@@ -365,10 +400,12 @@ size_t G1PLABAllocator::waste() const {
 
 size_t G1PLABAllocator::undo_waste() const {
   size_t result = 0;
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB * const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      result += buf->undo_waste();
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        result += buf->undo_waste();
+      }
     }
   }
   return result;
src/hotspot/share/gc/g1/g1Allocator.hpp

@@ -54,7 +54,7 @@ private:
 
   // Alloc region used to satisfy allocation requests by the GC for
   // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  SurvivorGCAllocRegion* _survivor_gc_alloc_regions;
 
   // Alloc region used to satisfy allocation requests by the GC for
   // old objects.
@@ -74,13 +74,14 @@ private:
 
   // Accessors to the allocation regions.
   inline MutatorAllocRegion* mutator_alloc_region(uint node_index);
-  inline SurvivorGCAllocRegion* survivor_gc_alloc_region();
+  inline SurvivorGCAllocRegion* survivor_gc_alloc_region(uint node_index);
   inline OldGCAllocRegion* old_gc_alloc_region();
 
   // Allocation attempt during GC for a survivor object / PLAB.
   HeapWord* survivor_attempt_allocation(size_t min_word_size,
                                         size_t desired_word_size,
-                                        size_t* actual_word_size);
+                                        size_t* actual_word_size,
+                                        uint node_index);
 
   // Allocation attempt during GC for an old object / PLAB.
   HeapWord* old_attempt_allocation(size_t min_word_size,
@@ -94,6 +95,8 @@ public:
   G1Allocator(G1CollectedHeap* heap);
   ~G1Allocator();
 
+  uint num_nodes() { return (uint)_num_alloc_regions; }
+
 #ifdef ASSERT
   // Do we currently have an active mutator region to allocate into?
   bool has_mutator_alloc_region();
@@ -123,12 +126,14 @@ public:
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
-                                   size_t word_size);
+                                   size_t word_size,
+                                   uint node_index);
 
   HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                    size_t min_word_size,
                                    size_t desired_word_size,
-                                   size_t* actual_word_size);
+                                   size_t* actual_word_size,
+                                   uint node_index);
 };
 
 // Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
@@ -137,12 +142,12 @@ public:
 class G1PLABAllocator : public CHeapObj<mtGC> {
   friend class G1ParScanThreadState;
 private:
+  typedef G1HeapRegionAttr::region_type_t region_type_t;
+
   G1CollectedHeap* _g1h;
   G1Allocator* _allocator;
 
-  PLAB _surviving_alloc_buffer;
-  PLAB _tenured_alloc_buffer;
-  PLAB* _alloc_buffers[G1HeapRegionAttr::Num];
+  PLAB** _alloc_buffers[G1HeapRegionAttr::Num];
 
   // The survivor alignment in effect in bytes.
   // == 0 : don't align survivors
@@ -155,7 +160,13 @@ private:
   size_t _direct_allocated[G1HeapRegionAttr::Num];
 
   void flush_and_retire_stats();
-  inline PLAB* alloc_buffer(G1HeapRegionAttr dest);
+  inline PLAB* alloc_buffer(G1HeapRegionAttr dest, uint node_index) const;
+  inline PLAB* alloc_buffer(region_type_t dest, uint node_index) const;
+
+  // Returns the number of allocation buffers for the given dest.
+  // There is only 1 buffer for Old while Young may have multiple buffers depending on
+  // active NUMA nodes.
+  inline uint alloc_buffers_length(region_type_t dest) const;
 
   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
   // there are no restrictions on survivor alignment.
@@ -164,6 +175,7 @@ private:
   bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
 public:
   G1PLABAllocator(G1Allocator* allocator);
+  ~G1PLABAllocator();
 
   size_t waste() const;
   size_t undo_waste() const;
@@ -174,18 +186,21 @@ public:
   // PLAB failed or not.
   HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                         size_t word_sz,
-                                        bool* plab_refill_failed);
+                                        bool* plab_refill_failed,
+                                        uint node_index);
 
   // Allocate word_sz words in the PLAB of dest. Returns the address of the
   // allocated memory, NULL if not successful.
   inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
-                                 size_t word_sz);
+                                 size_t word_sz,
+                                 uint node_index);
 
   inline HeapWord* allocate(G1HeapRegionAttr dest,
                             size_t word_sz,
-                            bool* refill_failed);
+                            bool* refill_failed,
+                            uint node_index);
 
-  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz);
+  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
 };
 
 // G1ArchiveRegionMap is a boolean array used to mark G1 regions as
src/hotspot/share/gc/g1/g1Allocator.inline.hpp

@@ -39,8 +39,9 @@ inline MutatorAllocRegion* G1Allocator::mutator_alloc_region(uint node_index) {
   return &_mutator_alloc_regions[node_index];
 }
 
-inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region() {
-  return &_survivor_gc_alloc_region;
+inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region(uint node_index) {
+  assert(node_index < _num_alloc_regions, "Invalid index: %u", node_index);
+  return &_survivor_gc_alloc_regions[node_index];
 }
 
 inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
@@ -71,17 +72,39 @@ inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
   return mutator_alloc_region(node_index)->attempt_allocation_force(word_size);
 }
 
-inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest) {
+inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest, uint node_index) const {
   assert(dest.is_valid(),
          "Allocation buffer index out of bounds: %s", dest.get_type_str());
   assert(_alloc_buffers[dest.type()] != NULL,
          "Allocation buffer is NULL: %s", dest.get_type_str());
-  return _alloc_buffers[dest.type()];
+  return alloc_buffer(dest.type(), node_index);
+}
+
+inline PLAB* G1PLABAllocator::alloc_buffer(region_type_t dest, uint node_index) const {
+  assert(dest < G1HeapRegionAttr::Num,
+         "Allocation buffer index out of bounds: %u", dest);
+
+  if (dest == G1HeapRegionAttr::Young) {
+    assert(node_index < alloc_buffers_length(dest),
+           "Allocation buffer index out of bounds: %u, %u", dest, node_index);
+    return _alloc_buffers[dest][node_index];
+  } else {
+    return _alloc_buffers[dest][0];
+  }
+}
+
+inline uint G1PLABAllocator::alloc_buffers_length(region_type_t dest) const {
+  if (dest == G1HeapRegionAttr::Young) {
+    return _allocator->num_nodes();
+  } else {
+    return 1;
+  }
 }
 
 inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
-                                                size_t word_sz) {
-  PLAB* buffer = alloc_buffer(dest);
+                                                size_t word_sz,
+                                                uint node_index) {
+  PLAB* buffer = alloc_buffer(dest, node_index);
   if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
     return buffer->allocate(word_sz);
   } else {
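Reading the two accessors together, with two active NUMA nodes the mapping works out to: alloc_buffer(Young, 0) and alloc_buffer(Young, 1) select distinct survivor PLABs, while alloc_buffer(Old, n) always selects the single tenured PLAB. A toy re-statement of just that indexing rule:

    #include <cassert>

    enum { Young = 0, Old = 1 };

    // Which slot of the per-type buffer row a request lands in.
    unsigned buffer_slot(unsigned type, unsigned node_index) {
      return (type == Young) ? node_index : 0;  // Old ignores the node index
    }

    int main() {
      assert(buffer_slot(Young, 1) == 1);  // survivor PLABs are per node
      assert(buffer_slot(Old, 1) == 0);    // tenured PLAB is shared
      return 0;
    }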
@@ -91,12 +114,13 @@ inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
 
 inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
                                            size_t word_sz,
-                                           bool* refill_failed) {
-  HeapWord* const obj = plab_allocate(dest, word_sz);
+                                           bool* refill_failed,
+                                           uint node_index) {
+  HeapWord* const obj = plab_allocate(dest, word_sz, node_index);
   if (obj != NULL) {
     return obj;
   }
-  return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
+  return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);
 }
 
 // Create the maps which is used to identify archive objects.
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -4605,7 +4605,7 @@ bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
   }
 }
 
-HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
   if (!has_more_regions(dest)) {
@@ -4621,7 +4621,8 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
 
   HeapRegion* new_alloc_region = new_region(word_size,
                                             type,
-                                            true /* do_expand */);
+                                            true /* do_expand */,
+                                            node_index);
 
   if (new_alloc_region != NULL) {
     if (type.is_survivor()) {
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -475,7 +475,7 @@ private:
 
   // For GC alloc regions.
   bool has_more_regions(G1HeapRegionAttr dest);
-  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
+  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, G1HeapRegionAttr dest);
 
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

@@ -152,11 +152,11 @@ void G1ParScanThreadState::trim_queue() {
   } while (!_refs->is_empty());
 }
 
-HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
-                                                      G1HeapRegionAttr* dest,
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                       size_t word_sz,
-                                                      bool previous_plab_refill_failed) {
-  assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
+                                                      bool previous_plab_refill_failed,
+                                                      uint node_index) {
+
   assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
 
   // Right now we only have two types of regions (young / old) so
@@ -165,7 +165,8 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
     bool plab_refill_in_old_failed = false;
     HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                         word_sz,
-                                                        &plab_refill_in_old_failed);
+                                                        &plab_refill_in_old_failed,
+                                                        node_index);
     // Make sure that we won't attempt to copy any other objects out
     // of a survivor region (given that apparently we cannot allocate
     // any new ones) to avoid coming into this slow path again and again.
@@ -204,8 +205,8 @@ G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age) {
 
 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                   oop const old, size_t word_sz, uint age,
-                                                  HeapWord * const obj_ptr) const {
-  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
+                                                  HeapWord * const obj_ptr, uint node_index) const {
+  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
   if (alloc_buf->contains(obj_ptr)) {
     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
@@ -228,15 +229,19 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
   if (_old_gen_is_full && dest_attr.is_old()) {
     return handle_evacuation_failure_par(old, old_mark);
   }
-  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);
+  HeapRegion* const from_region = _g1h->heap_region_containing(old);
+  uint node_index = from_region->node_index();
+
+  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
 
   // PLAB allocations should succeed most of the time, so we'll
   // normally check against NULL once and that's it.
   if (obj_ptr == NULL) {
     bool plab_refill_failed = false;
-    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
+    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
     if (obj_ptr == NULL) {
-      obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
+      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
+      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
       if (obj_ptr == NULL) {
         // This will either forward-to-self, or detect that someone else has
         // installed a forwarding pointer.
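This hunk is the heart of the survivor change: the node index is read from the region the object currently lives in, so a surviving object is copied into a PLAB (and ultimately a survivor region) on the same NUMA node where it was originally allocated. A minimal sketch of that selection rule, using a toy region type:

    #include <cassert>

    struct RegionModel { unsigned node_index; };  // stands in for HeapRegion

    // Node selection as in copy_to_survivor_space(): follow the source region.
    unsigned choose_plab_node(const RegionModel* from_region) {
      return from_region->node_index;
    }

    int main() {
      RegionModel eden_region_on_node1{1};
      assert(choose_plab_node(&eden_region_on_node1) == 1);  // copy stays on node 1
      return 0;
    }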
@@ -245,7 +250,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
   }
   if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
     // The events are checked individually as part of the actual commit
-    report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
+    report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
   }
 }
 
@@ -257,7 +262,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
     if (_g1h->evacuation_should_fail()) {
       // Doing this after all the allocation attempts also tests the
       // undo_allocation() method too.
-      _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
+      _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
       return handle_evacuation_failure_par(old, old_mark);
     }
 #endif // !PRODUCT
@@ -270,7 +275,6 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
 
-    HeapRegion* const from_region = _g1h->heap_region_containing(old);
     const uint young_index = from_region->young_index_in_cset();
 
     assert((from_region->is_young() && young_index > 0) ||
@@ -323,7 +327,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
     }
     return obj;
   } else {
-    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
+    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
     return forward_ptr;
   }
 }
src/hotspot/share/gc/g1/g1ParScanThreadState.hpp

@@ -187,22 +187,21 @@ private:
   inline void dispatch_reference(StarTask ref);
 
   // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
-  // allocate into dest. State is the original (source) cset state for the object
-  // that is allocated for. Previous_plab_refill_failed indicates whether previously
-  // a PLAB refill into "state" failed.
+  // allocate into dest. Previous_plab_refill_failed indicates whether previous
+  // PLAB refill for the original (source) object failed.
   // Returns a non-NULL pointer if successful, and updates dest if required.
   // Also determines whether we should continue to try to allocate into the various
   // generations or just end trying to allocate.
-  HeapWord* allocate_in_next_plab(G1HeapRegionAttr const region_attr,
-                                  G1HeapRegionAttr* dest,
+  HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
                                   size_t word_sz,
-                                  bool previous_plab_refill_failed);
+                                  bool previous_plab_refill_failed,
+                                  uint node_index);
 
   inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
 
   void report_promotion_event(G1HeapRegionAttr const dest_attr,
                               oop const old, size_t word_sz, uint age,
-                              HeapWord * const obj_ptr) const;
+                              HeapWord * const obj_ptr, uint node_index) const;
 
   inline bool needs_partial_trimming() const;
   inline bool is_partially_trimmed() const;