8073052: Rename and clean up the allocation manager hierarchy in g1Allocator.?pp

Fix the naming of G1ParGCAllocator, decrease the dependencies between G1CollectedHeap, G1Allocator and G1AllocRegion, and add documentation.

Reviewed-by: mgerdin, jmasa, kbarrett
Thomas Schatzl 2015-08-06 15:49:50 +02:00
parent e787253b19
commit 6e3ffb1aae
18 changed files with 281 additions and 217 deletions
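For orientation, the renames this change introduces, distilled from the diff below as a reading aid (not part of the patch itself):

G1ParGCAllocator          ->  G1PLABAllocator
G1DefaultParGCAllocator   ->  G1DefaultPLABAllocator
G1ParScanThreadState::_g1_par_allocator  ->  G1ParScanThreadState::_plab_allocator

Alongside the renames, the allocation paths move as well: par_allocate_during_gc(), the mutator-side attempt_allocation* helpers and unsafe_max_tlab_alloc() leave G1CollectedHeap for G1Allocator, and the PLAB allocators are now constructed from a G1Allocator* instead of a G1CollectedHeap*.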


@@ -300,5 +300,3 @@ HeapRegion* OldGCAllocRegion::release() {
}
return G1AllocRegion::release();
}


@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@@ -116,7 +116,77 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1PLAB::G1PLAB(size_t gclab_word_size) :
PLAB(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
HeapRegion* hr = mutator_alloc_region(context)->get();
size_t max_tlab = _g1h->max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
} else {
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
}
}
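// Illustrative arithmetic (hypothetical numbers, not from this patch): with
// hr->free() == 24K, MinTLABSize == 2K and max_tlab == 512K (all in bytes),
// this returns MIN2(MAX2(24K, 2K), 512K) == 24K. The reported limit tracks
// the region's remaining space, never drops below MinTLABSize, and never
// exceeds the humongous threshold.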
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
HeapWord* G1Allocator::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
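// The lock-free attempt on the current survivor region failed; retry under
// the FreeList_lock, which also covers installing a fresh survivor region.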
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
_g1h->dirty_young_block(result, word_size);
}
return result;
}
HeapWord* G1Allocator::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!_g1h->is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
_g1h(G1CollectedHeap::heap()),
_allocator(allocator),
_survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
}
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
@@ -124,7 +194,7 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
G1PLAB* alloc_buf = alloc_buffer(dest, context);
alloc_buf->retire();
HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
HeapWord* buf = _allocator->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@@ -136,14 +206,18 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
} else {
return _g1h->par_allocate_during_gc(dest, word_sz, context);
return _allocator->par_allocate_during_gc(dest, word_sz, context);
}
}
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
G1PLABAllocator(allocator),
_surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
@@ -151,7 +225,7 @@ G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
void G1DefaultPLABAllocator::retire_alloc_buffers() {
for (uint state = 0; state < InCSetState::Num; state++) {
G1PLAB* const buf = _alloc_buffers[state];
if (buf != NULL) {
@@ -160,7 +234,7 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
}
}
void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
void G1DefaultPLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
wasted = 0;
undo_wasted = 0;
for (uint state = 0; state < InCSetState::Num; state++) {


@@ -33,17 +33,36 @@
class EvacuationInfo;
// Base class for G1 allocators.
// Interface to keep track of which regions G1 is currently allocating into. Provides
// some accessors (e.g. allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
G1CollectedHeap* _g1h;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
// Accessors to the allocation regions.
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
public:
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
virtual ~G1Allocator() { }
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
@@ -51,24 +70,35 @@ public:
virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used_in_alloc_regions() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
// Management of retained regions.
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
virtual HeapRegion* new_heap_region(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) {
return new HeapRegion(hrs_index, sharedOffsetArray, mr);
}
// Allocate blocks of memory during mutator time.
inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);
size_t unsafe_max_tlab_alloc(AllocationContext_t context);
// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);
virtual size_t used_in_alloc_regions() = 0;
};
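// Intended mutator-side usage of the entry points above, condensed from the
// g1CollectedHeap.cpp hunks later in this diff (a sketch, not part of the
// patch):
//
//   HeapWord* result = _allocator->attempt_allocation(word_size, context);
//   if (result == NULL) {
//     MutexLockerEx x(Heap_lock);
//     result = _allocator->attempt_allocation_locked(word_size, context);
//   }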
// The default allocator for G1.
// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
// Alloc region used to satisfy mutator allocation requests.
@@ -152,10 +182,14 @@ public:
}
};
class G1ParGCAllocator : public CHeapObj<mtGC> {
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
G1CollectedHeap* _g1h;
G1Allocator* _allocator;
// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
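// (Derived from SurvivorAlignmentInBytes via calc_survivor_alignment_bytes(),
// which this hunk does not show; presumably it yields 0 when that value equals
// the normal object alignment, so that no extra padding is applied.)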
@@ -182,11 +216,10 @@ protected:
}
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
virtual ~G1ParGCAllocator() { }
G1PLABAllocator(G1Allocator* allocator);
virtual ~G1PLABAllocator() { }
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
static G1PLABAllocator* create_allocator(G1Allocator* allocator);
virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
@@ -219,18 +252,18 @@ public:
return allocate_direct_or_new_plab(dest, word_sz, context);
}
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
}
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};
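// Typical calling pattern, condensed from the G1ParScanThreadState hunks later
// in this diff (a sketch, not part of the patch):
//
//   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
//   if (obj_ptr == NULL) {
//     // Retires the current PLAB, then allocates a new one or serves the
//     // request directly.
//     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
//   }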
class G1DefaultParGCAllocator : public G1ParGCAllocator {
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
G1PLAB _surviving_alloc_buffer;
G1PLAB _tenured_alloc_buffer;
G1PLAB* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
G1DefaultPLABAllocator(G1Allocator* allocator);
virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation(word_size, false /* bot_updates */);
}
HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */);
assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
err_msg("Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get())));
return result;
}
HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationContext_t context) {
return mutator_alloc_region(context)->attempt_allocation_force(word_size, false /* bot_updates */);
}
#endif // SHARE_VM_GC_G1_G1ALLOCATOR_INLINE_HPP


@@ -30,6 +30,6 @@ G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultAllocator(g1h);
}
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultParGCAllocator(g1h);
G1PLABAllocator* G1PLABAllocator::create_allocator(G1Allocator* allocator) {
return new G1DefaultPLABAllocator(allocator);
}


@@ -31,7 +31,7 @@
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
@@ -815,22 +815,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
{
MutexLockerEx x(Heap_lock);
result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation_locked(word_size, context);
if (result != NULL) {
return result;
}
// If we reach here, attempt_allocation_locked() above failed to
// allocate a new region. So the mutator alloc region should be NULL.
assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
if (GC_locker::is_active_and_needs_gc()) {
if (g1_policy()->can_expand_young_list()) {
// No need for an ergo verbose message here,
// can_expand_young_list() does this when it returns true.
result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation_force(word_size, context);
if (result != NULL) {
return result;
}
@@ -890,8 +884,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
result = _allocator->attempt_allocation(word_size, context);
if (result != NULL) {
return result;
}
@@ -1109,6 +1102,29 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
}
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
AllocationContext_t context = AllocationContext::current();
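// Fast path: lock-free allocation in the current mutator region. The slow
// path below takes the Heap_lock and may schedule a collection pause.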
HeapWord* result = _allocator->attempt_allocation(word_size, context);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
assert_heap_not_locked();
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
@@ -1231,13 +1247,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
AllocationContext_t context,
bool expect_null_mutator_alloc_region) {
assert_at_safepoint(true /* should_be_vm_thread */);
assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
!expect_null_mutator_alloc_region,
assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
if (!is_humongous(word_size)) {
return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
return _allocator->attempt_allocation_locked(word_size, context);
} else {
HeapWord* result = humongous_obj_allocate(word_size, context);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
@@ -2373,7 +2387,6 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
@@ -2632,6 +2645,11 @@ bool G1CollectedHeap::is_in_exact(const void* p) const {
}
#endif
bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
// Iteration functions.
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@@ -2833,20 +2851,8 @@ size_t G1CollectedHeap::max_tlab_size() const {
}
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
// Also, this value can be at most the humongous object threshold,
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
size_t max_tlab = max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
} else {
return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
}
AllocationContext_t context = AllocationContext::current();
return _allocator->unsafe_max_tlab_alloc(context);
}
size_t G1CollectedHeap::max_capacity() const {
@@ -5009,7 +5015,7 @@ public:
bool G1STWIsAliveClosure::do_object_b(oop p) {
// An object is reachable if it is outside the collection set,
// or is inside and copied.
return !_g1->obj_in_cs(p) || p->is_forwarded();
return !_g1->is_in_cset(p) || p->is_forwarded();
}
// Non Copying Keep Alive closure


@@ -27,7 +27,6 @@
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1BiasedArray.hpp"
@@ -193,7 +192,7 @@ class G1CollectedHeap : public CollectedHeap {
// Closures used in implementation.
friend class G1ParScanThreadState;
friend class G1ParTask;
friend class G1ParGCAllocator;
friend class G1PLABAllocator;
friend class G1PrepareCompactClosure;
// Other related classes.
@@ -248,7 +247,7 @@ private:
// The sequence of all heap regions in the heap.
HeapRegionManager _hrm;
// Class that handles the different kinds of allocations.
// Handles non-humongous allocations in the G1CollectedHeap.
G1Allocator* _allocator;
// Outside of GC pauses, the number of bytes used in all regions other
@@ -280,22 +279,6 @@ private:
// start of each GC.
bool _expand_heap_after_alloc_failure;
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
// It releases the mutator alloc region.
void release_mutator_alloc_region();
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.
void abandon_gc_alloc_regions();
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
@@ -557,25 +540,6 @@ protected:
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
// Allocate blocks during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
inline HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
// These methods are the "callbacks" from the G1AllocRegion class.
// For mutator alloc regions.
@@ -725,6 +689,9 @@ public:
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Allocates a new heap region instance.
HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
@@ -1263,7 +1230,7 @@ public:
// Return "TRUE" iff the given object address is within the collection
// set. Slow implementation.
inline bool obj_in_cs(oop obj);
bool obj_in_cs(oop obj);
inline bool is_in_cset(const HeapRegion *hr);
inline bool is_in_cset(oop obj);


@@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
@@ -57,20 +56,6 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -126,67 +111,6 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
uint* gc_count_before_ret,
uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
AllocationContext_t context = AllocationContext::current();
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
assert_heap_not_locked();
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
}
return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!is_humongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block


@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.inline.hpp"
bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jlong* totals,
@@ -31,3 +32,8 @@ bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jint len) {
return false;
}
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot_shared(), mr);
}


@@ -31,6 +31,7 @@
#include "gc/g1/g1ErgoVerbose.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"


@@ -31,6 +31,7 @@
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/prefetch.inline.hpp"


@@ -59,7 +59,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
// The dest for Young is used when the objects are aged enough to
@@ -71,8 +71,8 @@
}
G1ParScanThreadState::~G1ParScanThreadState() {
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
_plab_allocator->retire_alloc_buffers();
delete _plab_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
@@ -97,7 +97,7 @@ G1ParScanThreadState::print_termination_stats(int i,
const double term_ms = term_time() * 1000.0;
size_t alloc_buffer_waste = 0;
size_t undo_waste = 0;
_g1_par_allocator->waste(alloc_buffer_waste, undo_waste);
_plab_allocator->waste(alloc_buffer_waste, undo_waste);
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
@@ -167,8 +167,9 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
// Right now we only have two types of regions (young / old) so
// let's keep the logic here simple. We can generalize it when necessary.
if (dest->is_young()) {
HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
word_sz, context);
HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
word_sz,
context);
if (obj_ptr == NULL) {
return NULL;
}
@@ -209,12 +210,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
uint age = 0;
InCSetState dest_state = next_state(state, old_mark, age);
HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
if (obj_ptr == NULL) {
obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
if (obj_ptr == NULL) {
@@ -233,7 +234,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
@@ -295,7 +296,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
}
return obj;
} else {
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return forward_ptr;
}
}


@@ -46,7 +46,7 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocator* _g1_par_allocator;
G1PLABAllocator* _plab_allocator;
ageTable _age_table;
InCSetState _dest[InCSetState::Num];


@@ -34,6 +34,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutex.hpp"


@@ -497,20 +497,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _rem_set;
}
bool in_collection_set() const;
inline bool in_collection_set() const;
HeapRegion* next_in_collection_set() {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->in_collection_set(),
"Malformed CS.");
return _next_in_special_set;
}
void set_next_in_collection_set(HeapRegion* r) {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
_next_in_special_set = r;
}
inline HeapRegion* next_in_collection_set() const;
inline void set_next_in_collection_set(HeapRegion* r);
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;


@@ -26,7 +26,7 @@
#define SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
@@ -200,4 +200,18 @@ inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
inline HeapRegion* HeapRegion::next_in_collection_set() const {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(_next_in_special_set == NULL ||
_next_in_special_set->in_collection_set(),
"Malformed CS.");
return _next_in_special_set;
}
inline void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
assert(in_collection_set(), "should only invoke on member of CS.");
assert(r == NULL || r->in_collection_set(), "Malformed CS.");
_next_in_special_set = r;
}
#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP


@@ -70,7 +70,7 @@ HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
return g1h->new_heap_region(hrm_index, mr);
}
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {


@@ -94,7 +94,8 @@ void VM_G1IncCollectionPause::doit() {
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
_result = g1h->attempt_allocation_at_safepoint(_word_size,
allocation_context(),
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
@@ -147,7 +148,8 @@
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
_result = g1h->attempt_allocation_at_safepoint(_word_size,
allocation_context(),
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");