Merge commit faf320aede

10 changed files with 1060 additions and 403 deletions
@@ -1051,6 +1051,7 @@ public:
   void work(int worker_i) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
+    ResourceMark rm;

     double start_vtime = os::elapsedVTime();

@@ -1888,6 +1889,9 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ReferenceProcessor* rp = g1h->ref_processor();

+  // See the comment in G1CollectedHeap::ref_processing_init()
+  // about how reference processing currently works in G1.
+
   // Process weak references.
   rp->setup_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");

@@ -2918,7 +2922,11 @@ public:
   CMOopClosure(G1CollectedHeap* g1h,
                ConcurrentMark* cm,
                CMTask* task)
-    : _g1h(g1h), _cm(cm), _task(task) { }
+    : _g1h(g1h), _cm(cm), _task(task)
+  {
+    _ref_processor = g1h->ref_processor();
+    assert(_ref_processor != NULL, "should not be NULL");
+  }
 };

 void CMTask::setup_for_region(HeapRegion* hr) {
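Note (annotation, not part of the diff): the CMOopClosure change above moves the constructor body out of line so it can cache the heap's ReferenceProcessor and assert that it is non-NULL before marking starts. A minimal standalone sketch of that constructor idiom, with hypothetical stand-in types rather than the real HotSpot classes:

#include <cassert>

struct RefProcessor { };                 // stands in for ReferenceProcessor

struct Heap {                            // stands in for G1CollectedHeap
  RefProcessor* ref_processor() { return &_rp; }
  RefProcessor _rp;
};

class OopClosureSketch {                 // mirrors the CMOopClosure idiom
  RefProcessor* _ref_processor;
public:
  explicit OopClosureSketch(Heap* heap) {
    // Cache the processor once, at construction time, and fail fast if absent.
    _ref_processor = heap->ref_processor();
    assert(_ref_processor != nullptr && "should not be NULL");
  }
};

int main() {
  Heap h;
  OopClosureSketch cl(&h);
  (void)cl;
  return 0;
}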
(File diff suppressed because it is too large.)
@@ -290,6 +290,63 @@ private:
   // started is maintained in _total_full_collections in CollectedHeap.
   volatile unsigned int _full_collections_completed;

+  // These are macros so that, if the assert fires, we get the correct
+  // line number, file, etc.
+
+#define heap_locking_asserts_err_msg(__extra_message)                        \
+  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                       \
+          (__extra_message),                                                 \
+          (!Heap_lock->owned_by_self()) ? "NOT " : "",                       \
+          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
+
+#define assert_heap_locked()                                                 \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self(),                                       \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_locked_or_at_safepoint()                                 \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self() ||                                     \
+           SafepointSynchronize::is_at_safepoint(),                          \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
+                                        "should be at a safepoint"));        \
+  } while (0)
+
+#define assert_heap_locked_and_not_at_safepoint()                            \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self() &&                                     \
+           !SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
+                                        "should not be at a safepoint"));    \
+  } while (0)
+
+#define assert_heap_not_locked()                                             \
+  do {                                                                       \
+    assert(!Heap_lock->owned_by_self(),                                      \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_not_locked_and_not_at_safepoint()                        \
+  do {                                                                       \
+    assert(!Heap_lock->owned_by_self() &&                                    \
+           !SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
+                                        "should not be at a safepoint"));    \
+  } while (0)
+
+#define assert_at_safepoint()                                                \
+  do {                                                                       \
+    assert(SafepointSynchronize::is_at_safepoint(),                          \
+           heap_locking_asserts_err_msg("should be at a safepoint"));        \
+  } while (0)
+
+#define assert_not_at_safepoint()                                            \
+  do {                                                                       \
+    assert(!SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should not be at a safepoint"));    \
+  } while (0)
+
 protected:

   // Returns "true" iff none of the gc alloc regions have any allocations
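Note (annotation, not part of the diff): the heap_locking_asserts macros above are wrapped in do { ... } while (0) so each one expands to exactly one statement, and they build their failure message at the point of use so the reported file and line are correct. A cut-down, compilable illustration of the same construction, using a plain C++ assert and a hypothetical lock object in place of HotSpot's err_msg and Heap_lock:

#include <cassert>
#include <cstdio>

struct Lock {
  bool owned_by_self() const { return _owned; }
  bool _owned = false;
} Heap_lock_obj;                           // stands in for the global Heap_lock

// do/while(0) makes the macro a single statement, so
// "if (x) assert_heap_locked(); else ..." parses the way it reads.
#define assert_heap_locked()                                              \
  do {                                                                    \
    if (!Heap_lock_obj.owned_by_self()) {                                 \
      std::fprintf(stderr, "%s:%d: should be holding the Heap_lock\n",    \
                   __FILE__, __LINE__);                                   \
      assert(false);                                                      \
    }                                                                     \
  } while (0)

int main() {
  Heap_lock_obj._owned = true;
  assert_heap_locked();                    // passes
  return 0;
}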
@@ -329,31 +386,162 @@ protected:

   // Attempt to allocate an object of the given (very large) "word_size".
   // Returns "NULL" on failure.
-  virtual HeapWord* humongousObjAllocate(size_t word_size);
+  virtual HeapWord* humongous_obj_allocate(size_t word_size);

-  // If possible, allocate a block of the given word_size, else return "NULL".
-  // Returning NULL will trigger GC or heap expansion.
-  // These two methods have rather awkward pre- and
-  // post-conditions. If they are called outside a safepoint, then
-  // they assume that the caller is holding the heap lock. Upon return
-  // they release the heap lock, if they are returning a non-NULL
-  // value. attempt_allocation_slow() also dirties the cards of a
-  // newly-allocated young region after it releases the heap
-  // lock. This change in interface was the neatest way to achieve
-  // this card dirtying without affecting mem_allocate(), which is a
-  // more frequently called method. We tried two or three different
-  // approaches, but they were even more hacky.
-  HeapWord* attempt_allocation(size_t word_size,
-                               bool permit_collection_pause = true);
+  // The following two methods, allocate_new_tlab() and
+  // mem_allocate(), are the two main entry points from the runtime
+  // into the G1's allocation routines. They have the following
+  // assumptions:
+  //
+  // * They should both be called outside safepoints.
+  //
+  // * They should both be called without holding the Heap_lock.
+  //
+  // * All allocation requests for new TLABs should go to
+  //   allocate_new_tlab().
+  //
+  // * All non-TLAB allocation requests should go to mem_allocate()
+  //   and mem_allocate() should never be called with is_tlab == true.
+  //
+  // * If the GC locker is active we currently stall until we can
+  //   allocate a new young region. This will be changed in the
+  //   near future (see CR 6994056).
+  //
+  // * If either call cannot satisfy the allocation request using the
+  //   current allocating region, they will try to get a new one. If
+  //   this fails, they will attempt to do an evacuation pause and
+  //   retry the allocation.
+  //
+  // * If all allocation attempts fail, even after trying to schedule
+  //   an evacuation pause, allocate_new_tlab() will return NULL,
+  //   whereas mem_allocate() will attempt a heap expansion and/or
+  //   schedule a Full GC.
+  //
+  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
+  //   should never be called with word_size being humongous. All
+  //   humongous allocation requests should go to mem_allocate() which
+  //   will satisfy them with a special path.

-  HeapWord* attempt_allocation_slow(size_t word_size,
-                                    bool permit_collection_pause = true);
+  virtual HeapWord* allocate_new_tlab(size_t word_size);
+
+  virtual HeapWord* mem_allocate(size_t word_size,
+                                 bool is_noref,
+                                 bool is_tlab, /* expected to be false */
+                                 bool* gc_overhead_limit_was_exceeded);
+
+  // The following methods, allocate_from_cur_allocation_region(),
+  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
+  // attempt_allocation_slow(), and attempt_allocation_humongous()
+  // have very awkward pre- and post-conditions with respect to
+  // locking:
+  //
+  // If they are called outside a safepoint they assume the caller
+  // holds the Heap_lock when it calls them. However, on exit they
+  // will release the Heap_lock if they return a non-NULL result, but
+  // keep holding the Heap_lock if they return a NULL result. The
+  // reason for this is that we need to dirty the cards that span
+  // allocated blocks on young regions to avoid having to take the
+  // slow path of the write barrier (for performance reasons we don't
+  // update RSets for references whose source is a young region, so we
+  // don't need to look at dirty cards on young regions). But, doing
+  // this card dirtying while holding the Heap_lock can be a
+  // scalability bottleneck, especially given that some allocation
+  // requests might be of non-trivial size (and the larger the region
+  // size is, the fewer allocations requests will be considered
+  // humongous, as the humongous size limit is a fraction of the
+  // region size). So, when one of these calls succeeds in allocating
+  // a block it does the card dirtying after it releases the Heap_lock
+  // which is why it will return without holding it.
+  //
+  // The above assymetry is the reason why locking / unlocking is done
+  // explicitly (i.e., with Heap_lock->lock() and
+  // Heap_lock->unlocked()) instead of using MutexLocker and
+  // MutexUnlocker objects. The latter would ensure that the lock is
+  // unlocked / re-locked at every possible exit out of the basic
+  // block. However, we only want that action to happen in selected
+  // places.
+  //
+  // Further, if the above methods are called during a safepoint, then
+  // naturally there's no assumption about the Heap_lock being held or
+  // there's no attempt to unlock it. The parameter at_safepoint
+  // indicates whether the call is made during a safepoint or not (as
+  // an optimization, to avoid reading the global flag with
+  // SafepointSynchronize::is_at_safepoint()).
+  //
+  // The methods share these parameters:
+  //
+  // * word_size : the size of the allocation request in words
+  // * at_safepoint : whether the call is done at a safepoint; this
+  //   also determines whether a GC is permitted
+  //   (at_safepoint == false) or not (at_safepoint == true)
+  // * do_dirtying : whether the method should dirty the allocated
+  //   block before returning
+  //
+  // They all return either the address of the block, if they
+  // successfully manage to allocate it, or NULL.
+
+  // It tries to satisfy an allocation request out of the current
+  // allocating region, which is passed as a parameter. It assumes
+  // that the caller has checked that the current allocating region is
+  // not NULL. Given that the caller has to check the current
+  // allocating region for at least NULL, it might as well pass it as
+  // the first parameter so that the method doesn't have to read it
+  // from the _cur_alloc_region field again.
+  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
+                                                  size_t word_size);
+
+  // It attempts to allocate out of the current alloc region. If that
+  // fails, it retires the current alloc region (if there is one),
+  // tries to get a new one and retries the allocation.
+  inline HeapWord* attempt_allocation(size_t word_size);
+
+  // It assumes that the current alloc region has been retired and
+  // tries to allocate a new one. If it's successful, it performs
+  // the allocation out of the new current alloc region and updates
+  // _cur_alloc_region.
+  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
+                                                  bool at_safepoint,
+                                                  bool do_dirtying);
+
+  // The slow path when we are unable to allocate a new current alloc
+  // region to satisfy an allocation request (i.e., when
+  // attempt_allocation() fails). It will try to do an evacuation
+  // pause, which might stall due to the GC locker, and retry the
+  // allocation attempt when appropriate.
+  HeapWord* attempt_allocation_slow(size_t word_size);
+
+  // The method that tries to satisfy a humongous allocation
+  // request. If it cannot satisfy it it will try to do an evacuation
+  // pause to perhaps reclaim enough space to be able to satisfy the
+  // allocation request afterwards.
+  HeapWord* attempt_allocation_humongous(size_t word_size,
+                                         bool at_safepoint);
+
+  // It does the common work when we are retiring the current alloc region.
+  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
+
+  // It retires the current alloc region, which is passed as a
+  // parameter (since, typically, the caller is already holding on to
+  // it). It sets _cur_alloc_region to NULL.
+  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
+
+  // It attempts to do an allocation immediately before or after an
+  // evacuation pause and can only be called by the VM thread. It has
+  // slightly different assumptions that the ones before (i.e.,
+  // assumes that the current alloc region has been retired).
+  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
+                                            bool expect_null_cur_alloc_region);
+
+  // It dirties the cards that cover the block so that so that the post
+  // write barrier never queues anything when updating objects on this
+  // block. It is assumed (and in fact we assert) that the block
+  // belongs to a young region.
+  inline void dirty_young_block(HeapWord* start, size_t word_size);

   // Allocate blocks during garbage collection. Will ensure an
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
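Note (annotation, not part of the diff): the comment block above fixes the contract of the two runtime entry points: TLAB refills go to allocate_new_tlab(), everything else, including humongous requests, goes to mem_allocate(), and both retry after an evacuation pause before giving up. The following standalone sketch only models that routing; the helper names, stub bodies, and the humongous threshold are placeholders, not the real implementation:

#include <cstddef>

typedef void* HeapWord;   // placeholder for HotSpot's HeapWord*

// Trivial stand-ins for the real G1 routines named in the comment above.
static bool     is_humongous(size_t words)           { return words >= 1024; }  // arbitrary cutoff
static HeapWord attempt_allocation(size_t)           { return NULL; }
static HeapWord attempt_allocation_humongous(size_t) { return NULL; }
static bool     do_evacuation_pause(size_t)          { return false; }

// Non-TLAB entry point: humongous requests take the special path, everything
// else tries the current alloc region and retries after an evacuation pause.
static HeapWord mem_allocate_sketch(size_t word_size) {
  if (is_humongous(word_size))
    return attempt_allocation_humongous(word_size);

  HeapWord result = attempt_allocation(word_size);
  if (result != NULL)
    return result;

  if (do_evacuation_pause(word_size))
    return attempt_allocation(word_size);   // retry after the pause

  return NULL;   // caller may expand the heap or schedule a Full GC
}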
@@ -370,12 +558,14 @@ protected:
   void retire_alloc_region(HeapRegion* alloc_region, bool par);

   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
-  // - if clear_all_soft_refs is true, all soft references are cleared
-  //   during the GC
+  // - if clear_all_soft_refs is true, all soft references should be
+  //   cleared during the GC
   // - if explicit_gc is false, word_size describes the allocation that
   //   the GC should attempt (at least) to satisfy
-  void do_collection(bool explicit_gc,
+  // - it returns false if it is unable to do the collection due to the
+  //   GC locker being active, true otherwise
+  bool do_collection(bool explicit_gc,
                      bool clear_all_soft_refs,
                      size_t word_size);

@@ -391,13 +581,13 @@ protected:
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size);
+  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size". If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  virtual HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size);

 public:
   // Expand the garbage-first heap by at least the given size (in bytes!).
@@ -478,21 +668,27 @@ protected:
   void reset_taskqueue_stats();
 #endif // TASKQUEUE_STATS

-  // Do an incremental collection: identify a collection set, and evacuate
-  // its live objects elsewhere.
-  virtual void do_collection_pause();
+  // Schedule the VM operation that will do an evacuation pause to
+  // satisfy an allocation request of word_size. *succeeded will
+  // return whether the VM operation was successful (it did do an
+  // evacuation pause) or not (another thread beat us to it or the GC
+  // locker was active). Given that we should not be holding the
+  // Heap_lock when we enter this method, we will pass the
+  // gc_count_before (i.e., total_collections()) as a parameter since
+  // it has to be read while holding the Heap_lock. Currently, both
+  // methods that call do_collection_pause() release the Heap_lock
+  // before the call, so it's easy to read gc_count_before just before.
+  HeapWord* do_collection_pause(size_t word_size,
+                                unsigned int gc_count_before,
+                                bool* succeeded);

   // The guts of the incremental collection pause, executed by the vm
-  // thread.
-  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
+  // thread. It returns false if it is unable to do the collection due
+  // to the GC locker being active, true otherwise
+  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

   // Actually do the work of evacuating the collection set.
-  virtual void evacuate_collection_set();
+  void evacuate_collection_set();

-  // If this is an appropriate right time, do a collection pause.
-  // The "word_size" argument, if non-zero, indicates the size of an
-  // allocation request that is prompting this query.
-  void do_collection_pause_if_appropriate(size_t word_size);

   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
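Note (annotation, not part of the diff): do_collection_pause() now takes the gc_count_before value that the caller sampled while it still held the Heap_lock, plus an out-parameter saying whether the pause actually ran. A standalone sketch of the calling pattern described in the comment, with a toy mutex and a stubbed-out pause:

#include <cstddef>
#include <mutex>

typedef void* HeapWord;

struct HeapSketch {
  std::mutex   heap_lock;                  // stands in for Heap_lock
  unsigned int total_collections;          // stands in for total_collections()

  HeapSketch() : total_collections(0) { }

  // Placeholder: the real method schedules a VM operation to do the pause.
  HeapWord do_collection_pause(size_t /*word_size*/,
                               unsigned int /*gc_count_before*/,
                               bool* succeeded) {
    *succeeded = false;
    return NULL;
  }

  HeapWord allocate_with_pause(size_t word_size) {
    unsigned int gc_count_before;
    {
      std::lock_guard<std::mutex> guard(heap_lock);
      // Sample the collection counter while still holding the lock, so the
      // VM operation can tell whether another pause happened in between.
      gc_count_before = total_collections;
    }
    // Heap_lock is released here, before the VM operation is scheduled.
    bool succeeded = false;
    HeapWord result = do_collection_pause(word_size, gc_count_before, &succeeded);
    if (succeeded) {
      return result;   // the pause ran; result may still be NULL
    }
    return NULL;       // lost the race or GC locker active: caller retries
  }
};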
@@ -762,11 +958,6 @@ public:
 #endif // PRODUCT

   // These virtual functions do the actual allocation.
-  virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool is_noref,
-                                 bool is_tlab,
-                                 bool* gc_overhead_limit_was_exceeded);
-
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)

@@ -1046,7 +1237,6 @@ public:
   virtual bool supports_tlab_allocation() const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
-  virtual HeapWord* allocate_new_tlab(size_t word_size);

   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object

@@ -1186,7 +1376,6 @@ public:
   static G1CollectedHeap* heap();

   void empty_young_list();
-  bool should_set_young_locked();

   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups

@@ -1339,8 +1528,6 @@ public:
 protected:
   size_t _max_heap_capacity;

-  // debug_only(static void check_for_valid_allocation_state();)
-
 public:
   // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
   // MemoryService). In productization, we can make this assert false
@@ -27,6 +27,7 @@

 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "utilities/taskqueue.hpp"

@@ -58,37 +59,114 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   return r != NULL && r->in_collection_set();
 }

-inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                     bool permit_collection_pause) {
-  HeapWord* res = NULL;
-
-  assert( SafepointSynchronize::is_at_safepoint() ||
-          Heap_lock->owned_by_self(), "pre-condition of the call" );
-
-  // All humongous allocation requests should go through the slow path in
-  // attempt_allocation_slow().
-  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
-    // If this allocation causes a region to become non empty,
-    // then we need to update our free_regions count.
-
-    if (_cur_alloc_region->is_empty()) {
-      res = _cur_alloc_region->allocate(word_size);
-      if (res != NULL)
-        _free_regions--;
-    } else {
-      res = _cur_alloc_region->allocate(word_size);
-    }
-
-    if (res != NULL) {
-      if (!SafepointSynchronize::is_at_safepoint()) {
-        assert( Heap_lock->owned_by_self(), "invariant" );
-        Heap_lock->unlock();
-      }
-      return res;
-    }
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+inline HeapWord*
+G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
+                                                size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(cur_alloc_region != NULL, "pre-condition of the method");
+  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
+  assert(cur_alloc_region->is_young(),
+         "we only support young current alloc regions");
+  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
+         "should not be used for humongous allocations");
+  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
+
+  assert(!cur_alloc_region->is_empty(),
+         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
+                 cur_alloc_region->bottom(), cur_alloc_region->end()));
+  // This allocate method does BOT updates and we don't need them in
+  // the young generation. This will be fixed in the near future by
+  // CR 6994297.
+  HeapWord* result = cur_alloc_region->allocate(word_size);
+  if (result != NULL) {
+    assert(is_in(result), "result should be in the heap");
+    Heap_lock->unlock();
+
+    // Do the dirtying after we release the Heap_lock.
+    dirty_young_block(result, word_size);
+    return result;
   }
-  // attempt_allocation_slow will also unlock the heap lock when appropriate.
-  return attempt_allocation_slow(word_size, permit_collection_pause);
+
+  assert_heap_locked();
+  return NULL;
+}
+
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+inline HeapWord*
+G1CollectedHeap::attempt_allocation(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
+         "for humongous allocation requests");
+
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
+                                                      word_size);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    assert_heap_locked();
+
+    // Since we couldn't successfully allocate into it, retire the
+    // current alloc region.
+    retire_cur_alloc_region(cur_alloc_region);
+  }
+
+  // Try to get a new region and allocate out of it
+  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
+                                                           false, /* at safepoint */
+                                                           true /* do_dirtying */);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
+inline void
+G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
+  assert_heap_locked_or_at_safepoint();
+  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
+         "pre-condition of the call");
+  assert(cur_alloc_region->is_young(),
+         "we only support young current alloc regions");
+
+  // The region is guaranteed to be young
+  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
+  _summary_bytes_used += cur_alloc_region->used();
+  _cur_alloc_region = NULL;
+}
+
+// It dirties the cards that cover the block so that so that the post
+// write barrier never queues anything when updating objects on this
+// block. It is assumed (and in fact we assert) that the block
+// belongs to a young region.
+inline void
+G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
+  assert_heap_not_locked();
+
+  // Assign the containing region to containing_hr so that we don't
+  // have to keep calling heap_region_containing_raw() in the
+  // asserts below.
+  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+  assert(containing_hr != NULL && start != NULL && word_size > 0,
+         "pre-condition");
+  assert(containing_hr->is_in(start), "it should contain start");
+  assert(containing_hr->is_young(), "it should be young");
+  assert(!containing_hr->isHumongous(), "it should not be humongous");
+
+  HeapWord* end = start + word_size;
+  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
+
+  MemRegion mr(start, end);
+  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
 }

 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
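Note (annotation, not part of the diff): dirty_young_block() above pre-dirties every card spanned by a block freshly allocated in a young region, so the post write barrier's dirty-card filter never enqueues cards for such blocks. A standalone sketch of the card arithmetic, using a toy byte-per-card table and a fixed 512-byte card size (assumptions for illustration, not the HotSpot card-table API):

#include <cassert>
#include <cstddef>
#include <cstring>

// A toy card table: one byte per 512-byte card of a 1 MB "heap".
static const size_t heap_bytes = 1 << 20;
static const size_t card_size  = 512;
static unsigned char heap[heap_bytes];
static unsigned char cards[heap_bytes / card_size];   // 0 == dirty, 1 == clean

// Mark every card overlapped by [start, start + size) as dirty, the way
// dirty_young_block() pre-dirties cards for a new young-region block.
static void dirty_block(const unsigned char* start, size_t size) {
  assert(start >= heap && start + size <= heap + heap_bytes);
  size_t first = (size_t)(start - heap) / card_size;
  size_t last  = (size_t)(start + size - 1 - heap) / card_size;
  std::memset(&cards[first], 0, last - first + 1);
}

int main() {
  std::memset(cards, 1, sizeof(cards));   // start with all cards clean
  dirty_block(heap + 1000, 2048);         // spans cards 1 through 5
  return 0;
}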
@@ -458,8 +458,8 @@ void G1CollectorPolicy::calculate_young_list_min_length() {
     double now_sec = os::elapsedTime();
     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
     double alloc_rate_ms = predict_alloc_rate_ms();
-    int min_regions = (int) ceil(alloc_rate_ms * when_ms);
-    int current_region_num = (int) _g1->young_list()->length();
+    size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
+    size_t current_region_num = _g1->young_list()->length();
     _young_list_min_length = min_regions + current_region_num;
   }
 }

@@ -473,9 +473,12 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
       _young_list_target_length = _young_list_fixed_length;
     else
       _young_list_target_length = _young_list_fixed_length / 2;

-    _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
   }

+  // Make sure we allow the application to allocate at least one
+  // region before we need to do a collection again.
+  size_t min_length = _g1->young_list()->length() + 1;
+  _young_list_target_length = MAX2(_young_list_target_length, min_length);
   calculate_survivors_policy();
 }

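Note (annotation, not part of the diff): the target-length change above replaces the old floor of one region with a floor of one region more than is currently allocated, so the mutator can always allocate at least one new young region before the next pause is forced. In sketch form, with illustrative names:

#include <algorithm>
#include <cstddef>

// target: what the pause-time model asked for
// current: regions already in the young list
static size_t clamp_young_target(size_t target, size_t current) {
  size_t min_length = current + 1;          // allow at least one more region
  return std::max(target, min_length);
}
// e.g. clamp_young_target(3, 10) == 11, while the old floor gave max(3, 1) == 3.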
@@ -568,7 +571,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {

     // we should have at least one region in the target young length
     _young_list_target_length =
-        MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
+        final_young_length + _recorded_survivor_regions;

     // let's keep an eye of how long we spend on this calculation
     // right now, I assume that we'll print it when we need it; we

@@ -617,8 +620,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
                            _young_list_min_length);
 #endif // TRACE_CALC_YOUNG_LENGTH
     // we'll do the pause as soon as possible by choosing the minimum
-    _young_list_target_length =
-        MAX2(_young_list_min_length, (size_t) 1);
+    _young_list_target_length = _young_list_min_length;
   }

   _rs_lengths_prediction = rs_lengths;

@@ -801,7 +803,7 @@ void G1CollectorPolicy::record_full_collection_end() {
   _survivor_surv_rate_group->reset();
   calculate_young_list_min_length();
   calculate_young_list_target_length();
 }

 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
   _bytes_in_to_space_before_gc += bytes;

@@ -824,9 +826,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
     gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
   }

-  assert(_g1->used_regions() == _g1->recalculate_used_regions(),
-         "sanity");
-  assert(_g1->used() == _g1->recalculate_used(), "sanity");
+  assert(_g1->used() == _g1->recalculate_used(),
+         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
+                 _g1->used(), _g1->recalculate_used()));

   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
   _all_stop_world_times_ms->add(s_w_t_ms);

@@ -2266,24 +2268,13 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
 #endif // PRODUCT
 }

-bool
-G1CollectorPolicy::should_add_next_region_to_young_list() {
-  assert(in_young_gc_mode(), "should be in young GC mode");
-  bool ret;
-  size_t young_list_length = _g1->young_list()->length();
-  size_t young_list_max_length = _young_list_target_length;
-  if (G1FixedEdenSize) {
-    young_list_max_length -= _max_survivor_regions;
-  }
-  if (young_list_length < young_list_max_length) {
-    ret = true;
+void
+G1CollectorPolicy::update_region_num(bool young) {
+  if (young) {
     ++_region_num_young;
   } else {
-    ret = false;
     ++_region_num_tenured;
   }
-
-  return ret;
 }

 #ifndef PRODUCT

@@ -2327,32 +2318,6 @@ void G1CollectorPolicy::calculate_survivors_policy()
   }
 }

-bool
-G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
-                                                               word_size) {
-  assert(_g1->regions_accounted_for(), "Region leakage!");
-  double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-
-  size_t young_list_length = _g1->young_list()->length();
-  size_t young_list_max_length = _young_list_target_length;
-  if (G1FixedEdenSize) {
-    young_list_max_length -= _max_survivor_regions;
-  }
-  bool reached_target_length = young_list_length >= young_list_max_length;
-
-  if (in_young_gc_mode()) {
-    if (reached_target_length) {
-      assert( young_list_length > 0 && _g1->young_list()->length() > 0,
-              "invariant" );
-      return true;
-    }
-  } else {
-    guarantee( false, "should not reach here" );
-  }
-
-  return false;
-}
-
 #ifndef PRODUCT
 class HRSortIndexIsOKClosure: public HeapRegionClosure {
   CollectionSetChooser* _chooser;
@@ -993,11 +993,6 @@ public:
   void record_before_bytes(size_t bytes);
   void record_after_bytes(size_t bytes);

-  // Returns "true" if this is a good time to do a collection pause.
-  // The "word_size" argument, if non-zero, indicates the size of an
-  // allocation request that is prompting this query.
-  virtual bool should_do_collection_pause(size_t word_size) = 0;
-
   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.

@@ -1116,7 +1111,16 @@ public:
     // do that for any other surv rate groups
   }

-  bool should_add_next_region_to_young_list();
+  bool is_young_list_full() {
+    size_t young_list_length = _g1->young_list()->length();
+    size_t young_list_max_length = _young_list_target_length;
+    if (G1FixedEdenSize) {
+      young_list_max_length -= _max_survivor_regions;
+    }
+
+    return young_list_length >= young_list_max_length;
+  }
+  void update_region_num(bool young);

   bool in_young_gc_mode() {
     return _in_young_gc_mode;
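Note (annotation, not part of the diff): should_add_next_region_to_young_list() is split into a pure predicate, is_young_list_full(), and a separate update_region_num(). A standalone sketch of the predicate and of how an allocation path would consult it; the surrounding policy object and field names are simplified stand-ins:

#include <cstddef>

struct YoungPolicySketch {
  size_t young_list_length;          // regions currently in the young list
  size_t young_list_target_length;   // target computed by the pause-time model
  size_t max_survivor_regions;
  bool   fixed_eden_size;            // mirrors the G1FixedEdenSize flag

  bool is_young_list_full() const {
    size_t max_length = young_list_target_length;
    if (fixed_eden_size)
      max_length -= max_survivor_regions;
    return young_list_length >= max_length;
  }
};

// Caller side: instead of asking the policy whether to add a region, the
// allocation path asks whether the young list is already full and, if so,
// requests an evacuation pause before handing out a new young region.
static bool should_try_pause(const YoungPolicySketch& p) {
  return p.is_young_list_full();
}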
@@ -1270,7 +1274,6 @@ public:
     _collectionSetChooser = new CollectionSetChooser();
   }
   void record_collection_pause_end();
-  bool should_do_collection_pause(size_t word_size);
   // This is not needed any more, after the CSet choosing code was
   // changed to use the pause prediction work. But let's leave the
   // hook in just in case.
@@ -27,13 +27,22 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
+#include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "runtime/interfaceSupport.hpp"

+VM_G1CollectForAllocation::VM_G1CollectForAllocation(
+                                                  unsigned int gc_count_before,
+                                                  size_t word_size)
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
+  guarantee(word_size > 0, "an allocation should always be requested");
+}
+
 void VM_G1CollectForAllocation::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  _res = g1h->satisfy_failed_allocation(_size);
-  assert(g1h->is_in_or_null(_res), "result not in heap");
+  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+  assert(_result == NULL || _pause_succeeded,
+         "if we get back a result, the pause should have succeeded");
 }

 void VM_G1CollectFull::doit() {

@@ -43,6 +52,25 @@ void VM_G1CollectFull::doit() {
   g1h->do_full_collection(false /* clear_all_soft_refs */);
 }

+VM_G1IncCollectionPause::VM_G1IncCollectionPause(
+                                      unsigned int   gc_count_before,
+                                      size_t         word_size,
+                                      bool           should_initiate_conc_mark,
+                                      double         target_pause_time_ms,
+                                      GCCause::Cause gc_cause)
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+    _should_initiate_conc_mark(should_initiate_conc_mark),
+    _target_pause_time_ms(target_pause_time_ms),
+    _full_collections_completed_before(0) {
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
+            "we can only request an allocation if the GC cause is for "
+            "an incremental GC pause");
+  _gc_cause = gc_cause;
+}
+
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

@@ -51,6 +79,18 @@ void VM_G1IncCollectionPause::doit() {
     (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
          "only a GC locker or a System.gc() induced GC should start a cycle");

+  if (_word_size > 0) {
+    // An allocation has been requested. So, try to do that first.
+    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                     false /* expect_null_cur_alloc_region */);
+    if (_result != NULL) {
+      // If we can successfully allocate before we actually do the
+      // pause then we will consider this pause successful.
+      _pause_succeeded = true;
+      return;
+    }
+  }
+
   GCCauseSetter x(g1h, _gc_cause);
   if (_should_initiate_conc_mark) {
     // It's safer to read full_collections_completed() here, given

@@ -63,7 +103,16 @@ void VM_G1IncCollectionPause::doit() {
     // will do so if one is not already in progress.
     bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
   }
-  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+
+  _pause_succeeded =
+    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+  if (_pause_succeeded && _word_size > 0) {
+    // An allocation had been requested.
+    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                      true /* expect_null_cur_alloc_region */);
+  } else {
+    assert(_result == NULL, "invariant");
+  }
 }

 void VM_G1IncCollectionPause::doit_epilogue() {
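Note (annotation, not part of the diff): with these changes the pause operation carries an allocation request: it first tries to allocate at the safepoint (an earlier pause may already have freed space), only then runs the pause, and retries the allocation if the pause succeeded. A condensed standalone sketch of that control flow with placeholder functions, not the real VM-operation API:

#include <cstddef>

typedef void* HeapWord;

// Placeholders for the real safepoint-side routines.
static HeapWord allocate_at_safepoint(size_t /*word_size*/, bool /*expect_null_region*/) { return NULL; }
static bool     run_evacuation_pause(double /*target_pause_ms*/) { return true; }

struct PauseOutcome {
  HeapWord result;
  bool     pause_succeeded;
};

static PauseOutcome inc_collection_pause_sketch(size_t word_size, double target_pause_ms) {
  PauseOutcome out = { NULL, false };
  if (word_size > 0) {
    // Someone else's pause may already have made room: try before pausing.
    out.result = allocate_at_safepoint(word_size, false /* expect_null_cur_alloc_region */);
    if (out.result != NULL) {
      out.pause_succeeded = true;          // treated as a successful "pause"
      return out;
    }
  }
  out.pause_succeeded = run_evacuation_pause(target_pause_ms);
  if (out.pause_succeeded && word_size > 0) {
    // The pause retired the current alloc region, hence expect_null == true.
    out.result = allocate_at_safepoint(word_size, true /* expect_null_cur_alloc_region */);
  }
  return out;
}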
@@ -31,19 +31,33 @@
 // VM_GC_Operation:
 //   - VM_CGC_Operation
 //   - VM_G1CollectFull
-//   - VM_G1CollectForAllocation
-//   - VM_G1IncCollectionPause
-//   - VM_G1PopRegionCollectionPause
+//   - VM_G1OperationWithAllocRequest
+//     - VM_G1CollectForAllocation
+//     - VM_G1IncCollectionPause

+class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
+protected:
+  size_t    _word_size;
+  HeapWord* _result;
+  bool      _pause_succeeded;
+
+public:
+  VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
+                                 size_t       word_size)
+    : VM_GC_Operation(gc_count_before),
+      _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
+  HeapWord* result() { return _result; }
+  bool pause_succeeded() { return _pause_succeeded; }
+};
+
 class VM_G1CollectFull: public VM_GC_Operation {
 public:
   VM_G1CollectFull(unsigned int gc_count_before,
                    unsigned int full_gc_count_before,
                    GCCause::Cause cause)
     : VM_GC_Operation(gc_count_before, full_gc_count_before) {
     _gc_cause = cause;
   }
-  ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
   virtual void doit();
   virtual const char* name() const {

@@ -51,45 +65,28 @@ class VM_G1CollectFull: public VM_GC_Operation {
   }
 };

-class VM_G1CollectForAllocation: public VM_GC_Operation {
- private:
-  HeapWord* _res;
-  size_t    _size;    // size of object to be allocated
- public:
-  VM_G1CollectForAllocation(size_t size, int gc_count_before)
-    : VM_GC_Operation(gc_count_before) {
-    _size = size;
-    _res = NULL;
-  }
-  ~VM_G1CollectForAllocation() {}
+class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
+public:
+  VM_G1CollectForAllocation(unsigned int gc_count_before,
+                            size_t       word_size);
   virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
   virtual void doit();
   virtual const char* name() const {
     return "garbage-first collection to satisfy allocation";
   }
-  HeapWord* result() { return _res; }
 };

-class VM_G1IncCollectionPause: public VM_GC_Operation {
+class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
 private:
   bool _should_initiate_conc_mark;
   double _target_pause_time_ms;
   unsigned int _full_collections_completed_before;
 public:
   VM_G1IncCollectionPause(unsigned int   gc_count_before,
+                          size_t         word_size,
                           bool           should_initiate_conc_mark,
                           double         target_pause_time_ms,
-                          GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before),
-      _full_collections_completed_before(0),
-      _should_initiate_conc_mark(should_initiate_conc_mark),
-      _target_pause_time_ms(target_pause_time_ms) {
-    guarantee(target_pause_time_ms > 0.0,
-              err_msg("target_pause_time_ms = %1.6lf should be positive",
-                      target_pause_time_ms));
-
-    _gc_cause = cause;
-  }
+                          GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual void doit_epilogue();
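Note (annotation, not part of the diff): VM_G1OperationWithAllocRequest gives both allocation-driven operations one place to carry the request size and report the outcome, so callers can tell "the pause ran but found no space" apart from "the pause never ran". A standalone sketch of the caller-side pattern; the VMThread::execute() round trip is reduced to a direct call and the class is simplified:

#include <cstddef>

typedef void* HeapWord;

// Simplified stand-in for VM_G1OperationWithAllocRequest and its subclasses.
class AllocRequestOp {
protected:
  size_t   _word_size;
  HeapWord _result;
  bool     _pause_succeeded;
public:
  explicit AllocRequestOp(size_t word_size)
    : _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
  virtual ~AllocRequestOp() { }
  virtual void doit() = 0;                        // runs at a safepoint
  HeapWord result() const          { return _result; }
  bool     pause_succeeded() const { return _pause_succeeded; }
};

// Mutator side: execute the operation, then distinguish "pause ran but the
// heap is still full" (give up or expand) from "pause did not run" (retry).
static HeapWord allocate_via_op(AllocRequestOp& op) {
  op.doit();                                      // real code: VMThread::execute(&op)
  if (op.pause_succeeded()) return op.result();   // may be NULL: genuinely out of space
  return NULL;                                    // GC locker active or lost the race: retry
}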
@@ -103,14 +100,9 @@ public:
 class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char* _printGCMessage;
 public:
-  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) :
-    _cl(cl),
-    _printGCMessage(printGCMsg)
-  {}
-
-  ~VM_CGC_Operation() {}
-
+  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
+    : _cl(cl), _printGCMessage(printGCMsg) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
@@ -770,9 +770,8 @@ void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr(
-        "\nAbandoning %s discovered list",
-        list_name(i));
+      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
+                             list_name(i));
     }
     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   }

@@ -1059,9 +1058,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
     // During a multi-threaded discovery phase,
     // each thread saves to its "own" list.
     Thread* thr = Thread::current();
-    assert(thr->is_GC_task_thread(),
-           "Dubious cast from Thread* to WorkerThread*?");
-    id = ((WorkerThread*)thr)->id();
+    id = thr->as_Worker_thread()->id();
   } else {
     // single-threaded discovery, we save in round-robin
     // fashion to each of the lists.

@@ -1095,8 +1092,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
     ShouldNotReachHere();
   }
   if (TraceReferenceGC && PrintGCDetails) {
-    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
-                           id, list);
+    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
   }
   return list;
 }

@@ -1135,6 +1131,11 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
     if (_discovered_list_needs_barrier) {
       _bs->write_ref_field((void*)discovered_addr, current_head);
     }
+
+    if (TraceReferenceGC) {
+      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
+                             obj, obj->blueprint()->internal_name());
+    }
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...

@@ -1239,8 +1240,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
     // Check assumption that an object is not potentially
     // discovered twice except by concurrent collectors that potentially
     // trace the same Reference object twice.
-    assert(UseConcMarkSweepGC,
-           "Only possible with an incremental-update concurrent collector");
+    assert(UseConcMarkSweepGC || UseG1GC,
+           "Only possible with a concurrent marking collector");
    return true;
   }
 }

@@ -1293,26 +1294,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
     }
     list->set_head(obj);
     list->inc_length(1);
-  }

-  // In the MT discovery case, it is currently possible to see
-  // the following message multiple times if several threads
-  // discover a reference about the same time. Only one will
-  // however have actually added it to the disocvered queue.
-  // One could let add_to_discovered_list_mt() return an
-  // indication for success in queueing (by 1 thread) or
-  // failure (by all other threads), but I decided the extra
-  // code was not worth the effort for something that is
-  // only used for debugging support.
-  if (TraceReferenceGC) {
-    oop referent = java_lang_ref_Reference::referent(obj);
-    if (PrintGCDetails) {
+    if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                              obj, obj->blueprint()->internal_name());
     }
-    assert(referent->is_oop(), "Enqueued a bad referent");
   }
   assert(obj->is_oop(), "Enqueued a bad reference");
+  assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
   return true;
 }
@@ -78,6 +78,8 @@ class GCTaskQueue;
 class ThreadClosure;
 class IdealGraphPrinter;

+class WorkerThread;
+
 // Class hierarchy
 // - Thread
 //   - NamedThread

@@ -289,6 +291,10 @@ class Thread: public ThreadShadow {
   virtual bool is_Watcher_thread() const { return false; }
   virtual bool is_ConcurrentGC_thread() const { return false; }
   virtual bool is_Named_thread() const { return false; }
+  virtual bool is_Worker_thread() const { return false; }
+
+  // Casts
+  virtual WorkerThread* as_Worker_thread() const { return NULL; }

   virtual char* name() const { return (char*)"Unknown thread"; }

@@ -628,9 +634,16 @@ class WorkerThread: public NamedThread {
 private:
   uint _id;
 public:
   WorkerThread() : _id(0) { }
-  void set_id(uint work_id) { _id = work_id; }
-  uint id() const { return _id; }
+  virtual bool is_Worker_thread() const { return true; }
+
+  virtual WorkerThread* as_Worker_thread() const {
+    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
+    return (WorkerThread*) this;
+  }
+
+  void set_id(uint work_id) { _id = work_id; }
+  uint id() const { return _id; }
 };

 // A single WatcherThread is used for simulating timer interrupts.
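Note (annotation, not part of the diff): the Thread/WorkerThread changes replace ad-hoc (WorkerThread*) casts, such as the one removed from referenceProcessor.cpp above, with a virtual, asserted downcast. A minimal standalone rendering of that checked-cast idiom:

#include <cassert>

class WorkerThread;

class Thread {
public:
  virtual ~Thread() { }
  virtual bool is_Worker_thread() const { return false; }
  // Checked cast: returns NULL unless overridden by WorkerThread.
  virtual WorkerThread* as_Worker_thread() const { return 0; }
};

class WorkerThread : public Thread {
  unsigned _id;
public:
  WorkerThread() : _id(0) { }
  virtual bool is_Worker_thread() const { return true; }
  virtual WorkerThread* as_Worker_thread() const {
    assert(is_Worker_thread() && "Dubious cast to WorkerThread*?");
    return const_cast<WorkerThread*>(this);
  }
  unsigned id() const { return _id; }
};

// Call sites go from "((WorkerThread*)thr)->id()" plus a separate assert
// to the self-checking "thr->as_Worker_thread()->id()".
static unsigned worker_id(const Thread* thr) {
  return thr->as_Worker_thread()->id();
}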