Commit faf320aede (Merge): 10 changed files with 1060 additions and 403 deletions
@@ -1051,6 +1051,7 @@ public:
  void work(int worker_i) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();
@@ -1888,6 +1889,9 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ReferenceProcessor* rp = g1h->ref_processor();

  // See the comment in G1CollectedHeap::ref_processing_init()
  // about how reference processing currently works in G1.

  // Process weak references.
  rp->setup_policy(clear_all_soft_refs);
  assert(_markStack.isEmpty(), "mark stack should be empty");
@@ -2918,7 +2922,11 @@ public:
  CMOopClosure(G1CollectedHeap* g1h,
               ConcurrentMark* cm,
               CMTask* task)
    : _g1h(g1h), _cm(cm), _task(task) { }
    : _g1h(g1h), _cm(cm), _task(task)
  {
    _ref_processor = g1h->ref_processor();
    assert(_ref_processor != NULL, "should not be NULL");
  }
};

void CMTask::setup_for_region(HeapRegion* hr) {
@@ -58,10 +58,11 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in
// mem_allocate_work, which all such allocation functions call.
// (Note that this does not apply to TLAB allocation, which is not part
// of this interface: it is done by clients of this interface.)
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)

// Local to this file.
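The serialization invariant stated in that comment is easy to see in isolation. A minimal standalone sketch, using standard C++ stand-ins rather than the actual HotSpot types: both entry points take the same lock before touching shared allocation state.

    #include <cstddef>
    #include <mutex>

    static std::mutex heap_lock;           // stands in for Heap_lock
    static size_t summary_bytes_used = 0;  // shared allocation bookkeeping

    // Both entry points serialize on the same lock, mirroring
    // mem_allocate() and allocate_new_tlab() in the patch.
    void* mem_allocate_sketch(size_t word_size) {
      std::lock_guard<std::mutex> guard(heap_lock);
      summary_bytes_used += word_size;     // safe: the lock is held
      return nullptr;                      // actual allocation elided
    }

    void* allocate_new_tlab_sketch(size_t word_size) {
      std::lock_guard<std::mutex> guard(heap_lock);
      summary_bytes_used += word_size;
      return nullptr;
    }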
@@ -536,15 +537,17 @@ HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint();
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list. (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
  // We can't allocate humongous regions while cleanupComplete is
  // running, since some of the regions we find to be empty might not
  // yet be added to the unclean list. If we're already at a
  // safepoint, this call is unnecessary, not to mention wrong.
  if (!SafepointSynchronize::is_at_safepoint()) {
    wait_for_cleanup_complete();
  }

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
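The num_regions computation above is a plain round-up: a humongous object of word_size words occupies ceil(word_size / GrainWords) regions. A standalone sketch, with an assumed example value for HeapRegion::GrainWords (the real constant depends on the configured region size):

    #include <cstddef>
    #include <cstdio>

    // Assumed example value, not the real HeapRegion::GrainWords.
    static const size_t GrainWords = 128 * 1024;

    static size_t round_to(size_t value, size_t multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    int main() {
      size_t word_size   = 3 * GrainWords + 17;  // spills into a fourth region
      size_t num_regions = round_to(word_size, GrainWords) / GrainWords;
      std::printf("num_regions = %zu\n", num_regions);  // prints 4
      return 0;
    }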
@@ -598,106 +601,391 @@ HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  return res;
}

void
G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
  // The cleanup operation might update _summary_bytes_used
  // concurrently with this method. So, right now, if we don't wait
  // for it to complete, updates to _summary_bytes_used might get
  // lost. This will be resolved in the near future when the operation
  // of the free region list is revamped as part of CR 6977804.
  wait_for_cleanup_complete();

  retire_cur_alloc_region_common(cur_alloc_region);
  assert(_cur_alloc_region == NULL, "post-condition");
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;
G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
                                                       bool at_safepoint,
                                                       bool do_dirtying) {
  assert_heap_locked_or_at_safepoint();
  assert(_cur_alloc_region == NULL,
         "replace_cur_alloc_region_and_allocate() should only be called "
         "after retiring the previous current alloc region");
  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
         "at_safepoint and is_at_safepoint() should be a tautology");

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
  if (!g1_policy()->is_young_list_full()) {
    if (!at_safepoint) {
      // The cleanup operation might update _summary_bytes_used
      // concurrently with this method. So, right now, if we don't
      // wait for it to complete, updates to _summary_bytes_used might
      // get lost. This will be resolved in the near future when the
      // operation of the free region list is revamped as part of
      // CR 6977804. If we're already at a safepoint, this call is
      // unnecessary, not to mention wrong.
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value. If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      // As we're building (at least the young portion) of the collection
      // set incrementally we'll add the current allocation region to
      // the collection set here.
      if (_cur_alloc_region->is_young()) {
        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
      }
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      if (allocated_young_region != NULL) {
        // We need to ensure that the store to top does not
        // float above the setting of the young type.
    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
                                                      false /* zero_filled */);
    if (new_cur_alloc_region != NULL) {
      assert(new_cur_alloc_region->is_empty(),
             "the newly-allocated region should be empty, "
             "as right now we only allocate new regions out of the free list");
      g1_policy()->update_region_num(true /* next_is_young */);
      _summary_bytes_used -= new_cur_alloc_region->used();
      set_region_short_lived_locked(new_cur_alloc_region);

      assert(!new_cur_alloc_region->isHumongous(),
             "Catch a regression of this bug.");

      // We need to ensure that the stores to _cur_alloc_region and,
      // subsequently, to top do not float above the setting of the
      // young type.
      OrderAccess::storestore();
    }
        res = _cur_alloc_region->allocate(word_size);
      }
    }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");
      // Now allocate out of the new current alloc region. We could
      // have re-used allocate_from_cur_alloc_region() but its
      // operation is slightly different to what we need here. First,
      // allocate_from_cur_alloc_region() is only called outside a
      // safepoint and will always unlock the Heap_lock if it returns
      // a non-NULL result. Second, it assumes that the current alloc
      // region is what's already assigned in _cur_alloc_region. What
      // we want here is to actually do the allocation first before we
      // assign the new region to _cur_alloc_region. This ordering is
      // not currently important, but it will be essential when we
      // change the code to support CAS allocation in the future (see
      // CR 6994297).
      //
      // This allocate method does BOT updates and we don't need them in
      // the young generation. This will be fixed in the near future by
      // CR 6994297.
      HeapWord* result = new_cur_alloc_region->allocate(word_size);
      assert(result != NULL, "we just allocated out of an empty region "
             "so allocation should have been successful");
      assert(is_in(result), "result should be in the heap");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      _cur_alloc_region = new_cur_alloc_region;

      if (!at_safepoint) {
        Heap_lock->unlock();
      }

      if (allocated_young_region != NULL) {
        HeapRegion* hr = allocated_young_region;
        HeapWord* bottom = hr->bottom();
        HeapWord* end = hr->end();
        MemRegion mr(bottom, end);
        ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
      // do the dirtying, if necessary, after we release the Heap_lock
      if (do_dirtying) {
        dirty_young_block(result, word_size);
      }
      return result;
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );
  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
         "alloc region, it should still be NULL");
  assert_heap_locked_or_at_safepoint();
  return NULL;
}

  return res;
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
  assert_heap_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
         "used for humongous allocations");

  // We will loop while succeeded is false, which means that we tried
  // to do a collection, but the VM op did not succeed. So, when we
  // exit the loop, either one of the allocation attempts was
  // successful, or we succeeded in doing the VM op, which was
  // nonetheless unable to allocate after the collection.
  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
    bool succeeded = true;

    {
      // We may have concurrent cleanup working at the time. Wait for
      // it to complete. In the future we would probably want to make
      // the concurrent cleanup truly concurrent by decoupling it from
      // the allocation. This will happen in the near future as part
      // of CR 6977804 which will revamp the operation of the free
      // region list. The fact that wait_for_cleanup_complete() will
      // do a wait() means that we'll give up the Heap_lock. So, it's
      // possible that when we exit wait_for_cleanup_complete() we
      // might be able to allocate successfully (since somebody else
      // might have done a collection meanwhile). So, we'll attempt to
      // allocate again, just in case. When we make cleanup truly
      // concurrent with allocation, we should remove this allocation
      // attempt as it's redundant (we only reach here after an
      // allocation attempt has been unsuccessful).
      wait_for_cleanup_complete();
      HeapWord* result = attempt_allocation(word_size);
      if (result != NULL) {
        assert_heap_not_locked();
        return result;
      }
    }

    if (GC_locker::is_active_and_needs_gc()) {
      // We are locked out of GC because of the GC locker. Right now,
      // we'll just stall until the GC locker-induced GC
      // completes. This will be fixed in the near future by extending
      // the eden while waiting for the GC locker to schedule the GC
      // (see CR 6994056).

      // If this thread is not in a jni critical section, we stall
      // the requestor until the critical section has cleared and
      // GC allowed. When the critical section clears, a GC is
      // initiated by the last thread exiting the critical section; so
      // we retry the allocation sequence from the beginning of the loop,
      // rather than causing more, now probably unnecessary, GC attempts.
      JavaThread* jthr = JavaThread::current();
      assert(jthr != NULL, "sanity");
      if (!jthr->in_critical()) {
        MutexUnlocker mul(Heap_lock);
        GC_locker::stall_until_clear();

        // We'll then fall off the end of the ("if GC locker active")
        // if-statement and retry the allocation further down in the
        // loop.
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    } else {
      // We are not locked out. So, let's try to do a GC. The VM op
      // will retry the allocation before it completes.

      // Read the GC count while holding the Heap_lock
      unsigned int gc_count_before = SharedHeap::heap()->total_collections();

      Heap_lock->unlock();

      HeapWord* result =
        do_collection_pause(word_size, gc_count_before, &succeeded);
      assert_heap_not_locked();
      if (result != NULL) {
        assert(succeeded, "the VM op should have succeeded");

        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here.
        dirty_young_block(result, word_size);
        return result;
      }

      Heap_lock->lock();
    }

    assert_heap_locked();

    // We can reach here when we were unsuccessful in doing a GC,
    // because another thread beat us to it, or because we were locked
    // out of GC due to the GC locker. In either case a new alloc
    // region might be available so we will retry the allocation.
    HeapWord* result = attempt_allocation(word_size);
    if (result != NULL) {
      assert_heap_not_locked();
      return result;
    }

    // So far our attempts to allocate failed. The only time we'll go
    // around the loop and try again is if we tried to do a GC and the
    // VM op that we tried to schedule was not successful because
    // another thread beat us to it. If that happened it's possible
    // that by the time we grabbed the Heap_lock again and tried to
    // allocate other threads filled up the young generation, which
    // means that the allocation attempt after the GC also failed. So,
    // it's worth trying to schedule another GC pause.
    if (succeeded) {
      break;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  assert_heap_locked();
  return NULL;
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
HeapWord*
G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                              bool at_safepoint) {
  // This is the method that will allocate a humongous object. All
  // allocation paths that attempt to allocate a humongous object
  // should eventually reach here. Currently, the only paths are from
  // mem_allocate() and attempt_allocation_at_safepoint().
  assert_heap_locked_or_at_safepoint();
  assert(isHumongous(word_size), "attempt_allocation_humongous() "
         "should only be used for humongous allocations");
  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
         "at_safepoint and is_at_safepoint() should be a tautology");

  HeapWord* result = NULL;

  // We will loop while succeeded is false, which means that we tried
  // to do a collection, but the VM op did not succeed. So, when we
  // exit the loop, either one of the allocation attempts was
  // successful, or we succeeded in doing the VM op, which was
  // nonetheless unable to allocate after the collection.
  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
    bool succeeded = true;

    // Given that humongous objects are not allocated in young
    // regions, we'll first try to do the allocation without doing a
    // collection hoping that there's enough space in the heap.
    result = humongous_obj_allocate(word_size);
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "catch a regression of this bug.");
    if (result != NULL) {
      if (!at_safepoint) {
        // If we're not at a safepoint, unlock the Heap_lock.
        Heap_lock->unlock();
      }
      return result;
    }

    // If we failed to allocate the humongous object, we should try to
    // do a collection pause (if we're allowed) in case it reclaims
    // enough space for the allocation to succeed after the pause.
    if (!at_safepoint) {
      // Read the GC count while holding the Heap_lock
      unsigned int gc_count_before = SharedHeap::heap()->total_collections();

      // If we're allowed to do a collection we're not at a
      // safepoint, so it is safe to unlock the Heap_lock.
      Heap_lock->unlock();

      result = do_collection_pause(word_size, gc_count_before, &succeeded);
      assert_heap_not_locked();
      if (result != NULL) {
        assert(succeeded, "the VM op should have succeeded");
        return result;
      }

      // If we get here, the VM operation either did not succeed
      // (i.e., another thread beat us to it) or it succeeded but
      // failed to allocate the object.

      // If we're allowed to do a collection we're not at a
      // safepoint, so it is safe to lock the Heap_lock.
      Heap_lock->lock();
    }

    assert(result == NULL, "otherwise we should have exited the loop earlier");

    // So far our attempts to allocate failed. The only time we'll go
    // around the loop and try again is if we tried to do a GC and the
    // VM op that we tried to schedule was not successful because
    // another thread beat us to it. That way it's possible that some
    // space was freed up by the thread that successfully scheduled a
    // GC. So it's worth trying to allocate again.
    if (succeeded) {
      break;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_humongous "
              "retries %d times", try_count);
    }
  }

  assert_heap_locked_or_at_safepoint();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                       bool expect_null_cur_alloc_region) {
  assert_at_safepoint();
  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
         err_msg("the current alloc region was unexpectedly found "
                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));

  if (!isHumongous(word_size)) {
    if (!expect_null_cur_alloc_region) {
      HeapRegion* cur_alloc_region = _cur_alloc_region;
      if (cur_alloc_region != NULL) {
        // This allocate method does BOT updates and we don't need them in
        // the young generation. This will be fixed in the near future by
        // CR 6994297.
        HeapWord* result = cur_alloc_region->allocate(word_size);
        if (result != NULL) {
          assert(is_in(result), "result should be in the heap");

          // We will not do any dirtying here. This is guaranteed to be
          // called during a safepoint and the thread that scheduled the
          // pause will do the dirtying if we return a non-NULL result.
          return result;
        }

        retire_cur_alloc_region_common(cur_alloc_region);
      }
    }

    assert(_cur_alloc_region == NULL,
           "at this point we should have no cur alloc region");
    return replace_cur_alloc_region_and_allocate(word_size,
                                                 true, /* at_safepoint */
                                                 false /* do_dirtying */);
  } else {
    return attempt_allocation_humongous(word_size,
                                        true /* at_safepoint */);
  }

  ShouldNotReachHere();
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");

  Heap_lock->lock();

  // First attempt: try allocating out of the current alloc region or
  // after replacing the current alloc region.
  HeapWord* result = attempt_allocation(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();

  // Second attempt: go into the even slower path where we might
  // try to schedule a collection.
  result = attempt_allocation_slow(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  Heap_lock->unlock();
  return NULL;
}

HeapWord*
@@ -705,46 +993,82 @@ G1CollectedHeap::mem_allocate(size_t word_size,
                              bool is_noref,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_tlab, "mem_allocate() should not be called directly "
         "to allocate TLABs");

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    unsigned int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);

      if (!isHumongous(word_size)) {
        // First attempt: try allocating out of the current alloc
        // region or after replacing the current alloc region.
        HeapWord* result = attempt_allocation(word_size);
        if (result != NULL) {
          // attempt_allocation should have unlocked the heap lock
          assert(is_in(result), "result not in heap");
          assert_heap_not_locked();
          return result;
        }

        assert_heap_locked();

        // Second attempt: go into the even slower path where we might
        // try to schedule a collection.
        result = attempt_allocation_slow(word_size);
        if (result != NULL) {
          assert_heap_not_locked();
          return result;
        }
      } else {
        HeapWord* result = attempt_allocation_humongous(word_size,
                                                        false /* at_safepoint */);
        if (result != NULL) {
          assert_heap_not_locked();
          return result;
        }
      }

      assert_heap_locked();
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    VM_G1CollectForAllocation op(gc_count_before, word_size);
    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");

    assert_heap_not_locked();
    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !isHumongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
}

void G1CollectedHeap::abandon_cur_alloc_region() {
@@ -841,11 +1165,11 @@ public:
  }
};

void G1CollectedHeap::do_collection(bool explicit_gc,
bool G1CollectedHeap::do_collection(bool explicit_gc,
                                    bool clear_all_soft_refs,
                                    size_t word_size) {
  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
    return false;
  }

  ResourceMark rm;
@@ -929,6 +1253,9 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
    g1_policy()->set_full_young_gcs(true);
  }

  // See the comment in G1CollectedHeap::ref_processing_init() about
  // how reference processing currently works in G1.

  // Temporarily make reference _discovery_ single threaded (non-MT).
  ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
@@ -1047,10 +1374,17 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  return true;
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, /* explicit_gc */
  // do_collection() will return whether it succeeded in performing
  // the GC. Currently, there is no facility on the
  // do_full_collection() API to notify the caller that the collection
  // did not succeed (e.g., because it was locked out by the GC
  // locker). So, right now, we'll ignore the return value.
  bool dummy = do_collection(true, /* explicit_gc */
                             clear_all_soft_refs,
                             0 /* word_size */);
}
@@ -1175,36 +1509,63 @@ resize_if_necessary_after_full_collection(size_t word_size) {

HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                           bool* succeeded) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "satisfy_failed_allocation() should only be called at a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "satisfy_failed_allocation() should only be called by the VM thread");

  *succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result = attempt_allocation_at_safepoint(word_size,
                                     false /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    assert(*succeeded, "sanity");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  // Expansion didn't work, we'll try to do a Full GC.
  bool gc_succeeded = do_collection(false, /* explicit_gc */
                                    false, /* clear_all_soft_refs */
                                    word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation
  result = attempt_allocation_at_safepoint(word_size,
                                      true /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    assert(*succeeded, "sanity");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  // Then, try a Full GC that will collect all soft references.
  gc_succeeded = do_collection(false, /* explicit_gc */
                               true, /* clear_all_soft_refs */
                               word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation once more
  result = attempt_allocation_at_safepoint(word_size,
                                      true /* expect_null_cur_alloc_region */);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    assert(*succeeded, "sanity");
    return result;
  }
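The new control flow is a straight escalation ladder, which the interleaved diff makes hard to see at a glance. A compressed standalone sketch with stub stand-ins for the callees (not the real signatures):

    #include <cstddef>

    // Stubs standing in for attempt_allocation_at_safepoint(),
    // expand_and_allocate() and do_collection(); illustration only.
    static void* try_alloc(size_t)        { return nullptr; }
    static void* expand_and_alloc(size_t) { return nullptr; }
    static bool  full_gc(bool /* clear_all_soft_refs */) { return true; }

    void* satisfy_failed_allocation_sketch(size_t word_size, bool* succeeded) {
      *succeeded = true;
      if (void* r = try_alloc(word_size))        return r;  // 1. plain retry
      if (void* r = expand_and_alloc(word_size)) return r;  // 2. grow the heap
      if (!full_gc(false)) { *succeeded = false; return nullptr; }
      if (void* r = try_alloc(word_size))        return r;  // 3. after a Full GC
      if (!full_gc(true))  { *succeeded = false; return nullptr; }
      if (void* r = try_alloc(word_size))        return r;  // 4. after clearing soft refs
      return nullptr;  // give up; *succeeded stays true
    }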
@@ -1215,6 +1576,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  assert(*succeeded, "sanity");
  return NULL;
}
@@ -1224,14 +1586,20 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
// allocated block, or else "NULL".

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "expand_and_allocate() should only be called at a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "expand_and_allocate() should only be called by the VM thread");

  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;

  return attempt_allocation_at_safepoint(word_size,
                                     false /* expect_null_cur_alloc_region */);
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
@@ -1649,6 +2017,24 @@ jint G1CollectedHeap::initialize() {
}

void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There is only one reference processor instance that
  //   'spans' the entire heap. It is created by the code
  //   below.
  // * Reference discovery is not enabled during an incremental
  //   pause (see 6484982).
  // * Discovered refs are not enqueued nor are they processed
  //   during an incremental pause (see 6484982).
  // * Reference discovery is enabled at initial marking.
  // * Reference discovery is disabled and the discovered
  //   references processed etc during remarking.
  // * Reference discovery is MT (see below).
  // * Reference discovery requires a barrier (see below).
  // * Reference processing is currently not MT (see 6608385).
  // * A full GC enables (non-MT) reference discovery and
  //   processes any discovered references.
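The schedule in that bullet list can be summarized as a small table. An illustrative standalone sketch (the enum and struct are invented for the summary, not HotSpot types):

    enum GCPhase { IncrementalPause, InitialMark, Remark, FullGC };

    struct RefPolicy {
      bool discovery_enabled;
      bool refs_processed;
    };

    // Mirrors the comment above: discovery is off during incremental
    // pauses (6484982), turned on at initial marking, and turned off
    // again at remark, where the discovered references are processed.
    // A Full GC does (non-MT) discovery and processing itself.
    static RefPolicy ref_policy(GCPhase phase) {
      switch (phase) {
        case IncrementalPause: return { false, false };
        case InitialMark:      return { true,  false };
        case Remark:           return { false, true  };
        case FullGC:           return { true,  true  };
      }
      return { false, false };
    }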

  SharedHeap::ref_processing_init();
  MemRegion mr = reserved_region();
  _ref_processor = ReferenceProcessor::create_ref_processor(
@@ -1842,20 +2228,24 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
  unsigned int full_gc_count_before;
  {
    MutexLocker ml(Heap_lock);

    // Don't want to do a GC until cleanup is completed. This
    // limitation will be removed in the near future when the
    // operation of the free region list is revamped as part of
    // CR 6977804.
    wait_for_cleanup_complete();

    // Read the GC count while holding the Heap_lock
    gc_count_before = SharedHeap::heap()->total_collections();
    full_gc_count_before = SharedHeap::heap()->total_full_collections();

    // Don't want to do a GC until cleanup is completed.
    wait_for_cleanup_complete();

    // We give up heap lock; VMThread::execute gets it back below
  }

  if (should_do_concurrent_full_gc(cause)) {
    // Schedule an initial-mark evacuation pause that will start a
    // concurrent cycle.
    // concurrent cycle. We're setting word_size to 0 which means that
    // we are not requesting a post-GC allocation.
    VM_G1IncCollectionPause op(gc_count_before,
                               0, /* word_size */
                               true, /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
                               cause);
@@ -1864,8 +2254,10 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
    if (cause == GCCause::_gc_locker
        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

      // Schedule a standard evacuation pause.
      // Schedule a standard evacuation pause. We're setting word_size
      // to 0 which means that we are not requesting a post-GC allocation.
      VM_G1IncCollectionPause op(gc_count_before,
                                 0, /* word_size */
                                 false, /* should_initiate_conc_mark */
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
@@ -2221,14 +2613,6 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  }
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert(!isHumongous(word_size),
         err_msg("a TLAB should not be of humongous size, "
                 "word_size = "SIZE_FORMAT, word_size));
  bool dummy;
  return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
}

bool G1CollectedHeap::allocs_are_zero_filled() {
  return false;
}
@@ -2633,27 +3017,26 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  // always_do_update_barrier = true;
}

void G1CollectedHeap::do_collection_pause() {
  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");

  // Read the GC count while holding the Heap_lock
  // we need to do this _before_ wait_for_cleanup_complete(), to
  // ensure that we do not give up the heap lock and potentially
  // pick up the wrong count
  unsigned int gc_count_before = SharedHeap::heap()->total_collections();

  // Don't want to do a GC pause while cleanup is being completed!
  wait_for_cleanup_complete();

HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                               unsigned int gc_count_before,
                                               bool* succeeded) {
  assert_heap_not_locked_and_not_at_safepoint();
  g1_policy()->record_stop_world_start();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_G1IncCollectionPause op(gc_count_before,
                               word_size,
                               false, /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
                               GCCause::_g1_inc_collection_pause);
    VMThread::execute(&op);
  }

  HeapWord* result = op.result();
  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  assert(result == NULL || ret_succeeded,
         "the result should be NULL if the VM did not succeed");
  *succeeded = ret_succeeded;

  assert_heap_not_locked();
  return result;
}
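The scheduling protocol above follows a common shape: construct a VM operation, hand it to the VM thread, then inspect both prologue_succeeded() and pause_succeeded(). A standalone sketch with simplified stand-ins for the operation and for VMThread::execute():

    #include <cstddef>

    struct CollectOpSketch {
      unsigned gc_count_before;   // sampled by the caller under the Heap_lock
      size_t   word_size;
      bool  prologue_ok = false;  // false if another thread GC'ed first
      bool  pause_ok    = false;  // false if the GC locker blocked the pause
      void* result      = nullptr;
    };

    // Stand-in for VMThread::execute(&op): runs the op at a safepoint.
    static void execute_on_vm_thread(CollectOpSketch&) { }

    void* do_collection_pause_sketch(size_t word_size,
                                     unsigned gc_count_before,
                                     bool* succeeded) {
      CollectOpSketch op{ gc_count_before, word_size };
      execute_on_vm_thread(op);   // blocks until the safepoint op has run
      // The result must be NULL unless the op really ran to completion.
      *succeeded = op.prologue_ok && op.pause_ok;
      return op.result;
    }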

void
@@ -2797,10 +3180,10 @@ void G1CollectedHeap::reset_taskqueue_stats() {
}
#endif // TASKQUEUE_STATS

void
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
    return false;
  }

  if (PrintHeapAtGC) {
@@ -2871,6 +3254,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  // Please see comment in G1CollectedHeap::ref_processing_init()
  // to see how reference processing currently works in G1.
  //
  // We want to turn off ref discovery, if necessary, and turn it back
  // on again later if we do. XXX Dubious: why is discovery disabled?
  bool was_enabled = ref_processor()->discovery_enabled();
@@ -3068,6 +3454,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_summary_info();
  }

  return true;
}

size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
@@ -3298,6 +3686,7 @@ void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  // untag the GC alloc regions and tear down the GC alloc region
  // list. It's desirable that no regions are tagged as GC alloc
  // outside GCs.

  forget_alloc_region_list();

  // The current alloc regions contain objs that have survived
@@ -3361,19 +3750,6 @@ void G1CollectedHeap::finalize_for_evac_failure() {

// *** Sequential G1 Evacuation

HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
  HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  // let the caller handle alloc failure
  if (alloc_region == NULL) return NULL;
  assert(isHumongous(word_size) || !alloc_region->isHumongous(),
         "Either the object is humongous or the region isn't");
  HeapWord* block = alloc_region->allocate(word_size);
  if (block == NULL) {
    block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
  }
  return block;
}

class G1IsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
@@ -4316,6 +4692,10 @@ g1_process_strong_roots(bool collecting_perm_gen,
  }
  // Finish with the ref_processor roots.
  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
    // We need to treat the discovered reference lists as roots and
    // keep entries (which are added by the marking threads) on them
    // live until they can be processed at the end of marking.
    ref_processor()->weak_oops_do(scan_non_heap_roots);
    ref_processor()->oops_do(scan_non_heap_roots);
  }
  g1_policy()->record_collection_pause_end_G1_strong_roots();
@@ -4381,6 +4761,11 @@ void G1CollectedHeap::evacuate_collection_set() {
  // on individual heap regions when we allocate from
  // them in parallel, so this seems like the correct place for this.
  retire_all_alloc_regions();

  // Weak root processing.
  // Note: when JSR 292 is enabled and code blobs can contain
  // non-perm oops then we will need to process the code blobs
  // here too.
  {
    G1IsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
@@ -4625,12 +5010,6 @@ void G1CollectedHeap::cleanUpCardTable() {
#endif
}

void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
  if (g1_policy()->should_do_collection_pause(word_size)) {
    do_collection_pause();
  }
}

void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  double young_time_ms = 0.0;
  double non_young_time_ms = 0.0;
@@ -4789,6 +5168,7 @@ void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
}

void G1CollectedHeap::wait_for_cleanup_complete() {
  assert_not_at_safepoint();
  MutexLockerEx x(Cleanup_mon);
  wait_for_cleanup_complete_locked();
}
@@ -5093,13 +5473,6 @@ size_t G1CollectedHeap::count_free_regions_list() {
  return n + m;
}

bool G1CollectedHeap::should_set_young_locked() {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
  return (g1_policy()->in_young_gc_mode() &&
          g1_policy()->should_add_next_region_to_young_list());
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  assert(heap_lock_held_for_gc(),
         "the heap lock should already be held by or for this thread");
@@ -290,6 +290,63 @@ private:
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_err_msg(__extra_message)                        \
  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                       \
          (__extra_message),                                                 \
          (!Heap_lock->owned_by_self()) ? "NOT " : "",                       \
          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")

#define assert_heap_locked()                                                 \
  do {                                                                       \
    assert(Heap_lock->owned_by_self(),                                       \
           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
  } while (0)

#define assert_heap_locked_or_at_safepoint()                                 \
  do {                                                                       \
    assert(Heap_lock->owned_by_self() ||                                     \
           SafepointSynchronize::is_at_safepoint(),                          \
           heap_locking_asserts_err_msg("should be holding the Heap_lock or "\
                                        "should be at a safepoint"));        \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                            \
  do {                                                                       \
    assert(Heap_lock->owned_by_self() &&                                     \
           !SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should be holding the Heap_lock and "\
                                        "should not be at a safepoint"));    \
  } while (0)

#define assert_heap_not_locked()                                             \
  do {                                                                       \
    assert(!Heap_lock->owned_by_self(),                                      \
           heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                        \
  do {                                                                       \
    assert(!Heap_lock->owned_by_self() &&                                    \
           !SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should not be holding the Heap_lock and "\
                                        "should not be at a safepoint"));    \
  } while (0)

#define assert_at_safepoint()                                                \
  do {                                                                       \
    assert(SafepointSynchronize::is_at_safepoint(),                          \
           heap_locking_asserts_err_msg("should be at a safepoint"));        \
  } while (0)

#define assert_not_at_safepoint()                                            \
  do {                                                                       \
    assert(!SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should not be at a safepoint"));    \
  } while (0)
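A short usage sketch for these macros (a hypothetical method, not part of the patch, assuming the surrounding G1CollectedHeap declarations): state the locking precondition on entry and the postcondition on each exit path, in the style of allocate_new_tlab() above.

    // Hypothetical illustration only.
    HeapWord* G1CollectedHeap::example_allocate(size_t word_size) {
      assert_heap_locked_and_not_at_safepoint();  // caller holds the Heap_lock
      HeapWord* result = attempt_allocation(word_size);
      if (result != NULL) {
        assert_heap_not_locked();  // the success path released the Heap_lock
        return result;
      }
      assert_heap_locked();        // the failure path still holds it
      return NULL;
    }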

protected:

  // Returns "true" iff none of the gc alloc regions have any allocations
@@ -329,31 +386,162 @@ protected:

  // Attempt to allocate an object of the given (very large) "word_size".
  // Returns "NULL" on failure.
  virtual HeapWord* humongousObjAllocate(size_t word_size);
  virtual HeapWord* humongous_obj_allocate(size_t word_size);

  // If possible, allocate a block of the given word_size, else return "NULL".
  // Returning NULL will trigger GC or heap expansion.
  // These two methods have rather awkward pre- and
  // post-conditions. If they are called outside a safepoint, then
  // they assume that the caller is holding the heap lock. Upon return
  // they release the heap lock, if they are returning a non-NULL
  // value. attempt_allocation_slow() also dirties the cards of a
  // newly-allocated young region after it releases the heap
  // lock. This change in interface was the neatest way to achieve
  // this card dirtying without affecting mem_allocate(), which is a
  // more frequently called method. We tried two or three different
  // approaches, but they were even more hacky.
  HeapWord* attempt_allocation(size_t word_size,
                               bool permit_collection_pause = true);
  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate()
  //   and mem_allocate() should never be called with is_tlab == true.
  //
  // * If the GC locker is active we currently stall until we can
  //   allocate a new young region. This will be changed in the
  //   near future (see CR 6994056).
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  HeapWord* attempt_allocation_slow(size_t word_size,
                                    bool permit_collection_pause = true);
  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool is_noref,
                                 bool is_tlab, /* expected to be false */
                                 bool* gc_overhead_limit_was_exceeded);
  // The following methods, allocate_from_cur_allocation_region(),
  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
  // attempt_allocation_slow(), and attempt_allocation_humongous()
  // have very awkward pre- and post-conditions with respect to
  // locking:
  //
  // If they are called outside a safepoint they assume the caller
  // holds the Heap_lock when it calls them. However, on exit they
  // will release the Heap_lock if they return a non-NULL result, but
  // keep holding the Heap_lock if they return a NULL result. The
  // reason for this is that we need to dirty the cards that span
  // allocated blocks on young regions to avoid having to take the
  // slow path of the write barrier (for performance reasons we don't
  // update RSets for references whose source is a young region, so we
  // don't need to look at dirty cards on young regions). But, doing
  // this card dirtying while holding the Heap_lock can be a
  // scalability bottleneck, especially given that some allocation
  // requests might be of non-trivial size (and the larger the region
  // size is, the fewer allocation requests will be considered
  // humongous, as the humongous size limit is a fraction of the
  // region size). So, when one of these calls succeeds in allocating
  // a block it does the card dirtying after it releases the Heap_lock
  // which is why it will return without holding it.
  //
  // The above asymmetry is the reason why locking / unlocking is done
  // explicitly (i.e., with Heap_lock->lock() and
  // Heap_lock->unlock()) instead of using MutexLocker and
  // MutexUnlocker objects. The latter would ensure that the lock is
  // unlocked / re-locked at every possible exit out of the basic
  // block. However, we only want that action to happen in selected
  // places.
  //
  // Further, if the above methods are called during a safepoint, then
  // naturally there's no assumption about the Heap_lock being held or
  // there's no attempt to unlock it. The parameter at_safepoint
  // indicates whether the call is made during a safepoint or not (as
  // an optimization, to avoid reading the global flag with
  // SafepointSynchronize::is_at_safepoint()).
  //
  // The methods share these parameters:
  //
  // * word_size    : the size of the allocation request in words
  // * at_safepoint : whether the call is done at a safepoint; this
  //   also determines whether a GC is permitted
  //   (at_safepoint == false) or not (at_safepoint == true)
  // * do_dirtying  : whether the method should dirty the allocated
  //   block before returning
  //
  // They all return either the address of the block, if they
  // successfully manage to allocate it, or NULL.
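The asymmetry described above is exactly what a scope-bound guard cannot express, which is why the patch uses explicit lock()/unlock(). A standalone sketch of the success/failure contract, with standard C++ stand-ins:

    #include <mutex>

    static std::mutex heap_lock;  // stands in for Heap_lock
    static char block_storage[64];

    // The caller locks heap_lock before the call. On success the lock
    // is released (so card dirtying can happen outside it) and the
    // block is returned; on failure the lock is still held and NULL
    // is returned, so the caller can retry or unlock itself.
    void* attempt_allocation_sketch(bool will_succeed) {
      if (will_succeed) {
        void* block = block_storage;  // actual allocation elided
        heap_lock.unlock();           // release first...
        // ...then dirty_young_block(block, word_size) would run here
        return block;
      }
      return nullptr;                 // heap_lock still held by the caller
    }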

  // It tries to satisfy an allocation request out of the current
  // allocating region, which is passed as a parameter. It assumes
  // that the caller has checked that the current allocating region is
  // not NULL. Given that the caller has to check the current
  // allocating region for at least NULL, it might as well pass it as
  // the first parameter so that the method doesn't have to read it
  // from the _cur_alloc_region field again.
  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                  size_t word_size);

  // It attempts to allocate out of the current alloc region. If that
  // fails, it retires the current alloc region (if there is one),
  // tries to get a new one and retries the allocation.
  inline HeapWord* attempt_allocation(size_t word_size);

  // It assumes that the current alloc region has been retired and
  // tries to allocate a new one. If it's successful, it performs
  // the allocation out of the new current alloc region and updates
  // _cur_alloc_region.
  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                  bool at_safepoint,
                                                  bool do_dirtying);

  // The slow path when we are unable to allocate a new current alloc
  // region to satisfy an allocation request (i.e., when
  // attempt_allocation() fails). It will try to do an evacuation
  // pause, which might stall due to the GC locker, and retry the
  // allocation attempt when appropriate.
  HeapWord* attempt_allocation_slow(size_t word_size);

  // The method that tries to satisfy a humongous allocation
  // request. If it cannot satisfy it, it will try to do an evacuation
  // pause to perhaps reclaim enough space to be able to satisfy the
  // allocation request afterwards.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         bool at_safepoint);

  // It does the common work when we are retiring the current alloc region.
  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);

  // It retires the current alloc region, which is passed as a
  // parameter (since, typically, the caller is already holding on to
  // it). It sets _cur_alloc_region to NULL.
  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);

  // It attempts to do an allocation immediately before or after an
  // evacuation pause and can only be called by the VM thread. It has
  // slightly different assumptions than the ones before (i.e., it
  // assumes that the current alloc region has been retired).
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_cur_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
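What the dirtying amounts to, as a standalone sketch (the card size and table here are illustrative, not HotSpot's actual card table): every card spanned by the block is marked dirty so the post write barrier's card check short-circuits for stores into the block.

    #include <cstddef>
    #include <cstring>

    static const size_t CardShift = 9;          // 512-byte cards, for illustration
    static unsigned char card_table[1u << 20];  // one byte per card; 0 == dirty here

    // Mark every card spanned by [start_offset, start_offset + byte_size).
    void dirty_young_block_sketch(size_t start_offset, size_t byte_size) {
      size_t first_card = start_offset >> CardShift;
      size_t last_card  = (start_offset + byte_size - 1) >> CardShift;
      std::memset(&card_table[first_card], 0, last_card - first_card + 1);
    }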

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be a humongous - it must fit into a single heap region.
  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
@@ -371,11 +559,13 @@ protected:

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references are cleared
  //   during the GC
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  void do_collection(bool explicit_gc,
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);
@ -391,13 +581,13 @@ protected:
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size);
HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

// Attempts to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
virtual HeapWord* expand_and_allocate(size_t word_size);
HeapWord* expand_and_allocate(size_t word_size);

public:
// Expand the garbage-first heap by at least the given size (in bytes!).
@ -478,21 +668,27 @@ protected:
void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

// Do an incremental collection: identify a collection set, and evacuate
// its live objects elsewhere.
virtual void do_collection_pause();
// Schedule the VM operation that will do an evacuation pause to
// satisfy an allocation request of word_size. *succeeded will
// return whether the VM operation was successful (it did do an
// evacuation pause) or not (another thread beat us to it or the GC
// locker was active). Given that we should not be holding the
// Heap_lock when we enter this method, we will pass the
// gc_count_before (i.e., total_collections()) as a parameter since
// it has to be read while holding the Heap_lock. Currently, both
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
HeapWord* do_collection_pause(size_t word_size,
unsigned int gc_count_before,
bool* succeeded);

// The guts of the incremental collection pause, executed by the vm
// thread.
virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
// thread. It returns false if it is unable to do the collection due
// to the GC locker being active, true otherwise.
bool do_collection_pause_at_safepoint(double target_pause_time_ms);

// Actually do the work of evacuating the collection set.
virtual void evacuate_collection_set();

// If this is an appropriate time, do a collection pause.
// The "word_size" argument, if non-zero, indicates the size of an
// allocation request that is prompting this query.
void do_collection_pause_if_appropriate(size_t word_size);
void evacuate_collection_set();

// The g1 remembered set of the heap.
G1RemSet* _g1_rem_set;
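The comment above describes a handshake worth spelling out: the caller samples the collection count while holding the Heap_lock, releases the lock, and the scheduled operation re-checks the count so that a pause another thread already completed is not repeated. A minimal sketch of that pattern, using std::mutex and a plain counter as stand-ins for the Heap_lock and total_collections():

    #include <cstdio>
    #include <mutex>

    // Hypothetical illustration of the gc_count_before handshake. The
    // count is read under the lock, the lock is released, and the "VM
    // operation" (inlined here) only runs a pause if no other thread has
    // done one in the meantime.
    static std::mutex heap_lock;
    static unsigned int total_collections = 0;

    static bool try_collection_pause(unsigned int gc_count_before) {
      std::lock_guard<std::mutex> guard(heap_lock);  // stands in for the safepoint
      if (total_collections != gc_count_before) {
        // Another thread beat us to it; the caller can simply retry its
        // allocation against the freshly collected heap.
        return false;
      }
      total_collections += 1;  // "do" the pause
      return true;
    }

    int main() {
      unsigned int gc_count_before;
      {
        std::lock_guard<std::mutex> guard(heap_lock);
        gc_count_before = total_collections;  // read while holding the lock
      }
      // Lock released here, mirroring the callers of do_collection_pause().
      bool did_pause = try_collection_pause(gc_count_before);
      std::printf("pause executed: %s\n", did_pause ? "yes" : "no");
      return 0;
    }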
@ -762,11 +958,6 @@ public:
#endif // PRODUCT

// These virtual functions do the actual allocation.
virtual HeapWord* mem_allocate(size_t word_size,
bool is_noref,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);

// Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.)

@ -1046,7 +1237,6 @@ public:
virtual bool supports_tlab_allocation() const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
virtual HeapWord* allocate_new_tlab(size_t word_size);

// Can a compiler initialize a new object without store barriers?
// This permission only extends from the creation of a new object

@ -1186,7 +1376,6 @@ public:
static G1CollectedHeap* heap();

void empty_young_list();
bool should_set_young_locked();

void set_region_short_lived_locked(HeapRegion* hr);
// add appropriate methods for any other surv rate groups

@ -1339,8 +1528,6 @@ public:
protected:
size_t _max_heap_capacity;

// debug_only(static void check_for_valid_allocation_state();)

public:
// Temporary: call to mark things unimplemented for the G1 heap (e.g.,
// MemoryService). In productization, we can make this assert false
@ -27,6 +27,7 @@

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "utilities/taskqueue.hpp"

@ -58,37 +59,114 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
return r != NULL && r->in_collection_set();
}

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
bool permit_collection_pause) {
HeapWord* res = NULL;
// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(cur_alloc_region != NULL, "pre-condition of the method");
assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");
assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
"should not be used for humongous allocations");
assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");

assert( SafepointSynchronize::is_at_safepoint() ||
Heap_lock->owned_by_self(), "pre-condition of the call" );

// All humongous allocation requests should go through the slow path in
// attempt_allocation_slow().
if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
// If this allocation causes a region to become non empty,
// then we need to update our free_regions count.

if (_cur_alloc_region->is_empty()) {
res = _cur_alloc_region->allocate(word_size);
if (res != NULL)
_free_regions--;
} else {
res = _cur_alloc_region->allocate(word_size);
}

if (res != NULL) {
if (!SafepointSynchronize::is_at_safepoint()) {
assert( Heap_lock->owned_by_self(), "invariant" );
assert(!cur_alloc_region->is_empty(),
err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
cur_alloc_region->bottom(), cur_alloc_region->end()));
// This allocate method does BOT updates and we don't need them in
// the young generation. This will be fixed in the near future by
// CR 6994297.
HeapWord* result = cur_alloc_region->allocate(word_size);
if (result != NULL) {
assert(is_in(result), "result should be in the heap");
Heap_lock->unlock();

// Do the dirtying after we release the Heap_lock.
dirty_young_block(result, word_size);
return result;
}
return res;

assert_heap_locked();
return NULL;
}

// See the comment in the .hpp file about the locking protocol and
// assumptions of this method (and other related ones).
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size) {
assert_heap_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not be called "
"for humongous allocation requests");

HeapRegion* cur_alloc_region = _cur_alloc_region;
if (cur_alloc_region != NULL) {
HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
word_size);
if (result != NULL) {
assert_heap_not_locked();
return result;
}

assert_heap_locked();

// Since we couldn't successfully allocate into it, retire the
// current alloc region.
retire_cur_alloc_region(cur_alloc_region);
}
// attempt_allocation_slow will also unlock the heap lock when appropriate.
return attempt_allocation_slow(word_size, permit_collection_pause);

// Try to get a new region and allocate out of it
HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
false, /* at safepoint */
true /* do_dirtying */);
if (result != NULL) {
assert_heap_not_locked();
return result;
}

assert_heap_locked();
return NULL;
}

inline void
G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
assert_heap_locked_or_at_safepoint();
assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
"pre-condition of the call");
assert(cur_alloc_region->is_young(),
"we only support young current alloc regions");

// The region is guaranteed to be young
g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
_summary_bytes_used += cur_alloc_region->used();
_cur_alloc_region = NULL;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert_heap_not_locked();

// Assign the containing region to containing_hr so that we don't
// have to keep calling heap_region_containing_raw() in the
// asserts below.
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
assert(containing_hr != NULL && start != NULL && word_size > 0,
"pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");
assert(!containing_hr->isHumongous(), "it should not be humongous");

HeapWord* end = start + word_size;
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

MemRegion mr(start, end);
((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}

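dirty_young_block() amounts to marking every card that overlaps [start, start + word_size), so the post write barrier later finds those cards already dirty and enqueues nothing. A standalone sketch of the index arithmetic, assuming the usual 512-byte cards (toy names, not the CardTableModRefBS API):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy card table: every 512-byte card overlapping the block is dirtied.
    static const size_t kCardShift = 9;          // 512-byte cards
    static const size_t kHeapBytes = 1 << 20;    // 1 MB toy "heap"
    static uint8_t card_table[kHeapBytes >> kCardShift];

    static void dirty_block(size_t start_offset, size_t byte_size) {
      size_t first_card = start_offset >> kCardShift;
      size_t last_card = (start_offset + byte_size - 1) >> kCardShift;
      for (size_t i = first_card; i <= last_card; i++) {
        card_table[i] = 1;  // 1 == dirty in this toy encoding
      }
    }

    int main() {
      std::memset(card_table, 0, sizeof(card_table));
      dirty_block(1000, 2000);  // bytes 1000..2999 span cards 1..5
      for (size_t i = 0; i < 8; i++) {
        std::printf("card %zu: %s\n", i, card_table[i] ? "dirty" : "clean");
      }
      return 0;
    }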
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {

@ -458,8 +458,8 @@ void G1CollectorPolicy::calculate_young_list_min_length() {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
double alloc_rate_ms = predict_alloc_rate_ms();
int min_regions = (int) ceil(alloc_rate_ms * when_ms);
int current_region_num = (int) _g1->young_list()->length();
size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
size_t current_region_num = _g1->young_list()->length();
_young_list_min_length = min_regions + current_region_num;
}
}
@ -473,9 +473,12 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
_young_list_target_length = _young_list_fixed_length;
else
_young_list_target_length = _young_list_fixed_length / 2;

_young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
}

// Make sure we allow the application to allocate at least one
// region before we need to do a collection again.
size_t min_length = _g1->young_list()->length() + 1;
_young_list_target_length = MAX2(_young_list_target_length, min_length);
calculate_survivors_policy();
}
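The added clamp guarantees the mutator can allocate at least one more young region before the next pause is triggered: if the heuristics ask for 8 regions but 10 are already in the young list, the target becomes 11. A worked example under those assumptions (hypothetical names, std::max standing in for MAX2):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t predicted_target = 8;   // what the sizing heuristics asked for
      size_t current_length = 10;    // regions already in the young list
      size_t min_length = current_length + 1;
      size_t target = std::max(predicted_target, min_length);
      std::printf("target young length: %zu\n", target);  // prints 11
      return 0;
    }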
@ -568,7 +571,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {

// we should have at least one region in the target young length
_young_list_target_length =
MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
final_young_length + _recorded_survivor_regions;

// let's keep an eye on how long we spend on this calculation
// right now, I assume that we'll print it when we need it; we

@ -617,8 +620,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
_young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
// we'll do the pause as soon as possible by choosing the minimum
_young_list_target_length =
MAX2(_young_list_min_length, (size_t) 1);
_young_list_target_length = _young_list_min_length;
}

_rs_lengths_prediction = rs_lengths;
@ -801,7 +803,7 @@ void G1CollectorPolicy::record_full_collection_end() {
_survivor_surv_rate_group->reset();
calculate_young_list_min_length();
calculate_young_list_target_length();
}
}

void G1CollectorPolicy::record_before_bytes(size_t bytes) {
_bytes_in_to_space_before_gc += bytes;
@ -824,9 +826,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
}

assert(_g1->used_regions() == _g1->recalculate_used_regions(),
"sanity");
assert(_g1->used() == _g1->recalculate_used(), "sanity");
assert(_g1->used() == _g1->recalculate_used(),
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
_g1->used(), _g1->recalculate_used()));

double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
_all_stop_world_times_ms->add(s_w_t_ms);
@ -2266,24 +2268,13 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
#endif // PRODUCT
}

bool
G1CollectorPolicy::should_add_next_region_to_young_list() {
assert(in_young_gc_mode(), "should be in young GC mode");
bool ret;
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
if (young_list_length < young_list_max_length) {
ret = true;
void
G1CollectorPolicy::update_region_num(bool young) {
if (young) {
++_region_num_young;
} else {
ret = false;
++_region_num_tenured;
}

return ret;
}

#ifndef PRODUCT
@ -2327,32 +2318,6 @@ void G1CollectorPolicy::calculate_survivors_policy()
}
}

bool
G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
word_size) {
assert(_g1->regions_accounted_for(), "Region leakage!");
double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;

size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}
bool reached_target_length = young_list_length >= young_list_max_length;

if (in_young_gc_mode()) {
if (reached_target_length) {
assert( young_list_length > 0 && _g1->young_list()->length() > 0,
"invariant" );
return true;
}
} else {
guarantee( false, "should not reach here" );
}

return false;
}

#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
CollectionSetChooser* _chooser;

@ -993,11 +993,6 @@ public:
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);

// Returns "true" if this is a good time to do a collection pause.
// The "word_size" argument, if non-zero, indicates the size of an
// allocation request that is prompting this query.
virtual bool should_do_collection_pause(size_t word_size) = 0;

// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
@ -1116,7 +1111,16 @@ public:
// do that for any other surv rate groups
}

bool should_add_next_region_to_young_list();
bool is_young_list_full() {
size_t young_list_length = _g1->young_list()->length();
size_t young_list_max_length = _young_list_target_length;
if (G1FixedEdenSize) {
young_list_max_length -= _max_survivor_regions;
}

return young_list_length >= young_list_max_length;
}
void update_region_num(bool young);

bool in_young_gc_mode() {
return _in_young_gc_mode;
@ -1270,7 +1274,6 @@ public:
_collectionSetChooser = new CollectionSetChooser();
}
void record_collection_pause_end();
bool should_do_collection_pause(size_t word_size);
// This is not needed any more, after the CSet choosing code was
// changed to use the pause prediction work. But let's leave the
// hook in just in case.

@ -27,13 +27,22 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"

VM_G1CollectForAllocation::VM_G1CollectForAllocation(
unsigned int gc_count_before,
size_t word_size)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
guarantee(word_size > 0, "an allocation should always be requested");
}

void VM_G1CollectForAllocation::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_res = g1h->satisfy_failed_allocation(_size);
assert(g1h->is_in_or_null(_res), "result not in heap");
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
assert(_result == NULL || _pause_succeeded,
"if we get back a result, the pause should have succeeded");
}

void VM_G1CollectFull::doit() {

@ -43,6 +52,25 @@ void VM_G1CollectFull::doit() {
g1h->do_full_collection(false /* clear_all_soft_refs */);
}

VM_G1IncCollectionPause::VM_G1IncCollectionPause(
unsigned int gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
GCCause::Cause gc_cause)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
_full_collections_completed_before(0) {
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));
guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
"we can only request an allocation if the GC cause is for "
"an incremental GC pause");
_gc_cause = gc_cause;
}

void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -51,6 +79,18 @@ void VM_G1IncCollectionPause::doit() {
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
"only a GC locker or a System.gc() induced GC should start a cycle");

if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
// pause then we will consider this pause successful.
_pause_succeeded = true;
return;
}
}

GCCauseSetter x(g1h, _gc_cause);
if (_should_initiate_conc_mark) {
// It's safer to read full_collections_completed() here, given

@ -63,7 +103,16 @@ void VM_G1IncCollectionPause::doit() {
// will do so if one is not already in progress.
bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
}

_pause_succeeded =
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
}
}

void VM_G1IncCollectionPause::doit_epilogue() {

@ -31,19 +31,33 @@
// VM_GC_Operation:
// - VM_CGC_Operation
// - VM_G1CollectFull
// - VM_G1OperationWithAllocRequest
// - VM_G1CollectForAllocation
// - VM_G1IncCollectionPause
// - VM_G1PopRegionCollectionPause

class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
protected:
size_t _word_size;
HeapWord* _result;
bool _pause_succeeded;

public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
size_t word_size)
: VM_GC_Operation(gc_count_before),
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; }
};

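VM_G1OperationWithAllocRequest factors the word_size/result/pause_succeeded plumbing out of the two concrete operations below. A minimal sketch of the same base-class pattern and how a caller reads the outcome (toy names; VMThread::execute() is modeled by a direct call):

    #include <cstddef>
    #include <cstdio>

    typedef unsigned long HeapWordToy;

    // Toy base "operation" carrying an allocation request and its outcome.
    class ToyOpWithAllocRequest {
     protected:
      size_t _word_size;
      HeapWordToy* _result;
      bool _pause_succeeded;
     public:
      ToyOpWithAllocRequest(size_t word_size)
        : _word_size(word_size), _result(nullptr), _pause_succeeded(false) { }
      virtual ~ToyOpWithAllocRequest() { }
      virtual void doit() = 0;
      HeapWordToy* result() { return _result; }
      bool pause_succeeded() { return _pause_succeeded; }
    };

    // A concrete operation only fills in doit(); the outcome fields and
    // their accessors come from the base class.
    class ToyCollectForAllocation : public ToyOpWithAllocRequest {
      HeapWordToy _fake_heap[16];
     public:
      ToyCollectForAllocation(size_t word_size)
        : ToyOpWithAllocRequest(word_size) { }
      virtual void doit() {
        _pause_succeeded = true;  // pretend the pause ran
        _result = _fake_heap;     // and the allocation succeeded
      }
    };

    int main() {
      ToyCollectForAllocation op(16);
      op.doit();  // stands in for VMThread::execute(&op)
      if (op.pause_succeeded()) {
        std::printf("result = %p\n", (void*) op.result());
      }
      return 0;
    }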
class VM_G1CollectFull: public VM_GC_Operation {
public:
public:
VM_G1CollectFull(unsigned int gc_count_before,
unsigned int full_gc_count_before,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before, full_gc_count_before) {
_gc_cause = cause;
}
~VM_G1CollectFull() {}
virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
virtual void doit();
virtual const char* name() const {
@ -51,45 +65,28 @@ class VM_G1CollectFull: public VM_GC_Operation {
}
};

class VM_G1CollectForAllocation: public VM_GC_Operation {
private:
HeapWord* _res;
size_t _size; // size of object to be allocated
public:
VM_G1CollectForAllocation(size_t size, int gc_count_before)
: VM_GC_Operation(gc_count_before) {
_size = size;
_res = NULL;
}
~VM_G1CollectForAllocation() {}
class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
public:
VM_G1CollectForAllocation(unsigned int gc_count_before,
size_t word_size);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
virtual const char* name() const {
return "garbage-first collection to satisfy allocation";
}
HeapWord* result() { return _res; }
};

class VM_G1IncCollectionPause: public VM_GC_Operation {
class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
private:
bool _should_initiate_conc_mark;
double _target_pause_time_ms;
unsigned int _full_collections_completed_before;
public:
VM_G1IncCollectionPause(unsigned int gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before),
_full_collections_completed_before(0),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms) {
guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive",
target_pause_time_ms));

_gc_cause = cause;
}
GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
virtual void doit();
virtual void doit_epilogue();
@ -103,14 +100,9 @@ public:
class VM_CGC_Operation: public VM_Operation {
VoidClosure* _cl;
const char* _printGCMessage;
public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) :
_cl(cl),
_printGCMessage(printGCMsg)
{}

~VM_CGC_Operation() {}

public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
: _cl(cl), _printGCMessage(printGCMsg) { }
virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
virtual void doit();
virtual bool doit_prologue();

@ -770,8 +770,7 @@ void ReferenceProcessor::abandon_partial_discovery() {
// loop over the lists
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr(
"\nAbandoning %s discovered list",
gclog_or_tty->print_cr("\nAbandoning %s discovered list",
list_name(i));
}
abandon_partial_discovered_list(_discoveredSoftRefs[i]);
@ -1059,9 +1058,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
// During a multi-threaded discovery phase,
// each thread saves to its "own" list.
Thread* thr = Thread::current();
assert(thr->is_GC_task_thread(),
"Dubious cast from Thread* to WorkerThread*?");
id = ((WorkerThread*)thr)->id();
id = thr->as_Worker_thread()->id();
} else {
// single-threaded discovery, we save in round-robin
// fashion to each of the lists.
@ -1095,8 +1092,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
ShouldNotReachHere();
}
if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
id, list);
gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
}
return list;
}
@ -1135,6 +1131,11 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, current_head);
}

if (TraceReferenceGC) {
gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
@ -1239,8 +1240,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// Check assumption that an object is not potentially
// discovered twice except by concurrent collectors that potentially
// trace the same Reference object twice.
assert(UseConcMarkSweepGC,
"Only possible with an incremental-update concurrent collector");
assert(UseConcMarkSweepGC || UseG1GC,
"Only possible with a concurrent marking collector");
return true;
}
}
@ -1293,26 +1294,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
}
list->set_head(obj);
list->inc_length(1);
}

// In the MT discovery case, it is currently possible to see
// the following message multiple times if several threads
// discover a reference at about the same time. Only one will
// however have actually added it to the discovered queue.
// One could let add_to_discovered_list_mt() return an
// indication for success in queueing (by 1 thread) or
// failure (by all other threads), but I decided the extra
// code was not worth the effort for something that is
// only used for debugging support.
if (TraceReferenceGC) {
oop referent = java_lang_ref_Reference::referent(obj);
if (PrintGCDetails) {
gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
assert(referent->is_oop(), "Enqueued a bad referent");
}
assert(obj->is_oop(), "Enqueued a bad reference");
assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
return true;
}
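The comment above hinges on the claim that, when several threads race to discover the same reference, exactly one wins and the rest fall through. A standalone sketch of that claim-by-CAS pattern using std::atomic (toy names, not the DiscoveredList code):

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Each thread tries to claim the reference's "discovered" slot with a
    // CAS; exactly one succeeds, the others observe it was already taken.
    static std::atomic<int> discovered_slot(0);  // 0 == not yet discovered
    static std::atomic<int> winners(0);

    static void try_discover(int thread_id) {
      int expected = 0;
      if (discovered_slot.compare_exchange_strong(expected, thread_id)) {
        winners.fetch_add(1);  // this thread added it to its list
      }
      // Losers fall through: the reference is already on some thread's list.
    }

    int main() {
      std::thread t1(try_discover, 1);
      std::thread t2(try_discover, 2);
      std::thread t3(try_discover, 3);
      t1.join(); t2.join(); t3.join();
      std::printf("winners: %d (always 1)\n", winners.load());
      return 0;
    }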
@ -78,6 +78,8 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;

class WorkerThread;

// Class hierarchy
// - Thread
// - NamedThread
@ -289,6 +291,10 @@ class Thread: public ThreadShadow {
virtual bool is_Watcher_thread() const { return false; }
virtual bool is_ConcurrentGC_thread() const { return false; }
virtual bool is_Named_thread() const { return false; }
virtual bool is_Worker_thread() const { return false; }

// Casts
virtual WorkerThread* as_Worker_thread() const { return NULL; }

virtual char* name() const { return (char*)"Unknown thread"; }
@ -629,6 +635,13 @@ private:
uint _id;
public:
WorkerThread() : _id(0) { }
virtual bool is_Worker_thread() const { return true; }

virtual WorkerThread* as_Worker_thread() const {
assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
return (WorkerThread*) this;
}

void set_id(uint work_id) { _id = work_id; }
uint id() const { return _id; }
};
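The as_Worker_thread() addition replaces a bare C-style cast at each call site with a virtual checked cast that asserts the dynamic type before downcasting. A minimal sketch of the same pattern (hypothetical Toy* names; the const qualifier is dropped to keep the toy compilable):

    #include <cassert>
    #include <cstdio>

    class ToyWorkerThread;

    class ToyThread {
     public:
      virtual ~ToyThread() { }
      virtual bool is_Worker_thread() const { return false; }
      virtual ToyWorkerThread* as_Worker_thread() { return nullptr; }
    };

    class ToyWorkerThread : public ToyThread {
      unsigned int _id;
     public:
      explicit ToyWorkerThread(unsigned int id) : _id(id) { }
      virtual bool is_Worker_thread() const { return true; }
      // The type check lives with the cast itself, not at each call site.
      virtual ToyWorkerThread* as_Worker_thread() {
        assert(is_Worker_thread() && "dubious cast to ToyWorkerThread*");
        return this;
      }
      unsigned int id() const { return _id; }
    };

    int main() {
      ToyWorkerThread worker(7);
      ToyThread* thr = &worker;
      // The call site stays cast-free.
      std::printf("worker id: %u\n", thr->as_Worker_thread()->id());
      return 0;
    }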