8255019: Shenandoah: Split STW and concurrent mark into separate classes

Reviewed-by: rkennke, shade
Zhengyu Gu 2021-01-14 17:42:52 +00:00
parent aba3431c4e
commit da6bcf966a
20 changed files with 943 additions and 631 deletions
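
In outline, the change turns marking into a small class hierarchy: a state-light ShenandoahMark base whose task queues now live in (and are owned by) ShenandoahMarkingContext, with ShenandoahConcurrentMark driving concurrent cycles and the new ShenandoahSTWMark (used further down for full and degenerated GC) as a sibling. The following is a minimal, self-contained C++ sketch of that shape only; the types, members and printouts are simplified stand-ins, not HotSpot code.

    #include <cstdio>
    #include <vector>

    // Stand-in for ShenandoahObjToScanQueueSet; the real type manages per-worker task queues.
    struct ObjToScanQueueSet {
      std::vector<int> queues;
    };

    // The marking context now owns the task queues (see the shenandoahMarkingContext changes),
    // so short-lived mark objects can share them across concurrent, degenerated and full GC.
    struct MarkingContext {
      ObjToScanQueueSet task_queues;
      explicit MarkingContext(unsigned max_queues) : task_queues{std::vector<int>(max_queues)} {}
    };

    // Base class: keeps no marking state of its own; state lives in the queues,
    // the mark bitmap and (for concurrent mark) the SATB buffers.
    class Mark {
    protected:
      ObjToScanQueueSet* const _task_queues;
      explicit Mark(MarkingContext& ctx) : _task_queues(&ctx.task_queues) {}
    public:
      ObjToScanQueueSet* task_queues() const { return _task_queues; }
    };

    // Concurrent marking: STW root scan, concurrent root scan/marking, STW finish.
    class ConcurrentMark : public Mark {
    public:
      explicit ConcurrentMark(MarkingContext& ctx) : Mark(ctx) {}
      void mark_stw_roots()        { std::puts("init-mark pause: scan STW roots"); }
      void mark_concurrent_roots() { std::puts("concurrent: scan remaining roots"); }
      void concurrent_mark()       { std::puts("concurrent: drain queues and SATB"); }
      void finish_mark()           { std::puts("final-mark pause: finish queues"); }
    };

    // STW marking, used by full GC and by degenerated cycles that bypass concurrent mark.
    class STWMark : public Mark {
      bool _full_gc;
    public:
      STWMark(MarkingContext& ctx, bool full_gc) : Mark(ctx), _full_gc(full_gc) {}
      void mark() { std::puts(_full_gc ? "full GC: mark heap at a safepoint"
                                       : "degenerated GC: mark heap at a safepoint"); }
    };

    int main() {
      MarkingContext ctx(4);     // queues sized for the maximum number of workers
      ConcurrentMark cm(ctx);    // stack-allocated per cycle, as in the new code
      cm.mark_stw_roots();
      cm.mark_concurrent_roots();
      cm.concurrent_mark();
      cm.finish_mark();
      STWMark stw(ctx, /*full_gc=*/ true);
      stw.mark();
    }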


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,13 +35,14 @@
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
@ -51,63 +52,6 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
ShenandoahObjToScanQueue* _queue;
ShenandoahHeap* _heap;
ShenandoahMarkingContext* const _mark_context;
template <class T>
inline void do_oop_work(T* p) {
ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context, false);
}
public:
ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()) {};
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
MetadataVisitingOopIterateClosure(rp),
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()),
_weak(false)
{ }
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
ShenandoahRootScanner* _rp;
public:
ShenandoahInitMarkRootsTask(ShenandoahRootScanner* rp) :
AbstractGangTask("Shenandoah Init Mark Roots"),
_rp(rp) {
}
void work(uint worker_id) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
ShenandoahObjToScanQueue* q = queues->queue(worker_id);
ShenandoahInitMarkRootsClosure mark_cl(q);
do_work(heap, &mark_cl, worker_id);
}
private:
void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
_rp->roots_do(worker_id, oops);
}
};
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
@ -138,8 +82,8 @@ public:
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
ShenandoahConcurrentMark* _cm;
TaskTerminator* _terminator;
ShenandoahConcurrentMark* const _cm;
TaskTerminator* const _terminator;
public:
ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
@ -192,51 +136,6 @@ public:
}
};
// Process concurrent roots at safepoints
template <typename T>
class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
private:
ShenandoahConcurrentRootScanner<false /* concurrent */> _rs;
ShenandoahConcurrentMark* const _cm;
ShenandoahReferenceProcessor* _rp;
public:
ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
ShenandoahPhaseTimings::Phase phase,
uint nworkers);
void work(uint worker_id);
};
template <typename T>
ShenandoahProcessConcurrentRootsTask<T>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
ShenandoahPhaseTimings::Phase phase,
uint nworkers) :
AbstractGangTask("Shenandoah Process Concurrent Roots"),
_rs(nworkers, phase),
_cm(cm),
_rp(ShenandoahHeap::heap()->ref_processor()) {
}
template <typename T>
void ShenandoahProcessConcurrentRootsTask<T>::work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
T cl(q, _rp);
_rs.oops_do(&cl, worker_id);
}
class ShenandoahClaimThreadClosure : public ThreadClosure {
private:
const uintx _claim_token;
public:
ShenandoahClaimThreadClosure() :
_claim_token(Threads::thread_claim_token()) {}
virtual void do_thread(Thread* thread) {
thread->claim_threads_do(false /*is_par*/, _claim_token);
}
};
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
ShenandoahConcurrentMark* _cm;
@ -246,13 +145,6 @@ private:
public:
ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
AbstractGangTask("Shenandoah Final Mark"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
// Full GC does not need to remark threads and drain SATB buffers, but we need to claim the
// threads - it requires a StrongRootsScope around the task.
if (ShenandoahHeap::heap()->is_full_gc_in_progress()) {
ShenandoahClaimThreadClosure tc;
Threads::threads_do(&tc);
}
}
void work(uint worker_id) {
@ -261,13 +153,15 @@ public:
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahReferenceProcessor* rp = heap->ref_processor();
if (!heap->is_full_gc_in_progress()) {
// First drain remaining SATB buffers.
{
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
ShenandoahSATBBufferClosure cl(q);
SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) {}
assert(!heap->has_forwarded_objects(), "Not expected");
bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
ShenandoahMarkRefsClosure mark_cl(q, rp);
MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
@ -285,27 +179,47 @@ public:
}
};
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
ShenandoahRootScanner _root_scanner;
ShenandoahObjToScanQueueSet* const _task_queues;
public:
ShenandoahInitMarkRootsTask(uint n_workers, ShenandoahObjToScanQueueSet* task_queues) :
AbstractGangTask("Shenandoah Init Mark Roots"),
_root_scanner(n_workers, ShenandoahPhaseTimings::scan_roots),
_task_queues(task_queues) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
}
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahGCPhase phase(root_phase);
void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
assert(_task_queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
ShenandoahReferenceProcessor* ref_processor = heap->ref_processor();
ref_processor->reset_thread_locals();
ref_processor->set_soft_reference_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
ShenandoahObjToScanQueue* q = _task_queues->queue(worker_id);
ShenandoahInitMarkRootsClosure mark_cl(q);
_root_scanner.roots_do(worker_id, &mark_cl);
}
};
WorkGang* workers = heap->workers();
ShenandoahConcurrentMark::ShenandoahConcurrentMark() :
ShenandoahMark() {}
void ShenandoahConcurrentMark::mark_stw_roots() {
assert(Thread::current()->is_VM_thread(), "Can only do this in VMThread");
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
ShenandoahGCPhase phase(ShenandoahPhaseTimings::scan_roots);
WorkGang* workers = ShenandoahHeap::heap()->workers();
uint nworkers = workers->active_workers();
assert(nworkers <= task_queues()->size(), "Just check");
ShenandoahRootScanner root_proc(nworkers, root_phase);
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
task_queues()->reserve(nworkers);
ShenandoahInitMarkRootsTask mark_roots(&root_proc);
ShenandoahInitMarkRootsTask mark_roots(nworkers, task_queues());
workers->run_task(&mark_roots);
}
@ -323,11 +237,13 @@ void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_p
DerivedPointerTable::clear();
#endif
uint nworkers = _heap->workers()->active_workers();
ShenandoahHeap* const heap = ShenandoahHeap::heap();
WorkGang* workers = heap->workers();
uint nworkers = workers->active_workers();
ShenandoahRootUpdater root_updater(nworkers, root_phase);
ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
_heap->workers()->run_task(&update_roots);
workers->run_task(&update_roots);
#if COMPILER2_OR_JVMCI
DerivedPointerTable::update_pointers();
@ -361,8 +277,8 @@ void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase
#if COMPILER2_OR_JVMCI
DerivedPointerTable::clear();
#endif
WorkGang* workers = _heap->workers();
ShenandoahHeap* const heap = ShenandoahHeap::heap();
WorkGang* workers = heap->workers();
bool is_par = workers->active_workers() > 1;
ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
@ -373,25 +289,11 @@ void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase
#endif
}
void ShenandoahConcurrentMark::initialize(uint workers) {
_heap = ShenandoahHeap::heap();
uint num_queues = MAX2(workers, 1U);
_task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);
for (uint i = 0; i < num_queues; ++i) {
ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
task_queue->initialize();
_task_queues->register_queue(i, task_queue);
}
}
// Mark concurrent roots during concurrent phases
class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
private:
SuspendibleThreadSetJoiner _sts_joiner;
ShenandoahConcurrentRootScanner<true /* concurrent */> _rs;
ShenandoahConcurrentRootScanner _root_scanner;
ShenandoahObjToScanQueueSet* const _queue_set;
ShenandoahReferenceProcessor* const _rp;
@ -408,7 +310,7 @@ ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahO
ShenandoahPhaseTimings::Phase phase,
uint nworkers) :
AbstractGangTask("Shenandoah Concurrent Mark Roots"),
_rs(nworkers, phase),
_root_scanner(nworkers, phase),
_queue_set(qs),
_rp(rp) {
assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
@ -418,23 +320,26 @@ void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
ShenandoahMarkRefsClosure cl(q, _rp);
_rs.oops_do(&cl, worker_id);
_root_scanner.roots_do(&cl, worker_id);
}
void ShenandoahConcurrentMark::mark_from_roots() {
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
void ShenandoahConcurrentMark::mark_concurrent_roots() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
assert(!heap->has_forwarded_objects(), "Not expected");
ShenandoahReferenceProcessor* rp = _heap->ref_processor();
WorkGang* workers = heap->workers();
ShenandoahReferenceProcessor* rp = heap->ref_processor();
task_queues()->reserve(workers->active_workers());
ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots, workers->active_workers());
task_queues()->reserve(nworkers);
{
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
// Use separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots, nworkers);
workers->run_task(&task);
}
}
void ShenandoahConcurrentMark::concurrent_mark() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
WorkGang* workers = heap->workers();
uint nworkers = workers->active_workers();
task_queues()->reserve(nworkers);
{
TaskTerminator terminator(nworkers, task_queues());
@ -442,28 +347,19 @@ void ShenandoahConcurrentMark::mark_from_roots() {
workers->run_task(&task);
}
assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
assert(task_queues()->is_empty() || heap->cancelled_gc(), "Should be empty when not cancelled");
}
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
void ShenandoahConcurrentMark::finish_mark() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
assert(Thread::current()->is_VM_thread(), "Must by VM Thread");
finish_mark_work();
assert(task_queues()->is_empty(), "Should be empty");
TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
uint nworkers = _heap->workers()->active_workers();
{
// Full GC does not execute concurrent cycle. Degenerated cycle may bypass concurrent cycle.
// In those cases, concurrent roots might not be scanned, scan them here. Ideally, this
// should piggyback to ShenandoahFinalMarkingTask, but it makes time tracking very hard.
// Given full GC and degenerated GC are rare, use a separate task.
if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
ShenandoahPhaseTimings::full_gc_scan_conc_roots :
ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
ShenandoahGCPhase gc_phase(phase);
ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase, nworkers);
_heap->workers()->run_task(&task);
}
void ShenandoahConcurrentMark::finish_mark_work() {
// Finally mark everything else we've got in our queues during the previous steps.
// It does two different things for concurrent vs. mark-compact GC:
// - For concurrent GC, it starts with empty task queues, drains the remaining
@ -471,161 +367,22 @@ void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
// - For mark-compact GC, it starts out with the task queues seeded by initial
// root scan, and completes the closure, thus marking through all live objects
// The implementation is the same, so it's shared here.
{
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_mark_finish_queues :
ShenandoahPhaseTimings::finish_queues);
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahGCPhase phase(ShenandoahPhaseTimings::finish_queues);
uint nworkers = heap->workers()->active_workers();
task_queues()->reserve(nworkers);
StrongRootsScope scope(nworkers);
TaskTerminator terminator(nworkers, task_queues());
ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
_heap->workers()->run_task(&task);
}
heap->workers()->run_task(&task);
assert(task_queues()->is_empty(), "Should be empty");
}
assert(task_queues()->is_empty(), "Should be empty");
TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
void ShenandoahConcurrentMark::cancel() {
// Clean up marking stacks.
ShenandoahObjToScanQueueSet* queues = task_queues();
queues->clear();
// Cancel SATB buffers.
ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}
ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
return _task_queues->queue(worker_id);
}
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor* rp,
bool strdedup) {
ShenandoahObjToScanQueue* q = get_queue(w);
ShenandoahLiveData* ld = _heap->get_liveness_cache(w);
// TODO: We can clean up this if we figure out how to do templated oop closures that
// play nice with specialized_oop_iterators.
if (_heap->unload_classes()) {
if (_heap->has_forwarded_objects()) {
if (strdedup) {
ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
}
} else {
if (strdedup) {
ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkRefsMetadataClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
}
}
} else {
if (_heap->has_forwarded_objects()) {
if (strdedup) {
ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkUpdateRefsClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
}
} else {
if (strdedup) {
ShenandoahMarkRefsDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkRefsClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
}
}
}
_heap->flush_liveness_cache(w);
}
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator) {
uintx stride = ShenandoahMarkLoopStride;
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahObjToScanQueueSet* queues = task_queues();
ShenandoahObjToScanQueue* q;
ShenandoahMarkTask t;
_heap->ref_processor()->set_mark_closure(worker_id, cl);
/*
* Process outstanding queues, if any.
*
* There can be more queues than workers. To deal with the imbalance, we claim
* extra queues first. Since marking can push new tasks into the queue associated
* with this worker id, we come back to process this queue in the normal loop.
*/
assert(queues->get_reserved() == heap->workers()->active_workers(),
"Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
q = queues->claim_next();
while (q != NULL) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}
for (uint i = 0; i < stride; i++) {
if (q->pop(t)) {
do_task<T>(q, cl, live_data, &t);
} else {
assert(q->is_empty(), "Must be empty");
q = queues->claim_next();
break;
}
}
}
q = get_queue(worker_id);
ShenandoahSATBBufferClosure drain_satb(q);
SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
/*
* Normal marking loop:
*/
while (true) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}
while (satb_mq_set.completed_buffers_num() > 0) {
satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
}
uint work = 0;
for (uint i = 0; i < stride; i++) {
if (q->pop(t) ||
queues->steal(worker_id, t)) {
do_task<T>(q, cl, live_data, &t);
work++;
} else {
break;
}
}
if (work == 0) {
// No work encountered in current stride, try to terminate.
// Need to leave the STS here otherwise it might block safepoints.
ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
ShenandoahTerminatorTerminator tt(heap);
if (terminator->offer_termination(&tt)) return;
}
}
clear();
ShenandoahReferenceProcessor* rp = ShenandoahHeap::heap()->ref_processor();
rp->abandon_partial_discovery();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,69 +27,38 @@
#include "gc/shared/taskqueue.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shenandoah/shenandoahMark.hpp"
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
class ShenandoahStrDedupQueue;
class ShenandoahReferenceProcessor;
class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
class ShenandoahConcurrentMark: public ShenandoahMark {
friend class ShenandoahConcurrentMarkingTask;
friend class ShenandoahFinalMarkingTask;
public:
ShenandoahConcurrentMark();
// When concurrent stack processing is not supported
void mark_stw_roots();
void mark_concurrent_roots();
// Concurrent mark
void concurrent_mark();
// Finish mark at a safepoint
void finish_mark();
static void cancel();
// TODO: where to put them
static void update_roots(ShenandoahPhaseTimings::Phase root_phase);
static void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase);
private:
ShenandoahHeap* _heap;
ShenandoahObjToScanQueueSet* _task_queues;
public:
void initialize(uint workers);
void cancel();
// ---------- Marking loop and tasks
//
private:
template <class T>
inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task);
template <class T>
inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array, bool weak);
template <class T>
inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow, bool weak);
inline void count_liveness(ShenandoahLiveData* live_data, oop obj);
template <class T, bool CANCELLABLE>
void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t);
template <bool CANCELLABLE>
void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor* rp, bool strdedup);
public:
void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor* rp,
bool cancellable, bool strdedup) {
if (cancellable) {
mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
} else {
mark_loop_prework<false>(worker_id, terminator, rp, strdedup);
}
}
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak);
void mark_from_roots();
void finish_mark_from_roots(bool full_gc);
void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
void update_roots(ShenandoahPhaseTimings::Phase root_phase);
void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase);
// ---------- Helpers
// Used from closures, need to be public
//
public:
ShenandoahObjToScanQueue* get_queue(uint worker_id);
ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }
void finish_mark_work();
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,12 +24,13 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
@ -397,6 +398,10 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
// Start initial mark under STW
heap->vmop_entry_init_mark();
// Concurrent mark roots
heap->entry_mark_roots();
if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;
// Continue concurrent mark
heap->entry_mark();
if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;
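
For orientation: cancellation during the new concurrent root-marking phase degenerates at the _degenerated_outside_cycle point (the degenerated cycle then redoes marking with ShenandoahSTWMark, as shown in the shenandoahHeap.cpp hunks below), while cancellation during heap marking degenerates at _degenerated_mark. A rough, self-contained sketch of that checkpointing, assuming a toy cancelled flag and a simplified degen-point record rather than the real control-thread plumbing:

    #include <cstdio>

    // Simplified model of the degeneration points referenced above; the real enum
    // lives in ShenandoahHeap and has more members.
    enum DegenPoint { _degenerated_outside_cycle, _degenerated_mark, _degenerated_none };

    static bool cancelled = false;                       // stands in for heap->cancelled_gc()
    static DegenPoint degen_request = _degenerated_none; // where a degenerated cycle would resume

    static bool check_cancellation_or_degen(DegenPoint point) {
      if (cancelled) { degen_request = point; return true; }
      return false;
    }

    // Sketch of the concurrent-cycle driver after this change: concurrent root
    // marking is a separate phase with its own checkpoint before heap marking.
    static void service_concurrent_normal_cycle() {
      std::puts("STW: init mark");
      std::puts("concurrent: mark roots");
      if (check_cancellation_or_degen(_degenerated_outside_cycle)) return;
      std::puts("concurrent: mark heap");
      if (check_cancellation_or_degen(_degenerated_mark)) return;
      std::puts("STW: final mark");
    }

    int main() {
      service_concurrent_normal_cycle();                 // normal path
      cancelled = true;
      service_concurrent_normal_cycle();                 // degenerates after root marking
      std::printf("degenerate at point %d\n", (int) degen_request);
    }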


@ -38,7 +38,7 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
@ -59,7 +59,7 @@
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
@ -249,7 +249,7 @@ jint ShenandoahHeap::initialize() {
"Cannot commit bitmap memory");
}
_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);
if (ShenandoahVerify) {
ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
@ -464,8 +464,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_shenandoah_policy(policy),
_heuristics(NULL),
_free_set(NULL),
_scm(new ShenandoahConcurrentMark()),
_full_gc(new ShenandoahMarkCompact()),
_pacer(NULL),
_verifier(NULL),
_phase_timings(NULL),
@ -613,9 +611,6 @@ void ShenandoahHeap::post_initialize() {
// Now, we will let WorkGang to initialize gclab when new worker is created.
_workers->set_initialize_gclab();
_scm->initialize(_max_workers);
_full_gc->initialize(_gc_timer);
_heuristics->initialize();
JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
@ -1606,10 +1601,16 @@ void ShenandoahHeap::op_init_mark() {
parallel_heap_region_iterate(&cl);
}
// Weak reference processing
ShenandoahReferenceProcessor* rp = ref_processor();
rp->reset_thread_locals();
rp->set_soft_reference_policy(soft_ref_policy()->should_clear_all_soft_refs());
// Make above changes visible to worker threads
OrderAccess::fence();
concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
ShenandoahConcurrentMark mark;
mark.mark_stw_roots();
if (ShenandoahPacing) {
pacer()->setup_for_mark();
@ -1623,8 +1624,14 @@ void ShenandoahHeap::op_init_mark() {
}
}
void ShenandoahHeap::op_mark_roots() {
ShenandoahConcurrentMark mark;
mark.mark_concurrent_roots();
}
void ShenandoahHeap::op_mark() {
concurrent_mark()->mark_from_roots();
ShenandoahConcurrentMark mark;
mark.concurrent_mark();
}
class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
@ -1678,17 +1685,75 @@ void ShenandoahHeap::op_final_mark() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
assert(!has_forwarded_objects(), "No forwarded objects on this path");
// It is critical that we
// evacuate roots right after finishing marking, so that we don't
// get unmarked objects in the roots.
if (!cancelled_gc()) {
concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
finish_mark();
prepare_evacuation();
} else {
// If this cycle was updating references, we need to keep the has_forwarded_objects
// flag on, for subsequent phases to deal with it.
ShenandoahConcurrentMark::cancel();
set_concurrent_mark_in_progress(false);
}
}
void ShenandoahHeap::op_conc_evac() {
ShenandoahEvacuationTask task(this, _collection_set, true);
workers()->run_task(&task);
}
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
ShenandoahUpdateRefsClosure _cl;
public:
ShenandoahUpdateThreadClosure();
void do_thread(Thread* thread);
};
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
HandshakeClosure("Shenandoah Update Thread Roots") {
}
void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
JavaThread* jt = thread->as_Java_thread();
ResourceMark rm;
jt->oops_do(&_cl, NULL);
}
}
void ShenandoahHeap::op_update_thread_roots() {
ShenandoahUpdateThreadClosure cl;
Handshake::execute(&cl);
}
void ShenandoahHeap::op_stw_evac() {
ShenandoahEvacuationTask task(this, _collection_set, false);
workers()->run_task(&task);
}
void ShenandoahHeap::op_updaterefs() {
update_heap_references(true);
}
void ShenandoahHeap::op_cleanup_early() {
free_set()->recycle_trash();
}
void ShenandoahHeap::op_cleanup_complete() {
free_set()->recycle_trash();
}
// Helpers
void ShenandoahHeap::finish_mark() {
assert(!cancelled_gc(), "Should not continue");
ShenandoahConcurrentMark mark;
mark.finish_mark();
// Marking is completed, deactivate SATB barrier
set_concurrent_mark_in_progress(false);
mark_complete_marking_context();
}
void ShenandoahHeap::prepare_evacuation() {
// Notify JVMTI that the tagmap table will need cleaning.
JvmtiTagMap::set_needs_cleaning();
@ -1765,7 +1830,7 @@ void ShenandoahHeap::op_final_mark() {
if (ShenandoahVerify) {
// If OOM while evacuating/updating of roots, there is no guarantee of their consistencies
if (!cancelled_gc()) {
// We only evacuate/update thread roots at this pause
// We only evacuate/update thread at this pause
verifier()->verify_roots_no_forwarded(ShenandoahRootVerifier::ThreadRoots);
}
verifier()->verify_during_evacuation();
@ -1779,64 +1844,6 @@ void ShenandoahHeap::op_final_mark() {
Universe::verify();
}
}
} else {
// If this cycle was updating references, we need to keep the has_forwarded_objects
// flag on, for subsequent phases to deal with it.
concurrent_mark()->cancel();
set_concurrent_mark_in_progress(false);
// Abandon reference processing right away: pre-cleaning must have failed.
ShenandoahReferenceProcessor* rp = ref_processor();
rp->abandon_partial_discovery();
}
}
void ShenandoahHeap::op_conc_evac() {
ShenandoahEvacuationTask task(this, _collection_set, true);
workers()->run_task(&task);
}
class ShenandoahUpdateThreadClosure : public HandshakeClosure {
private:
ShenandoahUpdateRefsClosure _cl;
public:
ShenandoahUpdateThreadClosure();
void do_thread(Thread* thread);
};
ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
HandshakeClosure("Shenandoah Update Thread Roots") {
}
void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
JavaThread* jt = thread->as_Java_thread();
ResourceMark rm;
jt->oops_do(&_cl, NULL);
}
}
void ShenandoahHeap::op_update_thread_roots() {
ShenandoahUpdateThreadClosure cl;
Handshake::execute(&cl);
}
void ShenandoahHeap::op_stw_evac() {
ShenandoahEvacuationTask task(this, _collection_set, false);
workers()->run_task(&task);
}
void ShenandoahHeap::op_updaterefs() {
update_heap_references(true);
}
void ShenandoahHeap::op_cleanup_early() {
free_set()->recycle_trash();
}
void ShenandoahHeap::op_cleanup_complete() {
free_set()->recycle_trash();
}
class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
@ -2142,7 +2149,9 @@ void ShenandoahHeap::op_full(GCCause::Cause cause) {
ShenandoahMetricsSnapshot metrics;
metrics.snap_before();
full_gc()->do_it(cause);
ShenandoahMarkCompact full_gc;
full_gc.initialize(_gc_timer);
full_gc.do_it(cause);
metrics.snap_after();
@ -2179,18 +2188,30 @@ void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
//
// Note that we can only do this for "outside-cycle" degens, otherwise we would risk
// changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
// Degenerated from concurrent mark roots, reset for STW mark
if (is_concurrent_mark_in_progress()) {
ShenandoahConcurrentMark::cancel();
set_concurrent_mark_in_progress(false);
}
set_unload_classes(heuristics()->can_unload_classes());
op_reset();
op_init_mark();
if (cancelled_gc()) {
op_degenerated_fail();
return;
// STW root scan
{
assert(!has_forwarded_objects(), "Should not have forwarded heap");
ShenandoahSTWMark mark(false /*full_gc*/);
mark.mark();
assert(!cancelled_gc(), "STW mark can not OOM");
}
case _degenerated_mark:
op_final_mark();
if (point == _degenerated_mark) {
finish_mark();
}
prepare_evacuation();
if (cancelled_gc()) {
op_degenerated_fail();
return;
@ -2787,7 +2808,7 @@ void ShenandoahHeap::op_final_updaterefs() {
}
if (is_degenerated_gc_in_progress()) {
concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
}
// Has to be done before cset is clear
@ -3039,6 +3060,21 @@ void ShenandoahHeap::entry_degenerated(int point) {
set_degenerated_gc_in_progress(false);
}
void ShenandoahHeap::entry_mark_roots() {
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
const char* msg = "Concurrent marking roots";
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
EventMark em("%s", msg);
ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
"concurrent marking roots");
try_inject_alloc_failure();
op_mark_roots();
}
void ShenandoahHeap::entry_mark() {
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,6 @@ class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMarkCompact;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
@ -56,11 +55,9 @@ class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahMarkCompact;
class ShenandoahMonitoringSupport;
class ShenandoahReferenceProcessor;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;
@ -389,6 +386,7 @@ public:
// Entry methods to normally concurrent GC operations. These set up logging, monitoring
// for concurrent operation.
void entry_reset();
void entry_mark_roots();
void entry_mark();
void entry_weak_refs();
void entry_weak_roots();
@ -414,6 +412,7 @@ private:
void op_degenerated_futile();
void op_reset();
void op_mark_roots();
void op_mark();
void op_weak_refs();
void op_weak_roots();
@ -437,30 +436,31 @@ private:
const char* conc_mark_event_message() const;
const char* degen_event_message(ShenandoahDegenPoint point) const;
// Helpers
void finish_mark();
void prepare_evacuation();
// ---------- GC subsystems
//
// Mark support
private:
ShenandoahControlThread* _control_thread;
ShenandoahCollectorPolicy* _shenandoah_policy;
ShenandoahMode* _gc_mode;
ShenandoahHeuristics* _heuristics;
ShenandoahFreeSet* _free_set;
ShenandoahConcurrentMark* _scm;
ShenandoahMarkCompact* _full_gc;
ShenandoahPacer* _pacer;
ShenandoahVerifier* _verifier;
ShenandoahPhaseTimings* _phase_timings;
ShenandoahControlThread* control_thread() { return _control_thread; }
ShenandoahMarkCompact* full_gc() { return _full_gc; }
public:
ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
ShenandoahMode* mode() const { return _gc_mode; }
ShenandoahHeuristics* heuristics() const { return _heuristics; }
ShenandoahFreeSet* free_set() const { return _free_set; }
ShenandoahConcurrentMark* concurrent_mark() { return _scm; }
ShenandoahPacer* pacer() const { return _pacer; }
ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }


@ -0,0 +1,200 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "memory/iterator.inline.hpp"
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
MetadataVisitingOopIterateClosure(rp),
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()),
_weak(false)
{ }
ShenandoahInitMarkRootsClosure::ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
_queue(q),
_heap(ShenandoahHeap::heap()),
_mark_context(_heap->marking_context()) {
}
ShenandoahMark::ShenandoahMark() :
_task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
void ShenandoahMark::clear() {
// Clean up marking stacks.
ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
queues->clear();
// Cancel SATB buffers.
ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}
template <bool CANCELLABLE>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp,
bool strdedup) {
ShenandoahObjToScanQueue* q = get_queue(w);
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahLiveData* ld = heap->get_liveness_cache(w);
// TODO: We can clean up this if we figure out how to do templated oop closures that
// play nice with specialized_oop_iterators.
if (heap->unload_classes()) {
if (heap->has_forwarded_objects()) {
if (strdedup) {
ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
}
} else {
if (strdedup) {
ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkRefsMetadataClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
}
}
} else {
if (heap->has_forwarded_objects()) {
if (strdedup) {
ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkUpdateRefsClosure cl(q, rp);
mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
}
} else {
if (strdedup) {
ShenandoahMarkRefsDedupClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
} else {
ShenandoahMarkRefsClosure cl(q, rp);
mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
}
}
}
heap->flush_liveness_cache(w);
}
template <class T, bool CANCELLABLE>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator) {
uintx stride = ShenandoahMarkLoopStride;
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahObjToScanQueueSet* queues = task_queues();
ShenandoahObjToScanQueue* q;
ShenandoahMarkTask t;
heap->ref_processor()->set_mark_closure(worker_id, cl);
/*
* Process outstanding queues, if any.
*
* There can be more queues than workers. To deal with the imbalance, we claim
* extra queues first. Since marking can push new tasks into the queue associated
* with this worker id, we come back to process this queue in the normal loop.
*/
assert(queues->get_reserved() == heap->workers()->active_workers(),
"Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
q = queues->claim_next();
while (q != NULL) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}
for (uint i = 0; i < stride; i++) {
if (q->pop(t)) {
do_task<T>(q, cl, live_data, &t);
} else {
assert(q->is_empty(), "Must be empty");
q = queues->claim_next();
break;
}
}
}
q = get_queue(worker_id);
ShenandoahSATBBufferClosure drain_satb(q);
SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
/*
* Normal marking loop:
*/
while (true) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}
while (satb_mq_set.completed_buffers_num() > 0) {
satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
}
uint work = 0;
for (uint i = 0; i < stride; i++) {
if (q->pop(t) ||
queues->steal(worker_id, t)) {
do_task<T>(q, cl, live_data, &t);
work++;
} else {
break;
}
}
if (work == 0) {
// No work encountered in current stride, try to terminate.
// Need to leave the STS here otherwise it might block safepoints.
ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
ShenandoahTerminatorTerminator tt(heap);
if (terminator->offer_termination(&tt)) return;
}
}
}
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
bool cancellable, bool strdedup) {
if (cancellable) {
mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
} else {
mark_loop_prework<false>(worker_id, terminator, rp, strdedup);
}
}
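
mark_loop/mark_loop_prework above keep the hot loop branch-free by resolving runtime flags once: the closure type and the CANCELLABLE flag become template parameters, so each combination is compiled separately. A standalone sketch of that dispatch pattern, with stand-in closure names that only echo the real ones:

    #include <cstdio>

    // Stand-ins for two of the specialized closure types selected in mark_loop_prework.
    struct MarkRefsClosure       { void apply(int obj) { std::printf("mark %d\n", obj); } };
    struct MarkUpdateRefsClosure { void apply(int obj) { std::printf("update+mark %d\n", obj); } };

    // The hot loop is instantiated once per (closure type, CANCELLABLE) combination,
    // so the per-task work carries no branches on those flags.
    template <class Closure, bool CANCELLABLE>
    void mark_loop_work(Closure* cl, bool (*cancelled)()) {
      for (int task = 0; task < 3; task++) {
        if (CANCELLABLE && cancelled()) return;   // folded away when CANCELLABLE is false
        cl->apply(task);
      }
    }

    // Runtime flags are resolved up front by picking the right instantiation.
    template <bool CANCELLABLE>
    void mark_loop_prework(bool has_forwarded, bool (*cancelled)()) {
      if (has_forwarded) {
        MarkUpdateRefsClosure cl;
        mark_loop_work<MarkUpdateRefsClosure, CANCELLABLE>(&cl, cancelled);
      } else {
        MarkRefsClosure cl;
        mark_loop_work<MarkRefsClosure, CANCELLABLE>(&cl, cancelled);
      }
    }

    static bool never_cancelled() { return false; }

    int main() {
      mark_loop_prework<true>(false, never_cancelled);   // concurrent mark: cancellable
      mark_loop_prework<false>(true, never_cancelled);   // STW mark: not cancellable
    }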


@ -0,0 +1,98 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP
#include "gc/shared/taskTerminator.hpp"
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
class ShenandoahCMDrainMarkingStackClosure;
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
ShenandoahObjToScanQueue* const _queue;
ShenandoahHeap* const _heap;
ShenandoahMarkingContext* const _mark_context;
template <class T>
inline void do_oop_work(T* p);
public:
ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q);
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
};
// Base class for mark
// Mark class does not maintain states. Instead, mark states are
// maintained by task queues, mark bitmap and SATB buffers (concurrent mark)
class ShenandoahMark: public StackObj {
friend class ShenandoahCMDrainMarkingStackClosure;
protected:
ShenandoahObjToScanQueueSet* const _task_queues;
protected:
ShenandoahMark();
public:
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak);
static void clear();
// Helpers
inline ShenandoahObjToScanQueueSet* task_queues() const;
inline ShenandoahObjToScanQueue* get_queue(uint index) const;
// ---------- Marking loop and tasks
private:
template <class T>
inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task);
template <class T>
inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array, bool weak);
template <class T>
inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow, bool weak);
inline void count_liveness(ShenandoahLiveData* live_data, oop obj);
template <class T, bool CANCELLABLE>
void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t);
template <bool CANCELLABLE>
void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor *rp, bool strdedup);
protected:
void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
bool cancellable, bool strdedup);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,13 +22,13 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
@ -39,12 +39,17 @@
#include "utilities/powerOfTwo.hpp"
template <class T>
void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task) {
void ShenandoahInitMarkRootsClosure::do_oop_work(T* p) {
ShenandoahMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context, false);
}
template <class T>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task) {
oop obj = task->obj();
shenandoah_assert_not_forwarded(NULL, obj);
shenandoah_assert_marked(NULL, obj);
shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());
shenandoah_assert_not_in_cset_except(NULL, obj, ShenandoahHeap::heap()->cancelled_gc());
// Are we in weak subgraph scan?
bool weak = task->is_weak();
@ -77,9 +82,10 @@ void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, Shena
}
}
inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
size_t region_idx = _heap->heap_region_index_containing(obj);
ShenandoahHeapRegion* region = _heap->get_region(region_idx);
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
size_t region_idx = heap->heap_region_index_containing(obj);
ShenandoahHeapRegion* region = heap->get_region(region_idx);
size_t size = obj->size();
if (!region->is_humongous_start()) {
@ -99,7 +105,7 @@ inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_da
size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
for (size_t i = region_idx; i < region_idx + num_regions; i++) {
ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
ShenandoahHeapRegion* chain_reg = heap->get_region(i);
assert(chain_reg->is_humongous(), "Expecting a humongous region");
chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
}
@ -107,7 +113,7 @@ inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_da
}
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
assert(obj->is_objArray(), "expect object array");
objArrayOop array = objArrayOop(obj);
int len = array->length();
@ -174,7 +180,7 @@ inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScan
}
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
assert(obj->is_objArray(), "expect object array");
objArrayOop array = objArrayOop(obj);
@ -229,13 +235,13 @@ public:
void do_buffer_impl(void **buffer, size_t size) {
for (size_t i = 0; i < size; ++i) {
oop *p = (oop *) &buffer[i];
ShenandoahConcurrentMark::mark_through_ref<oop, NONE, STRING_DEDUP>(p, _heap, _queue, _mark_context, false);
ShenandoahMark::mark_through_ref<oop, NONE, STRING_DEDUP>(p, _heap, _queue, _mark_context, false);
}
}
};
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
T o = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(o)) {
oop obj = CompressedOops::decode_not_null(o);
@ -288,4 +294,11 @@ inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* hea
}
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
return _task_queues;
}
ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
return _task_queues->queue(index);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
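
do_chunked_array_start/do_chunked_array, now on ShenandoahMark, let several workers share one large object array by splitting it into chunk tasks that go back onto the queues. A simplified, self-contained sketch of the splitting idea; it uses plain index ranges and an arbitrary CHUNK threshold rather than the real chunk/pow encoding:

    #include <cstdio>
    #include <vector>

    // A pending scan of array elements [from, to), standing in for the chunked-array
    // form of ShenandoahMarkTask.
    struct ArrayChunkTask { int from; int to; };

    static const int CHUNK = 4;   // the real code sizes chunks from ObjArrayMarkingStride etc.

    // Scan small ranges directly; split big ranges and push one half back on the queue,
    // so other workers can steal it while this worker keeps going on the other half.
    static void scan_array_range(std::vector<ArrayChunkTask>& queue, int from, int to) {
      while (to - from > CHUNK) {
        int mid = from + (to - from) / 2;
        queue.push_back({mid, to});              // defer the upper half as a new task
        to = mid;                                // keep splitting the lower half
      }
      for (int i = from; i < to; i++) {
        std::printf("visit element %d\n", i);    // the real code marks array->obj_at(i)
      }
    }

    int main() {
      std::vector<ArrayChunkTask> queue;
      queue.push_back({0, 19});                  // whole array as one initial task
      while (!queue.empty()) {
        ArrayChunkTask t = queue.back();
        queue.pop_back();
        scan_array_range(queue, t.from, t.to);
      }
    }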


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,11 +28,12 @@
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@ -40,6 +41,7 @@
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
@ -114,14 +116,14 @@ void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
// b. Cancel concurrent mark, if in progress
if (heap->is_concurrent_mark_in_progress()) {
heap->concurrent_mark()->cancel();
ShenandoahConcurrentMark::cancel();
heap->set_concurrent_mark_in_progress(false);
}
assert(!heap->is_concurrent_mark_in_progress(), "sanity");
// c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
if (has_forwarded_objects) {
heap->concurrent_mark()->update_roots(ShenandoahPhaseTimings::full_gc_update_roots);
ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::full_gc_update_roots);
}
// d. Reset the bitmaps for new marking
@ -238,17 +240,14 @@ void ShenandoahMarkCompact::phase1_mark_heap() {
ShenandoahPrepareForMarkClosure cl;
heap->heap_region_iterate(&cl);
ShenandoahConcurrentMark* cm = heap->concurrent_mark();
heap->set_unload_classes(heap->heuristics()->can_unload_classes());
ShenandoahReferenceProcessor* rp = heap->ref_processor();
// enable ("weak") refs discovery
rp->set_soft_reference_policy(true); // forcefully purge all soft references
cm->mark_roots(ShenandoahPhaseTimings::full_gc_scan_roots);
cm->finish_mark_from_roots(/* full_gc = */ true);
heap->mark_complete_marking_context();
ShenandoahSTWMark mark(true /*full_gc*/);
mark.mark();
heap->parallel_cleaning(true /* full_gc */);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@
class PreservedMarksSet;
class ShenandoahMarkCompact : public CHeapObj<mtGC> {
class ShenandoahMarkCompact : public StackObj {
friend class ShenandoahPrepareForCompactionObjectClosure;
private:
GCTimer* _gc_timer;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Red Hat, Inc. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,13 +27,30 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "utilities/stack.inline.hpp"
ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) :
ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions, uint max_queues) :
_mark_bit_map(heap_region, bitmap_region),
_top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
_top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)),
_top_at_mark_starts(_top_at_mark_starts_base -
((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) {
((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())),
_task_queues(new ShenandoahObjToScanQueueSet(max_queues)) {
assert(max_queues > 0, "At least one queue");
for (uint i = 0; i < max_queues; ++i) {
ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
task_queue->initialize();
_task_queues->register_queue(i, task_queue);
}
}
ShenandoahMarkingContext::~ShenandoahMarkingContext() {
for (uint i = 0; i < _task_queues->size(); ++i) {
ShenandoahObjToScanQueue* q = _task_queues->queue(i);
delete q;
}
delete _task_queues;
}
bool ShenandoahMarkingContext::is_bitmap_clear() const {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,14 @@
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
class ShenandoahObjToScanQueueSet;
/**
* Encapsulate a marking bitmap with the top-at-mark-start and top-bitmaps array.
*/
class ShenandoahMarkingContext : public CHeapObj<mtGC> {
private:
// Marking bitmap
ShenandoahMarkBitMap _mark_bit_map;
HeapWord** const _top_bitmaps;
@ -44,8 +47,12 @@ private:
ShenandoahSharedFlag _is_complete;
// Marking task queues
ShenandoahObjToScanQueueSet* _task_queues;
public:
ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions);
ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions, uint max_queues);
~ShenandoahMarkingContext();
/*
* Marks the object. Returns true if the object has not been marked before and has
@@ -80,6 +87,8 @@ public:
void mark_complete();
void mark_incomplete();
// Task queues
ShenandoahObjToScanQueueSet* task_queues() const { return _task_queues; }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP
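With the queue set owned by the marking context, callers would size it at heap initialization and fetch it through the context. A minimal sketch under those assumptions (the heap-initialization side is not in this excerpt; heap->max_workers() and the local names are assumptions):

// Hedged sketch: construct the context with one queue per potential worker.
ShenandoahMarkingContext* ctx =
    new ShenandoahMarkingContext(heap_region, bitmap_region, num_regions,
                                 heap->max_workers());  // max_queues; source of the count is assumed
// Marking code then shares the queues through the context:
ShenandoahObjToScanQueueSet* queues = heap->marking_context()->task_queues();
queues->reserve(heap->workers()->active_workers());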

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,11 @@
#define SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
inline void ShenandoahMarkRefsSuperClosure::work(T *p) {
ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context, _weak);
ShenandoahMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context, _weak);
}
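The closure now routes through the shared base class instead of the concurrent-mark front end. A rough sketch of the entry point it assumes (shenandoahMark.hpp is not shown in this excerpt, so the declaration below is an approximation inferred from the call above):

// Approximate declaration assumed by ShenandoahMarkRefsSuperClosure::work():
class ShenandoahMark {
public:
  template <class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
  static inline void mark_through_ref(T* p,
                                      ShenandoahHeap* heap,
                                      ShenandoahObjToScanQueue* q,
                                      ShenandoahMarkingContext* const mark_context,
                                      bool weak);
};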
template <class T>

View file

@@ -100,13 +100,13 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
case scan_roots:
case update_roots:
case final_update_refs_roots:
case full_gc_scan_roots:
case full_gc_mark:
case full_gc_update_roots:
case full_gc_adjust_roots:
case degen_gc_scan_conc_roots:
case degen_gc_stw_mark:
case degen_gc_mark:
case degen_gc_update_roots:
case full_gc_weakrefs:
case full_gc_scan_conc_roots:
case full_gc_purge_class_unload:
case full_gc_purge_weak_par:
case degen_gc_weakrefs:
@@ -130,7 +130,7 @@ bool ShenandoahPhaseTimings::is_root_work_phase(Phase phase) {
case init_evac:
case final_update_refs_roots:
case degen_gc_update_roots:
case full_gc_scan_roots:
case full_gc_mark:
case full_gc_update_roots:
case full_gc_adjust_roots:
return true;

View file

@@ -43,6 +43,7 @@ class outputStream;
f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \
f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \
f(CNT_PREFIX ## WeakRefProc, DESC_PREFIX "Weak References") \
f(CNT_PREFIX ## ParallelMark, DESC_PREFIX "Parallel Mark") \
// end
#define SHENANDOAH_PHASE_DO(f) \
@@ -116,8 +117,10 @@ class outputStream;
\
f(degen_gc_gross, "Pause Degenerated GC (G)") \
f(degen_gc, "Pause Degenerated GC (N)") \
f(degen_gc_scan_conc_roots, " Degen Mark Roots") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_conc_mark_, " DM: ", f) \
f(degen_gc_stw_mark, " Degen STW Mark") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_stw_mark_, " DSM: ", f) \
f(degen_gc_mark, " Degen Mark") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_mark_, " DM: ", f) \
f(degen_gc_weakrefs, " Weak References") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_weakrefs_p_, " WRP: ", f) \
f(degen_gc_purge, " System Purge") \
@@ -135,12 +138,8 @@ class outputStream;
f(full_gc_prepare, " Prepare") \
f(full_gc_update_roots, " Update Roots") \
SHENANDOAH_PAR_PHASE_DO(full_gc_update_roots_, " FU: ", f) \
f(full_gc_scan_roots, " Scan Roots") \
SHENANDOAH_PAR_PHASE_DO(full_gc_scan_roots_, " FS: ", f) \
f(full_gc_scan_conc_roots, " Scan Concurrent Roots") \
SHENANDOAH_PAR_PHASE_DO(full_gc_scan_conc_roots_, " FCS: ", f) \
f(full_gc_mark, " Mark") \
f(full_gc_mark_finish_queues, " Finish Queues") \
SHENANDOAH_PAR_PHASE_DO(full_gc_mark_, " FM: ", f) \
f(full_gc_weakrefs, " Weak References") \
SHENANDOAH_PAR_PHASE_DO(full_gc_weakrefs_p_, " WRP: ", f) \
f(full_gc_purge, " System Purge") \

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -131,7 +131,6 @@ ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase p
_heap(ShenandoahHeap::heap()),
_phase(phase),
_worker_phase(phase) {
assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
}
ShenandoahRootScanner::ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
@@ -157,6 +156,53 @@ void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops, CodeBlobC
_thread_roots.threads_do(&tc_cl, worker_id);
}
ShenandoahSTWRootScanner::ShenandoahSTWRootScanner(ShenandoahPhaseTimings::Phase phase) :
ShenandoahRootProcessor(phase),
_thread_roots(phase, ShenandoahHeap::heap()->workers()->active_workers() > 1),
_code_roots(phase),
_cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
_vm_roots(phase),
_dedup_roots(phase),
_unload_classes(ShenandoahHeap::heap()->unload_classes()) {
}
ShenandoahConcurrentRootScanner::ShenandoahConcurrentRootScanner(uint n_workers,
ShenandoahPhaseTimings::Phase phase) :
ShenandoahRootProcessor(phase),
_vm_roots(phase),
_cld_roots(phase, n_workers),
_codecache_snapshot(NULL),
_phase(phase) {
if (!ShenandoahHeap::heap()->unload_classes()) {
CodeCache_lock->lock_without_safepoint_check();
_codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
}
assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
}
ShenandoahConcurrentRootScanner::~ShenandoahConcurrentRootScanner() {
if (!ShenandoahHeap::heap()->unload_classes()) {
ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
CodeCache_lock->unlock();
}
}
void ShenandoahConcurrentRootScanner::roots_do(OopClosure* oops, uint worker_id) {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
_vm_roots.oops_do(oops, worker_id);
if (!heap->unload_classes()) {
AlwaysTrueClosure always_true;
_cld_roots.cld_do(&clds_cl, worker_id);
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
_codecache_snapshot->parallel_blobs_do(&blobs);
} else {
_cld_roots.always_strong_cld_do(&clds_cl, worker_id);
}
}
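A hedged sketch of how a concurrent-mark worker would drive the now non-templated scanner (the concurrent-mark task lives in shenandoahConcurrentMark.cpp, outside this excerpt; the phase name and the closure type are assumptions):

// All names below are assumptions; the scanner is created once and shared by the workers.
uint nworkers = ShenandoahHeap::heap()->workers()->active_workers();
ShenandoahConcurrentRootScanner root_scanner(nworkers, ShenandoahPhaseTimings::conc_mark_roots /* assumed phase */);
// ... then, inside each worker's work(uint worker_id):
ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
ShenandoahMarkRefsClosure cl(queues->queue(worker_id), ShenandoahHeap::heap()->ref_processor());  // assumed closure type
root_scanner.roots_do(&cl, worker_id);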
ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
ShenandoahPhaseTimings::Phase phase) :
ShenandoahRootProcessor(phase),

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,11 +163,28 @@ private:
void roots_do(uint worker_id, OopClosure* oops, CodeBlobClosure* code, ThreadClosure* tc = NULL);
};
template <bool CONCURRENT>
class ShenandoahConcurrentRootScanner {
// STW root scanner
class ShenandoahSTWRootScanner : public ShenandoahRootProcessor {
private:
ShenandoahVMRoots<CONCURRENT> _vm_roots;
ShenandoahClassLoaderDataRoots<CONCURRENT, false /* single-threaded*/>
ShenandoahThreadRoots _thread_roots;
ShenandoahCodeCacheRoots _code_roots;
ShenandoahClassLoaderDataRoots<false /*concurrent*/, false /* single_thread*/>
_cld_roots;
ShenandoahVMRoots<false /*concurrent*/>
_vm_roots;
ShenandoahStringDedupRoots _dedup_roots;
const bool _unload_classes;
public:
ShenandoahSTWRootScanner(ShenandoahPhaseTimings::Phase phase);
template <typename T>
void roots_do(T* oops, uint worker_id);
};
class ShenandoahConcurrentRootScanner : public ShenandoahRootProcessor {
private:
ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /* single-threaded*/>
_cld_roots;
ShenandoahNMethodTableSnapshot* _codecache_snapshot;
ShenandoahPhaseTimings::Phase _phase;
@@ -176,7 +193,7 @@ public:
ShenandoahConcurrentRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase);
~ShenandoahConcurrentRootScanner();
void oops_do(OopClosure* oops, uint worker_id);
void roots_do(OopClosure* oops, uint worker_id);
};
// This scanner is only for SH::object_iteration() and only supports single-threaded

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,6 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::cld_do_impl(Cl
}
}
template <bool CONCURRENT, bool SINGLE_THREADED>
void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::always_strong_cld_do(CLDClosure* clds, uint worker_id) {
cld_do_impl(&ClassLoaderDataGraph::always_strong_cld_do, clds, worker_id);
@@ -144,48 +143,33 @@ public:
}
};
template <bool CONCURRENT>
ShenandoahConcurrentRootScanner<CONCURRENT>::ShenandoahConcurrentRootScanner(uint n_workers,
ShenandoahPhaseTimings::Phase phase) :
_vm_roots(phase),
_cld_roots(phase, n_workers),
_codecache_snapshot(NULL),
_phase(phase) {
if (!ShenandoahHeap::heap()->unload_classes()) {
if (CONCURRENT) {
CodeCache_lock->lock_without_safepoint_check();
// The rationale for selecting the roots to scan is as follows:
// a. With unload_classes = true, we only want to scan the actual strong roots from the
// code cache. This will allow us to identify the dead classes, unload them, *and*
// invalidate the relevant code cache blobs. This could be only done together with
// class unloading.
// b. With unload_classes = false, we have to nominally retain all the references from code
// cache, because there could be the case of embedded class/oop in the generated code,
// which we will never visit during mark. Without code cache invalidation, as in (a),
// we risk executing that code cache blob, and crashing.
template <typename T>
void ShenandoahSTWRootScanner::roots_do(T* oops, uint worker_id) {
MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
ResourceMark rm;
if (_unload_classes) {
_thread_roots.oops_do(oops, &blobs_cl, worker_id);
_cld_roots.always_strong_cld_do(&clds, worker_id);
} else {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
AlwaysTrueClosure always_true;
_thread_roots.oops_do(oops, NULL, worker_id);
_code_roots.code_blobs_do(&blobs_cl, worker_id);
_cld_roots.cld_do(&clds, worker_id);
_dedup_roots.oops_do(&always_true, oops, worker_id);
}
_codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
}
assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
}
template <bool CONCURRENT>
ShenandoahConcurrentRootScanner<CONCURRENT>::~ShenandoahConcurrentRootScanner() {
if (!ShenandoahHeap::heap()->unload_classes()) {
ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
if (CONCURRENT) {
CodeCache_lock->unlock();
}
}
}
template <bool CONCURRENT>
void ShenandoahConcurrentRootScanner<CONCURRENT>::oops_do(OopClosure* oops, uint worker_id) {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
_vm_roots.oops_do(oops, worker_id);
if (!heap->unload_classes()) {
_cld_roots.cld_do(&clds_cl, worker_id);
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
_codecache_snapshot->parallel_blobs_do(&blobs);
} else {
_cld_roots.always_strong_cld_do(&clds_cl, worker_id);
}
_vm_roots.oops_do<T>(oops, worker_id);
}
template <typename IsAlive, typename KeepAlive>

View file

@@ -0,0 +1,112 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
class ShenandoahSTWMarkTask : public AbstractGangTask {
private:
ShenandoahSTWMark* const _mark;
public:
ShenandoahSTWMarkTask(ShenandoahSTWMark* mark);
void work(uint worker_id);
};
ShenandoahSTWMarkTask::ShenandoahSTWMarkTask(ShenandoahSTWMark* mark) :
AbstractGangTask("Shenandoah STW mark"),
_mark(mark) {
}
void ShenandoahSTWMarkTask::work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
_mark->mark_roots(worker_id);
_mark->finish_mark(worker_id);
}
ShenandoahSTWMark::ShenandoahSTWMark(bool full_gc) :
ShenandoahMark(),
_root_scanner(full_gc ? ShenandoahPhaseTimings::full_gc_mark : ShenandoahPhaseTimings::degen_gc_stw_mark),
_terminator(ShenandoahHeap::heap()->workers()->active_workers(), ShenandoahHeap::heap()->marking_context()->task_queues()),
_full_gc(full_gc) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a Shenandoah safepoint");
}
void ShenandoahSTWMark::mark() {
// Weak reference processing
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahReferenceProcessor* rp = heap->ref_processor();
rp->reset_thread_locals();
rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
// Init mark, do not expect forwarded pointers in roots
if (ShenandoahVerify) {
assert(Thread::current()->is_VM_thread(), "Must be");
heap->verifier()->verify_roots_no_forwarded();
}
uint nworkers = heap->workers()->active_workers();
task_queues()->reserve(nworkers);
{
// Mark
StrongRootsScope scope(nworkers);
ShenandoahSTWMarkTask task(this);
heap->workers()->run_task(&task);
assert(task_queues()->is_empty(), "Should be empty");
}
heap->mark_complete_marking_context();
assert(task_queues()->is_empty(), "Should be empty");
TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
void ShenandoahSTWMark::mark_roots(uint worker_id) {
ShenandoahInitMarkRootsClosure init_mark(task_queues()->queue(worker_id));
_root_scanner.roots_do(&init_mark, worker_id);
}
void ShenandoahSTWMark::finish_mark(uint worker_id) {
ShenandoahPhaseTimings::Phase phase = _full_gc ? ShenandoahPhaseTimings::full_gc_mark : ShenandoahPhaseTimings::degen_gc_stw_mark;
ShenandoahWorkerTimingsTracker timer(phase, ShenandoahPhaseTimings::ParallelMark, worker_id);
ShenandoahReferenceProcessor* rp = ShenandoahHeap::heap()->ref_processor();
mark_loop(worker_id, &_terminator, rp,
false, // not cancellable
ShenandoahStringDedup::is_enabled());
}

View file

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSTWMARK_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHSTWMARK_HPP
#include "gc/shared/taskTerminator.hpp"
#include "gc/shenandoah/shenandoahMark.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
class ShenandoahSTWMarkTask;
class ShenandoahSTWMark : public ShenandoahMark {
friend class ShenandoahSTWMarkTask;
private:
ShenandoahSTWRootScanner _root_scanner;
TaskTerminator _terminator;
bool _full_gc;
public:
ShenandoahSTWMark(bool full_gc);
void mark();
private:
void mark_roots(uint worker_id);
void finish_mark(uint worker_id);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSTWMARK_HPP
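Putting the new files next to the oop-closure change above (ShenandoahConcurrentMark::mark_through_ref becoming ShenandoahMark::mark_through_ref), the resulting shape of the marking code can be sketched as follows; the concurrent sibling is declared outside this excerpt, so its relationship to the base class is an assumption here, and members are elided:

// Sketch of the resulting hierarchy (members elided; the concurrent side is assumed):
class ShenandoahMark { /* shared: task queues, mark_loop(), mark_through_ref() */ };
class ShenandoahConcurrentMark : public ShenandoahMark { /* concurrent + final mark driver */ };
class ShenandoahSTWMark : public ShenandoahMark { /* degenerated and full GC mark driver */ };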