8210064: ZGC: Introduce ZConcurrentRootsIterator for scanning a subset of strong IN_NATIVE roots concurrently

Reviewed-by: pliden, kbarrett
Author: Erik Österlund
Date:   2018-10-16 13:14:18 +02:00
Parent: 5f2b11d373
Commit: 782fa608d2
13 changed files with 146 additions and 49 deletions
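
For orientation, a minimal usage sketch of the iterator this change introduces (not part of the patch; MyRootsClosure and scan_concurrent_roots are hypothetical names), mirroring the call sites added below in ZHeapIterator::objects_do() and the new ZMarkConcurrentRootsTask:

#include "gc/z/zRootsIterator.hpp"

class MyRootsClosure : public ZRootsIteratorClosure {
public:
  virtual void do_oop(oop* p)       { /* process the root slot */ }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

void scan_concurrent_roots() {
  // With marking == true the iterator also joins the suspendible thread set
  // and holds ClassLoaderDataGraph_lock for its lifetime.
  ZConcurrentRootsIterator roots(true /* marking */);
  MyRootsClosure cl;
  roots.oops_do(&cl);   // may be called by multiple GC worker threads
}

The JNIHandles and ClassLoaderDataGraph roots move out of the safepoint pause and into this concurrent phase; the remaining strong roots are still visited by ZRootsIterator under a pause.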

View file

@@ -231,14 +231,14 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymo
 }
 
 void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock);
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
     cl->do_cld(cld);
   }
 }
 
 void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock);
   // Only walk the head until any clds not purged from prior unloading
   // (CMS doesn't purge right away).
   for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
@@ -248,7 +248,7 @@ void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
 }
 
 void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock);
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
     CLDClosure* closure = cld->keep_alive() ? strong : weak;
     if (closure != NULL) {
@@ -258,7 +258,7 @@ void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
 }
 
 void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  assert_locked_or_safepoint_weak(ClassLoaderDataGraph_lock);
   if (ClassUnloading) {
     roots_cld_do(cl, NULL);
   } else {

View file

@@ -23,6 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
@@ -295,6 +296,14 @@ VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
                              reserved_region().start() + max_capacity_in_words);
 }
 
+void ZCollectedHeap::safepoint_synchronize_begin() {
+  SuspendibleThreadSet::synchronize();
+}
+
+void ZCollectedHeap::safepoint_synchronize_end() {
+  SuspendibleThreadSet::desynchronize();
+}
+
 void ZCollectedHeap::prepare_for_verify() {
   // Does nothing
 }

View file

@@ -117,6 +117,9 @@ public:
   virtual VirtualSpaceSummary create_heap_space_summary();
 
+  virtual void safepoint_synchronize_begin();
+  virtual void safepoint_synchronize_end();
+
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;

View file

@@ -336,7 +336,7 @@ void ZDriver::run_gc_cycle(GCCause::Cause cause) {
   // Phase 2: Concurrent Mark
   {
     ZStatTimer timer(ZPhaseConcurrentMark);
-    ZHeap::heap()->mark();
+    ZHeap::heap()->mark(true /* initial */);
   }
 
   // Phase 3: Pause Mark End
@@ -345,7 +345,7 @@ void ZDriver::run_gc_cycle(GCCause::Cause cause) {
     while (!vm_operation(&cl)) {
       // Phase 3.5: Concurrent Mark Continue
       ZStatTimer timer(ZPhaseConcurrentMarkContinue);
-      ZHeap::heap()->mark();
+      ZHeap::heap()->mark(false /* initial */);
     }
   }

View file

@@ -296,8 +296,8 @@ void ZHeap::mark_start() {
   ZStatHeap::set_at_mark_start(capacity(), used());
 }
 
-void ZHeap::mark() {
-  _mark.mark();
+void ZHeap::mark(bool initial) {
+  _mark.mark(initial);
 }
 
 void ZHeap::mark_flush_and_free(Thread* thread) {

View file

@@ -133,7 +133,7 @@ public:
   bool is_object_strongly_live(uintptr_t addr) const;
   template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
 
   void mark_start();
-  void mark();
+  void mark(bool initial);
   void mark_flush_and_free(Thread* thread);
   bool mark_end();

View file

@@ -172,8 +172,10 @@ void ZHeapIterator::objects_do(ObjectClosure* cl) {
     // this the user would have expected to see ObjectFree events for
     // unreachable objects in the tag map.
     ZRootsIterator roots;
+    ZConcurrentRootsIterator concurrent_roots(false /* marking */);
     ZHeapIteratorRootOopClosure root_cl(this);
     roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
+    concurrent_roots.oops_do(&root_cl);
   }
 
   // Drain stack

View file

@@ -615,6 +615,34 @@ void ZMark::work(uint64_t timeout_in_millis) {
   stacks->free(&_allocator);
 }
 
+class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
+public:
+  virtual void do_oop(oop* p) {
+    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+class ZMarkConcurrentRootsTask : public ZTask {
+private:
+  ZConcurrentRootsIterator            _roots;
+  ZMarkConcurrentRootsIteratorClosure _cl;
+
+public:
+  ZMarkConcurrentRootsTask(ZMark* mark) :
+      ZTask("ZMarkConcurrentRootsTask"),
+      _roots(true /* marking */),
+      _cl() {}
+
+  virtual void work() {
+    _roots.oops_do(&_cl);
+  }
+};
+
 class ZMarkTask : public ZTask {
 private:
   ZMark* const _mark;
@@ -637,7 +665,12 @@ public:
   }
 };
 
-void ZMark::mark() {
+void ZMark::mark(bool initial) {
+  if (initial) {
+    ZMarkConcurrentRootsTask task(this);
+    _workers->run_concurrent(&task);
+  }
+
   ZMarkTask task(this);
   _workers->run_concurrent(&task);
 }

View file

@@ -108,7 +108,7 @@ public:
   template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
 
   void start();
-  void mark();
+  void mark(bool initial);
   bool end();
 
   void flush_and_free();

View file

@@ -28,6 +28,7 @@
 #include "code/codeCache.hpp"
 #include "compiler/oopMap.hpp"
 #include "gc/shared/oopStorageParState.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zNMethodTable.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
@@ -52,16 +53,20 @@ static const ZStatSubPhase ZSubPhasePauseRootsSetup("Pause Roots Setup");
 static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
 static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
 static const ZStatSubPhase ZSubPhasePauseRootsUniverse("Pause Roots Universe");
-static const ZStatSubPhase ZSubPhasePauseRootsJNIHandles("Pause Roots JNIHandles");
 static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots ObjectSynchronizer");
 static const ZStatSubPhase ZSubPhasePauseRootsManagement("Pause Roots Management");
 static const ZStatSubPhase ZSubPhasePauseRootsJVMTIExport("Pause Roots JVMTIExport");
 static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
 static const ZStatSubPhase ZSubPhasePauseRootsSystemDictionary("Pause Roots SystemDictionary");
-static const ZStatSubPhase ZSubPhasePauseRootsClassLoaderDataGraph("Pause Roots ClassLoaderDataGraph");
 static const ZStatSubPhase ZSubPhasePauseRootsThreads("Pause Roots Threads");
 static const ZStatSubPhase ZSubPhasePauseRootsCodeCache("Pause Roots CodeCache");
 
+static const ZStatSubPhase ZSubPhaseConcurrentRootsSetup("Concurrent Roots Setup");
+static const ZStatSubPhase ZSubPhaseConcurrentRoots("Concurrent Roots");
+static const ZStatSubPhase ZSubPhaseConcurrentRootsTeardown("Concurrent Roots Teardown");
+static const ZStatSubPhase ZSubPhaseConcurrentRootsJNIHandles("Concurrent Roots JNIHandles");
+static const ZStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph");
+
 static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup");
 static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsTeardown("Pause Weak Roots Teardown");
@@ -128,21 +133,17 @@ void ZParallelWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, ZRoots
 }
 
 ZRootsIterator::ZRootsIterator() :
-    _jni_handles_iter(JNIHandles::global_handles()),
     _universe(this),
     _object_synchronizer(this),
     _management(this),
     _jvmti_export(this),
     _jvmti_weak_export(this),
     _system_dictionary(this),
-    _jni_handles(this),
-    _class_loader_data_graph(this),
     _threads(this),
     _code_cache(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   ZStatTimer timer(ZSubPhasePauseRootsSetup);
   Threads::change_thread_claim_parity();
-  ClassLoaderDataGraph::clear_claimed_marks();
   COMPILER2_PRESENT(DerivedPointerTable::clear());
   CodeCache::gc_prologue();
   ZNMethodTable::gc_prologue();
@@ -163,11 +164,6 @@ void ZRootsIterator::do_universe(ZRootsIteratorClosure* cl) {
   Universe::oops_do(cl);
 }
 
-void ZRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseRootsJNIHandles);
-  _jni_handles_iter.oops_do(cl);
-}
-
 void ZRootsIterator::do_object_synchronizer(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsObjectSynchronizer);
   ObjectSynchronizer::oops_do(cl);
@@ -194,12 +190,6 @@ void ZRootsIterator::do_system_dictionary(ZRootsIteratorClosure* cl) {
   SystemDictionary::oops_do(cl);
 }
 
-void ZRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseRootsClassLoaderDataGraph);
-  CLDToOopClosure cld_cl(cl);
-  ClassLoaderDataGraph::cld_do(&cld_cl);
-}
-
 void ZRootsIterator::do_threads(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsThreads);
   ResourceMark rm;
@@ -218,8 +208,6 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_ex
   _management.oops_do(cl);
   _jvmti_export.oops_do(cl);
   _system_dictionary.oops_do(cl);
-  _jni_handles.oops_do(cl);
-  _class_loader_data_graph.oops_do(cl);
   _threads.oops_do(cl);
   _code_cache.oops_do(cl);
   if (visit_jvmti_weak_export) {
@@ -227,6 +215,43 @@
   }
 }
 
+ZConcurrentRootsIterator::ZConcurrentRootsIterator(bool marking) :
+    _marking(marking),
+    _sts_joiner(marking /* active */),
+    _jni_handles_iter(JNIHandles::global_handles()),
+    _jni_handles(this),
+    _class_loader_data_graph(this) {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsSetup);
+  if (_marking) {
+    ClassLoaderDataGraph_lock->lock();
+    ClassLoaderDataGraph::clear_claimed_marks();
+  }
+}
+
+ZConcurrentRootsIterator::~ZConcurrentRootsIterator() {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsTeardown);
+  if (_marking) {
+    ClassLoaderDataGraph_lock->unlock();
+  }
+}
+
+void ZConcurrentRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsJNIHandles);
+  _jni_handles_iter.oops_do(cl);
+}
+
+void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
+  CLDToOopClosure cld_cl(cl, _marking /* must_claim */);
+  ClassLoaderDataGraph::cld_do(&cld_cl);
+}
+
+void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
+  ZStatTimer timer(ZSubPhaseConcurrentRoots);
+  _jni_handles.oops_do(cl);
+  _class_loader_data_graph.oops_do(cl);
+}
+
 ZWeakRootsIterator::ZWeakRootsIterator() :
     _jvmti_weak_export(this),
     _jfr_weak(this) {

View file

@@ -25,6 +25,7 @@
 #define SHARE_GC_Z_ZROOTSITERATOR_HPP
 
 #include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "runtime/thread.hpp"
@@ -37,8 +38,7 @@ public:
   }
 };
 
-typedef OopStorage::ParState<false /* concurrent */, false /* is_const */> ZOopStorageIterator;
-typedef OopStorage::ParState<true /* concurrent */, false /* is_const */> ZConcurrentOopStorageIterator;
+typedef OopStorage::ParState<true /* concurrent */, false /* is_const */> ZOopStorageIterator;
 
 template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
 class ZSerialOopsDo {
@@ -86,16 +86,12 @@
 class ZRootsIterator {
 private:
-  ZOopStorageIterator _jni_handles_iter;
-
   void do_universe(ZRootsIteratorClosure* cl);
-  void do_jni_handles(ZRootsIteratorClosure* cl);
   void do_object_synchronizer(ZRootsIteratorClosure* cl);
   void do_management(ZRootsIteratorClosure* cl);
   void do_jvmti_export(ZRootsIteratorClosure* cl);
   void do_jvmti_weak_export(ZRootsIteratorClosure* cl);
   void do_system_dictionary(ZRootsIteratorClosure* cl);
-  void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
   void do_threads(ZRootsIteratorClosure* cl);
   void do_code_cache(ZRootsIteratorClosure* cl);
@@ -105,8 +101,6 @@
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_export> _jvmti_export;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_system_dictionary> _system_dictionary;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_jni_handles> _jni_handles;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_threads> _threads;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache> _code_cache;
@@ -117,6 +111,25 @@
   void oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_export = false);
 };
 
+class ZConcurrentRootsIterator {
+private:
+  const bool                 _marking;
+  SuspendibleThreadSetJoiner _sts_joiner;
+  ZOopStorageIterator        _jni_handles_iter;
+
+  void do_jni_handles(ZRootsIteratorClosure* cl);
+  void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
+
+  ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_jni_handles> _jni_handles;
+  ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
+
+public:
+  ZConcurrentRootsIterator(bool marking);
+  ~ZConcurrentRootsIterator();
+
+  void oops_do(ZRootsIteratorClosure* cl);
+};
+
 class ZWeakRootsIterator {
 private:
   void do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);
@@ -135,9 +148,9 @@
 
 class ZConcurrentWeakRootsIterator {
 private:
-  ZConcurrentOopStorageIterator _vm_weak_handles_iter;
-  ZConcurrentOopStorageIterator _jni_weak_handles_iter;
-  ZConcurrentOopStorageIterator _string_table_iter;
+  ZOopStorageIterator _vm_weak_handles_iter;
+  ZOopStorageIterator _jni_weak_handles_iter;
+  ZOopStorageIterator _string_table_iter;
 
   void do_vm_weak_handles(ZRootsIteratorClosure* cl);
   void do_jni_weak_handles(ZRootsIteratorClosure* cl);

View file

@@ -166,6 +166,16 @@ void assert_locked_or_safepoint(const Monitor * lock) {
   fatal("must own lock %s", lock->name());
 }
 
+// a weaker assertion than the above
+void assert_locked_or_safepoint_weak(const Monitor * lock) {
+  if (IgnoreLockingAssertions) return;
+  assert(lock != NULL, "Need non-NULL lock");
+  if (lock->is_locked()) return;
+  if (SafepointSynchronize::is_at_safepoint()) return;
+  if (!Universe::is_fully_initialized()) return;
+  fatal("must own lock %s", lock->name());
+}
+
 // a stronger assertion than the above
 void assert_lock_strong(const Monitor * lock) {
   if (IgnoreLockingAssertions) return;

View file

@@ -199,9 +199,11 @@ class MutexLocker: StackObj {
 
 // for debugging: check that we're already owning this lock (or are at a safepoint)
 #ifdef ASSERT
 void assert_locked_or_safepoint(const Monitor * lock);
+void assert_locked_or_safepoint_weak(const Monitor * lock);
 void assert_lock_strong(const Monitor * lock);
 #else
 #define assert_locked_or_safepoint(lock)
+#define assert_locked_or_safepoint_weak(lock)
 #define assert_lock_strong(lock)
 #endif
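
As a closing illustration, a hypothetical helper (not part of the patch; it assumes the existing strong assertion requires lock ownership by the current thread, which is what the new weak variant relaxes) contrasting the two flavors:

#include "runtime/mutexLocker.hpp"

// Hypothetical illustration of when each assertion is satisfied.
static void example(const Monitor* lock) {
  // Strong form: the current thread must own 'lock', or we must be at a
  // safepoint.
  assert_locked_or_safepoint(lock);

  // Weak form (added above): it is enough that *some* thread holds 'lock'
  // (lock->is_locked()), or that we are at a safepoint. This is the case for
  // GC worker threads calling ClassLoaderDataGraph::cld_do() while the
  // ZConcurrentRootsIterator constructor holds ClassLoaderDataGraph_lock on
  // their behalf.
  assert_locked_or_safepoint_weak(lock);
}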