8221102: Allow GC threads to participate in threads claiming protocol

Expand claim counter from 1bit to uintx, with rare overflow handling.

Reviewed-by: tschatzl, rkennke
This commit is contained in:
Kim Barrett 2019-04-02 13:08:38 -04:00
parent 4269bb40ac
commit e7e00f7ae0
9 changed files with 259 additions and 69 deletions

View file

@ -1766,17 +1766,17 @@ class G1RemarkThreadsClosure : public ThreadClosure {
G1CMSATBBufferClosure _cm_satb_cl;
G1CMOopClosure _cm_cl;
MarkingCodeBlobClosure _code_cl;
int _thread_parity;
uintx _claim_token;
public:
G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
_cm_satb_cl(task, g1h),
_cm_cl(g1h, task),
_code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
_thread_parity(Threads::thread_claim_parity()) {}
_claim_token(Threads::thread_claim_token()) {}
void do_thread(Thread* thread) {
if (thread->claim_oops_do(true, _thread_parity)) {
if (thread->claim_threads_do(true, _claim_token)) {
SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
queue.apply_closure_and_empty(&_cm_satb_cl);
if (thread->is_Java_thread()) {

View file

@ -824,7 +824,6 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots) {
// General roots.
assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
assert(code_roots != NULL, "code root closure should always be set");
// _n_termination for _process_strong_tasks should be set up stream
// in a method not running in a GC worker. Otherwise the GC worker

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ MarkScope::~MarkScope() {
}
StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) {
Threads::change_thread_claim_parity();
Threads::change_thread_claim_token();
}
StrongRootsScope::~StrongRootsScope() {

View file

@ -213,15 +213,15 @@ public:
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
ShenandoahSATBBufferClosure* _satb_cl;
int _thread_parity;
uintx _claim_token;
public:
ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
_satb_cl(satb_cl),
_thread_parity(Threads::thread_claim_parity()) {}
_claim_token(Threads::thread_claim_token()) {}
void do_thread(Thread* thread) {
if (thread->claim_oops_do(true, _thread_parity)) {
if (thread->claim_threads_do(true, _claim_token)) {
ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -179,7 +179,7 @@ ZRootsIterator::ZRootsIterator() :
_code_cache(this) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZStatTimer timer(ZSubPhasePauseRootsSetup);
Threads::change_thread_claim_parity();
Threads::change_thread_claim_token();
COMPILER2_PRESENT(DerivedPointerTable::clear());
if (ClassUnloading) {
nmethod::oops_do_marking_prologue();
@ -404,7 +404,7 @@ ZThreadRootsIterator::ZThreadRootsIterator() :
_threads(this) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZStatTimer timer(ZSubPhasePauseRootsSetup);
Threads::change_thread_claim_parity();
Threads::change_thread_claim_token();
}
ZThreadRootsIterator::~ZThreadRootsIterator() {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -219,7 +219,7 @@ public:
NMethodMarkingTask(NMethodMarkingThreadClosure* cl) :
AbstractGangTask("Parallel NMethod Marking"),
_cl(cl) {
Threads::change_thread_claim_parity();
Threads::change_thread_claim_token();
}
~NMethodMarkingTask() {

View file

@ -238,8 +238,8 @@ Thread::Thread() {
set_last_handle_mark(NULL);
DEBUG_ONLY(_missed_ic_stub_refill_verifier = NULL);
// This initial value ==> never claimed.
_oops_do_parity = 0;
// Initial value of zero ==> never claimed.
_threads_do_token = 0;
_threads_hazard_ptr = NULL;
_threads_list_ptr = NULL;
_nested_threads_hazard_ptr_cnt = 0;
@ -885,16 +885,14 @@ bool Thread::is_interrupted(Thread* thread, bool clear_interrupted) {
// GC Support
bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
int thread_parity = _oops_do_parity;
if (thread_parity != strong_roots_parity) {
jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
if (res == thread_parity) {
bool Thread::claim_par_threads_do(uintx claim_token) {
uintx token = _threads_do_token;
if (token != claim_token) {
uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token);
if (res == token) {
return true;
} else {
guarantee(res == strong_roots_parity, "Or else what?");
return false;
}
guarantee(res == claim_token, "invariant");
}
return false;
}
@ -3472,7 +3470,7 @@ JavaThread* Threads::_thread_list = NULL;
int Threads::_number_of_threads = 0;
int Threads::_number_of_non_daemon_threads = 0;
int Threads::_return_code = 0;
int Threads::_thread_claim_parity = 0;
uintx Threads::_thread_claim_token = 1; // Never zero.
size_t JavaThread::_stack_size_at_create = 0;
#ifdef ASSERT
@ -3532,14 +3530,14 @@ void Threads::threads_do(ThreadClosure* tc) {
}
void Threads::possibly_parallel_threads_do(bool is_par, ThreadClosure* tc) {
int cp = Threads::thread_claim_parity();
uintx claim_token = Threads::thread_claim_token();
ALL_JAVA_THREADS(p) {
if (p->claim_oops_do(is_par, cp)) {
if (p->claim_threads_do(is_par, claim_token)) {
tc->do_thread(p);
}
}
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(is_par, cp)) {
if (vmt->claim_threads_do(is_par, claim_token)) {
tc->do_thread(vmt);
}
}
@ -4525,27 +4523,39 @@ void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
VMThread::vm_thread()->oops_do(f, cf);
}
void Threads::change_thread_claim_parity() {
// Set the new claim parity.
assert(_thread_claim_parity >= 0 && _thread_claim_parity <= 2,
"Not in range.");
_thread_claim_parity++;
if (_thread_claim_parity == 3) _thread_claim_parity = 1;
assert(_thread_claim_parity >= 1 && _thread_claim_parity <= 2,
"Not in range.");
// Advance the global claim token used by the threads claiming protocol.
// Must be called exactly once in sequential code before starting parallel
// tasks that claim threads via Thread::claim_threads_do().
void Threads::change_thread_claim_token() {
  if (++_thread_claim_token == 0) {
    // On overflow of the token counter, there is a risk of future
    // collisions between a new global token value and a stale token
    // for a thread, because not all iterations visit all threads.
    // (Though it's pretty much a theoretical concern for non-trivial
    // token counter sizes.)  To deal with the possibility, reset all
    // the thread tokens to zero on global token overflow.
    struct ResetClaims : public ThreadClosure {
      virtual void do_thread(Thread* t) {
        // Non-parallel claim with token 0 simply stores 0 into the
        // thread's token field, i.e. back to the "never claimed" state.
        t->claim_threads_do(false, 0);
      }
    } reset_claims;
    Threads::threads_do(&reset_claims);
    // On overflow, update the global token to non-zero, to
    // avoid the special "never claimed" initial thread value.
    _thread_claim_token = 1;
  }
}
#ifdef ASSERT
// Debug-only helper: assert that a thread's claim token equals the
// expected (current global) token.  "kind" names the thread category
// (e.g. "Thread" or "VMThread") for the failure message.
void assert_thread_claimed(const char* kind, Thread* t, uintx expected) {
  const uintx token = t->threads_do_token();
  assert(token == expected,
         "%s " PTR_FORMAT " has incorrect value " UINTX_FORMAT " != "
         UINTX_FORMAT, kind, p2i(t), token, expected);
}
void Threads::assert_all_threads_claimed() {
ALL_JAVA_THREADS(p) {
const int thread_parity = p->oops_do_parity();
assert((thread_parity == _thread_claim_parity),
"Thread " PTR_FORMAT " has incorrect parity %d != %d", p2i(p), thread_parity, _thread_claim_parity);
assert_thread_claimed("Thread", p, _thread_claim_token);
}
VMThread* vmt = VMThread::vm_thread();
const int thread_parity = vmt->oops_do_parity();
assert((thread_parity == _thread_claim_parity),
"VMThread " PTR_FORMAT " has incorrect parity %d != %d", p2i(vmt), thread_parity, _thread_claim_parity);
assert_thread_claimed("VMThread", VMThread::vm_thread(), _thread_claim_token);
}
#endif // ASSERT

View file

@ -338,9 +338,8 @@ class Thread: public ThreadShadow {
// Point to the last handle mark
HandleMark* _last_handle_mark;
// The parity of the last strong_roots iteration in which this thread was
// claimed as a task.
int _oops_do_parity;
// Claim value for parallel iteration over threads.
uintx _threads_do_token;
// Support for GlobalCounter
private:
@ -647,27 +646,28 @@ class Thread: public ThreadShadow {
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
// Handles the parallel case for the method below.
// Handles the parallel case for claim_threads_do.
private:
bool claim_oops_do_par_case(int collection_parity);
bool claim_par_threads_do(uintx claim_token);
public:
// Requires that "collection_parity" is that of the current roots
// iteration. If "is_par" is false, sets the parity of "this" to
// "collection_parity", and returns "true". If "is_par" is true,
// uses an atomic instruction to set the current threads parity to
// "collection_parity", if it is not already. Returns "true" iff the
// Requires that "claim_token" is that of the current iteration.
// If "is_par" is false, sets the token of "this" to
// "claim_token", and returns "true". If "is_par" is true,
// uses an atomic instruction to set the current thread's token to
// "claim_token", if it is not already. Returns "true" iff the
// calling thread does the update, this indicates that the calling thread
// has claimed the thread's stack as a root group in the current
// collection.
bool claim_oops_do(bool is_par, int collection_parity) {
// has claimed the thread in the current iteration.
bool claim_threads_do(bool is_par, uintx claim_token) {
if (!is_par) {
_oops_do_parity = collection_parity;
_threads_do_token = claim_token;
return true;
} else {
return claim_oops_do_par_case(collection_parity);
return claim_par_threads_do(claim_token);
}
}
uintx threads_do_token() const { return _threads_do_token; }
// jvmtiRedefineClasses support
void metadata_handles_do(void f(Metadata*));
@ -750,7 +750,6 @@ protected:
Monitor* owned_locks() const { return _owned_locks; }
bool owns_locks() const { return owned_locks() != NULL; }
bool owns_locks_but_compiled_lock() const;
int oops_do_parity() const { return _oops_do_parity; }
// Deadlock detection
bool allow_allocation() { return _allow_allocation_count == 0; }
@ -2223,7 +2222,7 @@ class Threads: AllStatic {
static int _number_of_threads;
static int _number_of_non_daemon_threads;
static int _return_code;
static int _thread_claim_parity;
static uintx _thread_claim_token;
#ifdef ASSERT
static bool _vm_complete;
#endif
@ -2256,21 +2255,23 @@ class Threads: AllStatic {
// Does not include JNI_VERSION_1_1
static jboolean is_supported_jni_version(jint version);
// The "thread claim parity" provides a way for threads to be claimed
// The "thread claim token" provides a way for threads to be claimed
// by parallel worker tasks.
//
// Each thread contains a "parity" field. A task will claim the
// thread only if its parity field is the same as the global parity,
// which is updated by calling change_thread_claim_parity().
// Each thread contains a "token" field. A task will claim the
// thread only if its token is different from the global token,
// which is updated by calling change_thread_claim_token(). When
// a thread is claimed, its token is set to the global token value
// so other threads in the same iteration pass won't claim it.
//
// For this to work change_thread_claim_parity() needs to be called
// For this to work change_thread_claim_token() needs to be called
// exactly once in sequential code before starting parallel tasks
// that should claim threads.
//
// New threads get their parity set to 0 and change_thread_claim_parity()
// never sets the global parity to 0.
static int thread_claim_parity() { return _thread_claim_parity; }
static void change_thread_claim_parity();
// New threads get their token set to 0 and change_thread_claim_token()
// never sets the global token to 0.
static uintx thread_claim_token() { return _thread_claim_token; }
static void change_thread_claim_token();
static void assert_all_threads_claimed() NOT_DEBUG_RETURN;
// Apply "f->do_oop" to all root oops in all threads.
@ -2324,6 +2325,8 @@ class Threads: AllStatic {
// Deoptimizes all frames tied to marked nmethods
static void deoptimized_wrt_marked_nmethods();
struct Test; // For private gtest access.
};

View file

@ -0,0 +1,178 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "unittest.hpp"
// Access point for the gtest code below.  Nested in Threads (declared
// there as "struct Test; // For private gtest access."), so the member
// classes can reach Threads' private state such as _thread_claim_token.
struct Threads::Test : public AllStatic {
  class VM_TestClaimOverflow;
  class CountThreads;
  class CheckClaims;
};
// Closure that tallies Java and non-Java threads as they are visited.
// When claiming is requested, a thread is counted only if this closure
// successfully claims it with the given token.
class Threads::Test::CountThreads : public ThreadClosure {
  uintx _claim_token;           // token used when claiming is requested
  uint _java_threads_count;
  uint _non_java_threads_count;
  bool _need_claim;             // if true, count only threads we claim
public:
  CountThreads(uintx claim_token, bool need_claim) :
    _claim_token(claim_token),
    _java_threads_count(0),
    _non_java_threads_count(0),
    _need_claim(need_claim)
  {}
  virtual void do_thread(Thread* t) {
    // When claiming is enabled, skip any thread we fail to claim.
    if (_need_claim && !t->claim_threads_do(true, _claim_token)) {
      return;
    }
    if (t->is_Java_thread()) {
      ++_java_threads_count;
    } else {
      ++_non_java_threads_count;
    }
  }
  uint java_threads_count() const { return _java_threads_count; }
  uint non_java_threads_count() const { return _non_java_threads_count; }
  uint count() const { return _java_threads_count + _non_java_threads_count; }
};
// Closure that partitions visited threads by whether their claim token
// matches an expected value, with separate tallies for Java and
// non-Java threads.
class Threads::Test::CheckClaims : public ThreadClosure {
  uintx _claim_token;           // expected token value
  uint _java_threads_claimed;
  uint _java_threads_unclaimed;
  uint _non_java_threads_claimed;
  uint _non_java_threads_unclaimed;
public:
  CheckClaims(uintx claim_token) :
    _claim_token(claim_token),
    _java_threads_claimed(0),
    _java_threads_unclaimed(0),
    _non_java_threads_claimed(0),
    _non_java_threads_unclaimed(0)
  {}
  virtual void do_thread(Thread* t) {
    // Dispatch first on thread kind, then on claim state.
    const bool is_claimed = (t->threads_do_token() == _claim_token);
    if (t->is_Java_thread()) {
      if (is_claimed) {
        ++_java_threads_claimed;
      } else {
        ++_java_threads_unclaimed;
      }
    } else {
      if (is_claimed) {
        ++_non_java_threads_claimed;
      } else {
        ++_non_java_threads_unclaimed;
      }
    }
  }
  uint java_threads_claimed() const { return _java_threads_claimed; }
  uint java_threads_unclaimed() const { return _java_threads_unclaimed; }
  uint non_java_threads_claimed() const { return _non_java_threads_claimed; }
  uint non_java_threads_unclaimed() const { return _non_java_threads_unclaimed; }
  uint claimed() const {
    return _java_threads_claimed + _non_java_threads_claimed;
  }
  uint unclaimed() const {
    return _java_threads_unclaimed + _non_java_threads_unclaimed;
  }
};
// VM operation (runs at a safepoint) that exercises claim-token overflow:
// sets the global token just below max_uintx, performs claiming
// iterations, then advances the token until it wraps and verifies the
// overflow handling reset every per-thread token to zero.
class Threads::Test::VM_TestClaimOverflow : public VM_GTestExecuteAtSafepoint {
public:
  void doit() {
    // Prevent changes to the NJT list while we're conducting our test.
    MutexLockerEx ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
    // Directly seed the global token one step before overflow.
    _thread_claim_token = max_uintx - 1;
    ASSERT_EQ(max_uintx - 1, thread_claim_token());
    // Claim and count every thread under the current token.
    CountThreads count1(thread_claim_token(), true);
    threads_do(&count1);
    tty->print_cr("Testing claim overflow with %u threads", count1.count());
    // At least the main thread and the VM thread.
    ASSERT_LE(2u, count1.count());
    ASSERT_LE(1u, count1.java_threads_count());
    ASSERT_LE(1u, count1.non_java_threads_count());
    ASSERT_EQ(max_uintx - 1, thread_claim_token());
    // Every thread visited by threads_do() should now carry the token.
    CheckClaims check1(thread_claim_token());
    threads_do(&check1);
    ASSERT_EQ(count1.count(), check1.claimed());
    ASSERT_EQ(count1.java_threads_count(), check1.java_threads_claimed());
    ASSERT_EQ(0u, check1.java_threads_unclaimed());
    ASSERT_EQ(count1.non_java_threads_count(), check1.non_java_threads_claimed());
    ASSERT_EQ(0u, check1.non_java_threads_unclaimed());
    change_thread_claim_token(); // No overflow yet.
    ASSERT_EQ(max_uintx, thread_claim_token());
    CountThreads count2(thread_claim_token(), false); // Claimed by PPTD below
    possibly_parallel_threads_do(true, &count2);
    ASSERT_EQ(count1.java_threads_count(), count2.java_threads_count());
    ASSERT_EQ(1u, count2.non_java_threads_count()); // Only VM thread
    // possibly_parallel_threads_do() visits Java threads plus the VM
    // thread, so only those should carry the new token.
    CheckClaims check2(thread_claim_token());
    threads_do(&check2);
    ASSERT_EQ(count2.java_threads_count(), check2.java_threads_claimed());
    ASSERT_EQ(0u, check2.java_threads_unclaimed());
    ASSERT_EQ(1u, check2.non_java_threads_claimed()); // Only VM thread
    ASSERT_EQ(count1.non_java_threads_count(),
              check2.non_java_threads_claimed() +
              check2.non_java_threads_unclaimed());
    change_thread_claim_token(); // Expect overflow.
    ASSERT_EQ(uintx(1), thread_claim_token());
    // Verify all threads have claim value of 0 after change overflow.
    CheckClaims check3(0);
    threads_do(&check3);
    ASSERT_EQ(count1.count(), check3.claimed());
    ASSERT_EQ(0u, check3.unclaimed());
  }
};
// Test overflow handling in Threads::change_thread_claim_token().
TEST_VM(ThreadsTest, claim_overflow) {
  Threads::Test::VM_TestClaimOverflow op;
  // Transition the current thread to the VM state so the operation can
  // be handed to the VM thread for execution at a safepoint.
  ThreadInVMfromNative invm(JavaThread::current());
  VMThread::execute(&op);
}