8209850: Allow NamedThreads to use GlobalCounter critical sections

Add NamedThreads iterator and make GlobalCounter use it.

Reviewed-by: eosterlund, rehn
Kim Barrett 2018-08-23 18:14:53 -04:00
parent 8b138c684a
commit 23f0fb4cde
11 changed files with 458 additions and 69 deletions

View file

@ -43,7 +43,6 @@
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#include "utilities/ostream.hpp" #include "utilities/ostream.hpp"
#include "utilities/spinYield.hpp"
OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {} OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {}
@@ -495,48 +494,6 @@ bool OopStorage::expand_active_array() {
   return true;
 }

-OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
-
-// Begin read-side critical section.
-uint OopStorage::ProtectActive::read_enter() {
-  return Atomic::add(2u, &_enter);
-}
-
-// End read-side critical section.
-void OopStorage::ProtectActive::read_exit(uint enter_value) {
-  Atomic::add(2u, &_exit[enter_value & 1]);
-}
-
-// Wait until all readers that entered the critical section before
-// synchronization have exited that critical section.
-void OopStorage::ProtectActive::write_synchronize() {
-  SpinYield spinner;
-  // Determine old and new exit counters, based on bit0 of the
-  // on-entry _enter counter.
-  uint value = OrderAccess::load_acquire(&_enter);
-  volatile uint* new_ptr = &_exit[(value + 1) & 1];
-  // Atomically change the in-use exit counter to the new counter, by
-  // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
-  // and initializing the new exit counter to that enter value.  Note:
-  // The new exit counter is not being used by read operations until
-  // this change succeeds.
-  uint old;
-  do {
-    old = value;
-    *new_ptr = ++value;
-    value = Atomic::cmpxchg(value, &_enter, old);
-  } while (old != value);
-  // Readers that entered the critical section before we changed the
-  // selected exit counter will use the old exit counter.  Readers
-  // entering after the change will use the new exit counter.  Wait
-  // for all the critical sections started before the change to
-  // complete, e.g. for the value of old_ptr to catch up with old.
-  volatile uint* old_ptr = &_exit[old & 1];
-  while (old != OrderAccess::load_acquire(old_ptr)) {
-    spinner.wait();
-  }
-}
-
 // Make new_array the _active_array.  Increments new_array's refcount
 // to account for the new reference.  The assignment is atomic wrto
 // obtain_active_array; once this function returns, it is safe for the
@@ -548,7 +505,10 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
   // Install new_array, ensuring its initialization is complete first.
   OrderAccess::release_store(&_active_array, new_array);
   // Wait for any readers that could read the old array from _active_array.
-  _protect_active.write_synchronize();
+  // Can't use GlobalCounter here, because this is called from allocate(),
+  // which may be called in the scope of a GlobalCounter critical section
+  // when inserting a StringTable entry.
+  _protect_active.synchronize();
   // All obtain critical sections that could see the old array have
   // completed, having incremented the refcount of the old array.  The
   // caller can now safely relinquish the old array.
@@ -560,10 +520,9 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
 // _active_array.  The caller must relinquish the array when done
 // using it.
 OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
-  uint enter_value = _protect_active.read_enter();
+  SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
   result->increment_refcount();
-  _protect_active.read_exit(enter_value);
   return result;
 }
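
The two functions above form the usual RCU-style publish-then-wait pairing. A minimal sketch (not part of the patch) of that pairing, with hypothetical names "Config", "reader_use_config", and "writer_replace_config":

  #include "runtime/orderAccess.hpp"
  #include "utilities/singleWriterSynchronizer.hpp"

  struct Config { /* state that readers consume */ };

  static Config* volatile _config;        // replaced wholesale by the writer
  static SingleWriterSynchronizer _sync;  // guards readers of _config

  void reader_use_config() {
    // Entering never blocks; the Config seen here stays safe to use
    // until the critical section ends.
    SingleWriterSynchronizer::CriticalSection cs(&_sync);
    Config* c = OrderAccess::load_acquire(&_config);
    // ... use c only inside this critical section ...
  }

  void writer_replace_config(Config* new_config) {
    OrderAccess::release_store(&_config, new_config);
    _sync.synchronize();  // wait out readers that may still see the old one
    // ... now safe to reclaim the previous Config ...
  }

obtain_active_array() can return its result past the critical section only because it pins the array with a refcount first; without such pinning, all uses must stay inside the section, as in the sketch.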

View file

@@ -29,6 +29,7 @@
 #include "oops/oop.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"

 class Mutex;
 class outputStream;
@@ -203,19 +204,6 @@ NOT_AIX( private: )
   void unlink(const Block& block);
 };

-// RCU-inspired protection of access to _active_array.
-class ProtectActive {
-  volatile uint _enter;
-  volatile uint _exit[2];
-
-public:
-  ProtectActive();
-
-  uint read_enter();
-  void read_exit(uint enter_value);
-  void write_synchronize();
-};
-
 private:
   const char* _name;
   ActiveArray* _active_array;
@@ -229,7 +217,7 @@ private:
   volatile size_t _allocation_count;

   // Protection for _active_array.
-  mutable ProtectActive _protect_active;
+  mutable SingleWriterSynchronizer _protect_active;

   // mutable because this gets set even for const iteration.
   mutable bool _concurrent_iteration_active;

View file

@@ -76,6 +76,7 @@ Monitor* VMOperationRequest_lock = NULL;
 Monitor* Safepoint_lock = NULL;
 Monitor* SerializePage_lock = NULL;
 Monitor* Threads_lock = NULL;
+Mutex*   NamedThreadsList_lock = NULL;
 Monitor* CGC_lock = NULL;
 Monitor* STS_lock = NULL;
 Monitor* FullGCCount_lock = NULL;
@@ -256,6 +257,7 @@ void mutex_init() {
   def(Safepoint_lock          , PaddedMonitor, safepoint, true,  Monitor::_safepoint_check_sometimes);  // locks SnippetCache_lock/Threads_lock
   def(Threads_lock            , PaddedMonitor, barrier,   true,  Monitor::_safepoint_check_sometimes);
+  def(NamedThreadsList_lock   , PaddedMutex,   leaf,      true,  Monitor::_safepoint_check_never);
   def(VMOperationQueue_lock   , PaddedMonitor, nonleaf,   true,  Monitor::_safepoint_check_sometimes);  // VM_thread allowed to block on these
   def(VMOperationRequest_lock , PaddedMonitor, nonleaf,   true,  Monitor::_safepoint_check_sometimes);

View file

@@ -72,6 +72,7 @@ extern Monitor* VMOperationRequest_lock; // a lock on Threads waiting fo
 extern Monitor* Safepoint_lock;        // a lock used by the safepoint abstraction
 extern Monitor* Threads_lock;          // a lock on the Threads table of active Java threads
                                        // (also used by Safepoints too to block threads creation/destruction)
+extern Mutex*   NamedThreadsList_lock; // a lock on the NamedThreads list
 extern Monitor* CGC_lock;              // used for coordination between
                                        // fore- & background GC threads.
 extern Monitor* STS_lock;              // used for joining/leaving SuspendibleThreadSet.
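
Because NamedThreadsList_lock is created with _safepoint_check_never, every acquisition must opt out of the safepoint check. A minimal sketch of the required idiom, which is the same form the patch uses in the NamedThread constructor and destructor below:

  {
    MutexLockerEx ml(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
    // ... briefly read or update the NamedThreads list ...
  }  // lock released at end of scope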

View file

@@ -114,6 +114,7 @@
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
+#include "utilities/singleWriterSynchronizer.hpp"
 #include "utilities/vmError.hpp"
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciCompiler.hpp"
@@ -1206,15 +1207,61 @@ void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name
                      THREAD);
 }

+// List of all NamedThreads and safe iteration over that list.
+class NamedThread::List {
+public:
+  NamedThread* volatile _head;
+  SingleWriterSynchronizer _protect;
+
+  List() : _head(NULL), _protect() {}
+};
+
+NamedThread::List NamedThread::_the_list;
+
+NamedThread::Iterator::Iterator() :
+  _protect_enter(_the_list._protect.enter()),
+  _current(OrderAccess::load_acquire(&_the_list._head))
+{}
+
+NamedThread::Iterator::~Iterator() {
+  _the_list._protect.exit(_protect_enter);
+}
+
+void NamedThread::Iterator::step() {
+  assert(!end(), "precondition");
+  _current = OrderAccess::load_acquire(&_current->_next_named_thread);
+}
+
 // NamedThread --  non-JavaThread subclasses with multiple
 // uniquely named instances should derive from this.
-NamedThread::NamedThread() : Thread() {
-  _name = NULL;
-  _processed_thread = NULL;
-  _gc_id = GCId::undefined();
+NamedThread::NamedThread() :
+  Thread(),
+  _name(NULL),
+  _processed_thread(NULL),
+  _gc_id(GCId::undefined()),
+  _next_named_thread(NULL)
+{
+  // Add this thread to _the_list.
+  MutexLockerEx lock(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
+  _next_named_thread = _the_list._head;
+  OrderAccess::release_store(&_the_list._head, this);
 }

 NamedThread::~NamedThread() {
+  // Remove this thread from _the_list.
+  {
+    MutexLockerEx lock(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
+    NamedThread* volatile* p = &_the_list._head;
+    for (NamedThread* t = *p; t != NULL; p = &t->_next_named_thread, t = *p) {
+      if (t == this) {
+        *p = this->_next_named_thread;
+        // Wait for any in-progress iterators.
+        _the_list._protect.synchronize();
+        break;
+      }
+    }
+  }
   if (_name != NULL) {
     FREE_C_HEAP_ARRAY(char, _name);
     _name = NULL;
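
The destructor's ordering is what makes iteration safe against concurrent thread exit. A hedged sketch of that ordering ("remove_and_wait" and "unlink_from_list" are hypothetical names for the logic above, which really lives inline in ~NamedThread):

  void remove_and_wait(NamedThread* dying) {
    MutexLockerEx ml(NamedThreadsList_lock, Mutex::_no_safepoint_check_flag);
    // 1. Unlink under the lock: iterators constructed later cannot see
    //    'dying' at all.
    unlink_from_list(dying);  // hypothetical helper
    // 2. Drain iterators already in their critical section: one of them
    //    may still hold 'dying' as its _current, or be about to step
    //    through its _next_named_thread link (which is left intact).
    NamedThread::_the_list._protect.synchronize();
    // 3. Only after this may ~NamedThread free the thread's memory.
  }

Note that the unlinked node's _next_named_thread pointer is deliberately not cleared, so an in-flight iterator can still step past it before the synchronize() completes.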

View file

@@ -103,6 +103,7 @@ class WorkerThread;
 //     - JavaThread
 //       - various subclasses eg CompilerThread, ServiceThread
 //   - WatcherThread
+//   - JfrSamplerThread

 class Thread: public ThreadShadow {
   friend class VMStructs;
@@ -776,6 +777,10 @@ class NamedThread: public Thread {
   // log JavaThread being processed by oops_do
   JavaThread* _processed_thread;
   uint _gc_id; // The current GC id when a thread takes part in GC
+  NamedThread* volatile _next_named_thread;
+
+  class List;
+  static List _the_list;

 public:
   NamedThread();
@@ -791,6 +796,31 @@ class NamedThread: public Thread {
   void set_gc_id(uint gc_id) { _gc_id = gc_id; }
   uint gc_id() { return _gc_id; }
+
+  class Iterator;
+};
+
+// Provides iteration over the list of NamedThreads.  Because list
+// management occurs in the NamedThread constructor and destructor,
+// entries in the list may not be fully constructed instances of a
+// derived class.  Threads created after an iterator is constructed
+// will not be visited by the iterator.  The scope of an iterator is a
+// critical section; there must be no safepoint checks in that scope.
+class NamedThread::Iterator : public StackObj {
+  uint _protect_enter;
+  NamedThread* _current;
+
+  // Noncopyable.
+  Iterator(const Iterator&);
+  Iterator& operator=(const Iterator&);
+
+public:
+  Iterator();
+  ~Iterator();
+
+  bool end() const { return _current == NULL; }
+  NamedThread* current() const { return _current; }
+  void step();
 };

 // Worker threads are named and have an id of an assigned work.
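
A hedged usage sketch of the new iterator; the body is illustrative only:

  {
    NamedThread::Iterator iter;           // enters the list's critical section
    for ( ; !iter.end(); iter.step()) {
      NamedThread* t = iter.current();
      // 't' may not be a fully constructed subclass; touch only
      // NamedThread-level state, e.g. t->name().
    }
  }  // ~Iterator ends the critical section; a blocked synchronize() may proceed

Because the iterator's scope is a critical section on the list's SingleWriterSynchronizer, the loop body must not block or check for safepoints, per the class comment above.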

View file

@@ -71,5 +71,7 @@ void GlobalCounter::write_synchronize() {
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
     ctc.do_thread(thread);
   }
-  ctc.do_thread(VMThread::vm_thread());
+  for (NamedThread::Iterator nti; !nti.end(); nti.step()) {
+    ctc.do_thread(nti.current());
+  }
 }
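
This is the payoff of the change: write_synchronize() now scans every NamedThread rather than just the VMThread, so any NamedThread may legally hold a GlobalCounter read-side critical section. A minimal sketch using the begin/end functions shown in the next file ("read_shared" is a hypothetical name):

  #include "utilities/globalCounter.inline.hpp"

  void read_shared(Thread* self) {
    // 'self' must be Thread::current(); a NamedThread or JavaThread.
    GlobalCounter::critical_section_begin(self);
    // ... read data protected by GlobalCounter; no nesting, no safepoints ...
    GlobalCounter::critical_section_end(self);
  }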

View file

@@ -31,16 +31,16 @@
 inline void GlobalCounter::critical_section_begin(Thread *thread) {
   assert(thread == Thread::current(), "must be current thread");
-  assert(thread->is_VM_thread() || thread->is_Java_thread(), "must be VMThread or JavaThread");
-  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "nestled critical sections, not supported yet");
+  assert(thread->is_Named_thread() || thread->is_Java_thread(), "must be NamedThread or JavaThread");
+  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "nested critical sections, not supported yet");
   uintx gbl_cnt = OrderAccess::load_acquire(&_global_counter._counter);
   OrderAccess::release_store_fence(thread->get_rcu_counter(), gbl_cnt | COUNTER_ACTIVE);
 }

 inline void GlobalCounter::critical_section_end(Thread *thread) {
   assert(thread == Thread::current(), "must be current thread");
-  assert(thread->is_VM_thread() || thread->is_Java_thread(), "must be VMThread or JavaThread");
-  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in ctitical section");
+  assert(thread->is_Named_thread() || thread->is_Java_thread(), "must be NamedThread or JavaThread");
+  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
   // Mainly for debugging we set it to 'now'.
   uintx gbl_cnt = OrderAccess::load_acquire(&_global_counter._counter);
   OrderAccess::release_store(thread->get_rcu_counter(), gbl_cnt);

View file

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
#include "utilities/macros.hpp"
SingleWriterSynchronizer::SingleWriterSynchronizer() :
_enter(0),
_exit(),
// The initial value of 1 for _waiting_for puts it on the inactive
// track, so no thread exiting a critical section will match it.
_waiting_for(1),
_wakeup()
DEBUG_ONLY(COMMA _writers(0))
{}
// Wait until all threads that entered a critical section before
// synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() {
// Side-effect in assert balanced by debug-only dec at end.
assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
// We don't know anything about the muxing between this invocation
// and invocations in other threads. We must start with the latest
// _enter polarity, else we could clobber the wrong _exit value on
// the first iteration. So fence to ensure everything here follows
// whatever muxing was used.
OrderAccess::fence();
uint value = _enter;
// (1) Determine the old and new exit counters, based on the
// polarity (bit0 value) of the on-entry enter counter.
volatile uint* new_ptr = &_exit[(value + 1) & 1];
// (2) Change the in-use exit counter to the new counter, by adding
// 1 to the enter counter (flipping the polarity), meanwhile
// "simultaneously" initializing the new exit counter to that enter
// value. Note: The new exit counter is not being used by read
// operations until this change of _enter succeeds.
uint old;
do {
old = value;
*new_ptr = ++value;
value = Atomic::cmpxchg(value, &_enter, old);
} while (old != value);
// Critical sections entered before we changed the polarity will use
// the old exit counter. Critical sections entered after the change
// will use the new exit counter.
volatile uint* old_ptr = &_exit[old & 1];
assert(old_ptr != new_ptr, "invariant");
// (3) Inform threads in in-progress critical sections that there is
// a pending synchronize waiting. The thread that completes the
// request (_exit value == old) will signal the _wakeup semaphore to
// allow us to proceed.
_waiting_for = old;
// Write of _waiting_for must precede read of _exit and associated
// conditional semaphore wait. If they were re-ordered then a
// critical section exit could miss the wakeup request, failing to
// signal us while we're waiting.
OrderAccess::fence();
// (4) Wait for all the critical sections started before the change
// to complete, e.g. for the value of old_ptr to catch up with old.
// Loop because there could be pending wakeups unrelated to this
// synchronize request.
while (old != OrderAccess::load_acquire(old_ptr)) {
_wakeup.wait();
}
// (5) Drain any pending wakeups. A critical section exit may have
// completed our request and seen our _waiting_for before we checked
// for completion. There are also possible (though rare) spurious
// wakeup signals in the timing gap between changing the _enter
// polarity and setting _waiting_for. Enough of any of those could
// lead to semaphore overflow. This doesn't guarantee no unrelated
// wakeups for the next wait, but prevents unbounded accumulation.
while (_wakeup.trywait()) {}
DEBUG_ONLY(Atomic::dec(&_writers);)
}
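
The counter algebra can be hard to see from the comments alone: readers add 2 to _enter (preserving bit0), the writer adds 1 (flipping bit0), and each exit counter is preset to the enter value at its flip, so the old-side counter reaches exactly that value once every pre-flip reader has left. Below is a self-contained C++11 model of the same scheme, with std::atomic standing in for Atomic/OrderAccess and a yield loop standing in for the semaphore wakeup; a sketch for illustration, not HotSpot code:

  #include <atomic>
  #include <thread>

  class ToySingleWriterSynchronizer {
    std::atomic<unsigned> _enter;
    std::atomic<unsigned> _exit[2];
  public:
    ToySingleWriterSynchronizer() : _enter(0) {
      _exit[0] = 0;
      _exit[1] = 0;
    }

    unsigned enter() {                  // readers: +2, polarity kept in bit0
      return _enter.fetch_add(2) + 2;
    }

    void exit(unsigned enter_value) {   // readers: +2 on the matching side
      _exit[enter_value & 1] += 2;
    }

    void synchronize() {                // single writer only
      unsigned value = _enter.load();
      for (;;) {
        unsigned flipped = value + 1;   // flip polarity bit0
        _exit[flipped & 1] = flipped;   // preset the new exit counter
        if (_enter.compare_exchange_strong(value, flipped)) break;
        // CAS failure reloaded 'value' with the latest _enter; retry.
      }
      // Every pre-flip reader exits via _exit[value & 1], adding 2; that
      // counter reaches exactly 'value' once they have all left.
      while (_exit[value & 1].load() != value) {
        std::this_thread::yield();      // the real code waits on a semaphore
      }
    }
  };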

View file

@@ -0,0 +1,124 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
#define SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Synchronization primitive inspired by RCU.
//
// Any number of threads may enter critical sections associated with a
// synchronizer object.  One (at a time) other thread may wait for the
// completion of all critical sections for the synchronizer object
// that were extant when the wait was initiated.  Usage is that there
// is some state that can be accessed either before or after some
// change.  An accessing thread performs the access within a critical
// section.  A writer thread performs the state change, and then waits
// for critical sections to complete, thereby ensuring there are no
// threads in a critical section that might have seen the old state.
//
// Generally, GlobalCounter should be used instead of this class, as
// GlobalCounter has measurably better performance and doesn't have
// the single-writer-at-a-time restriction.  Use this only in
// situations where GlobalCounter won't work for some reason, such as
// nesting.  But note that nesting often indicates other problems, and
// may risk deadlock.
class SingleWriterSynchronizer {
  volatile uint _enter;
  volatile uint _exit[2];
  volatile uint _waiting_for;
  Semaphore _wakeup;

  DEBUG_ONLY(volatile uint _writers;)

  // Noncopyable.
  SingleWriterSynchronizer(const SingleWriterSynchronizer&);
  SingleWriterSynchronizer& operator=(const SingleWriterSynchronizer&);

public:
  SingleWriterSynchronizer();

  // Enter a critical section for this synchronizer.  Entering a
  // critical section never blocks.  While in a critical section, a
  // thread should avoid blocking, or even taking a long time.  In
  // particular, a thread must never safepoint while in a critical
  // section.
  // Precondition: The current thread must not already be in a
  // critical section for this synchronizer.
  inline uint enter();

  // Exit a critical section for this synchronizer.
  // Precondition: enter_value must be the result of the corresponding
  // enter() for the critical section.
  inline void exit(uint enter_value);

  // Wait until all threads currently in a critical section for this
  // synchronizer have exited their critical section.  Threads that
  // enter a critical section after the synchronization has started
  // are not considered in the wait.
  // Precondition: No other thread may be synchronizing on this
  // synchronizer.
  void synchronize();

  // RAII class for managing enter/exit pairs.
  class CriticalSection;
};

inline uint SingleWriterSynchronizer::enter() {
  return Atomic::add(2u, &_enter);
}

inline void SingleWriterSynchronizer::exit(uint enter_value) {
  uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
  // If this exit completes a synchronize request, wakeup possibly
  // waiting synchronizer.  Read of _waiting_for must follow the _exit
  // update.
  if (exit_value == _waiting_for) {
    _wakeup.signal();
  }
}

class SingleWriterSynchronizer::CriticalSection : public StackObj {
  SingleWriterSynchronizer* _synchronizer;
  uint _enter_value;

public:
  // Enter synchronizer's critical section.
  explicit CriticalSection(SingleWriterSynchronizer* synchronizer) :
    _synchronizer(synchronizer),
    _enter_value(synchronizer->enter())
  {}

  // Exit synchronizer's critical section.
  ~CriticalSection() {
    _synchronizer->exit(_enter_value);
  }
};

#endif // SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
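
For completeness, a hedged sketch of a manual enter/exit pairing equivalent to the RAII CriticalSection above ("scan" is a hypothetical name); the RAII form is preferred since it cannot forget the exit on an early return:

  void scan(SingleWriterSynchronizer* sync) {
    uint token = sync->enter();   // never blocks; remembers the polarity
    // ... read protected state; keep it short, never safepoint ...
    sync->exit(token);            // may signal a waiting synchronize()
  }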

View file

@@ -0,0 +1,136 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
#include "threadHelper.inline.hpp"
#include "unittest.hpp"
class SingleWriterSynchronizerTestReader : public JavaTestThread {
SingleWriterSynchronizer* _synchronizer;
volatile uintx* _synchronized_value;
volatile int* _continue_running;
static const uint reader_iterations = 10;
public:
SingleWriterSynchronizerTestReader(Semaphore* post,
SingleWriterSynchronizer* synchronizer,
volatile uintx* synchronized_value,
volatile int* continue_running) :
JavaTestThread(post),
_synchronizer(synchronizer),
_synchronized_value(synchronized_value),
_continue_running(continue_running)
{}
virtual void main_run() {
uintx iterations = 0;
while (OrderAccess::load_acquire(_continue_running) != 0) {
++iterations;
SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
uintx value = OrderAccess::load_acquire(_synchronized_value);
for (uint i = 0; i < reader_iterations; ++i) {
uintx new_value = OrderAccess::load_acquire(_synchronized_value);
// A reader can see either the value it first read after
// entering the critical section, or that value + 1. No other
// values are possible.
if (value != new_value) {
ASSERT_EQ((value + 1), new_value);
}
}
}
tty->print_cr("reader iterations: " UINTX_FORMAT, iterations);
}
};
class SingleWriterSynchronizerTestWriter : public JavaTestThread {
SingleWriterSynchronizer* _synchronizer;
volatile uintx* _synchronized_value;
volatile int* _continue_running;
public:
SingleWriterSynchronizerTestWriter(Semaphore* post,
SingleWriterSynchronizer* synchronizer,
volatile uintx* synchronized_value,
volatile int* continue_running) :
JavaTestThread(post),
_synchronizer(synchronizer),
_synchronized_value(synchronized_value),
_continue_running(continue_running)
{}
virtual void main_run() {
while (OrderAccess::load_acquire(_continue_running) != 0) {
++*_synchronized_value;
_synchronizer->synchronize();
}
tty->print_cr("writer iterations: " UINTX_FORMAT, *_synchronized_value);
}
};
const uint nreaders = 5;
const uint milliseconds_to_run = 3000;
TEST_VM(TestSingleWriterSynchronizer, stress) {
Semaphore post;
SingleWriterSynchronizer synchronizer;
volatile uintx synchronized_value = 0;
volatile int continue_running = 1;
JavaTestThread* readers[nreaders] = {};
for (uint i = 0; i < nreaders; ++i) {
readers[i] = new SingleWriterSynchronizerTestReader(&post,
&synchronizer,
&synchronized_value,
&continue_running);
readers[i]->doit();
}
JavaTestThread* writer =
new SingleWriterSynchronizerTestWriter(&post,
&synchronizer,
&synchronized_value,
&continue_running);
writer->doit();
tty->print_cr("Stressing synchronizer for %u ms", milliseconds_to_run);
{
ThreadInVMfromNative invm(JavaThread::current());
os::sleep(Thread::current(), milliseconds_to_run, true);
}
continue_running = 0;
for (uint i = 0; i < nreaders + 1; ++i) {
post.wait();
}
}