6899049: G1: Clean up code in ptrQueue.[ch]pp and ptrQueue.inline.hpp

Simplify indexing, address obsolete code, improve access/type checking.

Reviewed-by: tschatzl, pliden
Kim Barrett 2015-11-04 13:09:57 -05:00
parent f25b7859ef
commit b256989eb3
6 changed files with 148 additions and 115 deletions
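
Before the per-file diffs, the shape of the main cleanup is worth seeing in isolation: the queue buffers are void** arrays, but the old code tracked positions in bytes and converted each byte offset to an element index inside every loop iteration. The patch converts the bounds once and iterates over elements, and switches the scaling unit from oopSize to sizeof(void*), since the buffers hold generic pointers rather than oops. A minimal standalone sketch of the before/after idiom (byte_index_to_index mirrors the patched helper; the surrounding functions are illustrative, not HotSpot code):

#include <cassert>
#include <cstddef>

// Old idiom: walk the buffer in byte strides, converting every byte
// offset to an element index inside the loop.
static void clear_old(void** buf, size_t index, size_t sz) {
  for (size_t i = index; i < sz; i += sizeof(void*)) {
    buf[i / sizeof(void*)] = NULL;
  }
}

// New idiom: convert the byte bounds to element indices once, then
// iterate directly over elements.
static size_t byte_index_to_index(size_t ind) {
  assert((ind % sizeof(void*)) == 0 && "Invariant.");
  return ind / sizeof(void*);
}

static void clear_new(void** buf, size_t index, size_t sz) {
  size_t limit = byte_index_to_index(sz);
  for (size_t i = byte_index_to_index(index); i < limit; ++i) {
    buf[i] = NULL;
  }
}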

dirtyCardQueue.cpp

@@ -32,6 +32,18 @@
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
PtrQueue(qset, permanent, true /* active */)
{ }
DirtyCardQueue::~DirtyCardQueue() {
if (!is_permanent()) {
flush();
}
}
bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
@@ -40,7 +52,9 @@ bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
res = apply_closure_to_buffer(cl, _buf, _index, _sz,
consume,
worker_i);
if (res && consume) _index = _sz;
if (res && consume) {
_index = _sz;
}
}
return res;
}
@@ -51,14 +65,18 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
for (size_t i = index; i < sz; i += oopSize) {
int ind = byte_index_to_index((int)i);
jbyte* card_ptr = (jbyte*)buf[ind];
size_t limit = byte_index_to_index(sz);
for (size_t i = byte_index_to_index(index); i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
if (card_ptr != NULL) {
// Set the entry to null, so we don't do it again (via the test
// above) if we reconsider this buffer.
if (consume) buf[ind] = NULL;
if (!cl->do_card_ptr(card_ptr, worker_i)) return false;
if (consume) {
buf[i] = NULL;
}
if (!cl->do_card_ptr(card_ptr, worker_i)) {
return false;
}
}
}
return true;
@@ -71,7 +89,7 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
PtrQueueSet(notify_when_complete),
_mut_process_closure(NULL),
_shared_dirty_card_queue(this, true /*perm*/),
_shared_dirty_card_queue(this, true /* permanent */),
_free_ids(NULL),
_processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
@@ -83,13 +101,19 @@ uint DirtyCardQueueSet::num_par_ids() {
return (uint)os::processor_count();
}
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner) {
Mutex* lock,
DirtyCardQueueSet* fl_owner) {
_mut_process_closure = cl;
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
max_completed_queue, fl_owner);
PtrQueueSet::initialize(cbl_mon,
fl_lock,
process_completed_threshold,
max_completed_queue,
fl_owner);
set_buffer_size(G1UpdateBufferSize);
_shared_dirty_card_queue.set_lock(lock);
_free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
@@ -160,8 +184,7 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
}
BufferNode*
DirtyCardQueueSet::get_completed_buffer(int stop_at) {
BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
BufferNode* nd = NULL;
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
@@ -178,12 +201,11 @@ DirtyCardQueueSet::get_completed_buffer(int stop_at) {
_n_completed_buffers--;
assert(_n_completed_buffers >= 0, "Invariant");
}
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
return nd;
}
bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
bool DirtyCardQueueSet::apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
uint worker_i,
BufferNode* nd) {
if (nd != NULL) {
@@ -259,7 +281,7 @@ void DirtyCardQueueSet::clear() {
}
_n_completed_buffers = 0;
_completed_buffers_tail = NULL;
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
BufferNode* nd = buffers_to_delete;
@@ -291,10 +313,11 @@ void DirtyCardQueueSet::concatenate_logs() {
for (JavaThread* t = Threads::first(); t; t = t->next()) {
DirtyCardQueue& dcq = t->dirty_card_queue();
if (dcq.size() != 0) {
void **buf = t->dirty_card_queue().get_buf();
void** buf = dcq.get_buf();
// We must NULL out the unused entries, then enqueue.
for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) {
buf[PtrQueue::byte_index_to_index((int)i)] = NULL;
size_t limit = dcq.byte_index_to_index(dcq.get_index());
for (size_t i = 0; i < limit; ++i) {
buf[i] = NULL;
}
enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
dcq.reinitialize();
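
One contract worth calling out in apply_closure_to_buffer above: when consume is true, each slot is NULLed before the closure runs, so a buffer abandoned after an early do_card_ptr failure can be reconsidered later without repeating work. A self-contained sketch of that pattern, with process standing in for CardTableEntryClosure::do_card_ptr (all names here are hypothetical):

#include <cstddef>

// Consume-and-NULL processing over the live portion of a buffer,
// [index, limit) in element units. Cleared slots are skipped if the
// same buffer is ever reprocessed after an early abort.
static bool apply_to_buffer(void** buf, size_t index, size_t limit,
                            bool consume, bool (*process)(void*)) {
  for (size_t i = index; i < limit; ++i) {
    void* entry = buf[i];
    if (entry == NULL) continue;    // already consumed on a prior pass
    if (consume) {
      buf[i] = NULL;                // don't reconsider this entry
    }
    if (!process(entry)) {
      return false;                 // abort; later entries remain intact
    }
  }
  return true;
}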

dirtyCardQueue.hpp

@@ -29,6 +29,7 @@
#include "memory/allocation.hpp"
class FreeIdSet;
class DirtyCardQueueSet;
// A closure class for processing card table entries. Note that we don't
// require these closure objects to be stack-allocated.
@@ -42,14 +43,11 @@ public:
// A ptrQueue whose elements are "oops", pointers to object heads.
class DirtyCardQueue: public PtrQueue {
public:
DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
PtrQueue(qset_, perm, true /* active */) { }
DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);
// Flush before destroying; queue may be used to capture pending work while
// doing something else, with auto-flush on completion.
~DirtyCardQueue() { if (!is_permanent()) flush(); }
~DirtyCardQueue();
// Process queue entries and release resources.
void flush() { flush_impl(); }
@@ -72,7 +70,6 @@ public:
bool consume = true,
uint worker_i = 0);
void **get_buf() { return _buf;}
void set_buf(void **buf) {_buf = buf;}
size_t get_index() { return _index;}
void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
};
@@ -101,10 +98,13 @@ class DirtyCardQueueSet: public PtrQueueSet {
public:
DirtyCardQueueSet(bool notify_when_complete = true);
void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
void initialize(CardTableEntryClosure* cl,
Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner = NULL);
Mutex* lock,
DirtyCardQueueSet* fl_owner = NULL);
// The number of parallel ids that can be claimed to allow collector or
// mutator threads to do card-processing work.
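
The permanent flag, now spelled explicitly at the constructor and checked in the out-of-line destructor, separates long-lived shared queues from transient ones that auto-flush on scope exit. A toy stand-in for that ownership contract (MiniQueue is illustrative, not the HotSpot class):

#include <cstdio>

// Mirrors DirtyCardQueue's permanent/auto-flush contract: transient
// queues flush pending entries in the destructor; permanent ones are
// flushed explicitly by their owner (per the ptrQueue.hpp comment,
// flushing in the destructor may need a lock that is no longer legal
// to take by then).
class MiniQueue {
  const bool _permanent;
public:
  explicit MiniQueue(bool permanent = false) : _permanent(permanent) {}
  void flush() { std::puts("flushed pending entries"); }
  ~MiniQueue() { if (!_permanent) flush(); }
};

int main() {
  { MiniQueue scratch; }                   // auto-flushes on scope exit
  MiniQueue shared(true /* permanent */);  // owner must call flush()
  shared.flush();
  return 0;
}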

ptrQueue.cpp

@@ -30,24 +30,25 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
_qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
_perm(perm), _lock(NULL)
_permanent(permanent), _lock(NULL)
{}
PtrQueue::~PtrQueue() {
assert(_perm || (_buf == NULL), "queue must be flushed before delete");
assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}
void PtrQueue::flush_impl() {
if (!_perm && _buf != NULL) {
if (!_permanent && _buf != NULL) {
if (_index == _sz) {
// No work to do.
qset()->deallocate_buffer(_buf);
} else {
// We must NULL out the unused entries, then enqueue.
for (size_t i = 0; i < _index; i += oopSize) {
_buf[byte_index_to_index((int)i)] = NULL;
size_t limit = byte_index_to_index(_index);
for (size_t i = 0; i < limit; ++i) {
_buf[i] = NULL;
}
qset()->enqueue_complete_buffer(_buf);
}
@@ -66,8 +67,8 @@ void PtrQueue::enqueue_known_active(void* ptr) {
}
assert(_index > 0, "postcondition");
_index -= oopSize;
_buf[byte_index_to_index((int)_index)] = ptr;
_index -= sizeof(void*);
_buf[byte_index_to_index(_index)] = ptr;
assert(_index <= _sz, "Invariant.");
}
@@ -100,6 +101,26 @@ PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
_fl_owner = this;
}
PtrQueueSet::~PtrQueueSet() {
// There are presently only a couple (derived) instances ever
// created, and they are permanent, so no harm currently done by
// doing nothing here.
}
void PtrQueueSet::initialize(Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
PtrQueueSet *fl_owner) {
_max_completed_queue = max_completed_queue;
_process_completed_threshold = process_completed_threshold;
_completed_queue_padding = 0;
assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
_cbl_mon = cbl_mon;
_fl_lock = fl_lock;
_fl_owner = (fl_owner != NULL) ? fl_owner : this;
}
void** PtrQueueSet::allocate_buffer() {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
@@ -233,7 +254,7 @@ void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
if (_notify_when_complete)
_cbl_mon->notify();
}
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
int PtrQueueSet::completed_buffers_list_length() {
@@ -258,7 +279,7 @@ void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
void PtrQueueSet::set_buffer_size(size_t sz) {
assert(_sz == 0 && sz > 0, "Should be called only once.");
_sz = sz * oopSize;
_sz = sz * sizeof(void*);
}
// Merge lists of buffers. Notify the processing threads.
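
enqueue_known_active above shows the other indexing invariant: _index is a byte offset that starts at _sz (empty) and is decremented by sizeof(void*) per entry, so the buffer fills from the end toward slot 0. A toy model of that invariant (illustrative names only, not HotSpot code):

#include <cstddef>
#include <cstdio>

// Downward-growing byte index: index == sz means empty; live entries
// occupy elements [index / sizeof(void*), sz / sizeof(void*)).
struct ToyQueue {
  void** buf;
  size_t sz;     // capacity in bytes
  size_t index;  // fill point in bytes

  bool enqueue(void* p) {
    if (index == 0) return false;  // full; the real code installs a new buffer
    index -= sizeof(void*);
    buf[index / sizeof(void*)] = p;
    return true;
  }
};

int main() {
  void* storage[4] = {};
  ToyQueue q = { storage, sizeof(storage), sizeof(storage) };
  int dummy;
  q.enqueue(&dummy);
  std::printf("live slots: [%zu, %zu)\n",
              q.index / sizeof(void*), q.sz / sizeof(void*));
  return 0;
}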

ptrQueue.hpp

@@ -40,44 +40,49 @@ class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
protected:
// Noncopyable - not defined.
PtrQueue(const PtrQueue&);
PtrQueue& operator=(const PtrQueue&);
// The ptr queue set to which this queue belongs.
PtrQueueSet* _qset;
PtrQueueSet* const _qset;
// Whether updates should be logged.
bool _active;
// The buffer.
void** _buf;
// The index at which an object was last enqueued. Starts at "_sz"
// (indicating an empty buffer) and goes towards zero.
size_t _index;
// The size of the buffer.
size_t _sz;
// If true, the queue is permanent, and doesn't need to deallocate
// its buffer in the destructor (since that obtains a lock which may not
// be legally locked by then).
bool _perm;
const bool _permanent;
protected:
// The buffer.
void** _buf;
// The (byte) index at which an object was last enqueued. Starts at "_sz"
// (indicating an empty buffer) and goes towards zero.
size_t _index;
// The (byte) size of the buffer.
size_t _sz;
// If there is a lock associated with this buffer, this is that lock.
Mutex* _lock;
PtrQueueSet* qset() { return _qset; }
bool is_permanent() const { return _perm; }
bool is_permanent() const { return _permanent; }
// Process queue entries and release resources, if not permanent.
void flush_impl();
public:
// Initialize this queue to contain a null buffer, and be part of the
// given PtrQueueSet.
PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
// Requires queue flushed or permanent.
~PtrQueue();
public:
// Associate a lock with a ptr queue.
void set_lock(Mutex* lock) { _lock = lock; }
@@ -129,13 +134,9 @@ public:
bool is_active() { return _active; }
static int byte_index_to_index(int ind) {
assert((ind % oopSize) == 0, "Invariant.");
return ind / oopSize;
}
static int index_to_byte_index(int byte_ind) {
return byte_ind * oopSize;
static size_t byte_index_to_index(size_t ind) {
assert((ind % sizeof(void*)) == 0, "Invariant.");
return ind / sizeof(void*);
}
// To support compiler.
@@ -246,26 +247,21 @@ protected:
return false;
}
public:
// Create an empty ptr queue set.
PtrQueueSet(bool notify_when_complete = false);
~PtrQueueSet();
// Because of init-order concerns, we can't pass these as constructor
// arguments.
void initialize(Monitor* cbl_mon, Mutex* fl_lock,
void initialize(Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
PtrQueueSet *fl_owner = NULL) {
_max_completed_queue = max_completed_queue;
_process_completed_threshold = process_completed_threshold;
_completed_queue_padding = 0;
assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
_cbl_mon = cbl_mon;
_fl_lock = fl_lock;
_fl_owner = (fl_owner != NULL) ? fl_owner : this;
}
PtrQueueSet *fl_owner = NULL);
// Return an empty oop array of size _sz (required to be non-zero).
public:
// Return an empty array of size _sz (required to be non-zero).
void** allocate_buffer();
// Return an empty buffer to the free list. The "buf" argument is
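
The newly declared-but-undefined copy constructor and assignment operator make PtrQueue noncopyable using the idiom available before C++11 (HotSpot did not yet use = delete). The pattern in isolation:

// Pre-C++11 noncopyable idiom: declare the copy operations private and
// never define them. Copies fail to compile from outside the class,
// or to link if attempted from within it.
class Noncopyable {
private:
  Noncopyable(const Noncopyable&);             // not defined
  Noncopyable& operator=(const Noncopyable&);  // not defined
public:
  Noncopyable() {}
};
// Noncopyable a, b;  // "a = b;" would fail to compile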

satbQueue.cpp

@@ -33,6 +33,15 @@
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
ObjPtrQueue::ObjPtrQueue(SATBMarkQueueSet* qset, bool permanent) :
// SATB queues are only active during marking cycles. We create
// them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active
// field to true. This is done in JavaThread::initialize_queues().
PtrQueue(qset, permanent, false /* active */)
{ }
void ObjPtrQueue::flush() {
// Filter now to possibly save work later. If filtering empties the
// buffer then flush_impl can deallocate the buffer.
@@ -99,7 +108,6 @@ inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
void** buf = _buf;
size_t sz = _sz;
if (buf == NULL) {
// nothing to do
@@ -107,43 +115,37 @@ void ObjPtrQueue::filter() {
}
// Used for sanity checking at the end of the loop.
debug_only(size_t entries = 0; size_t retained = 0;)
DEBUG_ONLY(size_t entries = 0; size_t retained = 0;)
size_t i = sz;
size_t new_index = sz;
assert(_index <= _sz, "invariant");
void** limit = &buf[byte_index_to_index(_index)];
void** src = &buf[byte_index_to_index(_sz)];
void** dst = src;
while (i > _index) {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
debug_only(entries += 1;)
void** p = &buf[byte_index_to_index((int) i)];
void* entry = *p;
while (limit < src) {
DEBUG_ONLY(entries += 1;)
--src;
void* entry = *src;
// NULL the entry so that unused parts of the buffer contain NULLs
// at the end. If we are going to retain it we will copy it to its
// final place. If we have retained all entries we have visited so
// far, we'll just end up copying it to the same place.
*p = NULL;
*src = NULL;
if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
assert(new_index > 0, "we should not have already filled up the buffer");
new_index -= oopSize;
assert(new_index >= i,
"new_index should never be below i, as we always compact 'up'");
void** new_p = &buf[byte_index_to_index((int) new_index)];
assert(new_p >= p, "the destination location should never be below "
"the source as we always compact 'up'");
assert(*new_p == NULL,
"we should have already cleared the destination location");
*new_p = entry;
debug_only(retained += 1;)
--dst;
assert(*dst == NULL, "filtering destination should be clear");
*dst = entry;
DEBUG_ONLY(retained += 1;);
}
}
size_t new_index = pointer_delta(dst, buf, 1);
#ifdef ASSERT
size_t entries_calc = (sz - _index) / oopSize;
size_t entries_calc = (_sz - _index) / sizeof(void*);
assert(entries == entries_calc, "the number of entries we counted "
"should match the number of entries we calculated");
size_t retained_calc = (sz - new_index) / oopSize;
size_t retained_calc = (_sz - new_index) / sizeof(void*);
assert(retained == retained_calc, "the number of retained entries we counted "
"should match the number of retained entries we calculated");
#endif // ASSERT
@@ -170,11 +172,8 @@ bool ObjPtrQueue::should_enqueue_buffer() {
filter();
size_t sz = _sz;
size_t all_entries = sz / oopSize;
size_t retained_entries = (sz - _index) / oopSize;
size_t perc = retained_entries * 100 / all_entries;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
size_t percent_used = ((_sz - _index) * 100) / _sz;
bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
return should_enqueue;
}
@@ -185,8 +184,8 @@ void ObjPtrQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
assert(_index % sizeof(void*) == 0, "invariant");
assert(_sz % sizeof(void*) == 0, "invariant");
assert(_index <= _sz, "invariant");
cl->do_buffer(_buf + byte_index_to_index((int)_index),
byte_index_to_index((int)(_sz - _index)));
cl->do_buffer(_buf + byte_index_to_index(_index),
byte_index_to_index(_sz - _index));
_index = _sz;
}
}
@@ -212,7 +211,7 @@ void ObjPtrQueue::print(const char* name,
SATBMarkQueueSet::SATBMarkQueueSet() :
PtrQueueSet(),
_shared_satb_queue(this, true /*perm*/) { }
_shared_satb_queue(this, true /* permanent */) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
int process_completed_threshold,
@@ -299,7 +298,7 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
// Filtering can result in non-full completed buffers; see
// should_enqueue_buffer.
assert(_sz % sizeof(void*) == 0, "invariant");
size_t limit = ObjPtrQueue::byte_index_to_index((int)_sz);
size_t limit = ObjPtrQueue::byte_index_to_index(_sz);
for (size_t i = 0; i < limit; ++i) {
if (buf[i] != NULL) {
// Found the end of the block of NULLs; process the remainder.
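
The filter() rewrite above drops byte-offset bookkeeping in favor of three cursors into the buffer: limit (the oldest live entry), src (scanning down from the end), and dst (receiving retained entries, compacting toward the end). A self-contained model of that loop, with keep standing in for the requires_marking/isMarkedNext test (names are illustrative):

#include <cassert>
#include <cstddef>

// Compact retained entries toward the end of buf, NULLing everything
// else; returns the new fill point in elements. The patch itself keeps
// a byte index, recovered as pointer_delta(dst, buf, 1).
static size_t filter_compact(void** buf, size_t index, size_t sz,
                             bool (*keep)(void*)) {
  void** limit = buf + index;  // oldest live entry
  void** src = buf + sz;       // one past the newest entry
  void** dst = src;
  while (limit < src) {
    --src;
    void* entry = *src;
    *src = NULL;               // leave NULLs behind the compacted block
    if (entry != NULL && keep(entry)) {
      --dst;                   // dst never crosses src: we compact "up"
      assert(*dst == NULL && "filtering destination should be clear");
      *dst = entry;
    }
  }
  return (size_t)(dst - buf);
}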

satbQueue.hpp

@@ -50,13 +50,7 @@ private:
void filter();
public:
ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
// SATB queues are only active during marking cycles. We create
// them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active
// field to true. This is done in JavaThread::initialize_queues().
PtrQueue(qset, perm, false /* active */) { }
ObjPtrQueue(SATBMarkQueueSet* qset, bool permanent = false);
// Process queue entries and free resources.
void flush();