8178836: Improve PtrQueue index abstraction

Prefer element indexes where possible.

Reviewed-by: shade, mgerdin
Kim Barrett 2017-05-08 07:16:10 -04:00
parent 0e0b42b18a
commit a65b58e18d
6 changed files with 140 additions and 97 deletions
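The core of the change is in PtrQueue (see the ptrQueue.hpp hunks below): the queue still stores a byte offset _index that counts down from the buffer's byte capacity, because interpreter- and JIT-generated barrier code updates that field directly (the "To support compiler" accessors), but everything else now goes through element-index accessors, and PtrQueueSet::buffer_size() and BufferNode::allocate() are expressed in entries rather than bytes. As a minimal, self-contained sketch of that pattern (simplified names, not the HotSpot code itself):

#include <cassert>
#include <cstddef>

// Sketch: the hot path keeps a byte offset that counts down from the byte
// capacity (empty) to 0 (full); all other bookkeeping is done in element
// indexes via small conversion helpers.
class PointerBuffer {
  static const size_t _element_size = sizeof(void*);
  void** _buf;
  size_t _index;              // byte index of the last enqueued element
  size_t _capacity_in_bytes;

  static size_t byte_index_to_index(size_t i) {
    assert(i % _element_size == 0);
    return i / _element_size;
  }
  static size_t index_to_byte_index(size_t i) { return i * _element_size; }

public:
  PointerBuffer(void** buf, size_t capacity_in_elements)
    : _buf(buf),
      _index(index_to_byte_index(capacity_in_elements)),            // empty
      _capacity_in_bytes(index_to_byte_index(capacity_in_elements)) {}

  size_t index() const    { return byte_index_to_index(_index); }
  size_t capacity() const { return byte_index_to_index(_capacity_in_bytes); }
  size_t size() const     { return capacity() - index(); }
  bool is_empty() const   { return _index == _capacity_in_bytes; }

  void enqueue(void* p) {
    assert(_index > 0);       // a full buffer is handled by the caller
    _index -= _element_size;  // hot path stays byte-based
    _buf[index()] = p;        // everyone else sees element indexes
  }
};

Confining the byte arithmetic to byte_index_to_index()/index_to_byte_index() lets consumers such as DirtyCardQueueSet and SATBMarkQueueSet drop their own conversions, as the hunks below show.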

src/share/vm/gc/g1/dirtyCardQueue.cpp

@@ -157,8 +157,8 @@ bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
   if (cl == NULL) return true;
   bool result = true;
   void** buf = BufferNode::make_buffer_from_node(node);
-  size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
-  size_t i = DirtyCardQueue::byte_index_to_index(node->index());
+  size_t i = node->index();
+  size_t limit = buffer_size();
   for ( ; i < limit; ++i) {
     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
@@ -168,9 +168,8 @@ bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
     }
   }
   if (consume) {
-    size_t new_index = DirtyCardQueue::index_to_byte_index(i);
-    assert(new_index <= buffer_size(), "invariant");
-    node->set_index(new_index);
+    assert(i <= buffer_size(), "invariant");
+    node->set_index(i);
   }
   return result;
 }
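The consumer side benefits directly: a completed BufferNode now records an element index, so the drain loop above indexes the buffer without any byte-to-entry conversion. A simplified sketch of that consumer pattern, assuming a plain void** buffer and a caller-supplied callback rather than the actual CardTableEntryClosure interface:

#include <cstddef>

// Walk buf[index, limit) and return the index of the first unprocessed
// element; an interrupted drain can resume from the returned value, which
// plays the role of node->set_index(i) above.
template <typename Processor>
size_t drain_buffer(void** buf, size_t index, size_t limit, Processor process) {
  size_t i = index;
  for ( ; i < limit; ++i) {
    if (!process(buf[i])) {
      break;  // incomplete processing; resume later from i
    }
  }
  return i;
}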

src/share/vm/gc/g1/g1CollectedHeap.cpp

@@ -2781,10 +2781,7 @@ size_t G1CollectedHeap::pending_card_num() {
   size_t buffer_size = dcqs.buffer_size();
   size_t buffer_num = dcqs.completed_buffers_num();
-  // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
-  // in bytes - not the number of 'entries'. We need to convert
-  // into a number of cards.
-  return (buffer_size * buffer_num + extra_cards) / oopSize;
+  return buffer_size * buffer_num + extra_cards;
 }
 
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
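With buffer_size() now reporting entries (cards) rather than bytes, and PtrQueue::size() likewise returning entries (see the ptrQueue.hpp hunk further down), pending_card_num() no longer needs to divide the byte total by oopSize; the sum is already a card count.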

src/share/vm/gc/g1/ptrQueue.cpp

@@ -33,8 +33,13 @@
 #include <new>
 
 PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
-  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
-  _permanent(permanent), _lock(NULL)
+  _qset(qset),
+  _active(active),
+  _permanent(permanent),
+  _index(0),
+  _capacity_in_bytes(0),
+  _buf(NULL),
+  _lock(NULL)
 {}
 
 PtrQueue::~PtrQueue() {
@@ -43,7 +48,7 @@ PtrQueue::~PtrQueue() {
 
 void PtrQueue::flush_impl() {
   if (_buf != NULL) {
-    BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
     if (is_empty()) {
       // No work to do.
       qset()->deallocate_buffer(node);
@@ -51,23 +56,21 @@ void PtrQueue::flush_impl() {
       qset()->enqueue_complete_buffer(node);
     }
     _buf = NULL;
-    _index = 0;
+    set_index(0);
   }
 }
 
 void PtrQueue::enqueue_known_active(void* ptr) {
-  assert(_index <= _sz, "Invariant.");
-  assert(_index == 0 || _buf != NULL, "invariant");
-
   while (_index == 0) {
     handle_zero_index();
   }
 
-  assert(_index > 0, "postcondition");
-  _index -= sizeof(void*);
-  _buf[byte_index_to_index(_index)] = ptr;
-  assert(_index <= _sz, "Invariant.");
+  assert(_buf != NULL, "postcondition");
+  assert(index() > 0, "postcondition");
+  assert(index() <= capacity(), "invariant");
+  _index -= _element_size;
+  _buf[index()] = ptr;
 }
 
 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
@@ -85,10 +88,8 @@ void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
 }
 
-BufferNode* BufferNode::allocate(size_t byte_size) {
-  assert(byte_size > 0, "precondition");
-  assert(is_size_aligned(byte_size, sizeof(void**)),
-         "Invalid buffer size " SIZE_FORMAT, byte_size);
+BufferNode* BufferNode::allocate(size_t size) {
+  size_t byte_size = size * sizeof(void*);
   void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
   return new (data) BufferNode;
 }
@@ -99,10 +100,10 @@ void BufferNode::deallocate(BufferNode* node) {
 }
 
 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
+  _buffer_size(0),
   _max_completed_queue(0),
   _cbl_mon(NULL), _fl_lock(NULL),
   _notify_when_complete(notify_when_complete),
-  _sz(0),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
@@ -133,7 +134,6 @@ void PtrQueueSet::initialize(Monitor* cbl_mon,
 }
 
 void** PtrQueueSet::allocate_buffer() {
-  assert(_sz > 0, "Didn't set a buffer size.");
   BufferNode* node = NULL;
   {
     MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
@@ -144,7 +144,7 @@ void** PtrQueueSet::allocate_buffer() {
     }
   }
   if (node == NULL) {
-    node = BufferNode::allocate(_sz);
+    node = BufferNode::allocate(buffer_size());
   } else {
     // Reinitialize buffer obtained from free list.
     node->set_index(0);
@@ -154,7 +154,6 @@ void** PtrQueueSet::allocate_buffer() {
 }
 
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
-  assert(_sz > 0, "Didn't set a buffer size.");
   MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
   node->set_next(_fl_owner->_buf_free_list);
   _fl_owner->_buf_free_list = node;
@@ -177,13 +176,13 @@ void PtrQueueSet::reduce_free_list() {
 }
 
 void PtrQueue::handle_zero_index() {
-  assert(_index == 0, "Precondition.");
+  assert(index() == 0, "precondition");
 
   // This thread records the full buffer and allocates a new one (while
   // holding the lock if there is one).
   if (_buf != NULL) {
     if (!should_enqueue_buffer()) {
-      assert(_index > 0, "the buffer can only be re-used if it's not full");
+      assert(index() > 0, "the buffer can only be re-used if it's not full");
       return;
     }
@@ -206,7 +205,7 @@ void PtrQueue::handle_zero_index() {
       // preventing the subsequent the multiple enqueue, and
       // install a newly allocated buffer below.
 
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       _buf = NULL; // clear shared _buf field
 
       locking_enqueue_completed_buffer(node); // enqueue completed buffer
@@ -219,20 +218,21 @@ void PtrQueue::handle_zero_index() {
       if (_buf != NULL) return;
     } else {
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       if (qset()->process_or_enqueue_complete_buffer(node)) {
         // Recycle the buffer. No allocation.
         assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
-        assert(_sz == qset()->buffer_size(), "invariant");
-        _index = _sz;
+        assert(capacity() == qset()->buffer_size(), "invariant");
+        reset();
         return;
       }
     }
   }
-  // Reallocate the buffer
+  // Set capacity in case this is the first allocation.
+  set_capacity(qset()->buffer_size());
+  // Allocate a new buffer.
   _buf = qset()->allocate_buffer();
-  _sz = qset()->buffer_size();
-  _index = _sz;
+  reset();
 }
 
 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
@@ -296,8 +296,8 @@ void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
 }
 
 void PtrQueueSet::set_buffer_size(size_t sz) {
-  assert(_sz == 0 && sz > 0, "Should be called only once.");
-  _sz = sz * sizeof(void*);
+  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
+  _buffer_size = sz;
 }
 
 // Merge lists of buffers. Notify the processing threads.

src/share/vm/gc/g1/ptrQueue.hpp

@@ -53,15 +53,57 @@ class PtrQueue VALUE_OBJ_CLASS_SPEC {
   // be legally locked by then.
   const bool _permanent;
 
+  // The (byte) index at which an object was last enqueued. Starts at
+  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
+  // Value is always pointer-size aligned.
+  size_t _index;
+
+  // Size of the current buffer, in bytes.
+  // Value is always pointer-size aligned.
+  size_t _capacity_in_bytes;
+
+  static const size_t _element_size = sizeof(void*);
+
+  // Get the capacity, in bytes. The capacity must have been set.
+  size_t capacity_in_bytes() const {
+    assert(_capacity_in_bytes > 0, "capacity not set");
+    return _capacity_in_bytes;
+  }
+
+  void set_capacity(size_t entries) {
+    size_t byte_capacity = index_to_byte_index(entries);
+    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
+           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
+           _capacity_in_bytes, byte_capacity);
+    _capacity_in_bytes = byte_capacity;
+  }
+
+  static size_t byte_index_to_index(size_t ind) {
+    assert(is_size_aligned(ind, _element_size), "precondition");
+    return ind / _element_size;
+  }
+
+  static size_t index_to_byte_index(size_t ind) {
+    return ind * _element_size;
+  }
+
 protected:
   // The buffer.
   void** _buf;
-  // The (byte) index at which an object was last enqueued. Starts at "_sz"
-  // (indicating an empty buffer) and goes towards zero.
-  size_t _index;
 
-  // The (byte) size of the buffer.
-  size_t _sz;
+  size_t index() const {
+    return byte_index_to_index(_index);
+  }
+
+  void set_index(size_t new_index) {
+    size_t byte_index = index_to_byte_index(new_index);
+    assert(byte_index <= capacity_in_bytes(), "precondition");
+    _index = byte_index;
+  }
+
+  size_t capacity() const {
+    return byte_index_to_index(capacity_in_bytes());
+  }
 
   // If there is a lock associated with this buffer, this is that lock.
   Mutex* _lock;
@@ -84,7 +126,12 @@ public:
   // Associate a lock with a ptr queue.
   void set_lock(Mutex* lock) { _lock = lock; }
 
-  void reset() { if (_buf != NULL) _index = _sz; }
+  // Forcibly set empty.
+  void reset() {
+    if (_buf != NULL) {
+      _index = capacity_in_bytes();
+    }
+  }
 
   void enqueue(volatile void* ptr) {
     enqueue((void*)(ptr));
@@ -109,13 +156,18 @@ public:
   void enqueue_known_active(void* ptr);
 
-  size_t size() {
-    assert(_sz >= _index, "Invariant.");
-    return _buf == NULL ? 0 : _sz - _index;
+  // Return the size of the in-use region.
+  size_t size() const {
+    size_t result = 0;
+    if (_buf != NULL) {
+      assert(_index <= capacity_in_bytes(), "Invariant");
+      result = byte_index_to_index(capacity_in_bytes() - _index);
+    }
+    return result;
   }
 
-  bool is_empty() {
-    return _buf == NULL || _sz == _index;
+  bool is_empty() const {
+    return _buf == NULL || capacity_in_bytes() == _index;
   }
 
   // Set the "active" property of the queue to "b". An enqueue to an
@@ -124,22 +176,14 @@ public:
   void set_active(bool b) {
     _active = b;
     if (!b && _buf != NULL) {
-      _index = _sz;
+      reset();
     } else if (b && _buf != NULL) {
-      assert(_index == _sz, "invariant: queues are empty when activated.");
+      assert(index() == capacity(),
+             "invariant: queues are empty when activated.");
     }
   }
 
-  bool is_active() { return _active; }
-
-  static size_t byte_index_to_index(size_t ind) {
-    assert((ind % sizeof(void*)) == 0, "Invariant.");
-    return ind / sizeof(void*);
-  }
-
-  static size_t index_to_byte_index(size_t ind) {
-    return ind * sizeof(void*);
-  }
+  bool is_active() const { return _active; }
 
   // To support compiler.
@@ -156,7 +200,7 @@ protected:
     return byte_offset_of(Derived, _buf);
   }
 
-  static ByteSize byte_width_of_buf() { return in_ByteSize(sizeof(void*)); }
+  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 
   template<typename Derived>
   static ByteSize byte_offset_of_active() {
@@ -183,10 +227,10 @@ public:
   BufferNode* next() const { return _next; }
   void set_next(BufferNode* n) { _next = n; }
   size_t index() const { return _index; }
   void set_index(size_t i) { _index = i; }
 
-  // Allocate a new BufferNode with the "buffer" having size bytes.
-  static BufferNode* allocate(size_t byte_size);
+  // Allocate a new BufferNode with the "buffer" having size elements.
+  static BufferNode* allocate(size_t size);
 
   // Free a BufferNode.
   static void deallocate(BufferNode* node);
@@ -213,6 +257,10 @@ public:
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
+private:
+  // The size of all buffers in the set.
+  size_t _buffer_size;
+
 protected:
   Monitor* _cbl_mon; // Protects the fields below.
   BufferNode* _completed_buffers_head;
@@ -230,9 +278,6 @@ protected:
   // specifies the owner. It is set to "this" by default.
   PtrQueueSet* _fl_owner;
 
-  // The size of all buffers in the set.
-  size_t _sz;
-
   bool _all_active;
 
   // If true, notify_all on _cbl_mon when the threshold is reached.
@@ -270,11 +315,11 @@ protected:
 public:
 
-  // Return an empty array of size _sz (required to be non-zero).
+  // Return the buffer for a BufferNode of size buffer_size().
   void** allocate_buffer();
 
-  // Return an empty buffer to the free list. The "buf" argument is
-  // required to be a pointer to the head of an array of length "_sz".
+  // Return an empty buffer to the free list. The node is required
+  // to have been allocated with a size of buffer_size().
   void deallocate_buffer(BufferNode* node);
 
   // Declares that "buf" is a complete buffer.
@@ -296,8 +341,11 @@ public:
   // can be called. And should only be called once.
   void set_buffer_size(size_t sz);
 
-  // Get the buffer size.
-  size_t buffer_size() { return _sz; }
+  // Get the buffer size. Must have been set.
+  size_t buffer_size() const {
+    assert(_buffer_size > 0, "buffer size not set");
+    return _buffer_size;
+  }
 
   // Get/Set the number of completed buffers that triggers log processing.
   void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }

src/share/vm/gc/g1/satbMarkQueue.cpp

@@ -118,11 +118,10 @@ void SATBMarkQueue::filter() {
     return;
   }
 
-  assert(_index <= _sz, "invariant");
-
   // Two-fingered compaction toward the end.
-  void** src = &buf[byte_index_to_index(_index)];
-  void** dst = &buf[byte_index_to_index(_sz)];
+  void** src = &buf[index()];
+  void** dst = &buf[capacity()];
+  assert(src <= dst, "invariant");
   for ( ; src < dst; ++src) {
     // Search low to high for an entry to keep.
     void* entry = *src;
@@ -139,7 +138,7 @@ void SATBMarkQueue::filter() {
   }
   // dst points to the lowest retained entry, or the end of the buffer
   // if all the entries were filtered out.
-  _index = pointer_delta(dst, buf, 1);
+  set_index(dst - buf);
 }
 
 // This method will first apply the above filtering to the buffer. If
@@ -156,12 +155,13 @@ bool SATBMarkQueue::should_enqueue_buffer() {
   // This method should only be called if there is a non-NULL buffer
   // that is full.
-  assert(_index == 0, "pre-condition");
+  assert(index() == 0, "pre-condition");
   assert(_buf != NULL, "pre-condition");
 
   filter();
 
-  size_t percent_used = ((_sz - _index) * 100) / _sz;
+  size_t cap = capacity();
+  size_t percent_used = ((cap - index()) * 100) / cap;
   bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent;
   return should_enqueue;
 }
@@ -170,27 +170,27 @@ void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
   assert(SafepointSynchronize::is_at_safepoint(),
          "SATB queues must only be processed at safepoints");
   if (_buf != NULL) {
-    assert(_index % sizeof(void*) == 0, "invariant");
-    assert(_sz % sizeof(void*) == 0, "invariant");
-    assert(_index <= _sz, "invariant");
-    cl->do_buffer(_buf + byte_index_to_index(_index),
-                  byte_index_to_index(_sz - _index));
-    _index = _sz;
+    cl->do_buffer(&_buf[index()], size());
+    reset();
   }
 }
 
 #ifndef PRODUCT
 // Helpful for debugging
-void SATBMarkQueue::print(const char* name) {
-  print(name, _buf, _index, _sz);
+static void print_satb_buffer(const char* name,
+                              void** buf,
+                              size_t index,
+                              size_t capacity) {
+  tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
+                " capacity: " SIZE_FORMAT,
+                name, p2i(buf), index, capacity);
 }
 
-void SATBMarkQueue::print(const char* name,
-                          void** buf, size_t index, size_t sz) {
-  tty->print_cr(" SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT " sz: " SIZE_FORMAT,
-                name, p2i(buf), index, sz);
+void SATBMarkQueue::print(const char* name) {
+  print_satb_buffer(name, _buf, index(), capacity());
 }
 #endif // PRODUCT
 
 SATBMarkQueueSet::SATBMarkQueueSet() :
@@ -275,8 +275,8 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
   }
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
-    size_t index = SATBMarkQueue::byte_index_to_index(nd->index());
-    size_t size = SATBMarkQueue::byte_index_to_index(_sz);
+    size_t index = nd->index();
+    size_t size = buffer_size();
     assert(index <= size, "invariant");
     cl->do_buffer(buf + index, size - index);
     deallocate_buffer(nd);
@@ -303,7 +303,7 @@ void SATBMarkQueueSet::print_all(const char* msg) {
     while (nd != NULL) {
      void** buf = BufferNode::make_buffer_from_node(nd);
       jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
-      SATBMarkQueue::print(buffer, buf, 0, _sz);
+      print_satb_buffer(buffer, buf, nd->index(), buffer_size());
       nd = nd->next();
       i += 1;
     }

src/share/vm/gc/g1/satbMarkQueue.hpp

@@ -66,7 +66,6 @@ public:
 #ifndef PRODUCT
   // Helpful for debugging
   void print(const char* name);
-  static void print(const char* name, void** buf, size_t index, size_t sz);
 #endif // PRODUCT
 
   // Compiler support.