8236778: Add Atomic::fetch_and_add
Reviewed-by: kbarrett, dholmes
parent 5013cf6e0c
commit 17106c9e9d
32 changed files with 152 additions and 146 deletions
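The patch gives HotSpot's Atomic class an explicit fetch_and_add() entry point that returns the value the destination held before the addition, while Atomic::add() keeps returning the updated value. Every platform's PlatformAdd now provides both member functions directly instead of inheriting one of them from the FetchAndAdd/AddAndFetch helper bases, and call sites that used to recover the old value by writing Atomic::add(&x, v) - v now simply call Atomic::fetch_and_add(&x, v). As a minimal standalone sketch of the two return conventions, using std::atomic rather than HotSpot's Atomic class so it compiles on its own:

// Standalone sketch (std::atomic, not HotSpot code): fetch_add returns the
// value *before* the addition, which is the behaviour the new
// Atomic::fetch_and_add exposes; Atomic::add keeps returning the value
// *after* the addition.
#include <atomic>
#include <cassert>

int main() {
  std::atomic<unsigned> counter{10};

  unsigned old_value = counter.fetch_add(1);        // value before the add: 10
  assert(old_value == 10);
  assert(counter.load() == 11);

  unsigned new_value = counter.fetch_add(1) + 1;    // "add and fetch" spelled by hand: 12
  assert(new_value == 12);
  return 0;
}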
@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
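The same reshaping repeats for every port below: ports whose primitive is add-and-fetch (as in the hunk above) derive fetch_and_add by subtracting add_value, while the ports that previously derived from Atomic::FetchAndAdd keep fetch_and_add as the primitive and derive add_and_fetch by adding it back. A rough standalone model of the first shape, assuming the GCC/Clang __atomic builtins and using invented names (this is not the JDK source):

// Sketch only: a platform whose native primitive returns the new value can
// still expose both entry points by layering fetch_and_add on top.
#include <cstddef>

template<size_t byte_size>
struct SketchPlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value) const {
    return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);   // new value
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value) const {
    return add_and_fetch(dest, add_value) - add_value;              // recover old value
  }
};

int main() {
  volatile long value = 5;
  long previous = SketchPlatformAdd<sizeof(long)>().fetch_and_add(&value, 3L);
  return (previous == 5 && value == 8) ? 0 : 1;
}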
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
@@ -160,11 +160,14 @@ static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 #endif // ARM
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -33,15 +33,18 @@
 // See https://patchwork.kernel.org/patch/3575821/
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
   }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<size_t byte_size>
@@ -67,11 +67,14 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -75,11 +75,14 @@ inline void z196_fast_sync() {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
@@ -31,11 +31,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  inline D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -41,6 +41,11 @@ struct Atomic::PlatformAdd {
     }
     return old_value + add_value;
   }
+
+  template<typename D, typename I>
+  inline D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
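The port above has no single hardware primitive and builds add_and_fetch with a retry loop (presumably around a compare-and-swap; the middle of the function is outside the hunk), then layers fetch_and_add on top in the usual way. A standalone sketch of that loop shape with std::atomic, using hypothetical helper names and compare_exchange_weak in place of HotSpot's cmpxchg:

#include <atomic>
#include <cassert>

template<typename T>
T add_and_fetch(std::atomic<T>& dest, T add_value) {
  T old_value = dest.load();
  while (true) {
    T new_value = old_value + add_value;
    // On failure, old_value is reloaded with the current value, so just retry.
    if (dest.compare_exchange_weak(old_value, new_value)) {
      return new_value;
    }
  }
}

template<typename T>
T fetch_and_add(std::atomic<T>& dest, T add_value) {
  return add_and_fetch(dest, add_value) - add_value;
}

int main() {
  std::atomic<int> x{41};
  assert(fetch_and_add(x, 1) == 41);   // old value
  assert(x.load() == 42);              // new value stored
  return 0;
}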
@@ -41,11 +41,14 @@ extern "C" {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
@@ -54,11 +54,14 @@ template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fe
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 #ifdef AMD64
@@ -209,7 +209,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
+  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -282,7 +282,7 @@ void G1CMRootMemRegions::reset() {
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
+  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -310,7 +310,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() {
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
+  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }
@@ -70,7 +70,7 @@ CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
+  size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
   if (index == _hot_cache_size) {
     // Can use relaxed store because all racing threads are writing the same
     // value and there aren't any concurrent readers.
@@ -261,7 +261,7 @@ public:
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
+      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
@@ -180,7 +180,7 @@ private:
 
     bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
     if (marked_as_dirty) {
-      uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
+      uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u);
       _buffer[allocated] = region;
     }
   }
@@ -232,7 +232,7 @@ private:
 
   void work(uint worker_id) {
     while (_cur_dirty_regions < _regions->size()) {
-      uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
+      uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length);
       uint max = MIN2(next + _chunk_length, _regions->size());
 
       for (uint i = next; i < max; i++) {
@@ -429,7 +429,7 @@ public:
 
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
+    return Atomic::fetch_and_add(&_card_table_scan_state[region], increment);
   }
 
   void add_dirty_region(uint const region) {
@@ -2452,7 +2452,7 @@ public:
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::fetch_and_add(&_counter, 1u);
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;
@@ -32,7 +32,7 @@ StringDedupQueue* StringDedupQueue::_queue = NULL;
 volatile size_t StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(&_claimed_index, size_t(1)) - 1;
+  return Atomic::fetch_and_add(&_claimed_index, 1u);
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
@@ -592,7 +592,7 @@ void StringDedupTable::finish_rehash(StringDedupTable* rehashed_table) {
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(&_claimed_index, partition_size) - partition_size;
+  return Atomic::fetch_and_add(&_claimed_index, partition_size);
 }
 
 void StringDedupTable::verify() {
@@ -1362,7 +1362,7 @@ public:
 
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(&_index, stride) - stride;
+      size_t cur = Atomic::fetch_and_add(&_index, stride);
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;
@@ -484,7 +484,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
   ShenandoahNMethod** list = _array;
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
@@ -90,7 +90,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
 
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
@@ -522,7 +522,7 @@ public:
                            _options);
 
     while (true) {
-      size_t v = Atomic::add(&_claimed, 1u) - 1;
+      size_t v = Atomic::fetch_and_add(&_claimed, 1u);
       if (v < _heap->num_regions()) {
         ShenandoahHeapRegion* r = _heap->get_region(v);
         if (!r->is_humongous() && !r->is_trash()) {
@@ -101,7 +101,7 @@ inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(&_next, 1u) - 1u;
+    const size_t next = Atomic::fetch_and_add(&_next, 1u);
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;
@@ -110,7 +110,7 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(&_top, size) - size;
+  addr = Atomic::fetch_and_add(&_top, size);
   Atomic::add(&_end, expand_size);
 
   return addr;
@@ -58,7 +58,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
     const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::fetch_and_add(&_claimed, partition_size), _size);
     const size_t partition_end = MIN2(partition_start + partition_size, _size);
     if (partition_start == partition_end) {
       // End of table
@@ -176,7 +176,7 @@ public:
     for (;;) {
       // Get granule offset
       const size_t size = ZGranuleSize;
-      const uintptr_t offset = Atomic::add(&_start, size) - size;
+      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
       if (offset >= _end) {
         // Done
         break;
@@ -38,7 +38,7 @@ inline bool ZRelocationSetIteratorImpl<parallel>::next(ZForwarding** forwarding)
 
   if (parallel) {
     if (_next < nforwardings) {
-      const size_t next = Atomic::add(&_next, 1u) - 1u;
+      const size_t next = Atomic::fetch_and_add(&_next, 1u);
       if (next < nforwardings) {
         *forwarding = _relocation_set->_forwardings[next];
         return true;
@@ -185,7 +185,7 @@ ZJavaThreadsIterator::ZJavaThreadsIterator() :
     _claimed(0) {}
 
 uint ZJavaThreadsIterator::claim() {
-  return Atomic::add(&_claimed, 1u) - 1u;
+  return Atomic::fetch_and_add(&_claimed, 1u);
 }
 
 void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {
@@ -98,13 +98,19 @@ public:
   template <typename T>
   inline static T load_acquire(const volatile T* dest);
 
-  // Atomically add to a location. Returns updated value. add*() provide:
+  // Atomically add to a location. *add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
+  // Returns updated value.
   template<typename D, typename I>
   inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
+  // Returns previous value.
+  template<typename D, typename I>
+  inline static D fetch_and_add(D volatile* dest, I add_value,
+                                atomic_memory_order order = memory_order_conservative);
+
   template<typename D, typename I>
   inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
@@ -230,54 +236,34 @@ private:
 
   // Platform-specific implementation of add. Support for sizes of 4
   // bytes and (if different) pointer size bytes are required. The
-  // class is a function object that must be default constructable,
-  // with these requirements:
+  // class must be default constructable, with these requirements:
   //
   // - dest is of type D*, an integral or pointer type.
   // - add_value is of type I, an integral type.
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
+  // - order is of type atomic_memory_order.
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
-  // Then
-  //   platform_add(dest, add_value)
-  // must be a valid expression, returning a result convertible to D.
+  // Then both
+  //   platform_add.add_and_fetch(dest, add_value, order)
+  //   platform_add.fetch_and_add(dest, add_value, order)
+  // must be valid expressions returning a result convertible to D.
+  //
+  // add_and_fetch atomically adds add_value to the value of dest,
+  // returning the new value.
+  //
+  // fetch_and_add atomically adds add_value to the value of dest,
+  // returning the old value.
+  //
+  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
+  // treat it as if it were an uintptr_t; they do not perform any
+  // scaling of add_value, as that has already been done by the caller.
  //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
   template<size_t byte_size> struct PlatformAdd;
 
-  // Helper base classes for defining PlatformAdd. To use, define
-  // PlatformAdd or a specialization that derives from one of these,
-  // and include in the PlatformAdd definition the support function
-  // (described below) required by the base class.
-  //
-  // These classes implement the required function object protocol for
-  // PlatformAdd, using a support function template provided by the
-  // derived class. Let add_value (of type I) and dest (of type D) be
-  // the arguments the object is called with. If D is a pointer type
-  // P*, then let addend (of type I) be add_value * sizeof(P);
-  // otherwise, addend is add_value.
-  //
-  // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // old value.
-  //
-  // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // new value.
-  //
-  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
-  // treat it as if it were a uintptr_t; they do not perform any
-  // scaling of the addend, as that has already been done by the
-  // caller.
-public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
-  template<typename Derived> struct FetchAndAdd;
-  template<typename Derived> struct AddAndFetch;
-private:
-
   // Support for platforms that implement some variants of add using a
   // (typically out of line) non-template helper function. The
   // generic arguments passed to PlatformAdd need to be translated to
@@ -512,22 +498,6 @@ struct Atomic::PlatformStore {
   }
 };
 
-// Define FetchAndAdd and AddAndFetch helper classes before including
-// platform file, which may use these as base classes, requiring they
-// be complete.
-
-template<typename Derived>
-struct Atomic::FetchAndAdd {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
-template<typename Derived>
-struct Atomic::AddAndFetch {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
@@ -684,7 +654,13 @@ inline void Atomic::release_store_fence(volatile D* p, T v) {
 template<typename D, typename I>
 inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<D, I>()(dest, add_value, order);
+  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
+}
+
+template<typename D, typename I>
+inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
+                               atomic_memory_order order) {
+  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
 }
 
 template<typename D, typename I>
@@ -695,9 +671,13 @@ struct Atomic::AddImpl<
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(dest, addend, order);
+    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
+  }
+  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
   }
 };
 
@@ -706,41 +686,26 @@ struct Atomic::AddImpl<
   P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
-    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
-    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-    typedef typename Conditional<IsSigned<I>::value,
-                                 intptr_t,
-                                 uintptr_t>::type CI;
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+  typedef typename Conditional<IsSigned<I>::value,
+                               intptr_t,
+                               uintptr_t>::type CI;
+
+  static CI scale_addend(CI add_value) {
+    return add_value * sizeof(P);
+  }
+
+  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
+    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
+  }
+  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
   }
 };
 
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  I addend = add_value;
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    addend *= sizeof(typename RemovePointer<D>::type);
-  }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
-  return old + add_value;
-}
-
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    add_value *= sizeof(typename RemovePointer<D>::type);
-  }
-  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
-}
-
 template<typename Type, typename Fn, typename D, typename I>
 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
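The pointer specialization above now routes both operations through a scale_addend() helper, so the platform layer only ever sees a byte count, matching the updated comment that PlatformAdd itself does no scaling. The element-counted behaviour seen by callers is the same convention std::atomic uses for pointers; a standalone analogy (std::atomic, not HotSpot code):

#include <atomic>
#include <cassert>

int main() {
  static int slots[4] = {10, 11, 12, 13};
  std::atomic<int*> cursor{slots};

  int* claimed = cursor.fetch_add(1);      // returns the old pointer...
  assert(claimed == &slots[0]);
  assert(cursor.load() == &slots[1]);      // ...and advances by one int, not one byte
  return 0;
}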
@@ -54,7 +54,7 @@ class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
 
   // Returns true if you succeeded to claim the range start -> (stop-1).
   bool claim(size_t* start, size_t* stop) {
-    size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
+    size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u);
    if (claimed >= _stop_task) {
       return false;
     }
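Most of the call-site changes in this commit are variations of the claim idiom in the hunk above: a shared cursor is bumped atomically and the previous value is the caller's exclusive index or partition start, which is exactly what fetch_and_add returns, so the old "- 1" and "- stride" corrections disappear. A standalone sketch of that idiom with std::atomic and std::thread (invented names, the per-index "work" is just a second counter):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<size_t> next_to_claim{0};

static void worker(size_t limit, std::atomic<size_t>* processed) {
  while (true) {
    size_t claimed = next_to_claim.fetch_add(1);   // previous value = my exclusive index
    if (claimed >= limit) {
      return;
    }
    processed->fetch_add(1);                       // stand-in for real work on 'claimed'
  }
}

int main() {
  const size_t limit = 1000;
  std::atomic<size_t> processed{0};

  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back(worker, limit, &processed);
  }
  for (std::thread& t : threads) {
    t.join();
  }

  std::printf("processed %zu of %zu indices\n", processed.load(), limit);
  return processed.load() == limit ? 0 : 1;
}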