8236778: Add Atomic::fetch_and_add

Reviewed-by: kbarrett, dholmes
Stefan Karlsson 2020-01-24 09:15:08 +01:00
parent 5013cf6e0c
commit 17106c9e9d
32 changed files with 152 additions and 146 deletions
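Most of the patch is mechanical: call sites that claimed an index with the add-then-subtract idiom now use the new primitive directly. A representative before/after, adapted from the G1 changes below:

    // Before: Atomic::add returns the updated value, so undo the increment
    // to recover the claimed slot.
    size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;

    // After: Atomic::fetch_and_add returns the previous value directly.
    size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);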

View file

@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>

View file

@@ -160,11 +160,14 @@ static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
 #endif // ARM
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -33,15 +33,18 @@
 // See https://patchwork.kernel.org/patch/3575821/
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
   }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<size_t byte_size>

View file

@@ -67,11 +67,14 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -93,11 +93,14 @@ inline void post_membar(atomic_memory_order order) {
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -75,11 +75,14 @@ inline void z196_fast_sync() {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>

View file

@@ -31,11 +31,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  inline D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -41,6 +41,11 @@ struct Atomic::PlatformAdd {
     }
     return old_value + add_value;
   }
+
+  template<typename D, typename I>
+  inline D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>

View file

@@ -41,11 +41,14 @@ extern "C" {
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 // Not using add_using_helper; see comment for cmpxchg.

View file

@@ -54,11 +54,14 @@ template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 #ifdef AMD64

View file

@@ -209,7 +209,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
+  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -282,7 +282,7 @@ void G1CMRootMemRegions::reset() {
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
 
-  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
+  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
 
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -310,7 +310,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() {
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
+  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }

View file

@@ -70,7 +70,7 @@ CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
+  size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
   if (index == _hot_cache_size) {
     // Can use relaxed store because all racing threads are writing the same
     // value and there aren't any concurrent readers.

View file

@@ -261,7 +261,7 @@ public:
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
+      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }

View file

@@ -180,7 +180,7 @@ private:
       bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
       if (marked_as_dirty) {
-        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
+        uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u);
         _buffer[allocated] = region;
       }
     }
@@ -232,7 +232,7 @@ private:
   void work(uint worker_id) {
     while (_cur_dirty_regions < _regions->size()) {
-      uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
+      uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length);
       uint max = MIN2(next + _chunk_length, _regions->size());
 
       for (uint i = next; i < max; i++) {
@@ -429,7 +429,7 @@ public:
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
+    return Atomic::fetch_and_add(&_card_table_scan_state[region], increment);
   }
 
   void add_dirty_region(uint const region) {

View file

@@ -2452,7 +2452,7 @@ public:
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::fetch_and_add(&_counter, 1u);
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;

View file

@@ -32,7 +32,7 @@ StringDedupQueue* StringDedupQueue::_queue = NULL;
 volatile size_t StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(&_claimed_index, size_t(1)) - 1;
+  return Atomic::fetch_and_add(&_claimed_index, 1u);
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {

View file

@@ -592,7 +592,7 @@ void StringDedupTable::finish_rehash(StringDedupTable* rehashed_table) {
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(&_claimed_index, partition_size) - partition_size;
+  return Atomic::fetch_and_add(&_claimed_index, partition_size);
 }
 
 void StringDedupTable::verify() {

View file

@@ -1362,7 +1362,7 @@ public:
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(&_index, stride) - stride;
+      size_t cur = Atomic::fetch_and_add(&_index, stride);
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;

View file

@@ -484,7 +484,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
   ShenandoahNMethod** list = _array;
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;

View file

@@ -90,7 +90,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;

View file

@@ -522,7 +522,7 @@ public:
                          _options);
 
     while (true) {
-      size_t v = Atomic::add(&_claimed, 1u) - 1;
+      size_t v = Atomic::fetch_and_add(&_claimed, 1u);
       if (v < _heap->num_regions()) {
         ShenandoahHeapRegion* r = _heap->get_region(v);
         if (!r->is_humongous() && !r->is_trash()) {

View file

@@ -101,7 +101,7 @@ inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
   if (parallel) {
-    const size_t next = Atomic::add(&_next, 1u) - 1u;
+    const size_t next = Atomic::fetch_and_add(&_next, 1u);
     if (next < _array->size()) {
       *elem = _array->at(next);
       return true;

View file

@@ -110,7 +110,7 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
 
   // Increment top before end to make sure another
   // thread can't steal out newly expanded space.
-  addr = Atomic::add(&_top, size) - size;
+  addr = Atomic::fetch_and_add(&_top, size);
   Atomic::add(&_end, expand_size);
 
   return addr;

View file

@@ -58,7 +58,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
     // Claim table partition. Each partition is currently sized to span
     // two cache lines. This number is just a guess, but seems to work well.
    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
+    const size_t partition_start = MIN2(Atomic::fetch_and_add(&_claimed, partition_size), _size);
     const size_t partition_end = MIN2(partition_start + partition_size, _size);
     if (partition_start == partition_end) {
       // End of table

View file

@@ -176,7 +176,7 @@ public:
     for (;;) {
       // Get granule offset
      const size_t size = ZGranuleSize;
-      const uintptr_t offset = Atomic::add(&_start, size) - size;
+      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
       if (offset >= _end) {
         // Done
         break;

View file

@@ -38,7 +38,7 @@ inline bool ZRelocationSetIteratorImpl<parallel>::next(ZForwarding** forwarding)
   if (parallel) {
     if (_next < nforwardings) {
-      const size_t next = Atomic::add(&_next, 1u) - 1u;
+      const size_t next = Atomic::fetch_and_add(&_next, 1u);
       if (next < nforwardings) {
         *forwarding = _relocation_set->_forwardings[next];
         return true;

View file

@@ -185,7 +185,7 @@ ZJavaThreadsIterator::ZJavaThreadsIterator() :
     _claimed(0) {}
 
 uint ZJavaThreadsIterator::claim() {
-  return Atomic::add(&_claimed, 1u) - 1u;
+  return Atomic::fetch_and_add(&_claimed, 1u);
 }
 
 void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {

View file

@@ -98,13 +98,19 @@ public:
   template <typename T>
   inline static T load_acquire(const volatile T* dest);
 
-  // Atomically add to a location. Returns updated value. add*() provide:
+  // Atomically add to a location. *add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
+
+  // Returns updated value.
   template<typename D, typename I>
   inline static D add(D volatile* dest, I add_value,
                       atomic_memory_order order = memory_order_conservative);
 
+  // Returns previous value.
+  template<typename D, typename I>
+  inline static D fetch_and_add(D volatile* dest, I add_value,
+                                atomic_memory_order order = memory_order_conservative);
+
   template<typename D, typename I>
   inline static D sub(D volatile* dest, I sub_value,
                       atomic_memory_order order = memory_order_conservative);
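The comment above is the heart of the new API: add() returns the updated value while fetch_and_add() returns the previous one. An illustrative sketch (the counter is hypothetical, not part of the patch):

    volatile uint counter = 5;
    uint updated  = Atomic::add(&counter, 1u);           // counter == 6, returns 6
    uint previous = Atomic::fetch_and_add(&counter, 1u); // counter == 7, returns 6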
@@ -230,54 +236,34 @@ private:
   // Platform-specific implementation of add.  Support for sizes of 4
   // bytes and (if different) pointer size bytes are required.  The
-  // class is a function object that must be default constructable,
-  // with these requirements:
+  // class must be default constructable, with these requirements:
   //
   // - dest is of type D*, an integral or pointer type.
   // - add_value is of type I, an integral type.
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
   // - order is of type atomic_memory_order.
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
-  // Then
-  //   platform_add(dest, add_value)
-  // must be a valid expression, returning a result convertible to D.
+  // Then both
+  //   platform_add.add_and_fetch(dest, add_value, order)
+  //   platform_add.fetch_and_add(dest, add_value, order)
+  // must be valid expressions returning a result convertible to D.
+  //
+  // add_and_fetch atomically adds add_value to the value of dest,
+  // returning the new value.
+  //
+  // fetch_and_add atomically adds add_value to the value of dest,
+  // returning the old value.
+  //
+  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
+  // treat it as if it were an uintptr_t; they do not perform any
+  // scaling of add_value, as that has already been done by the caller.
   //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
   template<size_t byte_size> struct PlatformAdd;
 
-  // Helper base classes for defining PlatformAdd.  To use, define
-  // PlatformAdd or a specialization that derives from one of these,
-  // and include in the PlatformAdd definition the support function
-  // (described below) required by the base class.
-  //
-  // These classes implement the required function object protocol for
-  // PlatformAdd, using a support function template provided by the
-  // derived class.  Let add_value (of type I) and dest (of type D) be
-  // the arguments the object is called with.  If D is a pointer type
-  // P*, then let addend (of type I) be add_value * sizeof(P);
-  // otherwise, addend is add_value.
-  //
-  // FetchAndAdd requires the derived class to provide
-  //   fetch_and_add(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // old value.
-  //
-  // AddAndFetch requires the derived class to provide
-  //   add_and_fetch(dest, addend)
-  // atomically adding addend to the value of dest, and returning the
-  // new value.
-  //
-  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
-  // treat it as if it were a uintptr_t; they do not perform any
-  // scaling of the addend, as that has already been done by the
-  // caller.
-public: // Temporary, can't be private: C++03 11.4/2.  Fixed by C++11.
-  template<typename Derived> struct FetchAndAdd;
-  template<typename Derived> struct AddAndFetch;
-private:
 
   // Support for platforms that implement some variants of add using a
   // (typically out of line) non-template helper function.  The
   // generic arguments passed to PlatformAdd need to be translated to
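Concretely, the rewritten protocol asks each port to define both member templates on PlatformAdd itself, usually implementing one in terms of the other. A hypothetical specialization using the GCC __atomic builtins (a sketch, not one of the files in this patch; a real port would map `order` onto barriers instead of forcing sequential consistency):

    template<size_t byte_size>
    struct Atomic::PlatformAdd {
      template<typename D, typename I>
      D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
        return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST); // sketch ignores order
      }

      template<typename D, typename I>
      D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
        return fetch_and_add(dest, add_value, order) + add_value;
      }
    };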
@@ -512,22 +498,6 @@ struct Atomic::PlatformStore {
   }
 };
 
-// Define FetchAndAdd and AddAndFetch helper classes before including
-// platform file, which may use these as base classes, requiring they
-// be complete.
-
-template<typename Derived>
-struct Atomic::FetchAndAdd {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
-template<typename Derived>
-struct Atomic::AddAndFetch {
-  template<typename D, typename I>
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
-};
-
 template<typename D>
 inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
@@ -684,7 +654,13 @@ inline void Atomic::release_store_fence(volatile D* p, T v) {
 template<typename D, typename I>
 inline D Atomic::add(D volatile* dest, I add_value,
                      atomic_memory_order order) {
-  return AddImpl<D, I>()(dest, add_value, order);
+  return AddImpl<D, I>::add_and_fetch(dest, add_value, order);
+}
+
+template<typename D, typename I>
+inline D Atomic::fetch_and_add(D volatile* dest, I add_value,
+                               atomic_memory_order order) {
+  return AddImpl<D, I>::fetch_and_add(dest, add_value, order);
 }
 
 template<typename D, typename I>
@@ -695,9 +671,13 @@ struct Atomic::AddImpl<
   (sizeof(I) <= sizeof(D)) &&
   (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  static D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(dest, addend, order);
+    return PlatformAdd<sizeof(D)>().add_and_fetch(dest, addend, order);
+  }
+
+  static D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>().fetch_and_add(dest, addend, order);
   }
 };
@@ -706,41 +686,26 @@
   P*, I,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
-    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
-    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-    typedef typename Conditional<IsSigned<I>::value,
-                                 intptr_t,
-                                 uintptr_t>::type CI;
+  STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+  STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+
+  typedef typename Conditional<IsSigned<I>::value,
+                               intptr_t,
+                               uintptr_t>::type CI;
+
+  static CI scale_addend(CI add_value) {
+    return add_value * sizeof(P);
+  }
+
+  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(dest, addend, order);
+    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
+  }
+
+  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
   }
 };
 
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  I addend = add_value;
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    addend *= sizeof(typename RemovePointer<D>::type);
-  }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
-  return old + add_value;
-}
-
-template<typename Derived>
-template<typename D, typename I>
-inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
-                                                  atomic_memory_order order) const {
-  // If D is a pointer type P*, scale by sizeof(P).
-  if (IsPointer<D>::value) {
-    add_value *= sizeof(typename RemovePointer<D>::type);
-  }
-  return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
-}
-
 template<typename Type, typename Fn, typename D, typename I>
 inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
   return PrimitiveConversions::cast<D>(
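With scale_addend() factored out, the pointer specialization applies element-size scaling exactly once for both entry points. A worked instance (hypothetical names; element type int, so sizeof(P) == 4):

    int* volatile cursor = base;                  // base points at element 0
    int* old = Atomic::fetch_and_add(&cursor, 2); // platform layer receives addend 2 * 4 == 8
    // old == base; cursor now points at element 2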

View file

@@ -54,7 +54,7 @@ class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
 
   // Returns true if you succeeded to claim the range start -> (stop-1).
   bool claim(size_t* start, size_t* stop) {
-    size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
+    size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u);
     if (claimed >= _stop_task) {
       return false;
     }