8234737: Harmonize parameter order in Atomic - add

Reviewed-by: rehn, dholmes
Stefan Karlsson 2019-11-25 12:31:39 +01:00
parent 8db2c1158e
commit d45ec50076
82 changed files with 234 additions and 229 deletions
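
The change swaps every Atomic::add call site and PlatformAdd specialization from the old (add_value, dest) argument order to a destination-first (dest, add_value) order, so the destination pointer leads the argument list as it already does for Atomic::store. What follows is a minimal standalone sketch of the new call shape, written on top of std::atomic; it is illustrative only and is not HotSpot's actual Atomic class.

// Standalone sketch (illustrative, not HotSpot code): the destination comes
// first and the value to add second; the returned value is the updated one,
// matching the add-and-fetch behavior the call sites in this diff rely on.
#include <atomic>
#include <cstdio>

struct Atomic {
  template <typename D, typename I>
  static D add(std::atomic<D>* dest, I add_value) {
    // fetch_add returns the previous value; add add_value to get the new value.
    return dest->fetch_add(static_cast<D>(add_value)) + static_cast<D>(add_value);
  }
};

int main() {
  std::atomic<int> counter{0};
  int id = Atomic::add(&counter, 1);   // new order: Atomic::add(dest, add_value)
  std::printf("claimed id %d\n", id);  // prints "claimed id 1"
  return 0;
}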

@@ -437,7 +437,8 @@ class StubGenerator: public StubCodeGenerator {
 // for which we do not support MP and so membars are not necessary. This ARMv5 code will
 // be removed in the future.
-// Support for jint Atomic::add(jint add_value, volatile jint *dest)
+// Implementation of atomic_add(jint add_value, volatile jint* dest)
+// used by Atomic::add(volatile jint* dest, jint add_value)
 //
 // Arguments :
 //

@@ -679,7 +679,8 @@ class StubGenerator: public StubCodeGenerator {
 }
-// Support for jint Atomic::add(jint add_value, volatile jint* dest).
+// Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+// used by Atomic::add(volatile jint* dest, jint add_value)
 //
 // Arguments:
 //

@@ -668,7 +668,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }
-// Support for jint atomic::add(jint add_value, volatile jint* dest)
+// Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+// used by Atomic::add(volatile jint* dest, jint add_value)
 //
 // Arguments :
 // c_rarg0: add_value
@@ -690,7 +691,8 @@ class StubGenerator: public StubCodeGenerator {
 return start;
 }
-// Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+// Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
+// used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
 //
 // Arguments :
 // c_rarg0: add_value

@@ -1894,7 +1894,7 @@ void bsd_wrap_code(char* base, size_t size) {
 }
 char buf[PATH_MAX + 1];
-int num = Atomic::add(1, &cnt);
+int num = Atomic::add(&cnt, 1);
 snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
 os::get_temp_directory(), os::current_process_id(), num);
@@ -3264,7 +3264,7 @@ uint os::processor_id() {
 while (processor_id < 0) {
 if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
-Atomic::store(&mapping[apic_id], Atomic::add(1, &next_processor_id) - 1);
+Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
 }
 processor_id = Atomic::load(&mapping[apic_id]);
 }

@@ -2813,7 +2813,7 @@ void linux_wrap_code(char* base, size_t size) {
 }
 char buf[PATH_MAX+1];
-int num = Atomic::add(1, &cnt);
+int num = Atomic::add(&cnt, 1);
 snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
 os::get_temp_directory(), os::current_process_id(), num);

@@ -96,13 +96,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -127,8 +127,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));
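
The per-platform PlatformAdd specializations above plug into a size-dispatched front end in the shared atomic header. The sketch below shows an assumed, simplified shape of that dispatch (names such as atomic_add and the single-layer structure are illustrative; the real atomic.hpp has more layers), and illustrates why D now precedes I: the destination type both selects the specialization by size and fixes the result type.

// Simplified sketch of the dispatch these platform specializations serve
// (assumed shape, illustrative only).
#include <cstddef>

enum atomic_memory_order { memory_order_conservative };

template<size_t byte_size>
struct PlatformAdd;  // specialized per platform and per operand size

// One possible 4-byte specialization in the GCC-builtin style used by
// several of the files in this diff.
template<>
struct PlatformAdd<4> {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order) const {
    return __sync_add_and_fetch(dest, add_value);
  }
};

// Front end: the destination's size picks the specialization, and the
// destination's type D is the result type.
template<typename D, typename I>
inline D atomic_add(D volatile* dest, I add_value,
                    atomic_memory_order order = memory_order_conservative) {
  return PlatformAdd<sizeof(D)>().add_and_fetch(dest, add_value, order);
}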

@@ -31,13 +31,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
+template<typename D, typename I>
+D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
 atomic_memory_order /* order */) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -92,8 +92,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
 atomic_memory_order /* order */) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -163,22 +163,22 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
 #ifdef ARM
-return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
+return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
 #else
 #ifdef M68K
-return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
+return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
 #else
 return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
@@ -186,8 +186,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 }
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -36,8 +36,8 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
 D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
 FULL_MEM_BARRIER;
 return res;

@@ -70,17 +70,17 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
-return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }

@@ -96,13 +96,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -127,8 +127,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -78,13 +78,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -137,8 +137,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -31,13 +31,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -59,8 +59,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 }
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -31,13 +31,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -93,8 +93,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -34,13 +34,13 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -49,8 +49,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 }
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -30,8 +30,8 @@
 // Implement ADD using a CAS loop.
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
-template<typename I, typename D>
-inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+template<typename D, typename I>
+inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
 D old_value = *dest;
 while (true) {
 D new_value = old_value + add_value;
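
The hunk above is the generic fallback that builds ADD out of a compare-and-swap loop; only the operator() signature changes, the loop body does not. For reference, a standalone sketch of the same ADD-via-CAS-loop idea on std::atomic (illustrative only; the HotSpot version above uses its own cmpxchg):

// Standalone sketch of ADD implemented with a CAS loop, mirroring the
// fallback above but using std::atomic (illustrative, not HotSpot code).
#include <atomic>

template<typename D, typename I>
D add_via_cas(std::atomic<D>* dest, I add_value) {
  D old_value = dest->load();
  D new_value;
  do {
    new_value = old_value + static_cast<D>(add_value);
    // On failure, compare_exchange_weak reloads old_value with the current value.
  } while (!dest->compare_exchange_weak(old_value, new_value));
  return new_value;  // add-and-fetch semantics: return the updated value
}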

@@ -44,14 +44,14 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));
@@ -62,8 +62,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
 // Not using add_using_helper; see comment for cmpxchg.
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(8 == sizeof(I));
 STATIC_ASSERT(8 == sizeof(D));

@@ -49,7 +49,8 @@
 orq %rdx, %rax
 .end
-// Support for jint Atomic::add(jint add_value, volatile jint* dest)
+// Implementation of jint _Atomic_add(jint add_value, volatile jint* dest)
+// used by Atomic::add(volatile jint* dest, jint add_value)
 .inline _Atomic_add,2
 movl %edi, %eax // save add_value for return
 lock
@@ -57,7 +58,8 @@
 addl %edi, %eax
 .end
-// Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
+// Implementation of jlong _Atomic_add(jlong add_value, volatile jlong* dest)
+// used by Atomic::add(volatile jlong* dest, jint add_value)
 .inline _Atomic_add_long,2
 movq %rdi, %rax // save add_value for return
 lock

@@ -57,23 +57,23 @@ template<size_t byte_size>
 struct Atomic::PlatformAdd
 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
-template<typename I, typename D>
-D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+template<typename D, typename I>
+D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
 };
 #ifdef AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
-return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
 }
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
-return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
+return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
 }
 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
@@ -111,8 +111,8 @@ DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
 #else // !AMD64
 template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
 atomic_memory_order order) const {
 STATIC_ASSERT(4 == sizeof(I));
 STATIC_ASSERT(4 == sizeof(D));

@@ -50,7 +50,7 @@ size_t ClassLoaderDataGraph::num_array_classes() {
 }
 void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
-Atomic::add(count, &_num_instance_classes);
+Atomic::add(&_num_instance_classes, count);
 }
 void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
@@ -59,7 +59,7 @@ void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
 }
 void ClassLoaderDataGraph::inc_array_classes(size_t count) {
-Atomic::add(count, &_num_array_classes);
+Atomic::add(&_num_array_classes, count);
 }
 void ClassLoaderDataGraph::dec_array_classes(size_t count) {

@@ -214,11 +214,11 @@ void StringTable::create_table() {
 }
 size_t StringTable::item_added() {
-return Atomic::add((size_t)1, &_items_count);
+return Atomic::add(&_items_count, (size_t)1);
 }
 size_t StringTable::add_items_to_clean(size_t ndead) {
-size_t total = Atomic::add((size_t)ndead, &_uncleaned_items_count);
+size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
 log_trace(stringtable)(
 "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
 _uncleaned_items_count, ndead, total);
@@ -226,7 +226,7 @@ size_t StringTable::add_items_to_clean(size_t ndead) {
 }
 void StringTable::item_removed() {
-Atomic::add((size_t)-1, &_items_count);
+Atomic::add(&_items_count, (size_t)-1);
 }
 double StringTable::get_load_factor() {

@@ -724,7 +724,7 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
 bdt.done(jt);
 }
-Atomic::add(stdc._processed, &_symbols_counted);
+Atomic::add(&_symbols_counted, stdc._processed);
 log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
 stdd._deleted, stdc._processed);

@@ -1479,14 +1479,14 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
 assert(!is_osr, "can't be osr");
 // Adapters, native wrappers and method handle intrinsics
 // should be generated always.
-return Atomic::add(1, &_compilation_id);
+return Atomic::add(&_compilation_id, 1);
 } else if (CICountOSR && is_osr) {
-id = Atomic::add(1, &_osr_compilation_id);
+id = Atomic::add(&_osr_compilation_id, 1);
 if (CIStartOSR <= id && id < CIStopOSR) {
 return id;
 }
 } else {
-id = Atomic::add(1, &_compilation_id);
+id = Atomic::add(&_compilation_id, 1);
 if (CIStart <= id && id < CIStop) {
 return id;
 }
@@ -1498,7 +1498,7 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
 #else
 // CICountOSR is a develop flag and set to 'false' by default. In a product built,
 // only _compilation_id is incremented.
-return Atomic::add(1, &_compilation_id);
+return Atomic::add(&_compilation_id, 1);
 #endif
 }

@@ -4226,7 +4226,7 @@ private:
 HeapRegion* r = g1h->region_at(region_idx);
 assert(!g1h->is_on_master_free_list(r), "sanity");
-Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
+Atomic::add(&_rs_length, r->rem_set()->occupied_locked());
 if (!is_young) {
 g1h->hot_card_cache()->reset_card_counts(r);
@@ -4290,7 +4290,7 @@ public:
 // Claim serial work.
 if (_serial_work_claim == 0) {
-jint value = Atomic::add(1, &_serial_work_claim) - 1;
+jint value = Atomic::add(&_serial_work_claim, 1) - 1;
 if (value == 0) {
 double serial_time = os::elapsedTime();
 do_serial_work();
@@ -4305,7 +4305,7 @@ public:
 bool has_non_young_time = false;
 while (true) {
-size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
+size_t end = Atomic::add(&_parallel_work_claim, chunk_size());
 size_t cur = end - chunk_size();
 if (cur >= _num_work_items) {

@@ -112,7 +112,7 @@ class G1BuildCandidateRegionsTask : public AbstractGangTask {
 // Claim a new chunk, returning its bounds [from, to[.
 void claim_chunk(uint& from, uint& to) {
-uint result = Atomic::add(_chunk_size, &_cur_claim_idx);
+uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
 assert(_max_size > result - 1,
 "Array too small, is %u should be %u with chunk size %u.",
 _max_size, result, _chunk_size);
@@ -214,8 +214,8 @@ class G1BuildCandidateRegionsTask : public AbstractGangTask {
 void update_totals(uint num_regions, size_t reclaimable_bytes) {
 if (num_regions > 0) {
 assert(reclaimable_bytes > 0, "invariant");
-Atomic::add(num_regions, &_num_regions_added);
-Atomic::add(reclaimable_bytes, &_reclaimable_bytes_added);
+Atomic::add(&_num_regions_added, num_regions);
+Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
 } else {
 assert(reclaimable_bytes == 0, "invariant");
 }

@@ -207,7 +207,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
 return NULL;
 }
-size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
+size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
 if (cur_idx >= _chunk_capacity) {
 return NULL;
 }
@@ -280,7 +280,7 @@ void G1CMRootMemRegions::reset() {
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
 assert_at_safepoint();
-size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
+size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
 assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
 assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
 "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -308,7 +308,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() {
 return NULL;
 }
-size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
+size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
 if (claimed_index < _num_root_regions) {
 return &_root_regions[claimed_index];
 }
@@ -1121,7 +1121,7 @@ public:
 virtual void work(uint worker_id) {
 G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
 _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
-Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
+Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
 }
 uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }

@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-Atomic::add(value, &_direct_allocated);
+Atomic::add(&_direct_allocated, value);
 }
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-Atomic::add(value, &_region_end_waste);
+Atomic::add(&_region_end_waste, value);
 Atomic::inc(&_regions_filled);
 }
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-Atomic::add(used, &_failure_used);
-Atomic::add(waste, &_failure_waste);
+Atomic::add(&_failure_used, used);
+Atomic::add(&_failure_waste, waste);
 }
 #endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP

@@ -101,7 +101,7 @@ void G1FullGCAdjustTask::work(uint worker_id) {
 // Adjust the weak roots.
-if (Atomic::add(1u, &_references_done) == 1u) { // First incr claims task.
+if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task.
 G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust);
 }

@@ -68,7 +68,7 @@ CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
 return card_ptr;
 }
 // Otherwise, the card is hot.
-size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
+size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
 size_t masked_index = index & (_hot_cache_size - 1);
 CardValue* current_ptr = _hot_cache[masked_index];
@@ -91,8 +91,8 @@ void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
 assert(!use_cache(), "cache should be disabled");
 while (_hot_cache_par_claimed_idx < _hot_cache_size) {
-size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
-&_hot_cache_par_claimed_idx);
+size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
+_hot_cache_par_chunk_size);
 size_t start_idx = end_idx - _hot_cache_par_chunk_size;
 // The current worker has successfully claimed the chunk [start_idx..end_idx)
 end_idx = MIN2(end_idx, _hot_cache_size);

@@ -261,7 +261,7 @@ public:
 virtual void work(uint worker_id) {
 size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
 while (true) {
-char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
+char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
 if (touch_addr < _start_addr || touch_addr >= _end_addr) {
 break;
 }

@@ -129,7 +129,7 @@ void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) {
 void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
 assert(_collecting, "precondition");
-Atomic::add(buffer_size() - node->index(), &_entry_count);
+Atomic::add(&_entry_count, buffer_size() - node->index());
 _list.push(*node);
 update_tail(node);
 }
@@ -139,7 +139,7 @@ void G1RedirtyCardsQueueSet::merge_bufferlist(LocalQSet* src) {
 const G1BufferNodeList from = src->take_all_completed_buffers();
 if (from._head != NULL) {
 assert(from._tail != NULL, "invariant");
-Atomic::add(from._entry_count, &_entry_count);
+Atomic::add(&_entry_count, from._entry_count);
 _list.prepend(*from._head, *from._tail);
 update_tail(from._tail);
 }

@@ -46,7 +46,7 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac
 inline void G1RegionMarkStatsCache::evict(uint idx) {
 G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
 if (cur->_stats._live_words != 0) {
-Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words);
+Atomic::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
 }
 cur->clear();
 }

@@ -179,7 +179,7 @@ private:
 bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
 if (marked_as_dirty) {
-uint allocated = Atomic::add(1u, &_cur_idx) - 1;
+uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
 _buffer[allocated] = region;
 }
 }
@@ -255,7 +255,7 @@ private:
 void work(uint worker_id) {
 while (_cur_dirty_regions < _regions->size()) {
-uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
+uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
 uint max = MIN2(next + _chunk_length, _regions->size());
 for (uint i = next; i < max; i++) {
@@ -447,7 +447,7 @@ public:
 uint claim_cards_to_scan(uint region, uint increment) {
 assert(region < _max_regions, "Tried to access invalid region %u", region);
-return Atomic::add(increment, &_card_table_scan_state[region]) - increment;
+return Atomic::add(&_card_table_scan_state[region], increment) - increment;
 }
 void add_dirty_region(uint const region) {

@@ -90,7 +90,7 @@ ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
 bool end_bit_ok = _end_bits.par_set_bit(end_bit);
 assert(end_bit_ok, "concurrency problem");
 DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
-DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
+DEBUG_ONLY(Atomic::add(&mark_bitmap_size, size));
 return true;
 }
 return false;

@@ -532,7 +532,7 @@ void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
 const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 DEBUG_ONLY(Atomic::inc(&add_obj_count);)
-DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
+DEBUG_ONLY(Atomic::add(&add_obj_size, len);)
 if (beg_region == end_region) {
 // All in one region.
@@ -2449,7 +2449,7 @@ public:
 }
 bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-uint claimed = Atomic::add(1u, &_counter) - 1; // -1 is so that we start with zero
+uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
 if (claimed < _insert_index) {
 reference = _backing_array[claimed];
 return true;

@@ -536,7 +536,7 @@ inline void ParallelCompactData::RegionData::decrement_destination_count()
 {
 assert(_dc_and_los < dc_claimed, "already claimed");
 assert(_dc_and_los >= dc_one, "count would go negative");
-Atomic::add(dc_mask, &_dc_and_los);
+Atomic::add(&_dc_and_los, dc_mask);
 }
 inline HeapWord* ParallelCompactData::RegionData::data_location() const
@@ -576,7 +576,7 @@ inline bool ParallelCompactData::RegionData::claim_unsafe()
 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
 {
 assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
-Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
+Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
 }
 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)

@@ -144,7 +144,7 @@ size_t OopStorage::ActiveArray::block_count_acquire() const {
 }
 void OopStorage::ActiveArray::increment_refcount() const {
-int new_value = Atomic::add(1, &_refcount);
+int new_value = Atomic::add(&_refcount, 1);
 assert(new_value >= 1, "negative refcount %d", new_value - 1);
 }
@@ -1010,7 +1010,7 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
 // than a CAS loop on some platforms when there is contention.
 // We can cope with the uncertainty by recomputing start/end from
 // the result of the add, and dealing with potential overshoot.
-size_t end = Atomic::add(step, &_next_block);
+size_t end = Atomic::add(&_next_block, step);
 // _next_block may have changed, so recompute start from result of add.
 start = end - step;
 // _next_block may have changed so much that end has overshot.
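
The OopStorage comment above spells out the chunk-claiming idiom that recurs throughout this diff (G1, Shenandoah, ZGC): bump a shared index by a step with Atomic::add, derive the claimed range from the returned value, and clamp any overshoot. Below is a standalone sketch of that idiom in the new argument order, with assumed names and std::atomic standing in for HotSpot's Atomic:

// Standalone sketch of the parallel "claim a chunk" idiom (assumed names,
// std::atomic instead of HotSpot's Atomic), written destination-first.
#include <algorithm>
#include <atomic>
#include <cstddef>

struct ChunkClaimer {
  std::atomic<size_t> next{0};  // shared claim index
  size_t limit;                 // total number of items

  explicit ChunkClaimer(size_t n) : limit(n) {}

  // Returns [start, end) for this worker; false when everything is claimed.
  // Mirrors: size_t end = Atomic::add(&_next_block, step); start = end - step;
  bool claim(size_t step, size_t* start, size_t* end) {
    size_t e = next.fetch_add(step) + step;  // new value, like Atomic::add
    size_t s = e - step;                     // recompute start from the result
    if (s >= limit) return false;            // overshot: nothing left to claim
    *start = s;
    *end = std::min(e, limit);               // clamp the final partial chunk
    return true;
  }
};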

@@ -43,19 +43,19 @@ inline HeapWord* PLAB::allocate_aligned(size_t word_sz, unsigned short alignment
 }
 void PLABStats::add_allocated(size_t v) {
-Atomic::add(v, &_allocated);
+Atomic::add(&_allocated, v);
 }
 void PLABStats::add_unused(size_t v) {
-Atomic::add(v, &_unused);
+Atomic::add(&_unused, v);
 }
 void PLABStats::add_wasted(size_t v) {
-Atomic::add(v, &_wasted);
+Atomic::add(&_wasted, v);
 }
 void PLABStats::add_undo_wasted(size_t v) {
-Atomic::add(v, &_undo_wasted);
+Atomic::add(&_undo_wasted, v);
 }
 #endif // SHARE_GC_SHARED_PLAB_INLINE_HPP

@@ -55,7 +55,7 @@ void PreservedMarks::restore_and_increment(volatile size_t* const total_size_add
 restore();
 // Only do the atomic add if the size is > 0.
 if (stack_size > 0) {
-Atomic::add(stack_size, total_size_addr);
+Atomic::add(total_size_addr, stack_size);
 }
 }

@@ -182,7 +182,7 @@ void BufferNode::Allocator::release(BufferNode* node) {
 const size_t trigger_transfer = 10;
 // Add to pending list. Update count first so no underflow in transfer.
-size_t pending_count = Atomic::add(1u, &_pending_count);
+size_t pending_count = Atomic::add(&_pending_count, 1u);
 _pending_list.push(*node);
 if (pending_count > trigger_transfer) {
 try_transfer_pending();
@@ -219,7 +219,7 @@ bool BufferNode::Allocator::try_transfer_pending() {
 // Add synchronized nodes to _free_list.
 // Update count first so no underflow in allocate().
-Atomic::add(count, &_free_count);
+Atomic::add(&_free_count, count);
 _free_list.prepend(*first, *last);
 log_trace(gc, ptrqueue, freelist)
 ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
@@ -258,4 +258,3 @@ void** PtrQueueSet::allocate_buffer() {
 void PtrQueueSet::deallocate_buffer(BufferNode* node) {
 _allocator->release(node);
 }

@@ -246,7 +246,7 @@ void ReferenceProcessorPhaseTimes::set_sub_phase_total_phase_time_ms(ReferencePr
 void ReferenceProcessorPhaseTimes::add_ref_cleared(ReferenceType ref_type, size_t count) {
 ASSERT_REF_TYPE(ref_type);
-Atomic::add(count, &_ref_cleared[ref_type_2_index(ref_type)]);
+Atomic::add(&_ref_cleared[ref_type_2_index(ref_type)], count);
 }
 void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) {

@@ -32,7 +32,7 @@ StringDedupQueue* StringDedupQueue::_queue = NULL;
 volatile size_t StringDedupQueue::_claimed_index = 0;
 size_t StringDedupQueue::claim() {
-return Atomic::add(size_t(1), &_claimed_index) - 1;
+return Atomic::add(&_claimed_index, size_t(1)) - 1;
 }
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {

@@ -589,7 +589,7 @@ void StringDedupTable::finish_rehash(StringDedupTable* rehashed_table) {
 }
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-return Atomic::add(partition_size, &_claimed_index) - partition_size;
+return Atomic::add(&_claimed_index, partition_size) - partition_size;
 }
 void StringDedupTable::verify() {

@@ -153,7 +153,7 @@ public:
 // Wait for the coordinator to dispatch a task.
 _start_semaphore->wait();
-uint num_started = Atomic::add(1u, &_started);
+uint num_started = Atomic::add(&_started, 1u);
 // Subtract one to get a zero-indexed worker id.
 uint worker_id = num_started - 1;

@@ -264,7 +264,7 @@ void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
 size_t max = (size_t)list->length();
 while (_claimed < max) {
-size_t cur = Atomic::add(stride, &_claimed) - stride;
+size_t cur = Atomic::add(&_claimed, stride) - stride;
 size_t start = cur;
 size_t end = MIN2(cur + stride, max);
 if (start >= max) break;

@@ -593,7 +593,7 @@ void ShenandoahControlThread::notify_heap_changed() {
 void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
 assert(ShenandoahPacing, "should only call when pacing is enabled");
-Atomic::add(words, &_allocs_seen);
+Atomic::add(&_allocs_seen, words);
 }
 void ShenandoahControlThread::set_forced_counters_update(bool value) {

@@ -620,7 +620,7 @@ void ShenandoahHeap::decrease_committed(size_t bytes) {
 }
 void ShenandoahHeap::increase_used(size_t bytes) {
-Atomic::add(bytes, &_used);
+Atomic::add(&_used, bytes);
 }
 void ShenandoahHeap::set_used(size_t bytes) {
@@ -633,7 +633,7 @@ void ShenandoahHeap::decrease_used(size_t bytes) {
 }
 void ShenandoahHeap::increase_allocated(size_t bytes) {
-Atomic::add(bytes, &_bytes_allocated_since_gc_start);
+Atomic::add(&_bytes_allocated_since_gc_start, bytes);
 }
 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
@@ -1350,7 +1350,7 @@ public:
 size_t max = _heap->num_regions();
 while (_index < max) {
-size_t cur = Atomic::add(stride, &_index) - stride;
+size_t cur = Atomic::add(&_index, stride) - stride;
 size_t start = cur;
 size_t end = MIN2(cur + stride, max);
 if (start >= max) break;

@@ -49,7 +49,7 @@
 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
-size_t new_index = Atomic::add((size_t) 1, &_index);
+size_t new_index = Atomic::add(&_index, (size_t) 1);
 // get_region() provides the bounds-check and returns NULL on OOB.
 return _heap->get_region(new_index - 1);
 }

@@ -687,7 +687,7 @@ void ShenandoahHeapRegion::set_state(RegionState to) {
 }
 void ShenandoahHeapRegion::record_pin() {
-Atomic::add((size_t)1, &_critical_pins);
+Atomic::add(&_critical_pins, (size_t)1);
 }
 void ShenandoahHeapRegion::record_unpin() {

@@ -103,7 +103,7 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
 }
 inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
-size_t new_live_data = Atomic::add(s, &_live_data);
+size_t new_live_data = Atomic::add(&_live_data, s);
 #ifdef ASSERT
 size_t live_bytes = new_live_data * HeapWordSize;
 size_t used_bytes = used();

@@ -132,7 +132,7 @@ BinaryMagnitudeSeq::~BinaryMagnitudeSeq() {
 }
 void BinaryMagnitudeSeq::add(size_t val) {
-Atomic::add(val, &_sum);
+Atomic::add(&_sum, val);
 int mag = log2_intptr(val) + 1;
@@ -147,7 +147,7 @@ void BinaryMagnitudeSeq::add(size_t val) {
 mag = BitsPerSize_t - 1;
 }
-Atomic::add((size_t)1, &_mags[mag]);
+Atomic::add(&_mags[mag], (size_t)1);
 }
 size_t BinaryMagnitudeSeq::level(int level) const {

@@ -223,7 +223,7 @@ void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
 }
 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
-Atomic::add(tax, &_budget);
+Atomic::add(&_budget, tax);
 }
 intptr_t ShenandoahPacer::epoch() {

@@ -47,13 +47,13 @@ inline void ShenandoahPacer::report_alloc(size_t words) {
 inline void ShenandoahPacer::report_internal(size_t words) {
 assert(ShenandoahPacing, "Only be here when pacing is enabled");
 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-Atomic::add((intptr_t)words, &_budget);
+Atomic::add(&_budget, (intptr_t)words);
 }
 inline void ShenandoahPacer::report_progress_internal(size_t words) {
 assert(ShenandoahPacing, "Only be here when pacing is enabled");
 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
-Atomic::add((intptr_t)words, &_progress);
+Atomic::add(&_progress, (intptr_t)words);
 }
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP

@@ -304,7 +304,7 @@ T* ParallelClaimableQueueSet<T, F>::claim_next() {
 return NULL;
 }
-jint index = Atomic::add(1, &_claimed_index);
+jint index = Atomic::add(&_claimed_index, 1);
 if (index <= size) {
 return GenericTaskQueueSet<T, F>::queue((uint)index - 1);

@@ -139,7 +139,7 @@ private:
 // skip
 break;
 case ShenandoahVerifier::_verify_liveness_complete:
-Atomic::add((uint) obj->size(), &_ld[obj_reg->region_number()]);
+Atomic::add(&_ld[obj_reg->region_number()], (uint) obj->size());
 // fallthrough for fast failure for un-live regions:
 case ShenandoahVerifier::_verify_liveness_conservative:
 check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
@@ -479,7 +479,7 @@ public:
 }
 }
-Atomic::add(processed, &_processed);
+Atomic::add(&_processed, processed);
 }
 };
@@ -518,7 +518,7 @@ public:
 _options);
 while (true) {
-size_t v = Atomic::add(1u, &_claimed) - 1;
+size_t v = Atomic::add(&_claimed, 1u) - 1;
 if (v < _heap->num_regions()) {
 ShenandoahHeapRegion* r = _heap->get_region(v);
 if (!r->is_humongous() && !r->is_trash()) {
@@ -538,7 +538,7 @@ public:
 if (_heap->complete_marking_context()->is_marked((oop)obj)) {
 verify_and_follow(obj, stack, cl, &processed);
 }
-Atomic::add(processed, &_processed);
+Atomic::add(&_processed, processed);
 }
 virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
@@ -571,7 +571,7 @@ public:
 }
 }
-Atomic::add(processed, &_processed);
+Atomic::add(&_processed, processed);
 }
 void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) {

@@ -101,7 +101,7 @@ inline ZArrayIteratorImpl<T, parallel>::ZArrayIteratorImpl(ZArray<T>* array) :
 template <typename T, bool parallel>
 inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
 if (parallel) {
-const size_t next = Atomic::add(1u, &_next) - 1u;
+const size_t next = Atomic::add(&_next, 1u) - 1u;
 if (next < _array->size()) {
 *elem = _array->at(next);
 return true;

@@ -121,8 +121,8 @@ inline bool ZLiveMap::set(size_t index, bool finalizable, bool& inc_live) {
 }
 inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
-Atomic::add(objects, &_live_objects);
-Atomic::add(bytes, &_live_bytes);
+Atomic::add(&_live_objects, objects);
+Atomic::add(&_live_bytes, bytes);
 }
 inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {

@@ -110,8 +110,8 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
 // Increment top before end to make sure another
 // thread can't steal out newly expanded space.
-addr = Atomic::add(size, &_top) - size;
-Atomic::add(expand_size, &_end);
+addr = Atomic::add(&_top, size) - size;
+Atomic::add(&_end, expand_size);
 return addr;
 }

@@ -37,7 +37,7 @@ inline bool ZMarkTerminate::enter_stage(volatile uint* nworking_stage) {
 }
 inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) {
-Atomic::add(1u, nworking_stage);
+Atomic::add(nworking_stage, 1u);
 }
 inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) {

@ -58,7 +58,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
// Claim table partition. Each partition is currently sized to span // Claim table partition. Each partition is currently sized to span
// two cache lines. This number is just a guess, but seems to work well. // two cache lines. This number is just a guess, but seems to work well.
const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry); const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size); const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
const size_t partition_end = MIN2(partition_start + partition_size, _size); const size_t partition_end = MIN2(partition_start + partition_size, _size);
if (partition_start == partition_end) { if (partition_start == partition_end) {
// End of table // End of table
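
The claiming line above packs several things into one expression; isolated, the chunked-claim idiom looks like this (claimed, chunk and limit are placeholder names, MIN2 is the usual HotSpot helper): Atomic::add returns the updated counter, so subtracting the addend recovers the start of the chunk this thread just claimed, and the clamping handles the final partial chunk as well as threads that arrive after the counter has run past the end.

// Claim the next fixed-size chunk of [0, limit); an empty range means "done".
void claim_chunk(volatile size_t* claimed, size_t limit, size_t chunk,
                 size_t* start, size_t* end) {
  *start = MIN2(Atomic::add(claimed, chunk) - chunk, limit);  // value before our add, clamped
  *end   = MIN2(*start + chunk, limit);
}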

View file

@ -63,7 +63,7 @@ ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags
ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags); ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
if (page != NULL) { if (page != NULL) {
// Increment used bytes // Increment used bytes
Atomic::add(size, _used.addr()); Atomic::add(_used.addr(), size);
} }
return page; return page;
@ -71,7 +71,7 @@ ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags
void ZObjectAllocator::undo_alloc_page(ZPage* page) { void ZObjectAllocator::undo_alloc_page(ZPage* page) {
// Increment undone bytes // Increment undone bytes
Atomic::add(page->size(), _undone.addr()); Atomic::add(_undone.addr(), page->size());
ZHeap::heap()->undo_alloc_page(page); ZHeap::heap()->undo_alloc_page(page);
} }

View file

@ -38,7 +38,7 @@ inline bool ZRelocationSetIteratorImpl<parallel>::next(ZForwarding** forwarding)
if (parallel) { if (parallel) {
if (_next < nforwardings) { if (_next < nforwardings) {
const size_t next = Atomic::add(1u, &_next) - 1u; const size_t next = Atomic::add(&_next, 1u) - 1u;
if (next < nforwardings) { if (next < nforwardings) {
*forwarding = _relocation_set->_forwardings[next]; *forwarding = _relocation_set->_forwardings[next];
return true; return true;

View file

@ -761,8 +761,8 @@ THREAD_LOCAL uint32_t ZStatTimerDisable::_active = 0;
// //
void ZStatSample(const ZStatSampler& sampler, uint64_t value) { void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
ZStatSamplerData* const cpu_data = sampler.get(); ZStatSamplerData* const cpu_data = sampler.get();
Atomic::add(1u, &cpu_data->_nsamples); Atomic::add(&cpu_data->_nsamples, 1u);
Atomic::add(value, &cpu_data->_sum); Atomic::add(&cpu_data->_sum, value);
uint64_t max = cpu_data->_max; uint64_t max = cpu_data->_max;
for (;;) { for (;;) {
@ -787,14 +787,14 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
void ZStatInc(const ZStatCounter& counter, uint64_t increment) { void ZStatInc(const ZStatCounter& counter, uint64_t increment) {
ZStatCounterData* const cpu_data = counter.get(); ZStatCounterData* const cpu_data = counter.get();
const uint64_t value = Atomic::add(increment, &cpu_data->_counter); const uint64_t value = Atomic::add(&cpu_data->_counter, increment);
ZTracer::tracer()->report_stat_counter(counter, increment, value); ZTracer::tracer()->report_stat_counter(counter, increment, value);
} }
void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) { void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
ZStatCounterData* const cpu_data = counter.get(); ZStatCounterData* const cpu_data = counter.get();
Atomic::add(increment, &cpu_data->_counter); Atomic::add(&cpu_data->_counter, increment);
} }
// //

View file

@ -92,11 +92,11 @@ class MultiThreadedRefCounter {
MultiThreadedRefCounter() : _refs(0) {} MultiThreadedRefCounter() : _refs(0) {}
void inc() const { void inc() const {
Atomic::add(1, &_refs); Atomic::add(&_refs, 1);
} }
bool dec() const { bool dec() const {
return 0 == Atomic::add((-1), &_refs); return 0 == Atomic::add(&_refs, (-1));
} }
int current() const { int current() const {
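
dec() above leans on add() returning the updated value: hitting zero is detected by the same atomic operation that performed the decrement, with no separate load that another thread could race with. A standalone sketch of the pattern (the counter below is an invented file-scope example, not the JFR class):

static volatile int _refs = 1;  // one reference held by the creator

void retain() {
  Atomic::add(&_refs, 1);
}

bool release() {
  // Returns true exactly once, for the thread that dropped the last reference.
  return Atomic::add(&_refs, -1) == 0;
}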

View file

@ -30,13 +30,13 @@
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
jint LogOutputList::increase_readers() { jint LogOutputList::increase_readers() {
jint result = Atomic::add(1, &_active_readers); jint result = Atomic::add(&_active_readers, 1);
assert(_active_readers > 0, "Ensure we have consistent state"); assert(_active_readers > 0, "Ensure we have consistent state");
return result; return result;
} }
jint LogOutputList::decrease_readers() { jint LogOutputList::decrease_readers() {
jint result = Atomic::add(-1, &_active_readers); jint result = Atomic::add(&_active_readers, -1);
assert(result >= 0, "Ensure we have consistent state"); assert(result >= 0, "Ensure we have consistent state");
return result; return result;
} }

View file

@ -394,7 +394,7 @@ static void dec_stat_nonatomically(size_t* pstat, size_t words) {
} }
static void inc_stat_atomically(volatile size_t* pstat, size_t words) { static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
Atomic::add(words, pstat); Atomic::add(pstat, words);
} }
static void dec_stat_atomically(volatile size_t* pstat, size_t words) { static void dec_stat_atomically(volatile size_t* pstat, size_t words) {

View file

@ -580,7 +580,7 @@ oop Universe::gen_out_of_memory_error(oop default_err) {
int next; int next;
if ((_preallocated_out_of_memory_error_avail_count > 0) && if ((_preallocated_out_of_memory_error_avail_count > 0) &&
SystemDictionary::Throwable_klass()->is_initialized()) { SystemDictionary::Throwable_klass()->is_initialized()) {
next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count); next = (int)Atomic::add(&_preallocated_out_of_memory_error_avail_count, -1);
assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt"); assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
} else { } else {
next = -1; next = -1;

View file

@ -710,7 +710,7 @@ jint Klass::compute_modifier_flags(TRAPS) const {
} }
int Klass::atomic_incr_biased_lock_revocation_count() { int Klass::atomic_incr_biased_lock_revocation_count() {
return (int) Atomic::add(1, &_biased_lock_revocation_count); return (int) Atomic::add(&_biased_lock_revocation_count, 1);
} }
// Unless overridden, jvmti_class_status has no flags set. // Unless overridden, jvmti_class_status has no flags set.

View file

@ -327,7 +327,7 @@ void ResolvedMethodTable::reset_dead_counter() {
} }
void ResolvedMethodTable::inc_dead_counter(size_t ndead) { void ResolvedMethodTable::inc_dead_counter(size_t ndead) {
size_t total = Atomic::add(ndead, &_uncleaned_items_count); size_t total = Atomic::add(&_uncleaned_items_count, ndead);
log_trace(membername, table)( log_trace(membername, table)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT, "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total); _uncleaned_items_count, ndead, total);

View file

@ -100,8 +100,8 @@ public:
// Atomically add to a location. Returns updated value. add*() provide: // Atomically add to a location. Returns updated value. add*() provide:
// <fence> add-value-to-dest <membar StoreLoad|StoreStore> // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
template<typename I, typename D> template<typename D, typename I>
inline static D add(I add_value, D volatile* dest, inline static D add(D volatile* dest, I add_value,
atomic_memory_order order = memory_order_conservative); atomic_memory_order order = memory_order_conservative);
template<typename I, typename D> template<typename I, typename D>
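
For callers the visible change is only the argument order: destination first, addend second, with the updated value returned and conservative ordering by default. A hedged usage sketch against the declaration above (the counter and function are invented for illustration):

#include "runtime/atomic.hpp"

static volatile size_t _bytes_allocated = 0;

size_t record_allocation(size_t bytes) {
  // Destination first, value second; the updated total is returned.
  return Atomic::add(&_bytes_allocated, bytes);
}
// A weaker ordering can still be requested explicitly, e.g.
//   Atomic::add(&_bytes_allocated, bytes, memory_order_relaxed);
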
@ -224,7 +224,7 @@ private:
// Dispatch handler for add. Provides type-based validity checking // Dispatch handler for add. Provides type-based validity checking
// and limited conversions around calls to the platform-specific // and limited conversions around calls to the platform-specific
// implementation layer provided by PlatformAdd. // implementation layer provided by PlatformAdd.
template<typename I, typename D, typename Enable = void> template<typename D, typename I, typename Enable = void>
struct AddImpl; struct AddImpl;
// Platform-specific implementation of add. Support for sizes of 4 // Platform-specific implementation of add. Support for sizes of 4
@ -239,7 +239,7 @@ private:
// - platform_add is an object of type PlatformAdd<sizeof(D)>. // - platform_add is an object of type PlatformAdd<sizeof(D)>.
// //
// Then // Then
// platform_add(add_value, dest) // platform_add(dest, add_value)
// must be a valid expression, returning a result convertible to D. // must be a valid expression, returning a result convertible to D.
// //
// No definition is provided; all platforms must explicitly define // No definition is provided; all platforms must explicitly define
@ -259,12 +259,12 @@ private:
// otherwise, addend is add_value. // otherwise, addend is add_value.
// //
// FetchAndAdd requires the derived class to provide // FetchAndAdd requires the derived class to provide
// fetch_and_add(addend, dest) // fetch_and_add(dest, addend)
// atomically adding addend to the value of dest, and returning the // atomically adding addend to the value of dest, and returning the
// old value. // old value.
// //
// AddAndFetch requires the derived class to provide // AddAndFetch requires the derived class to provide
// add_and_fetch(addend, dest) // add_and_fetch(dest, addend)
// atomically adding addend to the value of dest, and returning the // atomically adding addend to the value of dest, and returning the
// new value. // new value.
// //
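
To make the two helper contracts concrete, here is a hedged sketch of a platform definition built on FetchAndAdd; GCC's __atomic_fetch_add stands in for whatever primitive a real port would use, and none of the ports touched by this change is reproduced here. The port supplies an old-value primitive in destination-first order, and FetchAndAdd's operator() adds add_value back on, so Atomic::add() still reports the updated value.

// Sketch of a hypothetical port; conservative (full-fence) semantics are
// simply assumed instead of mapping 'order' onto the hardware.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    STATIC_ASSERT(byte_size == sizeof(D));
    return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);  // returns the old value
  }
};
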
@ -286,8 +286,8 @@ private:
// function. No scaling of add_value is performed when D is a pointer // function. No scaling of add_value is performed when D is a pointer
// type, so this function can be used to implement the support function // type, so this function can be used to implement the support function
// required by AddAndFetch. // required by AddAndFetch.
template<typename Type, typename Fn, typename I, typename D> template<typename Type, typename Fn, typename D, typename I>
static D add_using_helper(Fn fn, I add_value, D volatile* dest); static D add_using_helper(Fn fn, D volatile* dest, I add_value);
// Dispatch handler for cmpxchg. Provides type-based validity // Dispatch handler for cmpxchg. Provides type-based validity
// checking and limited conversions around calls to the // checking and limited conversions around calls to the
@ -517,21 +517,21 @@ struct Atomic::PlatformStore {
template<typename Derived> template<typename Derived>
struct Atomic::FetchAndAdd { struct Atomic::FetchAndAdd {
template<typename I, typename D> template<typename D, typename I>
D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
}; };
template<typename Derived> template<typename Derived>
struct Atomic::AddAndFetch { struct Atomic::AddAndFetch {
template<typename I, typename D> template<typename D, typename I>
D operator()(I add_value, D volatile* dest, atomic_memory_order order) const; D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
}; };
template<typename D> template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) { inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value); STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
Atomic::add(I(1), dest, order); Atomic::add(dest, I(1), order);
} }
template<typename D> template<typename D>
@ -540,7 +540,7 @@ inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I; typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
// Assumes two's complement integer representation. // Assumes two's complement integer representation.
#pragma warning(suppress: 4146) #pragma warning(suppress: 4146)
Atomic::add(I(-1), dest, order); Atomic::add(dest, I(-1), order);
} }
template<typename I, typename D> template<typename I, typename D>
@ -557,7 +557,7 @@ inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
AddendType addend = sub_value; AddendType addend = sub_value;
// Assumes two's complement integer representation. // Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed. #pragma warning(suppress: 4146) // In case AddendType is not signed.
return Atomic::add(-addend, dest, order); return Atomic::add(dest, -addend, order);
} }
// Define the class before including platform file, which may specialize // Define the class before including platform file, which may specialize
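
With add() harmonized, inc(), dec() and sub() remain thin wrappers that all funnel into it, as the definitions above show; note that sub() keeps its value-first signature in this patch. A short usage sketch with an invented counter:

static volatile size_t _live = 0;

void adjust_live() {
  Atomic::inc(&_live);      // forwards to Atomic::add(&_live, (size_t)1)
  Atomic::dec(&_live);      // forwards to Atomic::add(&_live, (size_t)-1), two's complement
  Atomic::sub(2u, &_live);  // negates the addend and forwards it to add(&_live, ...)
}
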
@ -678,68 +678,68 @@ inline void Atomic::release_store_fence(volatile D* p, T v) {
StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v); StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
} }
template<typename I, typename D> template<typename D, typename I>
inline D Atomic::add(I add_value, D volatile* dest, inline D Atomic::add(D volatile* dest, I add_value,
atomic_memory_order order) { atomic_memory_order order) {
return AddImpl<I, D>()(add_value, dest, order); return AddImpl<D, I>()(dest, add_value, order);
} }
template<typename I, typename D> template<typename D, typename I>
struct Atomic::AddImpl< struct Atomic::AddImpl<
I, D, D, I,
typename EnableIf<IsIntegral<I>::value && typename EnableIf<IsIntegral<I>::value &&
IsIntegral<D>::value && IsIntegral<D>::value &&
(sizeof(I) <= sizeof(D)) && (sizeof(I) <= sizeof(D)) &&
(IsSigned<I>::value == IsSigned<D>::value)>::type> (IsSigned<I>::value == IsSigned<D>::value)>::type>
{ {
D operator()(I add_value, D volatile* dest, atomic_memory_order order) const { D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
D addend = add_value; D addend = add_value;
return PlatformAdd<sizeof(D)>()(addend, dest, order); return PlatformAdd<sizeof(D)>()(dest, addend, order);
} }
}; };
template<typename I, typename P> template<typename P, typename I>
struct Atomic::AddImpl< struct Atomic::AddImpl<
I, P*, P*, I,
typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type> typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{ {
P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const { P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*)); STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*)); STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
typedef typename Conditional<IsSigned<I>::value, typedef typename Conditional<IsSigned<I>::value,
intptr_t, intptr_t,
uintptr_t>::type CI; uintptr_t>::type CI;
CI addend = add_value; CI addend = add_value;
return PlatformAdd<sizeof(P*)>()(addend, dest, order); return PlatformAdd<sizeof(P*)>()(dest, addend, order);
} }
}; };
template<typename Derived> template<typename Derived>
template<typename I, typename D> template<typename D, typename I>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest, inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
atomic_memory_order order) const { atomic_memory_order order) const {
I addend = add_value; I addend = add_value;
// If D is a pointer type P*, scale by sizeof(P). // If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) { if (IsPointer<D>::value) {
addend *= sizeof(typename RemovePointer<D>::type); addend *= sizeof(typename RemovePointer<D>::type);
} }
D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order); D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
return old + add_value; return old + add_value;
} }
template<typename Derived> template<typename Derived>
template<typename I, typename D> template<typename D, typename I>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest, inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
atomic_memory_order order) const { atomic_memory_order order) const {
// If D is a pointer type P*, scale by sizeof(P). // If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) { if (IsPointer<D>::value) {
add_value *= sizeof(typename RemovePointer<D>::type); add_value *= sizeof(typename RemovePointer<D>::type);
} }
return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order); return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
} }
template<typename Type, typename Fn, typename I, typename D> template<typename Type, typename Fn, typename D, typename I>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) { inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
return PrimitiveConversions::cast<D>( return PrimitiveConversions::cast<D>(
fn(PrimitiveConversions::cast<Type>(add_value), fn(PrimitiveConversions::cast<Type>(add_value),
reinterpret_cast<Type volatile*>(dest))); reinterpret_cast<Type volatile*>(dest)));
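
A consequence of the scaling done in FetchAndAdd and AddAndFetch above: when the destination is a pointer, the addend is in elements, not bytes, mirroring ordinary pointer arithmetic. A hedged sketch with an invented bump pointer:

static HeapWord* volatile _top = NULL;

HeapWord* bump(size_t words) {
  // D is HeapWord*, so 'words' is scaled by sizeof(HeapWord); add() returns
  // the updated pointer, and the claimed block starts 'words' below it.
  HeapWord* new_top = Atomic::add(&_top, words);
  return new_top - words;
}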

View file

@ -668,7 +668,7 @@ static bool has_reached_max_malloc_test_peak(size_t alloc_size) {
if ((cur_malloc_words + words) > MallocMaxTestWords) { if ((cur_malloc_words + words) > MallocMaxTestWords) {
return true; return true;
} }
Atomic::add(words, &cur_malloc_words); Atomic::add(&cur_malloc_words, words);
} }
return false; return false;
} }

View file

@ -134,7 +134,7 @@ uint ThreadsSMRSupport::_to_delete_list_max = 0;
// 'inline' functions first so the definitions are before first use: // 'inline' functions first so the definitions are before first use:
inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) { inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) {
Atomic::add(add_value, &_deleted_thread_times); Atomic::add(&_deleted_thread_times, add_value);
} }
inline void ThreadsSMRSupport::inc_deleted_thread_cnt() { inline void ThreadsSMRSupport::inc_deleted_thread_cnt() {

View file

@ -56,7 +56,7 @@ inline void ThreadsList::threads_do(T *cl) const {
// they are called by public inline update_tlh_stats() below: // they are called by public inline update_tlh_stats() below:
inline void ThreadsSMRSupport::add_tlh_times(uint add_value) { inline void ThreadsSMRSupport::add_tlh_times(uint add_value) {
Atomic::add(add_value, &_tlh_times); Atomic::add(&_tlh_times, add_value);
} }
inline void ThreadsSMRSupport::inc_tlh_cnt() { inline void ThreadsSMRSupport::inc_tlh_cnt() {

View file

@ -153,7 +153,7 @@ class MallocSiteTable : AllStatic {
// Acquire shared lock. // Acquire shared lock.
// Return true if shared access is granted. // Return true if shared access is granted.
inline bool sharedLock() { inline bool sharedLock() {
jint res = Atomic::add(1, _lock); jint res = Atomic::add(_lock, 1);
if (res < 0) { if (res < 0) {
Atomic::dec(_lock); Atomic::dec(_lock);
return false; return false;
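
sharedLock() above uses the counter's sign as the lock state: a reader optimistically adds 1 and backs out if the result is negative, which a (hypothetical here) exclusive locker arranges by driving the counter negative. A standalone sketch of the idiom, simplified from the real table:

static volatile int _lock = 0;  // > 0: readers inside, < 0: writer holds the lock

bool try_shared_lock() {
  if (Atomic::add(&_lock, 1) < 0) {
    Atomic::dec(&_lock);  // back out the optimistic increment
    return false;
  }
  return true;
}

void shared_unlock() {
  Atomic::dec(&_lock);
}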

View file

@ -55,7 +55,7 @@ class MemoryCounter {
inline void allocate(size_t sz) { inline void allocate(size_t sz) {
Atomic::inc(&_count); Atomic::inc(&_count);
if (sz > 0) { if (sz > 0) {
Atomic::add(sz, &_size); Atomic::add(&_size, sz);
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size)); DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
} }
DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);) DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
@ -72,7 +72,7 @@ class MemoryCounter {
inline void resize(long sz) { inline void resize(long sz) {
if (sz != 0) { if (sz != 0) {
Atomic::add(size_t(sz), &_size); Atomic::add(&_size, size_t(sz));
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);) DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
} }
} }

View file

@ -53,7 +53,7 @@ class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
// Returns true if you succeeded to claim the range start -> (stop-1). // Returns true if you succeeded to claim the range start -> (stop-1).
bool claim(size_t* start, size_t* stop) { bool claim(size_t* start, size_t* stop) {
size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1; size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
if (claimed >= _stop_task) { if (claimed >= _stop_task) {
return false; return false;
} }
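
The (size_t)1 cast above is not decoration: the integral AddImpl specialization shown earlier requires the addend's signedness to match the destination's and its size not to exceed it, so a plain int literal does not bind against a volatile size_t counter. A hedged illustration with a standalone stand-in for the claim counter:

static volatile size_t _next_to_claim = 0;

void addend_types() {
  Atomic::add(&_next_to_claim, (size_t)1);  // OK: unsigned addend, unsigned destination
  Atomic::add(&_next_to_claim, 1u);         // OK: narrower unsigned addend
  // Atomic::add(&_next_to_claim, 1);       // no matching AddImpl: signed addend, unsigned destination
}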

View file

@ -59,7 +59,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
void GlobalCounter::write_synchronize() { void GlobalCounter::write_synchronize() {
assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section"); assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
// Atomic::add must provide fence since we have storeload dependency. // Atomic::add must provide fence since we have storeload dependency.
uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter); uintx gbl_cnt = Atomic::add(&_global_counter._counter, COUNTER_INCREMENT);
// Do all RCU threads. // Do all RCU threads.
CounterThreadCheck ctc(gbl_cnt); CounterThreadCheck ctc(gbl_cnt);
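
The comment above is load-bearing: write_synchronize() depends on the default, conservative ordering of add() acting as a full fence between bumping the global counter and the subsequent reads of each reader thread's counter. A hedged restatement as a tiny helper:

// Hypothetical helper making the requirement explicit: keep the default
// memory_order_conservative here; a relaxed add would allow the later loads
// of the per-thread counters to float above the increment.
static uintx bump_global_counter(volatile uintx* counter, uintx increment) {
  return Atomic::add(counter, increment);
}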

View file

@ -44,7 +44,7 @@ SingleWriterSynchronizer::SingleWriterSynchronizer() :
// synchronization have exited that critical section. // synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() { void SingleWriterSynchronizer::synchronize() {
// Side-effect in assert balanced by debug-only dec at end. // Side-effect in assert balanced by debug-only dec at end.
assert(Atomic::add(1u, &_writers) == 1u, "multiple writers"); assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
// We don't know anything about the muxing between this invocation // We don't know anything about the muxing between this invocation
// and invocations in other threads. We must start with the latest // and invocations in other threads. We must start with the latest
// _enter polarity, else we could clobber the wrong _exit value on // _enter polarity, else we could clobber the wrong _exit value on

View file

@ -89,11 +89,11 @@ public:
}; };
inline uint SingleWriterSynchronizer::enter() { inline uint SingleWriterSynchronizer::enter() {
return Atomic::add(2u, &_enter); return Atomic::add(&_enter, 2u);
} }
inline void SingleWriterSynchronizer::exit(uint enter_value) { inline void SingleWriterSynchronizer::exit(uint enter_value) {
uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]); uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
// If this exit completes a synchronize request, wakeup possibly // If this exit completes a synchronize request, wakeup possibly
// waiting synchronizer. Read of _waiting_for must follow the _exit // waiting synchronizer. Read of _waiting_for must follow the _exit
// update. // update.

View file

@ -82,13 +82,13 @@ void GenericWaitBarrier::wait(int barrier_tag) {
OrderAccess::fence(); OrderAccess::fence();
return; return;
} }
Atomic::add(1, &_barrier_threads); Atomic::add(&_barrier_threads, 1);
if (barrier_tag != 0 && barrier_tag == _barrier_tag) { if (barrier_tag != 0 && barrier_tag == _barrier_tag) {
Atomic::add(1, &_waiters); Atomic::add(&_waiters, 1);
_sem_barrier.wait(); _sem_barrier.wait();
// We help out with posting, but we need to do so before we decrement the // We help out with posting, but we need to do so before we decrement the
// _barrier_threads otherwise we might wake threads up in next wait. // _barrier_threads otherwise we might wake threads up in next wait.
GenericWaitBarrier::wake_if_needed(); GenericWaitBarrier::wake_if_needed();
} }
Atomic::add(-1, &_barrier_threads); Atomic::add(&_barrier_threads, -1);
} }

View file

@ -114,7 +114,7 @@ public:
ThreadBlockInVM tbiv(this); // Safepoint check. ThreadBlockInVM tbiv(this); // Safepoint check.
} }
tty->print_cr("%u allocations: " SIZE_FORMAT, _thread_number, _allocations); tty->print_cr("%u allocations: " SIZE_FORMAT, _thread_number, _allocations);
Atomic::add(_allocations, _total_allocations); Atomic::add(_total_allocations, _allocations);
} }
}; };

View file

@ -157,7 +157,7 @@ public:
ThreadBlockInVM tbiv(this); // Safepoint check. ThreadBlockInVM tbiv(this); // Safepoint check.
} }
tty->print_cr("allocations: " SIZE_FORMAT, _allocations); tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
Atomic::add(_allocations, _total_allocations); Atomic::add(_total_allocations, _allocations);
} }
}; };