8186903: Remove j-types from Atomic

Change jlong into int64_t, atomic_FN_long into atomic_FN_int64, and jbyte into u_char.

Reviewed-by: dholmes, dcubed
This commit is contained in:
Coleen Phillimore 2017-12-19 06:29:17 -05:00
parent f01d0f469c
commit 1d0acb189a
22 changed files with 213 additions and 216 deletions

View file

@ -433,7 +433,7 @@ class StubGenerator: public StubCodeGenerator {
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
// //
// xchg exists as far back as 8086, lock needed for MP only // xchg exists as far back as 8086, lock needed for MP only
// Stack layout immediately after call: // Stack layout immediately after call:

View file

@ -611,8 +611,8 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
// Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest, // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
// jbyte compare_value) // int8_t compare_value)
// //
// Arguments : // Arguments :
// c_rarg0: exchange_value // c_rarg0: exchange_value
@ -637,9 +637,9 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
// Support for jlong atomic::atomic_cmpxchg(jlong exchange_value, // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
// volatile jlong* dest, // volatile int64_t* dest,
// jlong compare_value) // int64_t compare_value)
// Arguments : // Arguments :
// c_rarg0: exchange_value // c_rarg0: exchange_value
// c_rarg1: dest // c_rarg1: dest
@ -694,8 +694,8 @@ class StubGenerator: public StubCodeGenerator {
// Result: // Result:
// *dest += add_value // *dest += add_value
// return *dest; // return *dest;
address generate_atomic_add_ptr() { address generate_atomic_add_long() {
StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
address start = __ pc(); address start = __ pc();
__ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@ -5015,14 +5015,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_catch_exception_entry = generate_catch_exception(); StubRoutines::_catch_exception_entry = generate_catch_exception();
// atomic calls // atomic calls
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long(); StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_entry = generate_atomic_add(); StubRoutines::_atomic_add_entry = generate_atomic_add();
StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); StubRoutines::_atomic_add_long_entry = generate_atomic_add_long();
StubRoutines::_fence_entry = generate_orderaccess_fence(); StubRoutines::_fence_entry = generate_orderaccess_fence();
// platform dependent // platform dependent
StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();

View file

@ -258,7 +258,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_add_long_entry = ShouldNotCallThisStub();
StubRoutines::_fence_entry = ShouldNotCallThisStub(); StubRoutines::_fence_entry = ShouldNotCallThisStub();
} }

View file

@ -132,8 +132,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
extern "C" { extern "C" {
// defined in bsd_x86.s // defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool); int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst); void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
} }
template<> template<>
@ -143,15 +143,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value, T compare_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
} }
template<> template<>
template<typename T> template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest)); _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest); return PrimitiveConversions::cast<T>(dest);
} }
@ -160,7 +160,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value, inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest)); _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
} }
#endif // AMD64 #endif // AMD64

View file

@ -633,10 +633,10 @@ mmx_acs_CopyLeft:
ret ret
# Support for jlong Atomic::cmpxchg(jlong exchange_value, # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
# volatile jlong* dest, # volatile int64_t* dest,
# jlong compare_value, # int64_t compare_value,
# bool is_MP) # bool is_MP)
# #
.p2align 4,,15 .p2align 4,,15
ELF_TYPE(_Atomic_cmpxchg_long,@function) ELF_TYPE(_Atomic_cmpxchg_long,@function)
@ -658,8 +658,8 @@ SYMBOL(_Atomic_cmpxchg_long):
ret ret
# Support for jlong Atomic::load and Atomic::store. # Support for int64_t Atomic::load and Atomic::store.
# void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst) # void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst)
.p2align 4,,15 .p2align 4,,15
ELF_TYPE(_Atomic_move_long,@function) ELF_TYPE(_Atomic_move_long,@function)
SYMBOL(_Atomic_move_long): SYMBOL(_Atomic_move_long):

View file

@ -265,8 +265,8 @@ template<>
template<typename T> template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest)); os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest); return PrimitiveConversions::cast<T>(dest);
} }
@ -275,7 +275,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value, inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest)); os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
} }
#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP

View file

@ -50,7 +50,7 @@ template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
(*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src))); (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
} }
template<> template<>
@ -59,7 +59,7 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
(*os::atomic_store_long_func)( (*os::atomic_store_long_func)(
PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest)); PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
} }
#endif #endif
@ -103,7 +103,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
: "memory"); : "memory");
return val; return val;
#else #else
return add_using_helper<jint>(os::atomic_add_func, add_value, dest); return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
#endif #endif
} }
@ -146,7 +146,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
: "memory"); : "memory");
return old_val; return old_val;
#else #else
return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest); return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
#endif #endif
} }
@ -178,17 +178,17 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
#ifndef AARCH64 #ifndef AARCH64
inline jint reorder_cmpxchg_func(jint exchange_value, inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
jint volatile* dest, int32_t volatile* dest,
jint compare_value) { int32_t compare_value) {
// Warning: Arguments are swapped to avoid moving them for kernel call // Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest); return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
} }
inline jlong reorder_cmpxchg_long_func(jlong exchange_value, inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
jlong volatile* dest, int64_t volatile* dest,
jlong compare_value) { int64_t compare_value) {
assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!"); assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
// Warning: Arguments are swapped to avoid moving them for kernel call // Warning: Arguments are swapped to avoid moving them for kernel call
return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest); return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
} }
@ -221,7 +221,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
: "memory"); : "memory");
return rv; return rv;
#else #else
return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value); return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif #endif
} }
@ -251,7 +251,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
: "memory"); : "memory");
return rv; return rv;
#else #else
return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value); return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif #endif
} }

View file

@ -598,11 +598,11 @@ void os::print_register_info(outputStream *st, const void *context) {
#ifndef AARCH64 #ifndef AARCH64
typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*); typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) { int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
// try to use the stub: // try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -612,16 +612,16 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_valu
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest; int64_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
typedef jlong load_long_func_t(const volatile jlong*); typedef int64_t load_long_func_t(const volatile int64_t*);
load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap; load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
jlong os::atomic_load_long_bootstrap(const volatile jlong* src) { int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
// try to use the stub: // try to use the stub:
load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry()); load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
@ -631,15 +631,15 @@ jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *src; int64_t old_value = *src;
return old_value; return old_value;
} }
typedef void store_long_func_t(jlong, volatile jlong*); typedef void store_long_func_t(int64_t, volatile int64_t*);
store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap; store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) { void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
// try to use the stub: // try to use the stub:
store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry()); store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
@ -652,11 +652,11 @@ void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
*dest = val; *dest = val;
} }
typedef jint atomic_add_func_t(jint add_value, volatile jint *dest); typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap; atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) { int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*, atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
StubRoutines::atomic_add_entry()); StubRoutines::atomic_add_entry());
if (func != NULL) { if (func != NULL) {
@ -664,16 +664,16 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
return (*func)(add_value, dest); return (*func)(add_value, dest);
} }
jint old_value = *dest; int32_t old_value = *dest;
*dest = old_value + add_value; *dest = old_value + add_value;
return (old_value + add_value); return (old_value + add_value);
} }
typedef jint atomic_xchg_func_t(jint exchange_value, volatile jint *dest); typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap; atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) { int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*, atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
StubRoutines::atomic_xchg_entry()); StubRoutines::atomic_xchg_entry());
if (func != NULL) { if (func != NULL) {
@ -681,16 +681,16 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
return (*func)(exchange_value, dest); return (*func)(exchange_value, dest);
} }
jint old_value = *dest; int32_t old_value = *dest;
*dest = exchange_value; *dest = exchange_value;
return (old_value); return (old_value);
} }
typedef jint cmpxchg_func_t(jint, jint, volatile jint*); typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap; cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) { int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub: // try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -700,7 +700,7 @@ jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volat
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest; int32_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;

View file

@ -45,35 +45,35 @@
static bool register_code_area(char *low, char *high) { return true; } static bool register_code_area(char *low, char *high) { return true; }
#ifndef AARCH64 #ifndef AARCH64
static jlong (*atomic_cmpxchg_long_func)(jlong compare_value, static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
jlong exchange_value, int64_t exchange_value,
volatile jlong *dest); volatile int64_t *dest);
static jlong (*atomic_load_long_func)(const volatile jlong*); static int64_t (*atomic_load_long_func)(const volatile int64_t*);
static void (*atomic_store_long_func)(jlong, volatile jlong*); static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
static jint (*atomic_add_func)(jint add_value, volatile jint *dest); static int32_t (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
static jint (*atomic_xchg_func)(jint exchange_value, volatile jint *dest); static int32_t (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
static jint (*atomic_cmpxchg_func)(jint compare_value, static int32_t (*atomic_cmpxchg_func)(int32_t compare_value,
jint exchange_value, int32_t exchange_value,
volatile jint *dest); volatile int32_t *dest);
static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*); static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
static jlong atomic_load_long_bootstrap(const volatile jlong*); static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
static void atomic_store_long_bootstrap(jlong, volatile jlong*); static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
static jint atomic_add_bootstrap(jint add_value, volatile jint *dest); static int32_t atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
static jint atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest); static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
static jint atomic_cmpxchg_bootstrap(jint compare_value, static int32_t atomic_cmpxchg_bootstrap(int32_t compare_value,
jint exchange_value, int32_t exchange_value,
volatile jint *dest); volatile int32_t *dest);
#endif // !AARCH64 #endif // !AARCH64
#endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP #endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,15 @@
// //
// NOTE: we are back in class os here, not Linux // NOTE: we are back in class os here, not Linux
// //
static jint (*atomic_xchg_func) (jint, volatile jint*); static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static jint (*atomic_add_func) (jint, volatile jint*); static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*); static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static jint atomic_add_bootstrap (jint, volatile jint*); static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu() {} static void setup_fpu() {}

View file

@ -133,8 +133,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
extern "C" { extern "C" {
// defined in linux_x86.s // defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong); int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst); void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
} }
template<> template<>
@ -144,15 +144,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value, T compare_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value); return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
} }
template<> template<>
template<typename T> template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile int64_t dest;
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest)); _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest); return PrimitiveConversions::cast<T>(dest);
} }
@ -161,7 +161,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value, inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest)); _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
} }
#endif // AMD64 #endif // AMD64

View file

@ -185,8 +185,8 @@ template<>
template<typename T> template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
volatile jlong dest; volatile int64_t dest;
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest)); os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest); return PrimitiveConversions::cast<T>(dest);
} }
@ -195,7 +195,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value, inline void Atomic::PlatformStore<8>::operator()(T store_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest)); os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
} }
#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,15 +28,15 @@
// //
// NOTE: we are back in class os here, not Solaris // NOTE: we are back in class os here, not Solaris
// //
static jint (*atomic_xchg_func) (jint, volatile jint*); static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static jint (*atomic_add_func) (jint, volatile jint*); static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*); static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static jint atomic_add_bootstrap (jint, volatile jint*); static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu() {} static void setup_fpu() {}

View file

@ -28,16 +28,16 @@
// For Sun Studio - implementation is in solaris_x86_64.il. // For Sun Studio - implementation is in solaris_x86_64.il.
extern "C" { extern "C" {
jint _Atomic_add(jint add_value, volatile jint* dest); int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest);
jlong _Atomic_add_long(jlong add_value, volatile jlong* dest); int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest);
jint _Atomic_xchg(jint exchange_value, volatile jint* dest); int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest);
jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, int8_t _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest,
jbyte compare_value); int8_t compare_value);
jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
jint compare_value); int32_t compare_value);
jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest,
jlong compare_value); int64_t compare_value);
} }
template<size_t byte_size> template<size_t byte_size>
@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D)); STATIC_ASSERT(4 == sizeof(D));
return PrimitiveConversions::cast<D>( return PrimitiveConversions::cast<D>(
_Atomic_add(PrimitiveConversions::cast<jint>(add_value), _Atomic_add(PrimitiveConversions::cast<int32_t>(add_value),
reinterpret_cast<jint volatile*>(dest))); reinterpret_cast<int32_t volatile*>(dest)));
} }
// Not using add_using_helper; see comment for cmpxchg. // Not using add_using_helper; see comment for cmpxchg.
@ -66,8 +66,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D)); STATIC_ASSERT(8 == sizeof(D));
return PrimitiveConversions::cast<D>( return PrimitiveConversions::cast<D>(
_Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value), _Atomic_add_long(PrimitiveConversions::cast<int64_t>(add_value),
reinterpret_cast<jlong volatile*>(dest))); reinterpret_cast<int64_t volatile*>(dest)));
} }
template<> template<>
@ -76,11 +76,11 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T)); STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
_Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value), _Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
reinterpret_cast<jint volatile*>(dest))); reinterpret_cast<int32_t volatile*>(dest)));
} }
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest); extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest);
template<> template<>
template<typename T> template<typename T>
@ -88,8 +88,8 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const { T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
_Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value), _Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
reinterpret_cast<jlong volatile*>(dest))); reinterpret_cast<int64_t volatile*>(dest)));
} }
// Not using cmpxchg_using_helper here, because some configurations of // Not using cmpxchg_using_helper here, because some configurations of
@ -106,9 +106,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T)); STATIC_ASSERT(1 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value), _Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
reinterpret_cast<jbyte volatile*>(dest), reinterpret_cast<int8_t volatile*>(dest),
PrimitiveConversions::cast<jbyte>(compare_value))); PrimitiveConversions::cast<int8_t>(compare_value)));
} }
template<> template<>
@ -119,9 +119,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T)); STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value), _Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
reinterpret_cast<jint volatile*>(dest), reinterpret_cast<int32_t volatile*>(dest),
PrimitiveConversions::cast<jint>(compare_value))); PrimitiveConversions::cast<int32_t>(compare_value)));
} }
template<> template<>
@ -132,9 +132,9 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>( return PrimitiveConversions::cast<T>(
_Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value), _Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
reinterpret_cast<jlong volatile*>(dest), reinterpret_cast<int64_t volatile*>(dest),
PrimitiveConversions::cast<jlong>(compare_value))); PrimitiveConversions::cast<int64_t>(compare_value)));
} }
#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP

View file

@ -904,12 +904,12 @@ void os::Solaris::init_thread_fpu_state(void) {
// until initialization is complete. // until initialization is complete.
// TODO - replace with .il implementation when compiler supports it. // TODO - replace with .il implementation when compiler supports it.
typedef jint xchg_func_t (jint, volatile jint*); typedef int32_t xchg_func_t (int32_t, volatile int32_t*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint); typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong); typedef int64_t cmpxchg_long_func_t(int64_t, volatile int64_t*, int64_t);
typedef jint add_func_t (jint, volatile jint*); typedef int32_t add_func_t (int32_t, volatile int32_t*);
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub: // try to use the stub:
xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
@ -919,12 +919,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest; int32_t old_value = *dest;
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) { int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
// try to use the stub: // try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -934,13 +934,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest; int32_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
// try to use the stub: // try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -950,13 +950,13 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest; int64_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
// try to use the stub: // try to use the stub:
add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,15 +31,15 @@
#ifdef AMD64 #ifdef AMD64
static void setup_fpu() {} static void setup_fpu() {}
#else #else
static jint (*atomic_xchg_func) (jint, volatile jint*); static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong); static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
static jint (*atomic_add_func) (jint, volatile jint*); static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*); static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
static jint atomic_add_bootstrap (jint, volatile jint*); static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static void setup_fpu(); static void setup_fpu();
#endif // AMD64 #endif // AMD64

View file

@ -54,13 +54,13 @@ struct Atomic::PlatformAdd
template<> template<>
template<typename I, typename D> template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const { inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<jint>(os::atomic_add_func, add_value, dest); return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
} }
template<> template<>
template<typename I, typename D> template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const { inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest); return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
} }
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \ #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
@ -72,8 +72,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return xchg_using_helper<StubType>(StubName, exchange_value, dest); \ return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
} }
DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func) DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)
#undef DEFINE_STUB_XCHG #undef DEFINE_STUB_XCHG
@ -88,9 +88,9 @@ DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \ return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
} }
DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func) DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func) DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func) DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)
#undef DEFINE_STUB_CMPXCHG #undef DEFINE_STUB_CMPXCHG
@ -162,10 +162,10 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
T compare_value, T compare_value,
cmpxchg_memory_order order) const { cmpxchg_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T)); STATIC_ASSERT(8 == sizeof(T));
jint ex_lo = (jint)exchange_value; int32_t ex_lo = (int32_t)exchange_value;
jint ex_hi = *( ((jint*)&exchange_value) + 1 ); int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 );
jint cmp_lo = (jint)compare_value; int32_t cmp_lo = (int32_t)compare_value;
jint cmp_hi = *( ((jint*)&compare_value) + 1 ); int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
__asm { __asm {
push ebx push ebx
push edi push edi

View file

@ -218,17 +218,17 @@ void os::initialize_thread(Thread* thr) {
// Atomics and Stub Functions // Atomics and Stub Functions
typedef jint xchg_func_t (jint, volatile jint*); typedef int32_t xchg_func_t (int32_t, volatile int32_t*);
typedef intptr_t xchg_long_func_t (jlong, volatile jlong*); typedef int64_t xchg_long_func_t (int64_t, volatile int64_t*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint); typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t);
typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte); typedef int8_t cmpxchg_byte_func_t (int8_t, volatile int8_t*, int8_t);
typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong); typedef int64_t cmpxchg_long_func_t (int64_t, volatile int64_t*, int64_t);
typedef jint add_func_t (jint, volatile jint*); typedef int32_t add_func_t (int32_t, volatile int32_t*);
typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*); typedef int64_t add_long_func_t (int64_t, volatile int64_t*);
#ifdef AMD64 #ifdef AMD64
jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
// try to use the stub: // try to use the stub:
xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
@ -238,12 +238,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest; int32_t old_value = *dest;
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) { int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
// try to use the stub: // try to use the stub:
xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry()); xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
@ -253,13 +253,13 @@ intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* de
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
intptr_t old_value = *dest; int64_t old_value = *dest;
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) { int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
// try to use the stub: // try to use the stub:
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
@ -269,13 +269,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jint old_value = *dest; int32_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
} }
jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
// try to use the stub: // try to use the stub:
cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry()); cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
@ -285,7 +285,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jbyte old_value = *dest; int8_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
@ -293,7 +293,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de
#endif // AMD64 #endif // AMD64
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
// try to use the stub: // try to use the stub:
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
@ -303,7 +303,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
jlong old_value = *dest; int64_t old_value = *dest;
if (old_value == compare_value) if (old_value == compare_value)
*dest = exchange_value; *dest = exchange_value;
return old_value; return old_value;
@ -311,7 +311,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
#ifdef AMD64 #ifdef AMD64
jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
// try to use the stub: // try to use the stub:
add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
@ -324,12 +324,12 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
return (*dest) += add_value; return (*dest) += add_value;
} }
intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) { int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
// try to use the stub: // try to use the stub:
add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry()); add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());
if (func != NULL) { if (func != NULL) {
os::atomic_add_ptr_func = func; os::atomic_add_long_func = func;
return (*func)(add_value, dest); return (*func)(add_value, dest);
} }
assert(Threads::number_of_threads() == 0, "for bootstrap only"); assert(Threads::number_of_threads() == 0, "for bootstrap only");
@ -342,7 +342,7 @@ xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstr
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap; cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap; cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap; add_long_func_t* os::atomic_add_long_func = os::atomic_add_long_bootstrap;
#endif // AMD64 #endif // AMD64

View file

@ -29,32 +29,32 @@
// NOTE: we are back in class os here, not win32 // NOTE: we are back in class os here, not win32
// //
#ifdef AMD64 #ifdef AMD64
static jint (*atomic_xchg_func) (jint, volatile jint*); static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
static intptr_t (*atomic_xchg_long_func) (jlong, volatile jlong*); static int64_t (*atomic_xchg_long_func) (int64_t, volatile int64_t*);
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte); static int8_t (*atomic_cmpxchg_byte_func) (int8_t, volatile int8_t*, int8_t);
static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong); static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t);
static jint (*atomic_add_func) (jint, volatile jint*); static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
static intptr_t (*atomic_add_ptr_func) (intptr_t, volatile intptr_t*); static int64_t (*atomic_add_long_func) (int64_t, volatile int64_t*);
static jint atomic_xchg_bootstrap (jint, volatile jint*); static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
static intptr_t atomic_xchg_long_bootstrap (jlong, volatile jlong*); static int64_t atomic_xchg_long_bootstrap (int64_t, volatile int64_t*);
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte); static int8_t atomic_cmpxchg_byte_bootstrap(int8_t, volatile int8_t*, int8_t);
#else #else
static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong); static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t);
#endif // AMD64 #endif // AMD64
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong); static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
#ifdef AMD64 #ifdef AMD64
static jint atomic_add_bootstrap (jint, volatile jint*); static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
static intptr_t atomic_add_ptr_bootstrap (intptr_t, volatile intptr_t*); static int64_t atomic_add_long_bootstrap (int64_t, volatile int64_t*);
#endif // AMD64 #endif // AMD64
static void setup_fpu(); static void setup_fpu();

View file

@ -45,8 +45,8 @@ enum cmpxchg_memory_order {
class Atomic : AllStatic { class Atomic : AllStatic {
public: public:
// Atomic operations on jlong types are not available on all 32-bit // Atomic operations on int64 types are not available on all 32-bit
// platforms. If atomic ops on jlongs are defined here they must only // platforms. If atomic ops on int64 are defined here they must only
// be used from code that verifies they are available at runtime and // be used from code that verifies they are available at runtime and
// can provide an alternative action if not - see supports_cx8() for // can provide an alternative action if not - see supports_cx8() for
// a means to test availability. // a means to test availability.
@ -639,16 +639,16 @@ struct Atomic::AddImpl<
// //
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment. // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<> template<>
struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC { struct Atomic::AddImpl<short, short> VALUE_OBJ_CLASS_SPEC {
jshort operator()(jshort add_value, jshort volatile* dest) const { short operator()(short add_value, short volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN #ifdef VM_LITTLE_ENDIAN
assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1)); int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
#else #else
assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest)); int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
#endif #endif
return (jshort)(new_value >> 16); // preserves sign return (short)(new_value >> 16); // preserves sign
} }
}; };
@ -807,7 +807,7 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
do { do {
// value to swap in matches current value ... // value to swap in matches current value ...
uint32_t new_value = cur; uint32_t new_value = cur;
// ... except for the one jbyte we want to update // ... except for the one byte we want to update
reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value; reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
uint32_t res = cmpxchg(new_value, aligned_dest, cur, order); uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);

View file

@ -62,12 +62,11 @@ address StubRoutines::_verify_oop_subroutine_entry = NULL;
address StubRoutines::_atomic_xchg_entry = NULL; address StubRoutines::_atomic_xchg_entry = NULL;
address StubRoutines::_atomic_xchg_long_entry = NULL; address StubRoutines::_atomic_xchg_long_entry = NULL;
address StubRoutines::_atomic_store_entry = NULL; address StubRoutines::_atomic_store_entry = NULL;
address StubRoutines::_atomic_store_ptr_entry = NULL;
address StubRoutines::_atomic_cmpxchg_entry = NULL; address StubRoutines::_atomic_cmpxchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_byte_entry = NULL; address StubRoutines::_atomic_cmpxchg_byte_entry = NULL;
address StubRoutines::_atomic_cmpxchg_long_entry = NULL; address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
address StubRoutines::_atomic_add_entry = NULL; address StubRoutines::_atomic_add_entry = NULL;
address StubRoutines::_atomic_add_ptr_entry = NULL; address StubRoutines::_atomic_add_long_entry = NULL;
address StubRoutines::_fence_entry = NULL; address StubRoutines::_fence_entry = NULL;
address StubRoutines::_d2i_wrapper = NULL; address StubRoutines::_d2i_wrapper = NULL;
address StubRoutines::_d2l_wrapper = NULL; address StubRoutines::_d2l_wrapper = NULL;

View file

@ -103,12 +103,11 @@ class StubRoutines: AllStatic {
static address _atomic_xchg_entry; static address _atomic_xchg_entry;
static address _atomic_xchg_long_entry; static address _atomic_xchg_long_entry;
static address _atomic_store_entry; static address _atomic_store_entry;
static address _atomic_store_ptr_entry;
static address _atomic_cmpxchg_entry; static address _atomic_cmpxchg_entry;
static address _atomic_cmpxchg_byte_entry; static address _atomic_cmpxchg_byte_entry;
static address _atomic_cmpxchg_long_entry; static address _atomic_cmpxchg_long_entry;
static address _atomic_add_entry; static address _atomic_add_entry;
static address _atomic_add_ptr_entry; static address _atomic_add_long_entry;
static address _fence_entry; static address _fence_entry;
static address _d2i_wrapper; static address _d2i_wrapper;
static address _d2l_wrapper; static address _d2l_wrapper;
@ -277,12 +276,11 @@ class StubRoutines: AllStatic {
static address atomic_xchg_entry() { return _atomic_xchg_entry; } static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_xchg_long_entry() { return _atomic_xchg_long_entry; } static address atomic_xchg_long_entry() { return _atomic_xchg_long_entry; }
static address atomic_store_entry() { return _atomic_store_entry; } static address atomic_store_entry() { return _atomic_store_entry; }
static address atomic_store_ptr_entry() { return _atomic_store_ptr_entry; }
static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; } static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; } static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; }
static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; } static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
static address atomic_add_entry() { return _atomic_add_entry; } static address atomic_add_entry() { return _atomic_add_entry; }
static address atomic_add_ptr_entry() { return _atomic_add_ptr_entry; } static address atomic_add_long_entry() { return _atomic_add_long_entry; }
static address fence_entry() { return _fence_entry; } static address fence_entry() { return _fence_entry; }
static address d2i_wrapper() { return _d2i_wrapper; } static address d2i_wrapper() { return _d2i_wrapper; }