7143664: Clean up OrderAccess implementations and usage

Clarify and correct the abstract model for memory barriers provided by the OrderAccess class. Refactor the implementations using template specialization to allow the bulk of the code to be shared, with platform-specific customizations applied as needed.

Reviewed-by: acorn, dcubed, dholmes, dlong, goetz, kbarrett, sgehwolf
Authored by Erik Osterlund on 2015-03-03 19:20:26 -05:00; committed by David Holmes
parent 8c5e105bac
commit c2a4574760
16 changed files with 494 additions and 1232 deletions
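
The pattern this change introduces, visible throughout the diffs below: the shared runtime code supplies generic template definitions of the ordered operations, each os_cpu file overrides only the operations it can do better via explicit template specialization, and then defines VM_HAS_GENERALIZED_ORDER_ACCESS to opt into the shared code. A minimal stand-alone sketch of that dispatch, with assumed names and a simplified structure rather than the actual HotSpot sources:

// sketch.cpp -- illustration only; specialized_load_acquire is a free function
// here, whereas HotSpot uses member templates of the OrderAccess class.
#include <cstdio>

typedef int jint;

static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");  // compiler-only fence
}

// Generic definition shared by all platforms: plain load, then acquire.
template <typename T>
inline T specialized_load_acquire(volatile T* p) {
  T v = *p;
  compiler_barrier();  // stand-in for the platform's acquire barrier
  return v;
}

// A platform file overrides only what it wants to optimize; every other type
// falls through to the generic definition above.
template <>
inline jint specialized_load_acquire<jint>(volatile jint* p) {
  jint v = *p;
  compiler_barrier();  // a real port would emit its special sequence here
  return v;
}

int main() {
  volatile jint x = 42;
  std::printf("%d\n", specialized_load_acquire(&x));  // prints 42
  return 0;
}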


@@ -3947,12 +3947,10 @@ void LIR_Assembler::membar() {
 void LIR_Assembler::membar_acquire() {
   // No x86 machines currently require load fences
-  // __ load_fence();
 }
 
 void LIR_Assembler::membar_release() {
   // No x86 machines currently require store fences
-  // __ store_fence();
 }
 
 void LIR_Assembler::membar_loadload() {


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2014, SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,9 @@
 #include "runtime/orderAccess.hpp"
 
+// Compiler version last used for testing: xlc 12
+// Please update this information when this file changes
+
 // Implementation of class OrderAccess.
 //
@@ -61,86 +64,30 @@
 #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()  __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()  __asm__ __volatile__ ("isync"  : : : "memory");
-#define inlasm_release() inlasm_lwsync();
-#define inlasm_acquire() inlasm_lwsync();
 // Use twi-isync for load_acquire (faster than lwsync).
 // ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
 // #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
 #define inlasm_acquire_reg(X) inlasm_lwsync();
-#define inlasm_fence()   inlasm_sync();
 
 inline void OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void OrderAccess::storestore() { inlasm_lwsync(); }
 inline void OrderAccess::loadstore()  { inlasm_lwsync(); }
-inline void OrderAccess::storeload()  { inlasm_fence();  }
+inline void OrderAccess::storeload()  { inlasm_sync();   }
 
-inline void OrderAccess::acquire()    { inlasm_acquire(); }
-inline void OrderAccess::release()    { inlasm_release(); }
-inline void OrderAccess::fence()      { inlasm_fence();   }
+inline void OrderAccess::acquire()    { inlasm_lwsync(); }
+inline void OrderAccess::release()    { inlasm_lwsync(); }
+inline void OrderAccess::fence()      { inlasm_sync();   }
 
-inline jbyte   OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte   t = *p; inlasm_acquire_reg(t); return t; }
-inline jshort  OrderAccess::load_acquire(volatile jshort*  p) { register jshort  t = *p; inlasm_acquire_reg(t); return t; }
-inline jint    OrderAccess::load_acquire(volatile jint*    p) { register jint    t = *p; inlasm_acquire_reg(t); return t; }
-inline jlong   OrderAccess::load_acquire(volatile jlong*   p) { register jlong   t = *p; inlasm_acquire_reg(t); return t; }
-inline jubyte  OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte  t = *p; inlasm_acquire_reg(t); return t; }
-inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
-inline juint   OrderAccess::load_acquire(volatile juint*   p) { register juint   t = *p; inlasm_acquire_reg(t); return t; }
-inline julong  OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
-inline jfloat  OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat  t = *p; inlasm_acquire(); return t; }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
+template<> inline jbyte  OrderAccess::specialized_load_acquire<jbyte> (volatile jbyte*  p) { register jbyte  t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jint   OrderAccess::specialized_load_acquire<jint>  (volatile jint*   p) { register jint   t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jlong  OrderAccess::specialized_load_acquire<jlong> (volatile jlong*  p) { register jlong  t = load(p); inlasm_acquire_reg(t); return t; }
 
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*) load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
-
-inline void OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
-
-inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }
-
-inline void OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
-
-inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }
-
-inline void OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
-
-inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
 
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
-#undef inlasm_release
-#undef inlasm_acquire
-#undef inlasm_fence
+
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
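
An aside on the twi-isync idiom referenced above (commented out here because of the xlC 10.1 problem, but active in the Linux PPC file later in this commit): it turns an isync into an acquire barrier for one specific load by making the isync depend on the loaded value. A PPC64-only sketch with assumed names, illustrative rather than authoritative:

// ppc_acquire_sketch.hpp -- illustration only; compiles only for PPC targets.
typedef int jint;

inline jint load_acquire_twi_isync(volatile jint* p) {
  jint t = *p;
  // "twi 0,%0,0" is a trap-word-immediate that never traps, but it consumes
  // the loaded value, so the subsequent isync cannot retire before the load
  // completes -- a cheaper acquire on this path than a full lwsync.
  __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
  return t;
}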


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,27 +29,27 @@
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
+// Compiler version last used for testing: clang 5.1
+// Please update this information when this file changes
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+// x86 is TSO and hence only needs a fence for storeload
+// However, a compiler barrier is still needed to prevent reordering
+// between volatile and non-volatile memory accesses.
+
 // Implementation of class OrderAccess.
 
-inline void OrderAccess::loadload()   { acquire(); }
-inline void OrderAccess::storestore() { release(); }
-inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
 inline void OrderAccess::storeload()  { fence(); }
 
-inline void OrderAccess::acquire() {
-  volatile intptr_t local_dummy;
-#ifdef AMD64
-  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
-#else
-  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
-#endif // AMD64
-}
-
-inline void OrderAccess::release() {
-  // Avoid hitting the same cache-line from
-  // different threads.
-  volatile jint local_dummy = 0;
-}
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
@@ -60,156 +60,50 @@ inline void OrderAccess::fence() {
     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
 #endif
   }
+  compiler_barrier();
 }
 
-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
-
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
-
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); }
-
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
-
-inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
                     : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
-inline void     OrderAccess::store_fence(jshort* p, jshort v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
-inline void     OrderAccess::store_fence(jint*   p, jint   v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
 
-inline void     OrderAccess::store_fence(jlong*  p, jlong  v) {
 #ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
+template<>
+inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
+  __asm__ volatile (  "xchgq (%2), %0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
-#else
-  *p = v; fence();
+}
 #endif // AMD64
+
+template<>
+inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
+  release_store_fence((volatile jint*)p, jint_cast(v));
+}
+template<>
+inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
+  release_store_fence((volatile jlong*)p, jlong_cast(v));
 }
 
-// AMD64 copied the bodies for the the signed version. 32bit did this. As long as the
-// compiler does the inlining this is simpler.
-inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
-inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
-inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
-inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
-inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
-
-inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
-#ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  store_fence((jint*)p, (jint)v);
-#endif // AMD64
-}
-
-inline void     OrderAccess::store_ptr_fence(void** p, void* v) {
-#ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  store_fence((jint*)p, (jint)v);
-#endif // AMD64
-}
-
-// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
-inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
-  __asm__ volatile (  "xchgb (%2),%0"
-                    : "=q" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
-  __asm__ volatile (  "xchgw (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-
-inline void     OrderAccess::release_store_fence(volatile jlong*  p, jlong  v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store(p, v); fence();
-#endif // AMD64
-}
-
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store_fence((volatile jint*)p, (jint)v);
-#endif // AMD64
-}
-
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store_fence((volatile jint*)p, (jint)v);
-#endif // AMD64
-}
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
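
The TSO comment added near the top of this file carries the whole x86 mapping: the hardware already orders everything except store-then-load, so only storeload()/fence() needs a real instruction, while the other barriers merely stop the compiler from reordering. A self-contained sketch of that reasoning, with assumed names (the locked add mirrors the fence() body above, using %%rsp for 64-bit):

// tso_sketch.cpp -- illustration only; compile for x86-64 with gcc or clang.
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}
static inline void storeload_fence() {
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
}

volatile int data = 0;
volatile int flag = 0;

void publish() {                // release-style publication
  data = 42;
  compiler_barrier();           // StoreStore: TSO hardware keeps store order
  flag = 1;
}

int consume() {                 // acquire-style consumption
  while (flag == 0) { }
  compiler_barrier();           // LoadLoad: TSO hardware keeps load order
  return data;
}

int store_then_load() {
  flag = 1;
  storeload_fence();            // the one ordering TSO does not give for free
  return data;
}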


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -40,8 +40,7 @@ typedef void (__kernel_dmb_t) (void);
 #define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
 
 #define FULL_MEM_BARRIER __kernel_dmb()
-#define READ_MEM_BARRIER __kernel_dmb()
-#define WRITE_MEM_BARRIER __kernel_dmb()
+#define LIGHT_MEM_BARRIER __kernel_dmb()
 
 #else // ARM
@@ -50,126 +49,31 @@ typedef void (__kernel_dmb_t) (void);
 #ifdef PPC
 
 #ifdef __NO_LWSYNC__
-#define READ_MEM_BARRIER __asm __volatile ("sync":::"memory")
-#define WRITE_MEM_BARRIER __asm __volatile ("sync":::"memory")
+#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
 #else
-#define READ_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
-#define WRITE_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
+#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
 #endif
 
 #else // PPC
 
-#define READ_MEM_BARRIER __asm __volatile ("":::"memory")
-#define WRITE_MEM_BARRIER __asm __volatile ("":::"memory")
+#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
 
 #endif // PPC
 
 #endif // ARM
 
+// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
+// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
+
-inline void OrderAccess::loadload()   { acquire(); }
-inline void OrderAccess::storestore() { release(); }
-inline void OrderAccess::loadstore()  { acquire(); }
-inline void OrderAccess::storeload()  { fence(); }
-
-inline void OrderAccess::acquire() {
-  READ_MEM_BARRIER;
-}
-
-inline void OrderAccess::release() {
-  WRITE_MEM_BARRIER;
-}
-
-inline void OrderAccess::fence() {
-  FULL_MEM_BARRIER;
-}
+inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }
+
+inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
 
-inline jbyte  OrderAccess::load_acquire(volatile jbyte*  p) { jbyte data = *p; acquire(); return data; }
-inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort data = *p; acquire(); return data; }
-inline jint   OrderAccess::load_acquire(volatile jint*   p) { jint data = *p; acquire(); return data; }
-inline jlong  OrderAccess::load_acquire(volatile jlong*  p) {
-  jlong tmp;
-  os::atomic_copy64(p, &tmp);
-  acquire();
-  return tmp;
-}
-inline jubyte  OrderAccess::load_acquire(volatile jubyte*  p) { jubyte data = *p; acquire(); return data; }
-inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort data = *p; acquire(); return data; }
-inline juint   OrderAccess::load_acquire(volatile juint*   p) { juint data = *p; acquire(); return data; }
-inline julong  OrderAccess::load_acquire(volatile julong*  p) {
-  julong tmp;
-  os::atomic_copy64(p, &tmp);
-  acquire();
-  return tmp;
-}
-inline jfloat  OrderAccess::load_acquire(volatile jfloat*  p) { jfloat data = *p; acquire(); return data; }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) {
-  jdouble tmp;
-  os::atomic_copy64(p, &tmp);
-  acquire();
-  return tmp;
-}
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) {
-  intptr_t data = *p;
-  acquire();
-  return data;
-}
-inline void* OrderAccess::load_ptr_acquire(volatile void* p) {
-  void *data = *(void* volatile *)p;
-  acquire();
-  return data;
-}
-inline void* OrderAccess::load_ptr_acquire(const volatile void* p) {
-  void *data = *(void* const volatile *)p;
-  acquire();
-  return data;
-}
-
-inline void OrderAccess::release_store(volatile jbyte*  p, jbyte  v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile jshort* p, jshort v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile jint*   p, jint   v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile jlong*  p, jlong  v)
-{ release(); os::atomic_copy64(&v, p); }
-inline void OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile jushort* p, jushort v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile juint*   p, juint   v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile julong*  p, julong  v)
-{ release(); os::atomic_copy64(&v, p); }
-inline void OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { release(); *p = v; }
-inline void OrderAccess::release_store(volatile jdouble* p, jdouble v)
-{ release(); os::atomic_copy64(&v, p); }
-
-inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release(); *p = v; }
-inline void OrderAccess::release_store_ptr(volatile void*     p, void*    v)
-{ release(); *(void* volatile *)p = v; }
-
-inline void OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(jint*    p, jint    v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(jlong*   p, jlong   v) { os::atomic_copy64(&v, p); fence(); }
-inline void OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(juint*   p, juint   v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(julong*  p, julong  v) { os::atomic_copy64(&v, p); fence(); }
-inline void OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void OrderAccess::store_fence(jdouble* p, jdouble v) { os::atomic_copy64(&v, p); fence(); }
-
-inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
-inline void OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; fence(); }
-
-inline void OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jint*    p, jint    v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { release_store(p, v); fence(); }
-inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); }
-
-inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); }
-inline void OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { release_store_ptr(p, v); fence(); }
+
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2014, SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,6 +32,9 @@
 #error "OrderAccess currently only implemented for PPC64"
 #endif
 
+// Compiler version last used for testing: gcc 4.1.2
+// Please update this information when this file changes
+
 // Implementation of class OrderAccess.
 //
@@ -65,84 +68,29 @@
 #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
 #define inlasm_eieio()  __asm__ __volatile__ ("eieio"  : : : "memory");
 #define inlasm_isync()  __asm__ __volatile__ ("isync"  : : : "memory");
-#define inlasm_release() inlasm_lwsync();
-#define inlasm_acquire() inlasm_lwsync();
 // Use twi-isync for load_acquire (faster than lwsync).
 #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
-#define inlasm_fence()   inlasm_sync();
 
 inline void OrderAccess::loadload()   { inlasm_lwsync(); }
 inline void OrderAccess::storestore() { inlasm_lwsync(); }
 inline void OrderAccess::loadstore()  { inlasm_lwsync(); }
-inline void OrderAccess::storeload()  { inlasm_fence();  }
+inline void OrderAccess::storeload()  { inlasm_sync();   }
 
-inline void OrderAccess::acquire()    { inlasm_acquire(); }
-inline void OrderAccess::release()    { inlasm_release(); }
-inline void OrderAccess::fence()      { inlasm_fence();   }
+inline void OrderAccess::acquire()    { inlasm_lwsync(); }
+inline void OrderAccess::release()    { inlasm_lwsync(); }
+inline void OrderAccess::fence()      { inlasm_sync();   }
 
-inline jbyte   OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte   t = *p; inlasm_acquire_reg(t); return t; }
-inline jshort  OrderAccess::load_acquire(volatile jshort*  p) { register jshort  t = *p; inlasm_acquire_reg(t); return t; }
-inline jint    OrderAccess::load_acquire(volatile jint*    p) { register jint    t = *p; inlasm_acquire_reg(t); return t; }
-inline jlong   OrderAccess::load_acquire(volatile jlong*   p) { register jlong   t = *p; inlasm_acquire_reg(t); return t; }
-inline jubyte  OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte  t = *p; inlasm_acquire_reg(t); return t; }
-inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
-inline juint   OrderAccess::load_acquire(volatile juint*   p) { register juint   t = *p; inlasm_acquire_reg(t); return t; }
-inline julong  OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
-inline jfloat  OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat  t = *p; inlasm_acquire(); return t; }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
+template<> inline jbyte  OrderAccess::specialized_load_acquire<jbyte> (volatile jbyte*  p) { register jbyte  t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jint   OrderAccess::specialized_load_acquire<jint>  (volatile jint*   p) { register jint   t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jlong  OrderAccess::specialized_load_acquire<jlong> (volatile jlong*  p) { register jlong  t = load(p); inlasm_acquire_reg(t); return t; }
 
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*) load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
-
-inline void OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
-
-inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
-inline void OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }
-
-inline void OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
-
-inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
-inline void OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }
-
-inline void OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
-
-inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
 
 #undef inlasm_sync
 #undef inlasm_lwsync
 #undef inlasm_eieio
 #undef inlasm_isync
-#undef inlasm_release
-#undef inlasm_acquire
-#undef inlasm_fence
+#undef inlasm_acquire_reg
+
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,81 +29,25 @@
 // Implementation of class OrderAccess.
 
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
 // Assume TSO.
 
-inline void OrderAccess::loadload()   { acquire(); }
-inline void OrderAccess::storestore() { release(); }
-inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
 inline void OrderAccess::storeload()  { fence(); }
 
-inline void OrderAccess::acquire() {
-  __asm__ volatile ("nop" : : :);
-}
-
-inline void OrderAccess::release() {
-  jint* local_dummy = (jint*)&local_dummy;
-  __asm__ volatile("stw %%g0, [%0]" : : "r" (local_dummy) : "memory");
-}
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
 
 inline void OrderAccess::fence() {
-  __asm__ volatile ("membar #StoreLoad" : : :);
+  __asm__ volatile ("membar #StoreLoad" : : : "memory");
 }
 
-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
-
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
-
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
-
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
-
-inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
-
-inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
-inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; fence(); }
-
-inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { *(void* volatile *)p = v; fence(); }
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,9 @@
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
+// Compiler version last used for testing: gcc 4.8.2
+// Please update this information when this file changes
+
 // Implementation of class OrderAccess.
 
 // A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
@@ -36,23 +39,13 @@ static inline void compiler_barrier() {
   __asm__ volatile ("" : : : "memory");
 }
 
-inline void OrderAccess::loadload()   { acquire(); }
-inline void OrderAccess::storestore() { release(); }
-inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
 inline void OrderAccess::storeload()  { fence(); }
 
-inline void OrderAccess::acquire() {
-  volatile intptr_t local_dummy;
-#ifdef AMD64
-  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
-#else
-  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
-#endif // AMD64
-}
-
-inline void OrderAccess::release() {
-  compiler_barrier();
-}
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
 
 inline void OrderAccess::fence() {
   if (os::is_MP()) {
@@ -63,156 +56,50 @@ inline void OrderAccess::fence() {
     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
 #endif
   }
+  compiler_barrier();
 }
 
-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { jbyte   v = *p; compiler_barrier(); return v; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { jshort  v = *p; compiler_barrier(); return v; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { jint    v = *p; compiler_barrier(); return v; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { jlong   v = Atomic::load(p); compiler_barrier(); return v; }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { jubyte  v = *p; compiler_barrier(); return v; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { juint   v = *p; compiler_barrier(); return v; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { julong  v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { jfloat  v = *p; compiler_barrier(); return v; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }
-
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { intptr_t v = *p; compiler_barrier(); return v; }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; }
-
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { compiler_barrier(); Atomic::store(v, p); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }
-
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { compiler_barrier(); *(void* volatile *)p = v; }
-
-inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
                     : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
-inline void     OrderAccess::store_fence(jshort* p, jshort v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
   __asm__ volatile (  "xchgw (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
-inline void     OrderAccess::store_fence(jint*   p, jint   v) {
+template<>
+inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
   __asm__ volatile (  "xchgl (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
 
-inline void     OrderAccess::store_fence(jlong*  p, jlong  v) {
 #ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
+template<>
+inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
+  __asm__ volatile (  "xchgq (%2), %0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
-#else
-  *p = v; fence();
+}
 #endif // AMD64
+
+template<>
+inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
+}
+template<>
+inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
+  release_store_fence((volatile jlong*)p, jlong_cast(v));
 }
 
-// AMD64 copied the bodies for the the signed version. 32bit did this. As long as the
-// compiler does the inlining this is simpler.
-inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
-inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
-inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
-inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
-inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }
-
-inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
-#ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  store_fence((jint*)p, (jint)v);
-#endif // AMD64
-}
-
-inline void     OrderAccess::store_ptr_fence(void** p, void* v) {
-#ifdef AMD64
-  __asm__ __volatile__ ("xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  store_fence((jint*)p, (jint)v);
-#endif // AMD64
-}
-
-// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
-inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
-  __asm__ volatile (  "xchgb (%2),%0"
-                    : "=q" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
-  __asm__ volatile (  "xchgw (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (v)
-                    : "0" (v), "r" (p)
-                    : "memory");
-}
-
-inline void     OrderAccess::release_store_fence(volatile jlong*  p, jlong  v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store(p, v); fence();
-#endif // AMD64
-}
-
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store_fence((volatile jint*)p, (jint)v);
-#endif // AMD64
-}
-
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
-#ifdef AMD64
-  __asm__ __volatile__ (  "xchgq (%2), %0"
-                        : "=r" (v)
-                        : "0" (v), "r" (p)
-                        : "memory");
-#else
-  release_store_fence((volatile jint*)p, (jint)v);
-#endif // AMD64
-}
+#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
 
 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP


@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc. * Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -40,8 +40,7 @@ typedef void (__kernel_dmb_t) (void);
#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) #define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
#define FULL_MEM_BARRIER __kernel_dmb() #define FULL_MEM_BARRIER __kernel_dmb()
#define READ_MEM_BARRIER __kernel_dmb() #define LIGHT_MEM_BARRIER __kernel_dmb()
#define WRITE_MEM_BARRIER __kernel_dmb()
#else // ARM #else // ARM
@ -49,126 +48,33 @@ typedef void (__kernel_dmb_t) (void);
#ifdef PPC #ifdef PPC
#define READ_MEM_BARRIER __asm __volatile ("isync":::"memory")
#ifdef __NO_LWSYNC__ #ifdef __NO_LWSYNC__
#define WRITE_MEM_BARRIER __asm __volatile ("sync":::"memory") #define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
#else #else
#define WRITE_MEM_BARRIER __asm __volatile ("lwsync":::"memory") #define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
#endif #endif
#else // PPC #else // PPC
#define READ_MEM_BARRIER __asm __volatile ("":::"memory") #define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
#define WRITE_MEM_BARRIER __asm __volatile ("":::"memory")
#endif // PPC #endif // PPC
#endif // ARM #endif // ARM
// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
inline void OrderAccess::loadload() { acquire(); } inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::storestore() { release(); } inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::loadstore() { acquire(); } inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::storeload() { fence(); } inline void OrderAccess::storeload() { FULL_MEM_BARRIER; }
inline void OrderAccess::acquire() { inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; }
READ_MEM_BARRIER; inline void OrderAccess::release() { LIGHT_MEM_BARRIER; }
}
inline void OrderAccess::release() { inline void OrderAccess::fence() { FULL_MEM_BARRIER; }
WRITE_MEM_BARRIER;
}
inline void OrderAccess::fence() { #define VM_HAS_GENERALIZED_ORDER_ACCESS 1
FULL_MEM_BARRIER;
}
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { jbyte data = *p; acquire(); return data; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { jshort data = *p; acquire(); return data; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { jint data = *p; acquire(); return data; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) {
  jlong tmp;
  os::atomic_copy64(p, &tmp);
  acquire();
  return tmp;
}
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { jubyte data = *p; acquire(); return data; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { jushort data = *p; acquire(); return data; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { juint data = *p; acquire(); return data; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) {
  julong tmp;
  os::atomic_copy64(p, &tmp);
  acquire();
  return tmp;
}
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { jfloat data = *p; acquire(); return data; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) {
  jdouble tmp;
  os::atomic_copy64(p, &tmp);
  acquire();
  return tmp;
}
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) {
  intptr_t data = *p;
  acquire();
  return data;
}
inline void*    OrderAccess::load_ptr_acquire(volatile void* p) {
  void *data = *(void* volatile *)p;
  acquire();
  return data;
}
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) {
  void *data = *(void* const volatile *)p;
  acquire();
  return data;
}

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v)
{ release(); os::atomic_copy64(&v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v)
{ release(); os::atomic_copy64(&v, p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { release(); *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v)
{ release(); os::atomic_copy64(&v, p); }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release(); *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v)
{ release(); *(void* volatile *)p = v; }

inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { os::atomic_copy64(&v, p); fence(); }
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { os::atomic_copy64(&v, p); fence(); }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { os::atomic_copy64(&v, p); fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; fence(); }

inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); }
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { release_store_ptr(p, v); fence(); }
#ifdef __NO_LWSYNC__
#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
#else
#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
#endif
#else // PPC
#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
#endif // PPC
#endif // ARM

// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.

inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }

inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }

inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
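The LIGHT_MEM_BARRIER note above is the crux of the new Zero implementation: one barrier strong enough for StoreStore | LoadLoad | LoadStore covers loadload, storestore, loadstore, acquire and release alike, and only storeload needs the full barrier. A minimal, hypothetical sketch of the publish/consume pairing this supports (GCC-style; the empty asm stands in for lwsync on PPC, and all names here are illustrative rather than part of the patch):

static inline void light_mem_barrier() {
  __asm__ __volatile__ ("" ::: "memory");  // stand-in for lwsync on PPC
}

static int payload = 0;             // data being published
static volatile int published = 0;  // publication flag

void producer() {
  payload = 42;           // 1. write the data
  light_mem_barrier();    // 2. StoreStore: data must be visible before the flag
  published = 1;          // 3. publish
}

void consumer() {
  if (published) {        // 4. observe the flag
    light_mem_barrier();  // 5. LoadLoad: flag must be read before the data
    // 6. guaranteed to observe payload == 42
  }
}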

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -28,107 +28,30 @@
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"

// Implementation of class OrderAccess.

// Assume TSO.

// In solaris_sparc.il
extern "C" void _OrderAccess_acquire();
extern "C" void _OrderAccess_fence();

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

#ifdef _GNU_SOURCE

inline void OrderAccess::acquire() {
  __asm__ volatile ("nop" : : :);
}

inline void OrderAccess::release() {
  jint* local_dummy = (jint*)&local_dummy;
  __asm__ volatile("stw %%g0, [%0]" : : "r" (local_dummy) : "memory");
}

inline void OrderAccess::fence() {
  __asm__ volatile ("membar #StoreLoad" : : :);
}

#else // _GNU_SOURCE

inline void OrderAccess::acquire() {
  _OrderAccess_acquire();
}

inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  _OrderAccess_fence();
}

#endif // _GNU_SOURCE
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; }
inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; }
inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); }
// Compiler version last used for testing: solaris studio 12u3
// Please update this information when this file changes

// Implementation of class OrderAccess.

// Assume TSO.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  __asm__ volatile ("membar #StoreLoad" : : : "memory");
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
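The empty __asm__ with a "memory" clobber above emits no instructions; its only job is to forbid the C++ compiler from caching or moving memory accesses across it. A stand-alone, hypothetical illustration of why that matters (names are not from the patch):

static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

int stop = 0;  // written by another thread

void spin_until_stopped() {
  // Without the barrier, the compiler may legally keep 'stop' in a
  // register and turn this into an infinite loop.
  while (!stop) {
    compiler_barrier();  // forces a fresh load of 'stop' every iteration
  }
}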

View file

@@ -1,5 +1,5 @@
//
// Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it

@@ -221,27 +221,6 @@
.end
// Support for void OrderAccess::acquire()
// The method is intentionally empty.
// It exists for the sole purpose of generating
// a C/C++ sequence point over which the compiler won't
// reorder code.
.inline _OrderAccess_acquire,0
.volatile
.nonvolatile
.end
// Support for void OrderAccess::fence()
.inline _OrderAccess_fence,0
.volatile
membar #StoreLoad
.nonvolatile
.end
// Support for void Prefetch::read(void *loc, intx interval)
//
// Prefetch for several reads.

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -29,110 +29,35 @@
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Implementation of class OrderAccess.

// For Sun Studio - implementation is in solaris_i486.il.
// For gcc - implementation is just below.
extern "C" void _OrderAccess_acquire();
extern "C" void _OrderAccess_fence();

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() {
  _OrderAccess_acquire();
}

inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    _OrderAccess_fence();
  }
}

#ifdef _GNU_SOURCE

extern "C" {
  inline void _OrderAccess_acquire() {
    volatile intptr_t local_dummy;
#ifdef AMD64
    __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
    __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
  }

  inline void _OrderAccess_fence() {
    // Always use locked addl since mfence is sometimes expensive
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
  }
}
#endif // GNU_SOURCE
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; }
inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; }
inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; }
inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); }
inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store((jlong *)p, (jlong)v); fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); }
// Compiler version last used for testing: solaris studio 12u3
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  if (os::is_MP()) {
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
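The locked add to the top of the stack is the classic cheap x86 StoreLoad fence, chosen here because mfence is sometimes more expensive. The pattern that actually needs it is a Dekker-style store-then-load handshake; a hypothetical sketch (simplified to two threads, illustrative only):

static inline void storeload_fence() {
#ifdef __x86_64__
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
}

volatile int interested[2] = {0, 0};

bool try_enter(int self) {
  interested[self] = 1;              // announce intent (store)
  storeload_fence();                 // StoreLoad: drain our store before reading the peer
  return interested[1 - self] == 0;  // only safe because our store is now visible
}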

View file

@@ -1,5 +1,5 @@
//
// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it

@@ -141,17 +141,6 @@
fistpll (%eax)
.end
// Support for OrderAccess::acquire()
.inline _OrderAccess_acquire,0
movl 0(%esp), %eax
.end
// Support for OrderAccess::fence()
.inline _OrderAccess_fence,0
lock
addl $0, (%esp)
.end
// Support for u2 Bytes::swap_u2(u2 x)
.inline _raw_swap_u2,1
movl 0(%esp), %eax

View file

@@ -1,5 +1,5 @@
//
// Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it

@@ -104,17 +104,6 @@
cmpxchgq %rdi, (%rsi)
.end
// Support for OrderAccess::acquire()
.inline _OrderAccess_acquire,0
movl 0(%rsp), %eax
.end
// Support for OrderAccess::fence()
.inline _OrderAccess_fence,0
lock
addl $0, (%rsp)
.end
// Support for u2 Bytes::swap_u2(u2 x)
.inline _raw_swap_u2,1
movw %di, %ax

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -25,29 +25,39 @@
#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP

#include <intrin.h>
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() {
#ifndef AMD64
  __asm {
    mov eax, dword ptr [esp];
  }
#endif // !AMD64
}

inline void OrderAccess::release() {
  // A volatile store has release semantics.
  volatile jint local_dummy = 0;
}

// Compiler version last used for testing: Microsoft Visual Studio 2010
// Please update this information when this file changes

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
  _ReadWriteBarrier();
}

// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through OrderAccess::load
// and OrderAccess::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
#ifdef AMD64

@@ -59,157 +69,47 @@ inline void OrderAccess::fence() {
    }
  }
#endif // AMD64
  compiler_barrier();
}

inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); }
inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

inline void     OrderAccess::store_fence(jbyte* p, jbyte v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov al, v;
    xchg al, byte ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::store_fence(jshort* p, jshort v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov ax, v;
    xchg ax, word ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::store_fence(jint* p, jint v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov eax, v;
    xchg eax, dword ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::store_fence(jlong* p, jlong v)     { *p = v; fence(); }
inline void     OrderAccess::store_fence(jubyte* p, jubyte v)   { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint* p, juint v)     { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong* p, julong v)   { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat* p, jfloat v)   { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  *p = v; fence();
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void** p, void* v) {
#ifdef AMD64
  *p = v; fence();
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov al, v;
    xchg al, byte ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov ax, v;
    xchg ax, word ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jint* p, jint v) {
#ifdef AMD64
  *p = v; fence();
#else
  __asm {
    mov edx, p;
    mov eax, v;
    xchg eax, dword ptr [edx];
  }
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jlong* p, jlong v)     { release_store(p, v); fence(); }
inline void     OrderAccess::release_store_fence(volatile jubyte* p, jubyte v)   { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint* p, juint v)     { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong* p, julong v)   { release_store_fence((volatile jlong*)p,  (jlong)v);  }
inline void     OrderAccess::release_store_fence(volatile jfloat* p, jfloat v)   { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  *p = v; fence();
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::release_store_ptr_fence(volatile void* p, void* v) {
#ifdef AMD64
  *(void* volatile *)p = v; fence();
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#ifndef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm {
    mov edx, p;
    mov al, v;
    xchg al, byte ptr [edx];
  }
}
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm {
    mov edx, p;
    mov ax, v;
    xchg ax, word ptr [edx];
  }
}
template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm {
    mov edx, p;
    mov eax, v;
    xchg eax, dword ptr [edx];
  }
}
#endif // AMD64

template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
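On 32-bit Windows the specializations above use xchg because an xchg with a memory operand is implicitly locked: it performs the store and acts as a full fence in a single instruction, so no separate locked add is needed afterwards. A GCC-flavored sketch of the same idea (illustrative only, not the MSVC code above):

static inline void release_store_fence_int(volatile int* p, int v) {
  // xchg with a memory operand is implicitly locked, so this both
  // stores v into *p and orders the store like a full fence.
  __asm__ volatile ("xchgl %0, %1"
                    : "+r" (v), "+m" (*p)
                    :
                    : "memory");
}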

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -29,11 +29,7 @@
// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model.  It is the dynamic equivalent of the
// C/C++ volatile specifier.  I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
@@ -41,7 +37,6 @@
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
@@ -69,86 +64,88 @@
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
// We define two further operations, 'release' and 'acquire'.  They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes.  The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release.  I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes.  The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
//
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire.  In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back.  All machines we know of implement some
// sort of memory fence instruction.
//
//
// The standalone implementations of release and acquire need an associated
// dummy volatile store or load respectively.  To avoid redundant operations,
// we can define the composite operators: 'release_store', 'store_fence' and
// 'load_acquire'.  Here's a summary of the machine instructions corresponding
// to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
// Using only release_store and load_acquire, we can implement the
// following ordered sequences.
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
//
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.
//
// 4. store, load == store_fence, load
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
//
// We define two further barriers: acquire and release.
//
// Conceptually, acquire/release semantics form unidirectional and
// asynchronous barriers w.r.t. a synchronizing load(X) and store(X) pair.
// They should always be used in pairs to publish (release store) and
// access (load acquire) some implicitly understood shared data between
// threads in a relatively cheap fashion not requiring storeload. If not
// used in such a pair, it is advised to use a membar instead:
// acquire/release only make sense as pairs.
//
// T1: access_shared_data
// T1: ]release
// T1: (...)
// T1: store(X)
//
// T2: load(X)
// T2: (...)
// T2: acquire[
// T2: access_shared_data
//
// It is guaranteed that if T2: load(X) synchronizes with (observes the
// value written by) T1: store(X), then the memory accesses before the T1:
// ]release happen before the memory accesses after the T2: acquire[.
//
// Total Store Order (TSO) machines can be seen as machines issuing a
// release store for each store and a load acquire for each load. Therefore
// there is an inherent resemblence between TSO and acquire/release
// semantics. TSO can be seen as an abstract machine where loads are
// executed immediately when encountered (hence loadload reordering not
// happening) but enqueues stores in a FIFO queue
// for asynchronous serialization (neither storestore or loadstore
// reordering happening). The only reordering happening is storeload due to
// the queue asynchronously serializing stores (yet in order).
//
// Acquire/release semantics essentially exploits this asynchronicity: when
// the load(X) acquire[ observes the store of ]release store(X), the
// accesses before the release must have happened before the accesses after
// acquire.
//
// The API offers both stand-alone acquire() and release() as well as bound
// load_acquire() and release_store(). It is guaranteed that these are
// semantically equivalent w.r.t. the defined model. However, since
// stand-alone acquire()/release() does not know which previous
// load/subsequent store is considered the synchronizing load/store, they
// may be more conservative in implementations. We advise using the bound
// variants whenever possible.
//
// Finally, we define a "fence" operation, as a bidirectional barrier.
// It guarantees that any memory access preceding the fence is not
// reordered w.r.t. any memory accesses subsequent to the fence in program
// order. This may be used to prevent sequences of loads from floating up
// above sequences of stores.
//
// The following table shows the implementations on some architectures:
//
//                       Constraint     x86          sparc TSO          ppc
// ---------------------------------------------------------------------------
// fence                 LoadStore  |   lock         membar #StoreLoad  sync
//                       StoreStore |   addl 0,(sp)
//                       LoadLoad   |
//                       StoreLoad
//
// release               LoadStore  |                                   lwsync
//                       StoreStore
//
// acquire               LoadLoad   |                                   lwsync
//                       LoadStore
//
// release_store         <store>        <store>                         lwsync
//                                                                      <store>
//
// release_store_fence   xchg           <store>                         lwsync
//                                      membar #StoreLoad               <store>
//                                                                      sync
//
// load_acquire          <load>         <load>                          <load>
//                                                                      lwsync
//
// Ordering a load relative to preceding stores requires a StoreLoad,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO. On x86, we use explicitly locked add.
//
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
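A sketch of the T1/T2 pairing above, written with the bound operations this file declares (hypothetical function names; the payload/flag pair is illustrative, not part of the patch):

jint          shared_data = 0;  // published payload
volatile jint X           = 0;  // synchronization variable

void t1_publish() {
  shared_data = 42;                   // T1: access_shared_data
  OrderAccess::release_store(&X, 1);  // T1: ]release + store(X)
}

void t2_consume() {
  if (OrderAccess::load_acquire(&X) == 1) {  // T2: load(X) + acquire[
    // T2: access_shared_data -- guaranteed to observe shared_data == 42
  }
}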
@@ -157,27 +154,19 @@
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
//
//
// C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions).  'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point.  See the C++ standard, section 1.9, titled
// "Program Execution".  This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload, acquire
// and release must include a sequence point, usually via a volatile memory
// access.  Other ways to guarantee a sequence point are, e.g., use of
// indirect calls and linux's __asm__ volatile.
// Note: as of 6973570, we have replaced the originally static "dummy" field
// (see above) by a volatile store to the stack.  All of the versions of the
// compilers that we currently use (SunStudio, gcc and VC++) respect the
// semantics of volatile here. If you build HotSpot using other
// compilers, you may need to verify that no compiler reordering occurs
// across the sequence point represented by the volatile access.
//
// This is equivalent to the volatile semantics of the Java Memory Model.
//
// C++ Volatile Semantics
//
// C++ volatile semantics prevent compiler re-ordering between
// volatile memory accesses. However, reordering between non-volatile
// and volatile memory accesses is in general undefined. For compiler
// reordering constraints taking non-volatile memory accesses into
// consideration, a compiler barrier has to be used instead. Some
// compiler implementations may choose to enforce additional
// constraints beyond those required by the language. Note also that
// both volatile semantics and compiler barrier do not prevent
// hardware reordering.
//
// os::is_MP Considered Redundant
//
@@ -240,8 +229,32 @@
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
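A ScopedFence simply brackets whatever sits between its construction and destruction. A hypothetical expansion for the release-store-fence flavor (illustrative; the real call sites are the ordered_store/ordered_load templates defined in orderAccess.inline.hpp):

void ordered_store_sketch(volatile jint* p, jint v) {
  ScopedFence<RELEASE_X_FENCE> f((void*)p);  // constructor runs prefix(): release()
  *p = v;                                    // the bracketed store
}                                            // destructor runs postfix(): fence()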
class OrderAccess : AllStatic {
 public:
  // barriers
  static void loadload();
  static void storestore();
  static void loadstore();
@@ -280,20 +293,6 @@ class OrderAccess : AllStatic {
  static void release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void release_store_ptr(volatile void*     p, void*    v);
static void store_fence(jbyte* p, jbyte v);
static void store_fence(jshort* p, jshort v);
static void store_fence(jint* p, jint v);
static void store_fence(jlong* p, jlong v);
static void store_fence(jubyte* p, jubyte v);
static void store_fence(jushort* p, jushort v);
static void store_fence(juint* p, juint v);
static void store_fence(julong* p, julong v);
static void store_fence(jfloat* p, jfloat v);
static void store_fence(jdouble* p, jdouble v);
static void store_ptr_fence(intptr_t* p, intptr_t v);
static void store_ptr_fence(void** p, void* v);
  static void release_store_fence(volatile jbyte*  p, jbyte  v);
  static void release_store_fence(volatile jshort* p, jshort v);
  static void release_store_fence(volatile jint*   p, jint   v);
@@ -313,6 +312,47 @@ class OrderAccess : AllStatic {
  // routine if it exists, It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();
// Give platforms a variation point to specialize.
template<typename T> static T specialized_load_acquire (volatile T* p );
template<typename T> static void specialized_release_store (volatile T* p, T v);
template<typename T> static void specialized_release_store_fence(volatile T* p, T v);
template<typename FieldType, ScopedFenceType FenceType>
static void ordered_store(volatile FieldType* p, FieldType v);
template<typename FieldType, ScopedFenceType FenceType>
static FieldType ordered_load(volatile FieldType* p);
static void store(volatile jbyte* p, jbyte v);
static void store(volatile jshort* p, jshort v);
static void store(volatile jint* p, jint v);
static void store(volatile jlong* p, jlong v);
static void store(volatile jdouble* p, jdouble v);
static void store(volatile jfloat* p, jfloat v);
static jbyte load (volatile jbyte* p);
static jshort load (volatile jshort* p);
static jint load (volatile jint* p);
static jlong load (volatile jlong* p);
static jdouble load (volatile jdouble* p);
static jfloat load (volatile jfloat* p);
// The following store_fence methods are deprecated and will be removed
// when all repos conform to the new generalized OrderAccess.
static void store_fence(jbyte* p, jbyte v);
static void store_fence(jshort* p, jshort v);
static void store_fence(jint* p, jint v);
static void store_fence(jlong* p, jlong v);
static void store_fence(jubyte* p, jubyte v);
static void store_fence(jushort* p, jushort v);
static void store_fence(juint* p, juint v);
static void store_fence(julong* p, julong v);
static void store_fence(jfloat* p, jfloat v);
static void store_fence(jdouble* p, jdouble v);
static void store_ptr_fence(intptr_t* p, intptr_t v);
static void store_ptr_fence(void** p, void* v);
};

#endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP

View file

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *

@@ -26,6 +26,7 @@
#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"

// Linux

@@ -71,4 +72,92 @@
# include "orderAccess_bsd_zero.inline.hpp"
#endif
#ifdef VM_HAS_GENERALIZED_ORDER_ACCESS
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
template <typename FieldType, ScopedFenceType FenceType>
inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
ScopedFence<FenceType> f((void*)p);
store(p, v);
}
template <typename FieldType, ScopedFenceType FenceType>
inline FieldType OrderAccess::ordered_load(volatile FieldType* p) {
ScopedFence<FenceType> f((void*)p);
return load(p);
}
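Tracing one bound call through this machinery: release_store_fence(p, v) lands in the generalized specialized_release_store_fence, which calls ordered_store<jint, RELEASE_X_FENCE>, whose ScopedFence makes it behave like the hand-written sequence below (an illustrative equivalence, not additional code in the patch):

void release_store_fence_expanded(volatile jint* p, jint v) {
  OrderAccess::release();    // ScopedFence<RELEASE_X_FENCE>::prefix()
  OrderAccess::store(p, v);  // the generalized volatile store
  OrderAccess::fence();      // ScopedFence<RELEASE_X_FENCE>::postfix()
}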
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return specialized_load_acquire(p); }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { return specialized_load_acquire(p); }
inline jint OrderAccess::load_acquire(volatile jint* p) { return specialized_load_acquire(p); }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { return specialized_load_acquire(p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return specialized_load_acquire(p); }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return specialized_load_acquire(p); }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return (jubyte) specialized_load_acquire((volatile jbyte*)p); }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return (jushort)specialized_load_acquire((volatile jshort*)p); }
inline juint OrderAccess::load_acquire(volatile juint* p) { return (juint) specialized_load_acquire((volatile jint*)p); }
inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong) specialized_load_acquire((volatile jlong*)p); }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); }
inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*)specialized_load_acquire((volatile intptr_t*)p); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)specialized_load_acquire((volatile intptr_t*)p); }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jint* p, jint v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { specialized_release_store((volatile jbyte*) p, (jbyte) v); }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { specialized_release_store((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store(volatile juint* p, juint v) { specialized_release_store((volatile jint*) p, (jint) v); }
inline void OrderAccess::release_store(volatile julong* p, julong v) { specialized_release_store((volatile jlong*) p, (jlong) v); }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { specialized_release_store((volatile intptr_t*)p, (intptr_t)v); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { specialized_release_store_fence((volatile jbyte*) p, (jbyte) v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { specialized_release_store_fence((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { specialized_release_store_fence((volatile jint*) p, (jint) v); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { specialized_release_store_fence((volatile jlong*) p, (jlong) v); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { specialized_release_store_fence(p, v); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { specialized_release_store_fence((volatile intptr_t*)p, (intptr_t)v); }
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
template<typename T> inline T OrderAccess::specialized_load_acquire (volatile T* p) { return ordered_load<T, X_ACQUIRE>(p); }
template<typename T> inline void OrderAccess::specialized_release_store (volatile T* p, T v) { ordered_store<T, RELEASE_X>(p, v); }
template<typename T> inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v) { ordered_store<T, RELEASE_X_FENCE>(p, v); }
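As an example of this variation point, a platform with a cheap acquiring load could specialize a single template in its own platform file and leave everything else generalized. A hypothetical specialization for jint (the native barrier would replace the acquire() call):

template<>
inline jint OrderAccess::specialized_load_acquire<jint>(volatile jint* p) {
  jint v = load(p);  // a native ld.acq-style instruction would go here
  acquire();
  return v;
}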
// Generalized atomic volatile accesses valid in OrderAccess
// All other types can be expressed in terms of these.
inline void OrderAccess::store(volatile jbyte* p, jbyte v) { *p = v; }
inline void OrderAccess::store(volatile jshort* p, jshort v) { *p = v; }
inline void OrderAccess::store(volatile jint* p, jint v) { *p = v; }
inline void OrderAccess::store(volatile jlong* p, jlong v) { Atomic::store(v, p); }
inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); }
inline void OrderAccess::store(volatile jfloat* p, jfloat v) { *p = v; }
inline jbyte OrderAccess::load(volatile jbyte* p) { return *p; }
inline jshort OrderAccess::load(volatile jshort* p) { return *p; }
inline jint OrderAccess::load(volatile jint* p) { return *p; }
inline jlong OrderAccess::load(volatile jlong* p) { return Atomic::load(p); }
inline jdouble OrderAccess::load(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
inline jfloat OrderAccess::load(volatile jfloat* p) { return *p; }
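The jlong and jdouble cases route through Atomic because, on 32-bit platforms, a plain 64-bit access may tear into two 32-bit halves. A hypothetical illustration of the hazard the indirection avoids:

volatile jlong counter = 0;

jlong read_counter_racy() {
  return counter;                 // on 32-bit, may mix halves of two writes
}

jlong read_counter_atomic() {
  return Atomic::load(&counter);  // a single indivisible 64-bit load
}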
#endif // VM_HAS_GENERALIZED_ORDER_ACCESS
#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP