Vladimir Kozlov 2014-01-28 12:25:34 -08:00
commit c3a0e80e0b
345 changed files with 58071 additions and 1206 deletions


@@ -0,0 +1,401 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
#include "orderAccess_aix_ppc.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_ppc.hpp"
#ifndef _LP64
#error "Atomic currently only implemented for PPC64"
#endif
// Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline jlong Atomic::load(volatile jlong* src) { return *src; }
//
// machine barrier instructions:
//
// - ppc_sync two-way memory barrier, aka fence
// - ppc_lwsync orders Store|Store,
// Load|Store,
// Load|Load,
// but not Store|Load
// - ppc_eieio orders memory accesses for device memory (only)
// - ppc_isync invalidates speculatively executed instructions
// From the POWER ISA 2.06 documentation:
// "[...] an isync instruction prevents the execution of
// instructions following the isync until instructions
// preceding the isync have completed, [...]"
// From IBM's AIX assembler reference:
// "The isync [...] instructions causes the processor to
// refetch any instructions that might have been fetched
// prior to the isync instruction. The instruction isync
// causes the processor to wait for all previous instructions
// to complete. Then any instructions already fetched are
// discarded and instruction processing continues in the
// environment established by the previous instructions."
//
// semantic barrier instructions:
// (as defined in orderAccess.hpp)
//
// - ppc_release orders Store|Store, (maps to ppc_lwsync)
// Load|Store
// - ppc_acquire orders Load|Store, (maps to ppc_lwsync)
// Load|Load
// - ppc_fence orders Store|Store, (maps to ppc_sync)
// Load|Store,
// Load|Load,
// Store|Load
//
#define strasm_sync "\n sync \n"
#define strasm_lwsync "\n lwsync \n"
#define strasm_isync "\n isync \n"
#define strasm_release strasm_lwsync
#define strasm_acquire strasm_lwsync
#define strasm_fence strasm_sync
#define strasm_nobarrier ""
#define strasm_nobarrier_clobber_memory ""
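// Illustrative sketch (not part of this file's API): because the strasm_*
// macros are plain string literals, adjacent-literal concatenation lets them
// compose with instruction text into a single asm template. A release-store
// of a word could, for example, be written as:
//
//   inline void release_store_word(volatile int* p, int v) {
//     __asm__ __volatile__ (strasm_release        // expands to "\n lwsync \n"
//                           " stw %1, %0 \n"
//                           : "=m" (*p)
//                           : "r" (v)
//                           : "memory");
//   }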
inline jint Atomic::add (jint add_value, volatile jint* dest) {
unsigned int result;
__asm__ __volatile__ (
strasm_lwsync
"1: lwarx %0, 0, %2 \n"
" add %0, %0, %1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_isync
: /*%0*/"=&r" (result)
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );
return (jint) result;
}
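// Usage sketch (illustrative): Atomic::add returns the updated value, and the
// lwsync/isync bracket above makes it a fully ordered read-modify-write:
//
//   volatile jint _counter = 0;
//   jint now = Atomic::add(1, &_counter);   // atomically _counter += 1; now == new value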
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
long result;
__asm__ __volatile__ (
strasm_lwsync
"1: ldarx %0, 0, %2 \n"
" add %0, %0, %1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_isync
: /*%0*/"=&r" (result)
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );
return (intptr_t) result;
}
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}
inline void Atomic::inc (volatile jint* dest) {
unsigned int temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: lwarx %0, 0, %2 \n"
" addic %0, %0, 1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
long temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: ldarx %0, 0, %2 \n"
" addic %0, %0, 1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::inc_ptr(volatile void* dest) {
inc_ptr((volatile intptr_t*)dest);
}
inline void Atomic::dec (volatile jint* dest) {
unsigned int temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: lwarx %0, 0, %2 \n"
" addic %0, %0, -1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
long temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: ldarx %0, 0, %2 \n"
" addic %0, %0, -1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::dec_ptr(volatile void* dest) {
dec_ptr((volatile intptr_t*)dest);
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
unsigned int old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* lwsync */
strasm_lwsync
/* atomic loop */
"1: \n"
" lwarx %[old_value], %[dest], %[zero] \n"
" stwcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* isync */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jint) old_value;
}
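// Usage sketch (illustrative; the names below are invented for exposition):
// a minimal test-and-set spin lock on top of Atomic::xchg. xchg returns the
// previous value, so a zero return means the lock was free and is now ours:
//
//   volatile jint _lock_word = 0;
//   void spin_lock()   { while (Atomic::xchg(1, &_lock_word) != 0) { /* spin */ } }
//   void spin_unlock() { OrderAccess::release_store(&_lock_word, 0); }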
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
long old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* lwsync */
strasm_lwsync
/* atomic loop */
"1: \n"
" ldarx %[old_value], %[dest], %[zero] \n"
" stdcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* isync */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (intptr_t) old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
// (see atomic.hpp).
unsigned int old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* fence */
strasm_sync
/* simple guard */
" lwz %[old_value], 0(%[dest]) \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
"1: \n"
" lwarx %[old_value], %[dest], %[zero] \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stwcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* acquire */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[compare_value] "r" (compare_value),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jint) old_value;
}
inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
// (see atomic.hpp).
long old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* fence */
strasm_sync
/* simple guard */
" ld %[old_value], 0(%[dest]) \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
"1: \n"
" ldarx %[old_value], %[dest], %[zero] \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stdcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* acquire */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[compare_value] "r" (compare_value),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jlong) old_value;
}
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
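// Usage sketch (illustrative; set_max is a name invented for exposition): the
// classic CAS retry loop. cmpxchg returns the value found at dest, which
// equals compare_value exactly when the exchange succeeded:
//
//   void set_max(volatile jint* dest, jint v) {
//     jint cur = *dest;
//     while (v > cur) {
//       jint prev = Atomic::cmpxchg(v, dest, cur);
//       if (prev == cur) break;   // we installed v
//       cur = prev;               // lost the race; retry against the new value
//     }
//   }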
#undef strasm_sync
#undef strasm_lwsync
#undef strasm_isync
#undef strasm_release
#undef strasm_acquire
#undef strasm_fence
#undef strasm_nobarrier
#undef strasm_nobarrier_clobber_memory
#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP


@@ -0,0 +1,54 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);
// If we set CompilerThreadStackSize to a value different from 0, it will
// be used in os::create_thread(). Otherwise, due to the strange logic in os::create_thread(),
// the stack size for compiler threads will default to VMThreadStackSize, although it
// is defined to 4M in os::Aix::default_stack_size()!
define_pd_global(intx, CompilerThreadStackSize, 4096);
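// (These defines only set platform defaults; the flags can still be
// overridden on the command line, e.g. -XX:CompilerThreadStackSize=8192 or
// -XX:ThreadStackSize=2048, the latter being what -Xss maps to.)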
// Allow extra space in DEBUG builds for asserts.
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
define_pd_global(intx, StackYellowPages, 6);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP


@@ -0,0 +1,147 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
#include "runtime/orderAccess.hpp"
#include "vm_version_ppc.hpp"
// Implementation of class OrderAccess.
//
// Machine barrier instructions:
//
// - sync Two-way memory barrier, aka fence.
// - lwsync orders Store|Store,
// Load|Store,
// Load|Load,
// but not Store|Load
// - eieio orders Store|Store
// - isync Invalidates speculatively executed instructions,
// but isync may complete before storage accesses
// associated with instructions preceding isync have
// been performed.
//
// Semantic barrier instructions:
// (as defined in orderAccess.hpp)
//
// - release orders Store|Store, (maps to lwsync)
// Load|Store
// - acquire orders Load|Store, (maps to lwsync)
// Load|Load
// - fence orders Store|Store, (maps to sync)
// Load|Store,
// Load|Load,
// Store|Load
//
#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory");
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
#define inlasm_release() inlasm_lwsync();
#define inlasm_acquire() inlasm_lwsync();
// Use twi-isync for load_acquire (faster than lwsync).
// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
#define inlasm_acquire_reg(X) inlasm_lwsync();
#define inlasm_fence() inlasm_sync();
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
inline void OrderAccess::loadstore() { inlasm_lwsync(); }
inline void OrderAccess::storeload() { inlasm_fence(); }
inline void OrderAccess::acquire() { inlasm_acquire(); }
inline void OrderAccess::release() { inlasm_release(); }
inline void OrderAccess::fence() { inlasm_fence(); }
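// Pairing sketch (illustrative; names invented for exposition): a writer
// publishes data with release_store and a reader consumes it with
// load_acquire. The lwsync on each side guarantees that a reader observing
// _ready == 1 also observes the writer's _payload:
//
//   jint _payload = 0;
//   volatile jint _ready = 0;
//   void writer() { _payload = 42; OrderAccess::release_store(&_ready, 1); }
//   void reader() {
//     if (OrderAccess::load_acquire(&_ready) == 1) {
//       assert(_payload == 42, "published value must be visible");
//     }
//   }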
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { register jbyte t = *p; inlasm_acquire_reg(t); return t; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { register jshort t = *p; inlasm_acquire_reg(t); return t; }
inline jint OrderAccess::load_acquire(volatile jint* p) { register jint t = *p; inlasm_acquire_reg(t); return t; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { register jlong t = *p; inlasm_acquire_reg(t); return t; }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { register jubyte t = *p; inlasm_acquire_reg(t); return t; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
inline juint OrderAccess::load_acquire(volatile juint* p) { register juint t = *p; inlasm_acquire_reg(t); return t; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong)load_acquire((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { register jfloat t = *p; inlasm_acquire(); return t; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)load_acquire((volatile jlong*)p); }
inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; }
inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
#undef inlasm_release
#undef inlasm_acquire
#undef inlasm_fence
#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP


@@ -0,0 +1,565 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// no precompiled headers
#include "assembler_ppc.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "memory/allocation.inline.hpp"
#include "mutex_aix.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_share_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_aix.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// put OS-includes here
# include <ucontext.h>
address os::current_stack_pointer() {
address csp;
#if !defined(USE_XLC_BUILTINS)
// inline assembly for `mr regno(csp), R1_SP':
__asm__ __volatile__ ("mr %0, 1":"=r"(csp):);
#else
csp = (address) __builtin_frame_address(0);
#endif
return csp;
}
char* os::non_memory_address_word() {
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
return (char*) -1;
}
// OS specific thread initialization
//
// Calculate and store the limits of the memory stack.
void os::initialize_thread(Thread *thread) { }
// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc64.hpp.
address os::Aix::ucontext_get_pc(ucontext_t * uc) {
return (address)uc->uc_mcontext.jmp_context.iar;
}
intptr_t* os::Aix::ucontext_get_sp(ucontext_t * uc) {
// gpr1 holds the stack pointer on aix
return (intptr_t*)uc->uc_mcontext.jmp_context.gpr[1/*REG_SP*/];
}
intptr_t* os::Aix::ucontext_get_fp(ucontext_t * uc) {
return NULL;
}
void os::Aix::ucontext_set_pc(ucontext_t* uc, address new_pc) {
uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
}
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
ExtendedPC epc;
ucontext_t* uc = (ucontext_t*)ucVoid;
if (uc != NULL) {
epc = ExtendedPC(os::Aix::ucontext_get_pc(uc));
if (ret_sp) *ret_sp = os::Aix::ucontext_get_sp(uc);
if (ret_fp) *ret_fp = os::Aix::ucontext_get_fp(uc);
} else {
// construct empty ExtendedPC for return value checking
epc = ExtendedPC(NULL);
if (ret_sp) *ret_sp = (intptr_t *)NULL;
if (ret_fp) *ret_fp = (intptr_t *)NULL;
}
return epc;
}
frame os::fetch_frame_from_context(void* ucVoid) {
intptr_t* sp;
intptr_t* fp;
ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
// Avoid crash during crash if pc broken.
if (epc.pc()) {
frame fr(sp, epc.pc());
return fr;
}
frame fr(sp);
return fr;
}
frame os::get_sender_for_C_frame(frame* fr) {
if (*fr->sp() == NULL) {
// fr is the last C frame
return frame(NULL, NULL);
}
return frame(fr->sender_sp(), fr->sender_pc());
}
frame os::current_frame() {
intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
// hack.
frame topframe(csp, (address)0x8);
// return sender of current topframe which hopefully has pc != NULL.
return os::get_sender_for_C_frame(&topframe);
}
// Utility functions
extern "C" JNIEXPORT int
JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
ucontext_t* uc = (ucontext_t*) ucVoid;
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
SignalHandlerMark shm(t);
// Note: it's not uncommon that JNI code uses signal/sigset to install
// and then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
// or have a SIGILL handler when detecting CPU type). When that happens,
// JVM_handle_aix_signal() might be invoked with junk info/ucVoid. To
// avoid an unnecessary crash when libjsig is not preloaded, try to handle
// signals that do not require siginfo/ucontext first.
if (sig == SIGPIPE) {
if (os::Aix::chained_handler(sig, info, ucVoid)) {
return 1;
} else {
if (PrintMiscellaneous && (WizardMode || Verbose)) {
warning("Ignoring SIGPIPE - see bug 4229104");
}
return 1;
}
}
JavaThread* thread = NULL;
VMThread* vmthread = NULL;
if (os::Aix::signal_handlers_are_installed) {
if (t != NULL) {
if(t->is_Java_thread()) {
thread = (JavaThread*)t;
}
else if(t->is_VM_thread()) {
vmthread = (VMThread *)t;
}
}
}
// Decide if this trap can be handled by a stub.
address stub = NULL;
// retrieve program counter
address const pc = uc ? os::Aix::ucontext_get_pc(uc) : NULL;
// retrieve crash address
address const addr = info ? (const address) info->si_addr : NULL;
// SafeFetch 32 handling:
// - make it work if _thread is null
// - make it use the standard os::...::ucontext_get/set_pc APIs
if (uc) {
address const pc = os::Aix::ucontext_get_pc(uc);
if (pc && StubRoutines::is_safefetch_fault(pc)) {
os::Aix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
return true;
}
}
// Handle SIGDANGER right away. AIX would raise SIGDANGER whenever available swap
// space falls below 30%. This is only a chance for the process to gracefully abort.
// We can't hope to proceed after SIGDANGER since SIGKILL tailgates.
if (sig == SIGDANGER) {
goto report_and_die;
}
if (info == NULL || uc == NULL || thread == NULL && vmthread == NULL) {
goto run_chained_handler;
}
// If we are a java thread...
if (thread != NULL) {
// Handle ALL stack overflow variations here
if (sig == SIGSEGV && (addr < thread->stack_base() &&
addr >= thread->stack_base() - thread->stack_size())) {
// stack overflow
//
// If we are in a yellow zone and we are inside java, we disable the yellow zone and
// throw a stack overflow exception.
// If we are in native code or VM C code, we report-and-die. The original coding tried
// to continue with yellow zone disabled, but that doesn't buy us much and prevents
// hs_err_pid files.
if (thread->in_stack_yellow_zone(addr)) {
thread->disable_stack_yellow_zone();
if (thread->thread_state() == _thread_in_Java) {
// Throw a stack overflow exception.
// Guard pages will be reenabled while unwinding the stack.
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
goto run_stub;
} else {
// Thread was in the vm or native code. Return and try to finish.
return 1;
}
} else if (thread->in_stack_red_zone(addr)) {
// Fatal red zone violation. Disable the guard pages and fall through
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
goto report_and_die;
} else {
// This means a segv happened inside our stack, but not in
// the guarded zone. I'd like to know when this happens.
tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
goto report_and_die;
}
} // end handle SIGSEGV inside stack boundaries
if (thread->thread_state() == _thread_in_Java) {
// Java thread running in Java code
// The following signals are used for communicating VM events:
//
// SIGILL: the compiler generates illegal opcodes
// at places where it wishes to interrupt the VM:
// Safepoints, Unreachable Code, Entry points of Zombie methods.
// This results in a SIGILL with (*pc) == inserted illegal instruction.
//
// (so, SIGILLs with a pc inside the zero page are real errors)
//
// SIGTRAP:
// The ppc trap instruction raises a SIGTRAP and is very efficient if it
// does not trap. It is used for conditional branches that are expected
// never to be taken. These are:
// - zombie methods
// - IC (inline cache) misses.
// - null checks leading to UncommonTraps.
// - range checks leading to Uncommon Traps.
// On Aix, these are especially null checks, as the ImplicitNullCheck
// optimization works only in rare cases there: the page at address 0 is only
// write protected.
// Note: !UseSIGTRAP is used to prevent SIGTRAPS altogether, to facilitate debugging.
//
// SIGSEGV:
// used for safe point polling:
// To notify all threads that they have to reach a safe point, safe point polling is used:
// All threads poll a certain mapped memory page. Normally, this page has read access.
// If the VM wants to inform the threads about impending safe points, it sets this
// page to read only ("poisons" the page), and the threads then reach a safe point.
// used for null checks:
// If the compiler finds a store it uses it for a null check. Unfortunately this
// happens rarely. In heap-based and disjoint-base compressed oop modes, loads
// are also used for null checks.
// A VM-related SIGILL may only occur if we are not in the zero page.
// On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
// in the zero page, because it is filled with 0x0. We ignore
// explicit SIGILLs in the zero page.
if (sig == SIGILL && (pc < (address) 0x200)) {
if (TraceTraps) {
tty->print_raw_cr("SIGILL happened inside zero page.");
}
goto report_and_die;
}
// Handle signal from NativeJump::patch_verified_entry().
if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
(!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
if (TraceTraps) {
tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
}
stub = SharedRuntime::get_handle_wrong_method_stub();
goto run_stub;
}
else if (sig == SIGSEGV && os::is_poll_address(addr)) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
}
stub = SharedRuntime::get_poll_stub(pc);
goto run_stub;
}
// SIGTRAP-based ic miss check in compiled code.
else if (sig == SIGTRAP && TrapBasedICMissChecks &&
nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
if (TraceTraps) {
tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::get_ic_miss_stub();
goto run_stub;
}
// SIGTRAP-based implicit null check in compiled code.
else if (sig == SIGTRAP && TrapBasedNullChecks &&
nativeInstruction_at(pc)->is_sigtrap_null_check()) {
if (TraceTraps) {
tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
goto run_stub;
}
// SIGSEGV-based implicit null check in compiled code.
else if (sig == SIGSEGV && ImplicitNullChecks &&
CodeCache::contains((void*) pc) &&
!MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
if (TraceTraps) {
tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
#ifdef COMPILER2
// SIGTRAP-based implicit range check in compiled code.
else if (sig == SIGTRAP && TrapBasedRangeChecks &&
nativeInstruction_at(pc)->is_sigtrap_range_check()) {
if (TraceTraps) {
tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
goto run_stub;
}
#endif
else if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
if (TraceTraps) {
tty->print_raw_cr("Fix SIGFPE handler, trying divide by zero handler.");
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
goto run_stub;
}
else if (sig == SIGBUS) {
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
return 1;
}
}
}
else { // thread->thread_state() != _thread_in_Java
// Detect CPU features. This is only done at the very start of the VM. Later, the
// VM_Version::is_determine_features_test_running() flag should be false.
if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
// SIGILL must be caused by VM_Version::determine_features().
*(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
// flushing of icache is not necessary.
stub = pc + 4; // continue with next instruction.
goto run_stub;
}
else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && thread->doing_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
return 1;
}
}
// Check to see if we caught the safepoint code in the
// process of write protecting the memory serialization page.
// It write enables the page immediately after protecting it
// so we can just return to retry the write.
if ((sig == SIGSEGV) &&
os::is_memory_serialize_page(thread, addr)) {
// Synchronization problem in the pseudo memory barrier code (bug id 6546278)
// Block the current thread until the memory serialize page permission is restored.
os::block_on_serialize_page_trap();
return true;
}
}
run_stub:
// One of the above code blocks initialized the stub, so we want to
// delegate control to that stub.
if (stub != NULL) {
// Save all thread context in case we need to restore it.
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->uc_mcontext.jmp_context.iar = (unsigned long)stub;
return 1;
}
run_chained_handler:
// signal-chaining
if (os::Aix::chained_handler(sig, info, ucVoid)) {
return 1;
}
if (!abort_if_unrecognized) {
// caller wants another chance, so give it to him
return 0;
}
report_and_die:
// Use sigthreadmask instead of sigprocmask on AIX and unmask current signal.
sigset_t newset;
sigemptyset(&newset);
sigaddset(&newset, sig);
sigthreadmask(SIG_UNBLOCK, &newset, NULL);
VMError err(t, sig, pc, info, ucVoid);
err.report_and_die();
ShouldNotReachHere();
return 0;
}
void os::Aix::init_thread_fpu_state(void) {
#if !defined(USE_XLC_BUILTINS)
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
#else
__mtfsfi(6, 0);
#endif
}
////////////////////////////////////////////////////////////////////////////////
// thread stack
size_t os::Aix::min_stack_allowed = 768*K;
// Aix is always in floating stack mode. The stack size for a new
// thread can be set via pthread_attr_setstacksize().
bool os::Aix::supports_variable_stack_size() { return true; }
// return default stack size for thr_type
size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
// Notice that the setting for compiler threads here has no impact
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
return s;
}
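// Call-site sketch (illustrative; this is not the actual os::create_thread()
// code, and thread_entry/thread_arg are hypothetical names): in floating
// stack mode only the stack size is chosen by the VM, the location is left
// to libpthread:
//
//   pthread_t tid;
//   pthread_attr_t attr;
//   pthread_attr_init(&attr);
//   pthread_attr_setstacksize(&attr, os::Aix::default_stack_size(thr_type));
//   pthread_create(&tid, &attr, thread_entry, thread_arg);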
size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
return 2 * page_size();
}
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
void os::print_context(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t* uc = (ucontext_t*)context;
st->print_cr("Registers:");
st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.iar);
st->print("lr =" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.lr);
st->print("ctr=" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.ctr);
st->cr();
for (int i = 0; i < 32; i++) {
st->print("r%-2d=" INTPTR_FORMAT " ", i, uc->uc_mcontext.jmp_context.gpr[i]);
if (i % 3 == 2) st->cr();
}
st->cr();
st->cr();
intptr_t *sp = (intptr_t *)os::Aix::ucontext_get_sp(uc);
st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
st->cr();
// Note: it may be unsafe to inspect memory near pc. For example, pc may
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Aix::ucontext_get_pc(uc);
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
st->cr();
// Try to decode the instructions.
st->print_cr("Decoded instructions: (pc=" PTR_FORMAT ")", pc);
st->print("<TODO: PPC port - print_context>");
// TODO: PPC port Disassembler::decode(pc, 16, 16, st);
st->cr();
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
st->print("Not ported - print_register_info\n");
}
extern "C" {
int SpinPause() {
return 0;
}
}
#ifndef PRODUCT
void os::verify_stack_alignment() {
assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif


@@ -0,0 +1,35 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
static void setup_fpu() {}
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP


@@ -0,0 +1,58 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#include "runtime/prefetch.hpp"
inline void Prefetch::read(void *loc, intx interval) {
#if !defined(USE_XLC_BUILTINS)
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbt(((address)loc) +((long)interval));
#endif
}
inline void Prefetch::write(void *loc, intx interval) {
#if !defined(USE_XLC_PREFETCH_WRITE_BUILTIN)
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
#else
__dcbtst( ((address)loc) +((long)interval) );
#endif
}
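// Usage sketch (illustrative): dcbt/dcbtst are pure hints that never fault,
// so prefetching a fixed distance ahead of a linear scan is safe even past
// the end of the array:
//
//   for (size_t i = 0; i < n; i++) {
//     Prefetch::read((void*)&a[i], 8 * sizeof(a[0]));   // warm the line ~8 elements ahead
//     sum += a[i];
//   }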
#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP


@@ -0,0 +1,40 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "thread_aix.inline.hpp"
void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing we can do here for user-level threads
}
void ThreadLocalStorage::pd_init() {
// Nothing to do
}
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
}


@@ -0,0 +1,36 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
// Processor dependent parts of ThreadLocalStorage
public:
static Thread* thread() {
return (Thread *) os::thread_local_storage_at(thread_index());
}
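// Usage sketch (illustrative): this accessor is the TLS lookup the VM uses
// to find the current thread; it simply reads back the pointer that
// ThreadLocalStorage::pd_set_thread() stored for this pthread:
//
//   Thread* t = ThreadLocalStorage::thread();   // NULL until pd_set_thread() has run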
#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP


@@ -0,0 +1,36 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
#include "thread_aix.inline.hpp"
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Aix/PPC.
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
Unimplemented();
return false;
}
void JavaThread::cache_global_variables() { }


@@ -0,0 +1,79 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
private:
void pd_initialize() {
_anchor.clear();
_last_interpreter_fp = NULL;
}
// The `last' frame is the youngest Java frame on the thread's stack.
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
intptr_t* sp = last_Java_sp();
address pc = _anchor.last_Java_pc();
// last_Java_pc is not set if we come here from compiled code.
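// (On the PPC64 ABI the return pc is saved in the LR save slot two machine
// words above the stack pointer, which is what *(sp + 2) reads back below.)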
if (pc == NULL)
pc = (address) *(sp + 2);
return frame(sp, pc);
}
public:
void set_base_of_stack_pointer(intptr_t* base_sp) {}
intptr_t* base_of_stack_pointer() { return NULL; }
void record_base_of_stack_pointer() {}
// These routines are only used on cpu architectures that
// have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; }
static void enable_register_stack_guard() {}
static void disable_register_stack_guard() {}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
// -Xprof support
//
// In order to find the last Java fp from an async profile
// tick, we store the current interpreter fp in the thread.
// This value is only valid while we are in the C++ interpreter
// and profiling.
protected:
intptr_t *_last_interpreter_fp;
public:
static ByteSize last_interpreter_fp_offset() {
return byte_offset_of(JavaThread, _last_interpreter_fp);
}
intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP


@@ -0,0 +1,55 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP
// These are the OS and CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
nonstatic_field(OSThread, _thread_id, pid_t) \
nonstatic_field(OSThread, _pthread_id, pthread_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Posix Thread IDs */ \
/**********************/ \
\
declare_integer_type(pid_t) \
declare_unsigned_integer_type(pthread_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP


@@ -36,7 +36,7 @@
// Atomically copy 64 bits of data
static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
"stfd %0, 0(%2)\n"


@@ -0,0 +1,401 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
#include "orderAccess_linux_ppc.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_ppc.hpp"
#ifndef PPC64
#error "Atomic currently only implemented for PPC64"
#endif
// Implementation of class atomic
inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; }
inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; }
inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; }
inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
inline jlong Atomic::load(volatile jlong* src) { return *src; }
/*
machine barrier instructions:
- sync two-way memory barrier, aka fence
- lwsync orders Store|Store,
Load|Store,
Load|Load,
but not Store|Load
- eieio orders memory accesses for device memory (only)
- isync invalidates speculatively executed instructions
From the POWER ISA 2.06 documentation:
"[...] an isync instruction prevents the execution of
instructions following the isync until instructions
preceding the isync have completed, [...]"
From IBM's AIX assembler reference:
"The isync [...] instructions causes the processor to
refetch any instructions that might have been fetched
prior to the isync instruction. The instruction isync
causes the processor to wait for all previous instructions
to complete. Then any instructions already fetched are
discarded and instruction processing continues in the
environment established by the previous instructions."
semantic barrier instructions:
(as defined in orderAccess.hpp)
- release orders Store|Store, (maps to lwsync)
Load|Store
- acquire orders Load|Store, (maps to lwsync)
Load|Load
- fence orders Store|Store, (maps to sync)
Load|Store,
Load|Load,
Store|Load
*/
#define strasm_sync "\n sync \n"
#define strasm_lwsync "\n lwsync \n"
#define strasm_isync "\n isync \n"
#define strasm_release strasm_lwsync
#define strasm_acquire strasm_lwsync
#define strasm_fence strasm_sync
#define strasm_nobarrier ""
#define strasm_nobarrier_clobber_memory ""
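// Illustration (editor's sketch, not part of this change): the strasm_* macros
// are plain string literals, so they concatenate directly into the asm
// template. A hypothetical atomic fetch-and-or would be bracketed exactly
// like Atomic::add below:
//
//   __asm__ __volatile__ (
//     strasm_lwsync                    // release barrier before the update
//     "1: lwarx  %0, 0, %2   \n"       // load word and reserve
//     "   or     %0, %0, %1  \n"       // modify
//     "   stwcx. %0, 0, %2   \n"       // store conditionally
//     "   bne-   1b          \n"       // retry if the reservation was lost
//     strasm_isync                     // acquire barrier after the update
//     : /*%0*/"=&r" (result)
//     : /*%1*/"r" (mask), /*%2*/"r" (dest)
//     : "cc", "memory" );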
inline jint Atomic::add (jint add_value, volatile jint* dest) {
unsigned int result;
__asm__ __volatile__ (
strasm_lwsync
"1: lwarx %0, 0, %2 \n"
" add %0, %0, %1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_isync
: /*%0*/"=&r" (result)
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );
return (jint) result;
}
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
long result;
__asm__ __volatile__ (
strasm_lwsync
"1: ldarx %0, 0, %2 \n"
" add %0, %0, %1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_isync
: /*%0*/"=&r" (result)
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );
return (intptr_t) result;
}
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}
inline void Atomic::inc (volatile jint* dest) {
unsigned int temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: lwarx %0, 0, %2 \n"
" addic %0, %0, 1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
long temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: ldarx %0, 0, %2 \n"
" addic %0, %0, 1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::inc_ptr(volatile void* dest) {
inc_ptr((volatile intptr_t*)dest);
}
inline void Atomic::dec (volatile jint* dest) {
unsigned int temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: lwarx %0, 0, %2 \n"
" addic %0, %0, -1 \n"
" stwcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
long temp;
__asm__ __volatile__ (
strasm_nobarrier
"1: ldarx %0, 0, %2 \n"
" addic %0, %0, -1 \n"
" stdcx. %0, 0, %2 \n"
" bne- 1b \n"
strasm_nobarrier
: /*%0*/"=&r" (temp), "=m" (*dest)
: /*%2*/"r" (dest), "m" (*dest)
: "cc" strasm_nobarrier_clobber_memory);
}
inline void Atomic::dec_ptr(volatile void* dest) {
dec_ptr((volatile intptr_t*)dest);
}
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
unsigned int old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* lwsync */
strasm_lwsync
/* atomic loop */
"1: \n"
" lwarx %[old_value], %[dest], %[zero] \n"
" stwcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* sync */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jint) old_value;
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).
long old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* lwsync */
strasm_lwsync
/* atomic loop */
"1: \n"
" ldarx %[old_value], %[dest], %[zero] \n"
" stdcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* sync */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (intptr_t) old_value;
}
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
// (see atomic.hpp).
unsigned int old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* fence */
strasm_sync
/* simple guard */
" lwz %[old_value], 0(%[dest]) \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
"1: \n"
" lwarx %[old_value], %[dest], %[zero] \n"
" cmpw %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stwcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* acquire */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[compare_value] "r" (compare_value),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jint) old_value;
}
inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
// Note that cmpxchg guarantees a two-way memory barrier across
// the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
// (see atomic.hpp).
long old_value;
const uint64_t zero = 0;
__asm__ __volatile__ (
/* fence */
strasm_sync
/* simple guard */
" ld %[old_value], 0(%[dest]) \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
/* atomic loop */
"1: \n"
" ldarx %[old_value], %[dest], %[zero] \n"
" cmpd %[compare_value], %[old_value] \n"
" bne- 2f \n"
" stdcx. %[exchange_value], %[dest], %[zero] \n"
" bne- 1b \n"
/* acquire */
strasm_sync
/* exit */
"2: \n"
/* out */
: [old_value] "=&r" (old_value),
"=m" (*dest)
/* in */
: [dest] "b" (dest),
[zero] "r" (zero),
[compare_value] "r" (compare_value),
[exchange_value] "r" (exchange_value),
"m" (*dest)
/* clobber */
: "cc",
"memory"
);
return (jlong) old_value;
}
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
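// Usage sketch (editor's illustration, not part of this change): a typical
// compare-and-swap retry loop built on the cmpxchg above; 'counter' is a
// hypothetical volatile jint*. The sync before and after the operation makes
// each iteration a two-way barrier, as noted in the comments.
//
//   jint old;
//   do {
//     old = *counter;
//   } while (Atomic::cmpxchg(old + 1, counter, old) != old);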
#undef strasm_sync
#undef strasm_lwsync
#undef strasm_isync
#undef strasm_release
#undef strasm_acquire
#undef strasm_fence
#undef strasm_nobarrier
#undef strasm_nobarrier_clobber_memory
#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP


@@ -0,0 +1,54 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);
// if we set CompilerThreadStackSize to a value different than 0, it will
// be used in os::create_thread(). Otherwise, due to the strange logic in os::create_thread(),
// the stack size for compiler threads will default to VMThreadStackSize, although it
// is defined to 4M in os::Linux::default_stack_size()!
define_pd_global(intx, CompilerThreadStackSize, 4096);
// Allow extra space in DEBUG builds for asserts.
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
define_pd_global(intx, StackYellowPages, 6);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 6 DEBUG_ONLY(+2));
// Only used on 64 bit platforms
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP


@@ -0,0 +1,149 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
#include "runtime/orderAccess.hpp"
#include "vm_version_ppc.hpp"
#ifndef PPC64
#error "OrderAccess currently only implemented for PPC64"
#endif
// Implementation of class OrderAccess.
//
// Machine barrier instructions:
//
// - sync Two-way memory barrier, aka fence.
// - lwsync orders Store|Store,
// Load|Store,
// Load|Load,
// but not Store|Load
// - eieio orders Store|Store
// - isync Invalidates speculatively executed instructions,
// but isync may complete before storage accesses
// associated with instructions preceding isync have
// been performed.
//
// Semantic barrier instructions:
// (as defined in orderAccess.hpp)
//
// - release orders Store|Store, (maps to lwsync)
// Load|Store
// - acquire orders Load|Store, (maps to lwsync)
// Load|Load
// - fence orders Store|Store, (maps to sync)
// Load|Store,
// Load|Load,
// Store|Load
//
#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory");
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
#define inlasm_release() inlasm_lwsync();
#define inlasm_acquire() inlasm_lwsync();
// Use twi-isync for load_acquire (faster than lwsync).
#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
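// (twi 0,%0,0 never traps, but it uses the just-loaded value as an operand,
//  so it cannot execute before the load completes; the following isync then
//  discards speculatively fetched instructions. Together they order all
//  subsequent accesses after the load, giving acquire semantics more cheaply
//  than a full lwsync.)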
#define inlasm_fence() inlasm_sync();
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
inline void OrderAccess::loadstore() { inlasm_lwsync(); }
inline void OrderAccess::storeload() { inlasm_fence(); }
inline void OrderAccess::acquire() { inlasm_acquire(); }
inline void OrderAccess::release() { inlasm_release(); }
inline void OrderAccess::fence() { inlasm_fence(); }
inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { register jbyte t = *p; inlasm_acquire_reg(t); return t; }
inline jshort OrderAccess::load_acquire(volatile jshort* p) { register jshort t = *p; inlasm_acquire_reg(t); return t; }
inline jint OrderAccess::load_acquire(volatile jint* p) { register jint t = *p; inlasm_acquire_reg(t); return t; }
inline jlong OrderAccess::load_acquire(volatile jlong* p) { register jlong t = *p; inlasm_acquire_reg(t); return t; }
inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { register jubyte t = *p; inlasm_acquire_reg(t); return t; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
inline juint OrderAccess::load_acquire(volatile juint* p) { register juint t = *p; inlasm_acquire_reg(t); return t; }
inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong)load_acquire((volatile jlong*)p); }
inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { register jfloat t = *p; inlasm_acquire(); return t; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)load_acquire((volatile jlong*)p); }
inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; }
inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
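// Usage sketch (editor's illustration, not part of this change): the classic
// publish/consume pairing built from the primitives above; 'payload' and
// 'ready' are hypothetical.
//
//   // Publisher:
//   payload = compute();                      // plain store
//   OrderAccess::release_store(&ready, 1);    // lwsync, then store
//
//   // Consumer:
//   if (OrderAccess::load_acquire(&ready)) {  // load, then twi+isync
//     use(payload);                           // ordered after the flag load
//   }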
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
#undef inlasm_release
#undef inlasm_acquire
#undef inlasm_fence
#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP


@@ -0,0 +1,614 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// no precompiled headers
#include "assembler_ppc.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
address os::current_stack_pointer() {
intptr_t* csp;
// inline assembly `mr regno(csp), R1_SP':
__asm__ __volatile__ ("mr %0, 1":"=r"(csp):);
return (address) csp;
}
char* os::non_memory_address_word() {
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
return (char*) -1;
}
void os::initialize_thread(Thread *thread) { }
// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc64.hpp.
address os::Linux::ucontext_get_pc(ucontext_t * uc) {
// On powerpc64, ucontext_t is not self-contained but contains
// a pointer to an optional substructure (mcontext_t.regs) containing the volatile
// registers - NIP, among others.
// This substructure may or may not be there depending where uc came from:
// - if uc was handed over as the argument to a sigaction handler, a pointer to the
// substructure was provided by the kernel when calling the signal handler, and
// regs->nip can be accessed.
// - if uc was filled by getcontext(), it is undefined - getcontext() does not fill
// it because the volatile registers are not needed to make setcontext() work.
// Hopefully it was zero'd out beforehand.
guarantee(uc->uc_mcontext.regs != NULL, "only use ucontext_get_pc in sigaction context");
return (address)uc->uc_mcontext.regs->nip;
}
intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
}
intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
return NULL;
}
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
ExtendedPC epc;
ucontext_t* uc = (ucontext_t*)ucVoid;
if (uc != NULL) {
epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
} else {
// construct empty ExtendedPC for return value checking
epc = ExtendedPC(NULL);
if (ret_sp) *ret_sp = (intptr_t *)NULL;
if (ret_fp) *ret_fp = (intptr_t *)NULL;
}
return epc;
}
frame os::fetch_frame_from_context(void* ucVoid) {
intptr_t* sp;
intptr_t* fp;
ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
return frame(sp, epc.pc());
}
frame os::get_sender_for_C_frame(frame* fr) {
if (*fr->sp() == 0) {
// fr is the last C frame
return frame(NULL, NULL);
}
return frame(fr->sender_sp(), fr->sender_pc());
}
frame os::current_frame() {
intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
// hack.
frame topframe(csp, (address)0x8);
// return sender of current topframe which hopefully has pc != NULL.
return os::get_sender_for_C_frame(&topframe);
}
// Utility functions
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
siginfo_t* info,
void* ucVoid,
int abort_if_unrecognized) {
ucontext_t* uc = (ucontext_t*) ucVoid;
Thread* t = ThreadLocalStorage::get_thread_slow();
SignalHandlerMark shm(t);
// Note: it's not uncommon that JNI code uses signal/sigset to install and
// then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
// or to have a SIGILL handler when detecting CPU type). When that happens,
// JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
// avoid an unnecessary crash when libjsig is not preloaded, try to handle
// signals that do not require siginfo/ucontext first.
if (sig == SIGPIPE) {
if (os::Linux::chained_handler(sig, info, ucVoid)) {
return true;
} else {
if (PrintMiscellaneous && (WizardMode || Verbose)) {
warning("Ignoring SIGPIPE - see bug 4229104");
}
return true;
}
}
JavaThread* thread = NULL;
VMThread* vmthread = NULL;
if (os::Linux::signal_handlers_are_installed) {
if (t != NULL) {
if(t->is_Java_thread()) {
thread = (JavaThread*)t;
} else if(t->is_VM_thread()) {
vmthread = (VMThread *)t;
}
}
}
// Moved SafeFetch32 handling outside thread!=NULL conditional block to make
// it work if no associated JavaThread object exists.
if (uc) {
address const pc = os::Linux::ucontext_get_pc(uc);
if (pc && StubRoutines::is_safefetch_fault(pc)) {
uc->uc_mcontext.regs->nip = (unsigned long)StubRoutines::continuation_for_safefetch_fault(pc);
return true;
}
}
// decide if this trap can be handled by a stub
address stub = NULL;
address pc = NULL;
//%note os_trap_1
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Linux::ucontext_get_pc(uc);
// Handle ALL stack overflow variations here
if (sig == SIGSEGV) {
// si_addr may not be valid due to a bug in the linux-ppc64 kernel (see
// comment below). Use get_stack_bang_address instead of si_addr.
address addr = ((NativeInstruction*)pc)->get_stack_bang_address(uc);
// Check if fault address is within thread stack.
if (addr < thread->stack_base() &&
addr >= thread->stack_base() - thread->stack_size()) {
// stack overflow
if (thread->in_stack_yellow_zone(addr)) {
thread->disable_stack_yellow_zone();
if (thread->thread_state() == _thread_in_Java) {
// Throw a stack overflow exception.
// Guard pages will be reenabled while unwinding the stack.
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
} else {
// Thread was in the vm or native code. Return and try to finish.
return 1;
}
} else if (thread->in_stack_red_zone(addr)) {
// Fatal red zone violation. Disable the guard pages and fall through
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
// This is a likely cause, but hard to verify. Let's just print
// it as a hint.
tty->print_raw_cr("Please check if any of your loaded .so files has "
"enabled executable stack (see man page execstack(8))");
} else {
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
// current thread was created by user code with MAP_GROWSDOWN flag
// and then attached to VM. See notes in os_linux.cpp.
if (thread->osthread()->expanding_stack() == 0) {
thread->osthread()->set_expanding_stack();
if (os::Linux::manually_expand_stack(thread, addr)) {
thread->osthread()->clear_expanding_stack();
return 1;
}
thread->osthread()->clear_expanding_stack();
} else {
fatal("recursive segv. expanding stack.");
}
}
}
}
if (thread->thread_state() == _thread_in_Java) {
// Java thread running in Java code => find exception handler if any
// a fault inside compiled code, the interpreter, or a stub
// A VM-related SIGILL may only occur if we are not in the zero page.
// On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
// in the zero page, because it is filled with 0x0. We ignore
// explicit SIGILLs in the zero page.
if (sig == SIGILL && (pc < (address) 0x200)) {
if (TraceTraps) {
tty->print_raw_cr("SIGILL happened inside zero page.");
}
goto report_and_die;
}
// Handle signal from NativeJump::patch_verified_entry().
if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
(!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
if (TraceTraps) {
tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
}
stub = SharedRuntime::get_handle_wrong_method_stub();
}
else if (sig == SIGSEGV &&
// A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
// in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
// especially when we try to read from the safepoint polling page. So the check
// (address)info->si_addr == os::get_standard_polling_page()
// doesn't work for us. We use:
((NativeInstruction*)pc)->is_safepoint_poll()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
}
stub = SharedRuntime::get_poll_stub(pc);
}
// SIGTRAP-based ic miss check in compiled code.
else if (sig == SIGTRAP && TrapBasedICMissChecks &&
nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
if (TraceTraps) {
tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::get_ic_miss_stub();
}
// SIGTRAP-based implicit null check in compiled code.
else if (sig == SIGTRAP && TrapBasedNullChecks &&
nativeInstruction_at(pc)->is_sigtrap_null_check()) {
if (TraceTraps) {
tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
// SIGSEGV-based implicit null check in compiled code.
else if (sig == SIGSEGV && ImplicitNullChecks &&
CodeCache::contains((void*) pc) &&
!MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
if (TraceTraps) {
tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
#ifdef COMPILER2
// SIGTRAP-based implicit range check in compiled code.
else if (sig == SIGTRAP && TrapBasedRangeChecks &&
nativeInstruction_at(pc)->is_sigtrap_range_check()) {
if (TraceTraps) {
tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
#endif
else if (sig == SIGBUS) {
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
return true;
}
}
}
else { // thread->thread_state() != _thread_in_Java
if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
// SIGILL must be caused by VM_Version::determine_features().
*(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
// flushing of icache is not necessary.
stub = pc + 4; // continue with next instruction.
}
else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && thread->doing_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
return true;
}
}
// Check to see if we caught the safepoint code in the
// process of write protecting the memory serialization page.
// It write enables the page immediately after protecting it
// so we can just return to retry the write.
if ((sig == SIGSEGV) &&
// si_addr may not be valid due to a bug in the linux-ppc64 kernel (see comment above).
// Use is_memory_serialization instead of si_addr.
((NativeInstruction*)pc)->is_memory_serialization(thread, ucVoid)) {
// Synchronization problem in the pseudo memory barrier code (bug id 6546278)
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
return true;
}
}
if (stub != NULL) {
// Save all thread context in case we need to restore it.
if (thread != NULL) thread->set_saved_exception_pc(pc);
uc->uc_mcontext.regs->nip = (unsigned long)stub;
return true;
}
// signal-chaining
if (os::Linux::chained_handler(sig, info, ucVoid)) {
return true;
}
if (!abort_if_unrecognized) {
// caller wants another chance, so give it to him
return false;
}
if (pc == NULL && uc != NULL) {
pc = os::Linux::ucontext_get_pc(uc);
}
report_and_die:
// unmask current signal
sigset_t newset;
sigemptyset(&newset);
sigaddset(&newset, sig);
sigprocmask(SIG_UNBLOCK, &newset, NULL);
VMError err(t, sig, pc, info, ucVoid);
err.report_and_die();
ShouldNotReachHere();
return false;
}
void os::Linux::init_thread_fpu_state(void) {
// Disable FP exceptions.
__asm__ __volatile__ ("mtfsfi 6,0");
}
int os::Linux::get_fpu_control_word(void) {
// x86 has problems with FPU precision after pthread_cond_timedwait().
// Nothing to do on ppc64.
return 0;
}
void os::Linux::set_fpu_control_word(int fpu_control) {
// x86 has problems with FPU precision after pthread_cond_timedwait().
// Nothing to do on ppc64.
}
////////////////////////////////////////////////////////////////////////////////
// thread stack
size_t os::Linux::min_stack_allowed = 768*K;
bool os::Linux::supports_variable_stack_size() { return true; }
// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
// Notice that the setting for compiler threads here has no impact
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
return s;
}
size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
return 2 * page_size();
}
// Java thread:
//
// Low memory addresses
// +------------------------+
// | |\ JavaThread created by VM does not have glibc
// | glibc guard page | - guard, attached Java thread usually has
// | |/ 1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
// | |\
// | HotSpot Guard Pages | - red and yellow pages
// | |/
// +------------------------+ JavaThread::stack_yellow_zone_base()
// | |\
// | Normal Stack | -
// | |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
// Low memory addresses
// +------------------------+
// | |\
// | glibc guard page | - usually 1 page
// | |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
// | |\
// | Normal Stack | -
// | |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack().
static void current_stack_region(address * bottom, size_t * size) {
if (os::Linux::is_initial_thread()) {
// initial thread needs special handling because pthread_getattr_np()
// may return a bogus value.
*bottom = os::Linux::initial_thread_stack_bottom();
*size = os::Linux::initial_thread_stack_size();
} else {
pthread_attr_t attr;
int rslt = pthread_getattr_np(pthread_self(), &attr);
// JVM needs to know exact stack location, abort if it fails
if (rslt != 0) {
if (rslt == ENOMEM) {
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
} else {
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
}
}
if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
fatal("Can not locate current stack attributes!");
}
pthread_attr_destroy(&attr);
}
assert(os::current_stack_pointer() >= *bottom &&
os::current_stack_pointer() < *bottom + *size, "just checking");
}
address os::current_stack_base() {
address bottom;
size_t size;
current_stack_region(&bottom, &size);
return (bottom + size);
}
size_t os::current_stack_size() {
// stack size includes normal stack and HotSpot guard pages
address bottom;
size_t size;
current_stack_region(&bottom, &size);
return size;
}
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
void os::print_context(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t* uc = (ucontext_t*)context;
st->print_cr("Registers:");
st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.regs->nip);
st->print("lr =" INTPTR_FORMAT " ", uc->uc_mcontext.regs->link);
st->print("ctr=" INTPTR_FORMAT " ", uc->uc_mcontext.regs->ctr);
st->cr();
for (int i = 0; i < 32; i++) {
st->print("r%-2d=" INTPTR_FORMAT " ", i, uc->uc_mcontext.regs->gpr[i]);
if (i % 3 == 2) st->cr();
}
st->cr();
st->cr();
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
st->cr();
// Note: it may be unsafe to inspect memory near pc. For example, pc may
// point to garbage if the entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Linux::ucontext_get_pc(uc);
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
st->cr();
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
for (int i = 0; i < 32; i++) {
st->print("r%-2d=", i);
print_location(st, uc->uc_mcontext.regs->gpr[i]);
}
st->cr();
}
extern "C" {
int SpinPause() {
return 0;
}
}
#ifndef PRODUCT
void os::verify_stack_alignment() {
assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif


@@ -0,0 +1,35 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
static void setup_fpu() {}
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP


@@ -0,0 +1,50 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
#include "runtime/prefetch.hpp"
inline void Prefetch::read(void *loc, intx interval) {
__asm__ __volatile__ (
" dcbt 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
}
inline void Prefetch::write(void *loc, intx interval) {
__asm__ __volatile__ (
" dcbtst 0, %0 \n"
:
: /*%0*/"r" ( ((address)loc) +((long)interval) )
//:
);
}
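// Usage sketch (editor's illustration, not part of this change): prefetching
// ahead of a sequential scan; 'a', 'n' and 'process' are hypothetical.
//
//   for (int i = 0; i < n; i++) {
//     Prefetch::read((void*)&a[i], 256);   // dcbt the line ~256 bytes ahead
//     process(a[i]);
//   }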
#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP


@@ -0,0 +1,39 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/threadLocalStorage.hpp"
void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing we can do here for user-level threads
}
void ThreadLocalStorage::pd_init() {
// Nothing to do
}
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
}


@@ -0,0 +1,36 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
// Processor dependent parts of ThreadLocalStorage
public:
static Thread* thread() {
return (Thread *) os::thread_local_storage_at(thread_index());
}
#endif // OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP


@@ -0,0 +1,36 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
#include "thread_linux.inline.hpp"
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
Unimplemented();
return false;
}
void JavaThread::cache_global_variables() { }


@@ -0,0 +1,83 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
private:
void pd_initialize() {
_anchor.clear();
_last_interpreter_fp = NULL;
}
// The `last' frame is the youngest Java frame on the thread's stack.
frame pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
intptr_t* sp = last_Java_sp();
address pc = _anchor.last_Java_pc();
// last_Java_pc is not set if we come here from compiled code.
if (pc == NULL) {
pc = (address) *(sp + 2);
}
return frame(sp, pc);
}
public:
void set_base_of_stack_pointer(intptr_t* base_sp) {}
intptr_t* base_of_stack_pointer() { return NULL; }
void record_base_of_stack_pointer() {}
// These routines are only used on cpu architectures that
// have separate register stacks (Itanium).
static bool register_stack_overflow() { return false; }
static void enable_register_stack_guard() {}
static void disable_register_stack_guard() {}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
protected:
// -Xprof support
//
// In order to find the last Java fp from an async profile
// tick, we store the current interpreter fp in the thread.
// This value is only valid while we are in the C++ interpreter
// and profiling.
intptr_t *_last_interpreter_fp;
public:
static ByteSize last_interpreter_fp_offset() {
return byte_offset_of(JavaThread, _last_interpreter_fp);
}
intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
#endif // OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP


@@ -0,0 +1,55 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
// These are the OS and CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
nonstatic_field(OSThread, _thread_id, pid_t) \
nonstatic_field(OSThread, _pthread_id, pthread_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Posix Thread IDs */ \
/**********************/ \
\
declare_integer_type(pid_t) \
declare_unsigned_integer_type(pthread_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
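// Illustration (editor's sketch, assuming the standard vmStructs.cpp pattern):
// vmStructs.cpp instantiates these macros with generator macros, roughly
//
//   VM_STRUCTS_OS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, ...)
//
// so each nonstatic_field(...) row above becomes one table entry describing
// the field's offset and type to the Serviceability Agent.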
#endif // OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP


@@ -36,7 +36,7 @@
// Atomically copy 64 bits of data
static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
"stfd %0, 0(%2)\n"