8151593: Cleanup definition/usage of INLINE/NOINLINE macros and add xlC support

Reviewed-by: coleenp, stuefe
Matthias Baesken, 2016-03-11 16:39:38 +01:00 (committed by Volker Simonis)
parent d1c99d7509
commit f90ee56f23
12 changed files with 65 additions and 92 deletions

src/os/linux/vm/os_linux.cpp

@@ -593,15 +593,7 @@ void os::Linux::libpthread_init() {
 // _expand_stack_to() assumes its frame size is less than page size, which
 // should always be true if the function is not inlined.
-#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
-#define NOINLINE
-#else
-#define NOINLINE __attribute__ ((noinline))
-#endif
-
-static void _expand_stack_to(address bottom) NOINLINE;
-static void _expand_stack_to(address bottom) {
+static void NOINLINE _expand_stack_to(address bottom) {
   address sp;
   size_t size;
   volatile char *p;
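
The NOINLINE on _expand_stack_to() is load-bearing: the function touches stack
pages below the caller's frame and is only correct while its own frame stays
under one page, a premise that inlining would silently break. A minimal
standalone sketch (hypothetical code, not from HotSpot) of the frame guarantee
the attribute provides, using the same gcc spelling:

#include <cstdint>
#include <cstdio>

#define NOINLINE __attribute__ ((noinline))

// With noinline the callee always gets its own stack frame, so reasoning
// such as "this function's frame is smaller than a page" stays valid no
// matter how large the caller's frame is.
static NOINLINE intptr_t callee_stack_mark() {
  char local = 0;                           // lives in the callee's own frame
  return reinterpret_cast<intptr_t>(&local);
}

int main() {
  char caller_local = 0;
  intptr_t gap = reinterpret_cast<intptr_t>(&caller_local) - callee_stack_mark();
  // On a downward-growing stack the gap is positive: the callee frame sits
  // strictly below the caller's locals.
  printf("caller-to-callee stack distance: %ld bytes\n", static_cast<long>(gap));
  return 0;
}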

src/share/vm/memory/allocation.cpp

@@ -242,7 +242,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

   // Allocate a new chunk from the pool (might expand the pool)
-  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
+  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     // No VM lock can be taken inside ThreadCritical lock, so os::malloc

src/share/vm/memory/allocation.hpp

@@ -41,18 +41,6 @@
 #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)

-// noinline attribute
-#ifdef _WINDOWS
-  #define _NOINLINE_ __declspec(noinline)
-#else
-  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
-    #define _NOINLINE_
-  #else
-    #define _NOINLINE_ __attribute__ ((noinline))
-  #endif
-#endif
-
 class AllocFailStrategy {
  public:
   enum AllocFailEnum { EXIT_OOM, RETURN_NULL };

@@ -178,17 +166,17 @@ class NativeCallStack;

 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
-  _NOINLINE_ void* operator new(size_t size) throw();
-  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
+  NOINLINE void* operator new(size_t size, const NativeCallStack& stack) throw();
+  NOINLINE void* operator new(size_t size) throw();
+  NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
                const NativeCallStack& stack) throw();
-  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
+  NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
                throw();
-  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
-  _NOINLINE_ void* operator new [](size_t size) throw();
-  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
+  NOINLINE void* operator new [](size_t size, const NativeCallStack& stack) throw();
+  NOINLINE void* operator new [](size_t size) throw();
+  NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                const NativeCallStack& stack) throw();
-  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
+  NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
                throw();
   void operator delete(void* p);
   void operator delete [] (void* p);

src/share/vm/oops/instanceKlass.inline.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -36,16 +36,9 @@
 // The iteration over the oops in objects is a hot path in the GC code.
 // By force inlining the following functions, we get similar GC performance
 // as the previous macro based implementation.
-#ifdef TARGET_COMPILER_visCPP
-#define INLINE __forceinline
-#elif defined(TARGET_COMPILER_sparcWorks)
-#define INLINE __attribute__((always_inline))
-#else
-#define INLINE inline
-#endif
-
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
   T* p = (T*)obj->obj_field_addr<T>(map->offset());
   T* const end = p + map->count();

@@ -56,7 +49,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, Oo
 #if INCLUDE_ALL_GCS
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
   T* const start = (T*)obj->obj_field_addr<T>(map->offset());
   T* p = start + map->count();

@@ -68,7 +61,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop
 #endif
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
   T* p = (T*)obj->obj_field_addr<T>(map->offset());
   T* end = p + map->count();

@@ -91,7 +84,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop
 }
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
   OopMapBlock* map = start_of_nonstatic_oop_maps();
   OopMapBlock* const end_map = map + nonstatic_oop_map_count();

@@ -102,7 +95,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClos
 #if INCLUDE_ALL_GCS
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
   OopMapBlock* map = start_map + nonstatic_oop_map_count();

@@ -114,7 +107,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj,
 #endif
 template <bool nv, typename T, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   OopMapBlock* map = start_of_nonstatic_oop_maps();
   OopMapBlock* const end_map = map + nonstatic_oop_map_count();

@@ -124,7 +117,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj,
 }
 template <bool nv, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
   if (UseCompressedOops) {
     oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
   } else {

@@ -134,7 +127,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* clo
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
   if (UseCompressedOops) {
     oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
   } else {

@@ -144,7 +137,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureT
 #endif
 template <bool nv, class OopClosureType>
-INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   if (UseCompressedOops) {
     oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
   } else {

@@ -153,7 +146,7 @@ INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureT
 }
 template <bool nv, class OopClosureType>
-INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   if (Devirtualizer<nv>::do_metadata(closure)) {
     Devirtualizer<nv>::do_klass(closure, this);
   }

@@ -165,7 +158,7 @@ INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
 #if INCLUDE_ALL_GCS
 template <bool nv, class OopClosureType>
-INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
   assert(!Devirtualizer<nv>::do_metadata(closure),
          "Code to handle metadata is not implemented");

@@ -176,7 +169,7 @@ INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closu
 #endif
 template <bool nv, class OopClosureType>
-INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
   if (Devirtualizer<nv>::do_metadata(closure)) {
     if (mr.contains(obj)) {
       Devirtualizer<nv>::do_klass(closure, this);

@@ -188,8 +181,6 @@ INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closu
   return size_helper();
 }

-#undef INLINE
-
 #define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
   OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
   OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
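
Context for the ALWAYSINLINE switch above: these iterators are templates
instantiated per closure type, and forcing them inline lets the closure's
callback be devirtualized and folded into the GC loop, which is what the
"similar GC performance as the previous macro based implementation" comment
refers to. A self-contained sketch of the pattern (hypothetical names and the
gcc spelling assumed; not the HotSpot sources):

#include <cstdio>

// One shared macro rather than a per-file INLINE definition.
#define ALWAYSINLINE inline __attribute__ ((always_inline))

struct CountClosure {
  int count;
  CountClosure() : count(0) {}
  void do_field(int* /*p*/) { ++count; }   // concrete type: calls devirtualize
};

// Forcing the template inline specializes the loop body for the concrete
// closure type and removes the per-field call overhead.
template <class ClosureType>
ALWAYSINLINE void iterate_fields(int* begin, int* end, ClosureType* cl) {
  for (int* p = begin; p != end; ++p) {
    cl->do_field(p);
  }
}

int main() {
  int fields[8] = {0};
  CountClosure cl;
  iterate_fields(fields, fields + 8, &cl);
  printf("visited %d fields\n", cl.count);
  return 0;
}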

src/share/vm/runtime/objectMonitor.cpp

@@ -44,14 +44,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"

-#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
-// Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define NOINLINE __attribute__((noinline))
-#else
-  #define NOINLINE
-#endif
-
 #ifdef DTRACE_ENABLED

 // Only bother with this argument setup if dtrace is available

@@ -254,7 +246,7 @@ static volatile int InitDone = 0;
 // -----------------------------------------------------------------------------
 // Enter support

-void NOINLINE ObjectMonitor::enter(TRAPS) {
+void ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;

@@ -431,7 +423,7 @@ int ObjectMonitor::TryLock(Thread * Self) {
 #define MAX_RECHECK_INTERVAL 1000

-void NOINLINE ObjectMonitor::EnterI(TRAPS) {
+void ObjectMonitor::EnterI(TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");

@@ -681,7 +673,7 @@ void NOINLINE ObjectMonitor::EnterI(TRAPS) {
 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 // loop accordingly.

-void NOINLINE ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
+void ObjectMonitor::ReenterI(Thread * Self, ObjectWaiter * SelfNode) {
   assert(Self != NULL, "invariant");
   assert(SelfNode != NULL, "invariant");
   assert(SelfNode->_thread == Self, "invariant");

@@ -894,7 +886,7 @@ void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
 // structured the code so the windows are short and the frequency
 // of such futile wakups is low.

-void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
+void ObjectMonitor::exit(bool not_suspended, TRAPS) {
   Thread * const Self = THREAD;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {

src/share/vm/runtime/synchronizer.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -48,13 +48,6 @@
 #include "utilities/events.hpp"
 #include "utilities/preserveException.hpp"

-#if defined(__GNUC__) && !defined(PPC64)
-// Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define NOINLINE __attribute__((noinline))
-#else
-  #define NOINLINE
-#endif
-
 // The "core" versions of monitor enter and exit reside in this file.
 // The interpreter and compilers contain specialized transliterated
 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),

@@ -1038,7 +1031,7 @@ void ObjectSynchronizer::verifyInUse(Thread *Self) {
   assert(free_tally == Self->omFreeCount, "free count off");
 }

-ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
+ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
   // A large MAXPRIVATE value reduces both list lock contention
   // and list coherency traffic, but also tends to increase the
   // number of objectMonitors in circulation as well as the STW

@@ -1313,7 +1306,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
                 inflate_cause_vm_internal);
 }

-ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
+ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                            oop object,
                                            const InflateCause cause) {

src/share/vm/utilities/globalDefinitions.hpp

@@ -42,6 +42,12 @@
 # include "utilities/globalDefinitions_xlc.hpp"
 #endif

+#ifndef NOINLINE
+#define NOINLINE
+#endif
+#ifndef ALWAYSINLINE
+#define ALWAYSINLINE inline
+#endif
+
 #ifndef PRAGMA_DIAG_PUSH
 #define PRAGMA_DIAG_PUSH
 #endif
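
This hunk is the pivot of the whole cleanup: each compiler-specific header
included above (gcc, sparcWorks, visCPP, xlc) defines the macros it can
support, and globalDefinitions.hpp backfills neutral defaults, so shared code
may use NOINLINE and ALWAYSINLINE unconditionally. A condensed, self-contained
sketch of that layering (the per-compiler part inlined here for illustration;
hypothetical function names):

#include <cstdio>

// --- normally supplied by the per-compiler header, e.g. the gcc one ---
#if defined(__GNUC__)
  #define NOINLINE     __attribute__ ((noinline))
  #define ALWAYSINLINE inline __attribute__ ((always_inline))
#endif

// --- shared header: fallbacks for compilers that defined nothing ---
#ifndef NOINLINE
  #define NOINLINE                // "never inline" degrades to a no-op
#endif
#ifndef ALWAYSINLINE
  #define ALWAYSINLINE inline     // "always inline" degrades to a hint
#endif

static NOINLINE void slow_path_report(int n) { printf("slow path: %d\n", n); }
static ALWAYSINLINE int fast_path_add(int a, int b) { return a + b; }

int main() {
  int sum = fast_path_add(20, 22);
  if (sum != 42) slow_path_report(sum);   // rare branch stays out of line
  printf("%d\n", sum);
  return 0;
}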

src/share/vm/utilities/globalDefinitions_gcc.hpp

@@ -322,4 +322,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
 #define THREAD_LOCAL_DECL __thread
 #endif

+// Inlining support
+#define NOINLINE     __attribute__ ((noinline))
+#define ALWAYSINLINE __attribute__ ((always_inline))
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_GCC_HPP

src/share/vm/utilities/globalDefinitions_sparcWorks.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -277,4 +277,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
 #define THREAD_LOCAL_DECL __thread
 #endif

+// Inlining support
+#define NOINLINE
+#define ALWAYSINLINE __attribute__((always_inline))
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP

src/share/vm/utilities/globalDefinitions_visCPP.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -240,4 +240,11 @@ inline int vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
 #define THREAD_LOCAL_DECL __declspec( thread )
 #endif

+// Inlining support
+// MSVC has '__declspec(noinline)' but according to the official documentation
+// it only applies to member functions. There are reports though which claim
+// that it also works for freestanding functions.
+#define NOINLINE     __declspec(noinline)
+#define ALWAYSINLINE __forceinline
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP
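
The hedge in the new visCPP comment is worth spelling out: Microsoft's
documentation describes __declspec(noinline) for member functions, while
__forceinline is the documented way to force inlining. A tiny cl.exe sketch
(hypothetical code, not part of this change) showing both spellings applied
to freestanding functions, the case the comment is cautious about:

#include <cstdio>

#define NOINLINE     __declspec(noinline)
#define ALWAYSINLINE __forceinline

// Freestanding, non-member functions: __forceinline is fully documented
// here, while __declspec(noinline) is the case the comment above hedges on.
NOINLINE static int slow_mul(int a, int b) { return a * b; }
ALWAYSINLINE static int fast_mul(int a, int b) { return a * b; }

int main() {
  printf("%d %d\n", slow_mul(6, 7), fast_mul(6, 7));
  return 0;
}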

src/share/vm/utilities/globalDefinitions_xlc.hpp

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -184,4 +184,8 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
 #define THREAD_LOCAL_DECL __thread
 #endif

+// Inlining support
+#define NOINLINE
+#define ALWAYSINLINE __attribute__((always_inline))
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP

src/share/vm/utilities/stack.inline.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -27,17 +27,6 @@
 #include "utilities/stack.hpp"

-// Stack is used by the GC code and in some hot paths a lot of the Stack
-// code gets inlined. This is generally good, but when too much code has
-// been inlined, no further inlining is allowed by GCC. Therefore we need
-// to prevent parts of the slow path in Stack to be inlined to allow other
-// code to be.
-#if defined(TARGET_COMPILER_gcc)
-  #define NOINLINE __attribute__((noinline))
-#else
-  #define NOINLINE
-#endif
-
 template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                                               size_t max_size):
   _seg_size(segment_size),

@@ -151,6 +140,11 @@ void Stack<E, F>::free(E* addr, size_t bytes)
   FREE_C_HEAP_ARRAY(char, (char*) addr);
 }

+// Stack is used by the GC code and in some hot paths a lot of the Stack
+// code gets inlined. This is generally good, but when too much code has
+// been inlined, no further inlining is allowed by GCC. Therefore we need
+// to prevent parts of the slow path in Stack to be inlined to allow other
+// code to be.
 template <class E, MEMFLAGS F>
 NOINLINE void Stack<E, F>::push_segment()
 {

@@ -280,6 +274,4 @@ E* StackIterator<E, F>::next_addr()
   return _cur_seg + --_cur_seg_size;
 }

-#undef NOINLINE
-
 #endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
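
The moved comment describes a classic fast-path/slow-path split: GCC stops
inlining once a caller's inlining budget is spent, so the rarely taken
segment-allocation path is forced out of line to keep the frequent push path
tiny and inlinable. A minimal sketch of that shape (hypothetical SegStack,
not the HotSpot Stack; gcc spelling, error handling and deallocation elided):

#include <cstdio>
#include <cstdlib>

#define NOINLINE __attribute__ ((noinline))

struct Segment {
  int data[64];
  Segment* prev;
};

class SegStack {
 public:
  SegStack() : _seg(NULL), _top(64) {}      // first push takes the slow path
  void push(int v) {
    if (_top == 64) push_segment();         // rare: kept out of line
    _seg->data[_top++] = v;                 // common: tiny, inlinable
  }
 private:
  // Never inlined into push(), so push() stays cheap to inline into GC code.
  NOINLINE void push_segment() {
    Segment* s = static_cast<Segment*>(malloc(sizeof(Segment)));
    s->prev = _seg;                         // keep segments chained
    _seg = s;
    _top = 0;
  }
  Segment* _seg;
  int _top;
};

int main() {
  SegStack s;
  for (int i = 0; i < 200; i++) s.push(i);  // crosses several segment boundaries
  puts("ok");
  return 0;
}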