commit a0861106e3
Christian Thalinger 2016-01-11 17:11:57 -08:00
1535 changed files with 3601 additions and 18634 deletions

View file

@@ -399,6 +399,16 @@ static AliasedFlag const aliased_jvm_flags[] = {
{ NULL, NULL}
};
static AliasedFlag const aliased_jvm_logging_flags[] = {
{ "-XX:+TraceClassResolution", "-Xlog:classresolve=info"},
{ "-XX:-TraceClassResolution", "-Xlog:classresolve=off"},
{ "-XX:+TraceExceptions", "-Xlog:exceptions=info" },
{ "-XX:-TraceExceptions", "-Xlog:exceptions=off" },
{ "-XX:+TraceMonitorInflation", "-Xlog:monitorinflation=debug" },
{ "-XX:-TraceMonitorInflation", "-Xlog:monitorinflation=off" },
{ NULL, NULL }
};
// Return true if "v" is less than "other", where "other" may be "undefined".
static bool version_less_than(JDK_Version v, JDK_Version other) {
assert(!v.is_undefined(), "must be defined");
@@ -929,6 +939,20 @@ const char* Arguments::handle_aliases_and_deprecation(const char* arg, bool warn
return NULL;
}
// lookup_logging_aliases
// Called from parse_each_vm_init_arg(). Should be called on -XX options before specific cases are checked.
// If arg matches any aliased_jvm_logging_flags entry, look up the real name and copy it into buffer.
bool Arguments::lookup_logging_aliases(const char* arg, char* buffer) {
for (size_t i = 0; aliased_jvm_logging_flags[i].alias_name != NULL; i++) {
const AliasedFlag& flag_status = aliased_jvm_logging_flags[i];
if (strcmp(flag_status.alias_name, arg) == 0) {
strcpy(buffer, flag_status.real_name);
return true;
}
}
return false;
}
bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
// range of acceptable characters spelled out for portability reasons
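The lookup added here is simple enough to exercise outside the VM. A minimal standalone sketch (HotSpot types stubbed out; the lookup logic matches lookup_logging_aliases above, the rest is scaffolding):

    #include <cstdio>
    #include <cstring>

    // Stub of HotSpot's AliasedFlag pair: legacy -XX flag -> -Xlog form.
    struct AliasedFlag { const char* alias_name; const char* real_name; };

    static AliasedFlag const aliased_jvm_logging_flags[] = {
      { "-XX:+TraceExceptions", "-Xlog:exceptions=info" },
      { "-XX:-TraceExceptions", "-Xlog:exceptions=off"  },
      { NULL, NULL }
    };

    // Same shape as Arguments::lookup_logging_aliases: linear scan, copy on hit.
    static bool lookup_logging_aliases(const char* arg, char* buffer) {
      for (size_t i = 0; aliased_jvm_logging_flags[i].alias_name != NULL; i++) {
        if (strcmp(aliased_jvm_logging_flags[i].alias_name, arg) == 0) {
          strcpy(buffer, aliased_jvm_logging_flags[i].real_name);
          return true;
        }
      }
      return false;
    }

    int main() {
      char buffer[256];
      if (lookup_logging_aliases("-XX:+TraceExceptions", buffer)) {
        printf("%s\n", buffer);  // prints -Xlog:exceptions=info
      }
      return 0;
    }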
@@ -2605,7 +2629,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
for (int index = 0; index < args->nOptions; index++) {
bool is_absolute_path = false; // for -agentpath vs -agentlib
const JavaVMOption* option = args->options + index;
JavaVMOption* option = args->options + index;
if (!match_option(option, "-Djava.class.path", &tail) &&
!match_option(option, "-Dsun.java.command", &tail) &&
@@ -2619,6 +2643,16 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
build_jvm_args(option->optionString);
}
// char buffer to store looked up logging option.
char aliased_logging_option[256];
// Catch -XX options which are aliased to Unified logging commands.
if (match_option(option, "-XX:", &tail)) {
if (lookup_logging_aliases(option->optionString, aliased_logging_option)) {
option->optionString = aliased_logging_option;
}
}
// -verbose:[class/gc/jni]
if (match_option(option, "-verbose", &tail)) {
if (!strcmp(tail, ":class") || !strcmp(tail, "")) {
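At the call site the rewrite happens in place: an option such as -XX:+TraceClassResolution is swapped for -Xlog:classresolve=info before the rest of parsing runs, so the legacy and Unified Logging spellings take the same path from here on. The 256-byte stack buffer easily holds the longest expansion in the table; an illustrative guard (C++11, not part of the patch) would be:

    // "-Xlog:monitorinflation=debug" is the longest real_name in the table
    // (28 characters plus terminator), far below the 256-byte buffer.
    static_assert(sizeof("-Xlog:monitorinflation=debug") <= 256,
                  "aliased_logging_option must fit the longest expansion");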
@@ -4161,7 +4195,7 @@ jint Arguments::apply_ergo() {
UseBiasedLocking = false;
}
#ifdef ZERO
#ifdef CC_INTERP
// Clear flags not supported on zero.
FLAG_SET_DEFAULT(ProfileInterpreter, false);
FLAG_SET_DEFAULT(UseBiasedLocking, false);

View file

@@ -445,6 +445,7 @@ class Arguments : AllStatic {
// Return the "real" name for option arg if arg is an alias, and print a warning if arg is deprecated.
// Return NULL if the arg has expired.
static const char* handle_aliases_and_deprecation(const char* arg, bool warn);
static bool lookup_logging_aliases(const char* arg, char* buffer);
static short CompileOnlyClassesNum;
static short CompileOnlyClassesMax;

View file

@@ -563,15 +563,6 @@ Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
}
Flag::Error NUMAInterleaveGranularityConstraintFunc(size_t value, bool verbose) {
if (UseNUMA && UseNUMAInterleaving) {
size_t min_interleave_granularity = UseLargePages ? os::large_page_size() : os::vm_allocation_granularity();
return MaxSizeForAlignment("NUMAInterleaveGranularity", value, min_interleave_granularity, verbose);
} else {
return Flag::SUCCESS;
}
}
Flag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
#ifdef _LP64
#if INCLUDE_ALL_GCS

View file

@@ -68,7 +68,6 @@ Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool
Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
Flag::Error NUMAInterleaveGranularityConstraintFunc(size_t value, bool verbose);
Flag::Error NewSizeConstraintFunc(size_t value, bool verbose);
Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose);

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,13 +50,6 @@ inline bool frame::is_first_frame() const {
return is_entry_frame() && entry_frame_is_first();
}
#ifdef CC_INTERP
inline oop* frame::interpreter_frame_temp_oop_addr() const {
interpreterState istate = get_interpreterState();
return (oop *)&istate->_oop_temp;
}
#endif // CC_INTERP
// here are the platform-dependent bodies:
#ifdef TARGET_ARCH_x86

View file

@@ -688,8 +688,7 @@ public:
\
product(size_t, NUMAInterleaveGranularity, 2*M, \
"Granularity to use for NUMA interleaving on Windows OS") \
range(os::vm_allocation_granularity(), max_uintx) \
constraint(NUMAInterleaveGranularityConstraintFunc,AfterErgo) \
range(os::vm_allocation_granularity(), NOT_LP64(2*G) LP64_ONLY(8192*G)) \
\
product(bool, ForceNUMA, false, \
"Force NUMA optimizations on single-node/UMA systems") \
@@ -1455,9 +1454,6 @@ public:
develop(bool, TraceBytecodes, false, \
"Trace bytecode execution") \
\
product(bool, TraceExceptions, false, \
"Trace exceptions") \
\
develop(bool, TraceICs, false, \
"Trace inline cache changes") \
\
@@ -1506,15 +1502,9 @@ public:
develop(bool, TraceClearedExceptions, false, \
"Print when an exception is forcibly cleared") \
\
product(bool, TraceClassResolution, false, \
"Trace all constant pool resolutions (for debugging)") \
\
product(bool, TraceBiasedLocking, false, \
"Trace biased locking in JVM") \
\
product(bool, TraceMonitorInflation, false, \
"Trace monitor inflation in JVM") \
\
/* gc */ \
\
product(bool, UseSerialGC, false, \
@@ -3437,15 +3427,18 @@ public:
\
/* stack parameters */ \
product_pd(intx, StackYellowPages, \
"Number of yellow zone (recoverable overflows) pages") \
"Number of yellow zone (recoverable overflows) pages of size " \
"4KB. If pages are bigger yellow zone is aligned up.") \
range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5)) \
\
product_pd(intx, StackRedPages, \
"Number of red zone (unrecoverable overflows) pages") \
"Number of red zone (unrecoverable overflows) pages of size " \
"4KB. If pages are bigger red zone is aligned up.") \
range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2)) \
\
product_pd(intx, StackReservedPages, \
"Number of reserved zone (reserved to annotated methods) pages") \
"Number of reserved zone (reserved to annotated methods) pages" \
" of size 4KB. If pages are bigger reserved zone is aligned up.") \
range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\
\
product(bool, RestrictReservedStack, true, \
@@ -3453,13 +3446,14 @@ public:
\
/* greater stack shadow pages can't generate instruction to bang stack */ \
product_pd(intx, StackShadowPages, \
"Number of shadow zone (for overflow checking) pages " \
"this should exceed the depth of the VM and native call stack") \
"Number of shadow zone (for overflow checking) pages of size " \
"4KB. If pages are bigger shadow zone is aligned up. " \
"This should exceed the depth of the VM and native call stack.") \
range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30)) \
\
product_pd(intx, ThreadStackSize, \
"Thread Stack Size (in Kbytes)") \
range(0, max_intx-os::vm_page_size()) \
range(0, (max_intx-os::vm_page_size())/(1 * K)) \
\
product_pd(intx, VMThreadStackSize, \
"Non-Java Thread Stack Size (in Kbytes)") \
@@ -3467,7 +3461,7 @@ public:
\
product_pd(intx, CompilerThreadStackSize, \
"Compiler Thread Stack Size (in Kbytes)") \
range(0, max_intx /(1 * K)) \
range(0, max_intx/(1 * K)) \
\
develop_pd(size_t, JVMInvokeMethodSlack, \
"Stack space (bytes) required for JVM_InvokeMethod to complete") \

View file

@@ -46,9 +46,58 @@ Handle::Handle(Thread* thread, oop obj) {
_handle = thread->handle_area()->allocate_handle(obj);
}
}
#endif
// Copy constructors and destructors for metadata handles
// These do too much to inline.
#define DEF_METADATA_HANDLE_FN_NOINLINE(name, type) \
name##Handle::name##Handle(const name##Handle &h) { \
_value = h._value; \
if (_value != NULL) { \
assert(_value->is_valid(), "obj is valid"); \
if (h._thread != NULL) { \
assert(h._thread == Thread::current(), "thread must be current");\
_thread = h._thread; \
} else { \
_thread = Thread::current(); \
} \
assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
} \
} \
name##Handle& name##Handle::operator=(const name##Handle &s) { \
remove(); \
_value = s._value; \
if (_value != NULL) { \
assert(_value->is_valid(), "obj is valid"); \
if (s._thread != NULL) { \
assert(s._thread == Thread::current(), "thread must be current");\
_thread = s._thread; \
} else { \
_thread = Thread::current(); \
} \
assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
} \
return *this; \
} \
inline void name##Handle::remove() { \
if (_value != NULL) { \
int i = _thread->metadata_handles()->find_from_end((Metadata*)_value); \
assert(i!=-1, "not in metadata_handles list"); \
_thread->metadata_handles()->remove_at(i); \
} \
} \
name##Handle::~name##Handle () { remove(); } \
DEF_METADATA_HANDLE_FN_NOINLINE(method, Method)
DEF_METADATA_HANDLE_FN_NOINLINE(constantPool, ConstantPool)
static uintx chunk_oops_do(OopClosure* f, Chunk* chunk, char* chunk_top) {
oop* bottom = (oop*) chunk->bottom();
oop* top = (oop*) chunk_top;
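The definitions above are stamped out once per handle type; for DEF_METADATA_HANDLE_FN_NOINLINE(method, Method) the ## token pasting produces out-of-line bodies of this shape (abridged sketch, comments mine):

    // Roughly what the macro instantiation yields for methodHandle:
    methodHandle::methodHandle(const methodHandle &h)            { /* copy value, push on the thread's metadata_handles */ }
    methodHandle& methodHandle::operator=(const methodHandle &s) { /* remove() old entry, copy, push; return *this */ }
    inline void methodHandle::remove()                           { /* find_from_end + remove_at on metadata_handles */ }
    methodHandle::~methodHandle()                                { /* remove() */ }

Moving these here from handles.inline.hpp (see the matching deletions further down) trades a call for smaller generated code; as the comment says, they do too much to inline.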

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,48 +69,6 @@ inline name##Handle::name##Handle(Thread* thread, type* obj) : _value(obj), _thr
_thread->metadata_handles()->push((Metadata*)obj); \
} \
} \
inline name##Handle::name##Handle(const name##Handle &h) { \
_value = h._value; \
if (_value != NULL) { \
assert(_value->is_valid(), "obj is valid"); \
if (h._thread != NULL) { \
assert(h._thread == Thread::current(), "thread must be current");\
_thread = h._thread; \
} else { \
_thread = Thread::current(); \
} \
assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
} \
} \
inline name##Handle& name##Handle::operator=(const name##Handle &s) { \
remove(); \
_value = s._value; \
if (_value != NULL) { \
assert(_value->is_valid(), "obj is valid"); \
if (s._thread != NULL) { \
assert(s._thread == Thread::current(), "thread must be current");\
_thread = s._thread; \
} else { \
_thread = Thread::current(); \
} \
assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
} \
return *this; \
} \
inline void name##Handle::remove() { \
if (_value != NULL) { \
int i = _thread->metadata_handles()->find_from_end((Metadata*)_value); \
assert(i!=-1, "not in metadata_handles list"); \
_thread->metadata_handles()->remove_at(i); \
} \
} \
inline name##Handle::~name##Handle () { remove(); } \
DEF_METADATA_HANDLE_FN(method, Method)
DEF_METADATA_HANDLE_FN(constantPool, ConstantPool)

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@ friend class Runtime1;
friend class StubAssembler;
friend class CallRuntimeDirectNode;
friend class MacroAssembler;
friend class InterpreterGenerator;
friend class LIR_Assembler;
friend class GraphKit;
friend class StubGenerator;

View file

@@ -127,7 +127,6 @@ Monitor* GCTaskManager_lock = NULL;
Mutex* Management_lock = NULL;
Monitor* Service_lock = NULL;
Monitor* PeriodicTask_lock = NULL;
Mutex* LogConfiguration_lock = NULL;
#ifdef INCLUDE_TRACE
Mutex* JfrStacktrace_lock = NULL;
@@ -284,7 +283,6 @@ void mutex_init() {
if (WhiteBoxAPI) {
def(Compilation_lock , Monitor, leaf, false, Monitor::_safepoint_check_never);
}
def(LogConfiguration_lock , Mutex, nonleaf, false, Monitor::_safepoint_check_always);
#ifdef INCLUDE_TRACE
def(JfrMsg_lock , Monitor, leaf, true, Monitor::_safepoint_check_always);

View file

@@ -127,7 +127,6 @@ extern Mutex* MMUTracker_lock; // protects the MMU
extern Mutex* Management_lock; // a lock used to serialize JVM management
extern Monitor* Service_lock; // a lock used for service thread operation
extern Monitor* PeriodicTask_lock; // protects the periodic task structure
extern Mutex* LogConfiguration_lock; // protects configuration of logging
#ifdef INCLUDE_TRACE
extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table

View file

@@ -316,8 +316,16 @@ void os::init_before_ergo() {
// decisions depending on large page support and the calculated large page size.
large_page_init();
// We need to adapt the configured number of stack protection pages given
// in 4K pages to the actual os page size. We must do this before setting
// up minimal stack sizes etc. in os::init_2().
JavaThread::set_stack_red_zone_size (align_size_up(StackRedPages * 4 * K, vm_page_size()));
JavaThread::set_stack_yellow_zone_size (align_size_up(StackYellowPages * 4 * K, vm_page_size()));
JavaThread::set_stack_reserved_zone_size(align_size_up(StackReservedPages * 4 * K, vm_page_size()));
JavaThread::set_stack_shadow_zone_size (align_size_up(StackShadowPages * 4 * K, vm_page_size()));
// VM version initialization identifies some characteristics of the
// the platform that are used during ergonomic decisions.
// platform that are used during ergonomic decisions.
VM_Version::init_before_ergo();
}
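os::init_before_ergo() now interprets the Stack*Pages flags as counts of 4K pages and rounds each zone size up to the real OS page size before anything derives stack limits from them. On a machine with 64K pages and, say, StackYellowPages = 2 (defaults are platform-specific), the yellow zone becomes one full 64K page rather than 8K. The rounding is the usual power-of-two trick:

    #include <cstddef>
    #include <cstdio>
    // Same rounding as HotSpot's align_size_up (power-of-two alignment).
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }
    int main() {
      printf("%zu\n", align_size_up(2 * 4096, 65536));  // 65536: 8K rounds up to one 64K page
      return 0;
    }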
@@ -1015,8 +1023,7 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
}
// If the addr is in the stack region for this thread then report that
// and print thread info
if (thread->stack_base() >= addr &&
addr > (thread->stack_base() - thread->stack_size())) {
if (thread->on_local_stack(addr)) {
st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
INTPTR_FORMAT, p2i(addr), p2i(thread));
if (verbose) thread->print_on(st);
@@ -1375,9 +1382,8 @@ void os::serialize_thread_states() {
// Returns true if the current stack pointer is above the stack shadow
// pages, false otherwise.
bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method) {
assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
if (!thread->is_Java_thread()) return false;
address sp = current_stack_pointer();
// Check if we have StackShadowPages above the yellow zone. This parameter
// is dependent on the depth of the maximum VM call stack possible from
@@ -1386,12 +1392,13 @@ bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method
// respectively.
const int framesize_in_bytes =
Interpreter::size_top_interpreter_activation(method()) * wordSize;
int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages
+ StackReservedPages) * vm_page_size())
+ framesize_in_bytes;
// The very lower end of the stack
address stack_limit = thread->stack_base() - thread->stack_size();
return (sp > (stack_limit + reserved_area));
assert((thread->stack_base() - thread->stack_size()) +
(JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size() + framesize_in_bytes) ==
((JavaThread*)thread)->stack_overflow_limit() + framesize_in_bytes, "sanity");
return (sp > ((JavaThread*)thread)->stack_overflow_limit() + framesize_in_bytes);
}
size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
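The rewritten check leans on the identity the new assert spells out: stack_overflow_limit() sits exactly stack_guard_zone_size() + stack_shadow_zone_size() above stack_end(), so comparing sp against stack_overflow_limit() + framesize_in_bytes matches the old page arithmetic while using the cached, page-aligned zone sizes. With hypothetical numbers:

    #include <cstdio>
    int main() {
      const unsigned page = 4096;   // hypothetical page size
      unsigned guard  = 4 * page;   // red + yellow + reserved, page-aligned
      unsigned shadow = 20 * page;  // shadow zone (illustrative page count)
      // stack_overflow_limit() - stack_end():
      printf("limit is %u bytes above stack_end\n", guard + shadow);  // 98304
      return 0;
    }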

View file

@@ -607,7 +607,7 @@ class os: AllStatic {
static void print_environment_variables(outputStream* st, const char** env_list);
static void print_context(outputStream* st, const void* context);
static void print_register_info(outputStream* st, const void* context);
static void print_siginfo(outputStream* st, void* siginfo);
static void print_siginfo(outputStream* st, const void* siginfo);
static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
static void print_date_and_time(outputStream* st, char* buf, size_t buflen);

View file

@@ -74,9 +74,9 @@ static void trace_class_resolution(const Klass* to_class) {
const char * to = to_class->external_name();
// print in a single call to reduce interleaving between threads
if (source_file != NULL) {
tty->print("RESOLVE %s %s %s:%d (reflection)\n", from, to, source_file, line_number);
log_info(classresolve)("%s %s %s:%d (reflection)", from, to, source_file, line_number);
} else {
tty->print("RESOLVE %s %s (reflection)\n", from, to);
log_info(classresolve)("%s %s (reflection)", from, to);
}
}
}
@@ -599,7 +599,7 @@ static oop get_mirror_from_signature(methodHandle method,
Handle(THREAD, protection_domain),
true,
CHECK_NULL);
if (TraceClassResolution) {
if (log_is_enabled(Info, classresolve)) {
trace_class_resolution(k);
}
return k->java_mirror();
@@ -654,7 +654,7 @@ static Handle new_type(Symbol* signature, KlassHandle k, TRAPS) {
Handle(THREAD, k->protection_domain()),
true, CHECK_(Handle()));
if (TraceClassResolution) {
if (log_is_enabled(Info, classresolve)) {
trace_class_resolution(result);
}
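This is the Unified Logging idiom that replaces the old Trace* globals throughout the commit: log_is_enabled(Info, classresolve) stands in for the TraceClassResolution test, and log_info(classresolve)(...) for tty->print, so -Xlog:classresolve=info (or the aliased -XX:+TraceClassResolution) enables the output. Guarding before building the message keeps ResourceMark and name formatting off the disabled path:

    if (log_is_enabled(Info, classresolve)) {  // cheap level/tag check
      ResourceMark rm;                         // formatting cost only when enabled
      trace_class_resolution(k);               // emits via log_info(classresolve)(...)
    }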

View file

@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
@@ -1414,12 +1415,12 @@ ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
// to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
TEVENT(Inflate: overwrite stacklock);
if (TraceMonitorInflation) {
if (log_is_enabled(Debug, monitorinflation)) {
if (object->is_instance()) {
ResourceMark rm;
tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
p2i(object), p2i(object->mark()),
object->klass()->external_name());
log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
p2i(object), p2i(object->mark()),
object->klass()->external_name());
}
}
return m;
@@ -1462,12 +1463,12 @@ ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
// cache lines to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
TEVENT(Inflate: overwrite neutral);
if (TraceMonitorInflation) {
if (log_is_enabled(Debug, monitorinflation)) {
if (object->is_instance()) {
ResourceMark rm;
tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
p2i(object), p2i(object->mark()),
object->klass()->external_name());
log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
p2i(object), p2i(object->mark()),
object->klass()->external_name());
}
}
return m;
@@ -1526,11 +1527,13 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
// It's idle - scavenge and return to the global free list
// plain old deflation ...
TEVENT(deflate_idle_monitors - scavenge1);
if (TraceMonitorInflation) {
if (log_is_enabled(Debug, monitorinflation)) {
if (obj->is_instance()) {
ResourceMark rm;
tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
p2i(obj), p2i(obj->mark()), obj->klass()->external_name());
log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
"mark " INTPTR_FORMAT " , type %s",
p2i(obj), p2i(obj->mark()),
obj->klass()->external_name());
}
}

View file

@@ -38,6 +38,7 @@
#include "interpreter/linkResolver.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
@@ -305,10 +306,6 @@ void Thread::clear_thread_current() {
void Thread::record_stack_base_and_size() {
set_stack_base(os::current_stack_base());
set_stack_size(os::current_stack_size());
if (is_Java_thread()) {
((JavaThread*) this)->set_stack_overflow_limit();
((JavaThread*) this)->set_reserved_stack_activation(stack_base());
}
// CR 7190089: on Solaris, primordial thread's stack is adjusted
// in initialize_thread(). Without the adjustment, stack size is
// incorrect if stack is set to unlimited (ulimit -s unlimited).
@@ -317,10 +314,14 @@ void Thread::record_stack_base_and_size() {
// set up any platform-specific state.
os::initialize_thread(this);
// Set stack limits after thread is initialized.
if (is_Java_thread()) {
((JavaThread*) this)->set_stack_overflow_limit();
((JavaThread*) this)->set_reserved_stack_activation(stack_base());
}
#if INCLUDE_NMT
// record thread's native stack, stack grows downward
address stack_low_addr = stack_base() - stack_size();
MemTracker::record_thread_stack(stack_low_addr, stack_size());
MemTracker::record_thread_stack(stack_end(), stack_size());
#endif // INCLUDE_NMT
}
@@ -337,8 +338,7 @@ Thread::~Thread() {
// not proper way to enforce that.
#if INCLUDE_NMT
if (_stack_base != NULL) {
address low_stack_addr = stack_base() - stack_size();
MemTracker::release_thread_stack(low_stack_addr, stack_size());
MemTracker::release_thread_stack(stack_end(), stack_size());
#ifdef ASSERT
set_stack_base(NULL);
#endif
@@ -821,7 +821,7 @@ void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
else st->print("Thread");
st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
p2i(_stack_base - _stack_size), p2i(_stack_base));
p2i(stack_end()), p2i(stack_base()));
if (osthread()) {
st->print(" [id=%d]", osthread()->thread_id());
@@ -907,9 +907,8 @@ bool Thread::is_in_stack(address adr) const {
return false;
}
bool Thread::is_in_usable_stack(address adr) const {
size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
size_t stack_guard_size = os::uses_stack_guard_pages() ? JavaThread::stack_guard_zone_size() : 0;
size_t usable_stack_size = _stack_size - stack_guard_size;
return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
@@ -1534,7 +1533,7 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
}
bool JavaThread::reguard_stack(address cur_sp) {
if (_stack_guard_state != stack_guard_yellow_disabled
if (_stack_guard_state != stack_guard_yellow_reserved_disabled
&& _stack_guard_state != stack_guard_reserved_disabled) {
return true; // Stack already guarded or guard pages not needed.
}
@@ -1551,9 +1550,10 @@ bool JavaThread::reguard_stack(address cur_sp) {
// is executing there, either StackShadowPages should be larger, or
// some exception code in c1, c2 or the interpreter isn't unwinding
// when it should.
guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
if (_stack_guard_state == stack_guard_yellow_disabled) {
enable_stack_yellow_zone();
guarantee(cur_sp > stack_reserved_zone_base(),
"not enough space to reguard - increase StackShadowPages");
if (_stack_guard_state == stack_guard_yellow_reserved_disabled) {
enable_stack_yellow_reserved_zone();
if (reserved_stack_activation() != stack_base()) {
set_reserved_stack_activation(stack_base());
}
@@ -2064,10 +2064,7 @@ void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
frame caller_fr = last_frame().sender(&map);
assert(caller_fr.is_compiled_frame(), "what?");
if (caller_fr.is_deoptimized_frame()) {
if (TraceExceptions) {
ResourceMark rm;
tty->print_cr("deferred async exception at compiled safepoint");
}
log_info(exceptions)("deferred async exception at compiled safepoint");
return;
}
}
@@ -2093,14 +2090,15 @@ void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
// We cannot call Exceptions::_throw(...) here because we cannot block
set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
if (TraceExceptions) {
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
tty->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", p2i(this));
if (has_last_Java_frame()) {
frame f = last_frame();
tty->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", p2i(f.pc()), p2i(f.sp()));
}
tty->print_cr(" of type: %s", _pending_async_exception->klass()->external_name());
outputStream* logstream = LogHandle(exceptions)::info_stream();
logstream->print("Async. exception installed at runtime exit (" INTPTR_FORMAT ")", p2i(this));
if (has_last_Java_frame()) {
frame f = last_frame();
logstream->print(" (pc: " INTPTR_FORMAT " sp: " INTPTR_FORMAT " )", p2i(f.pc()), p2i(f.sp()));
}
logstream->print_cr(" of type: %s", _pending_async_exception->klass()->external_name());
}
_pending_async_exception = NULL;
clear_has_async_exception();
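For messages assembled from several print calls, the commit switches to the stream form of Unified Logging: LogHandle(exceptions)::info_stream() hands back an outputStream bound to the exceptions tag at info level, so the pieces above land as one decorated record instead of several independent log_info calls. The pattern, in sketch form:

    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm;
      outputStream* st = LogHandle(exceptions)::info_stream();
      st->print("first part");       // buffered onto the same log line
      st->print_cr(" second part");  // print_cr terminates the record
    }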
@@ -2216,9 +2214,10 @@ void JavaThread::send_thread_stop(oop java_throwable) {
// Set async. pending exception in thread.
set_pending_async_exception(java_throwable);
if (TraceExceptions) {
ResourceMark rm;
tty->print_cr("Pending Async. exception installed of type: %s", _pending_async_exception->klass()->external_name());
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
log_info(exceptions)("Pending Async. exception installed of type: %s",
InstanceKlass::cast(_pending_async_exception->klass())->external_name());
}
// for AbortVMOnException flag
Exceptions::debug_check_abort(_pending_async_exception->klass()->external_name());
@@ -2480,10 +2479,15 @@ void JavaThread::java_resume() {
}
}
size_t JavaThread::_stack_red_zone_size = 0;
size_t JavaThread::_stack_yellow_zone_size = 0;
size_t JavaThread::_stack_reserved_zone_size = 0;
size_t JavaThread::_stack_shadow_zone_size = 0;
void JavaThread::create_stack_guard_pages() {
if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
address low_addr = stack_base() - stack_size();
size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
if (!os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) { return; }
address low_addr = stack_end();
size_t len = stack_guard_zone_size();
int allocate = os::allocate_stack_guard_pages();
// warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
@@ -2506,8 +2510,8 @@ void JavaThread::remove_stack_guard_pages() {
void JavaThread::remove_stack_guard_pages() {
assert(Thread::current() == this, "from different thread");
if (_stack_guard_state == stack_guard_unused) return;
address low_addr = stack_base() - stack_size();
size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
address low_addr = stack_end();
size_t len = stack_guard_zone_size();
if (os::allocate_stack_guard_pages()) {
if (os::remove_stack_guard_pages((char *) low_addr, len)) {
@@ -2563,18 +2567,18 @@ void JavaThread::disable_stack_reserved_zone() {
disable_register_stack_guard();
}
void JavaThread::enable_stack_yellow_zone() {
void JavaThread::enable_stack_yellow_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_enabled, "already enabled");
// The base notation is from the stacks point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()
address base = stack_yellow_zone_base() - stack_yellow_zone_size();
address base = stack_red_zone_base();
guarantee(base < stack_base(), "Error calculating stack yellow zone");
guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");
if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
if (os::guard_memory((char *) base, stack_yellow_reserved_zone_size())) {
_stack_guard_state = stack_guard_enabled;
} else {
warning("Attempt to guard stack yellow zone failed.");
@@ -2582,19 +2586,19 @@ void JavaThread::enable_stack_yellow_zone() {
enable_register_stack_guard();
}
void JavaThread::disable_stack_yellow_zone() {
void JavaThread::disable_stack_yellow_reserved_zone() {
assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
assert(_stack_guard_state != stack_guard_yellow_disabled, "already disabled");
assert(_stack_guard_state != stack_guard_yellow_reserved_disabled, "already disabled");
// Simply return if called for a thread that does not use guard pages.
if (_stack_guard_state == stack_guard_unused) return;
// The base notation is from the stacks point of view, growing downward.
// We need to adjust it to work correctly with guard_memory()
address base = stack_yellow_zone_base() - stack_yellow_zone_size();
address base = stack_red_zone_base();
if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
_stack_guard_state = stack_guard_yellow_disabled;
if (os::unguard_memory((char *)base, stack_yellow_reserved_zone_size())) {
_stack_guard_state = stack_guard_yellow_reserved_disabled;
} else {
warning("Attempt to unguard stack yellow zone failed.");
}
@@ -2899,7 +2903,7 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
st->print(", id=%d", osthread()->thread_id());
}
st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
p2i(_stack_base - _stack_size), p2i(_stack_base));
p2i(stack_end()), p2i(stack_base()));
st->print("]");
return;
}

View file

@@ -550,15 +550,15 @@ protected:
public:
// Stack overflow support
address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
void set_stack_base(address base) { _stack_base = base; }
size_t stack_size() const { return _stack_size; }
void set_stack_size(size_t size) { _stack_size = size; }
address stack_end() const { return stack_base() - stack_size(); }
void record_stack_base_and_size();
bool on_local_stack(address adr) const {
// QQQ this has knowledge of direction, ought to be a stack method
return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
return (_stack_base >= adr && adr >= stack_end());
}
uintptr_t self_raw_id() { return _self_raw_id; }
@@ -912,7 +912,7 @@ class JavaThread: public Thread {
enum StackGuardState {
stack_guard_unused, // not needed
stack_guard_reserved_disabled,
stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
stack_guard_yellow_reserved_disabled,// disabled (temporarily) after stack overflow
stack_guard_enabled // enabled
};
@@ -1346,32 +1346,138 @@ class JavaThread: public Thread {
}
// Stack overflow support
//
// (small addresses)
//
//  --  <-- stack_end()                   ---
//  |                                      |
//  |  red pages                           |
//  |                                      |
//  --  <-- stack_red_zone_base()          |
//  |                                      |
//  |                                     guard
//  |  yellow pages                       zone
//  |                                      |
//  |                                      |
//  --  <-- stack_yellow_zone_base()       |
//  |                                      |
//  |                                      |
//  |  reserved pages                      |
//  |                                      |
//  --  <-- stack_reserved_zone_base()    ---      ---
//                                                 /|\  shadow
//                                                  |   zone
//                                                 \|/  size
//  some untouched memory                          ---      <--  stack_overflow_limit()
//
//
//  --
//  |
//  |  shadow zone
//  |
//  --
//  x    frame n
//  --
//  x    frame n-1
//  x
//  --
//  ...
//
//  --
//  x    frame 0
//  --  <-- stack_base()
//
//  (large addresses)
//
private:
// These values are derived from flags StackRedPages, StackYellowPages,
// StackReservedPages and StackShadowPages. The zone size is determined
// ergonomically if page_size > 4K.
static size_t _stack_red_zone_size;
static size_t _stack_yellow_zone_size;
static size_t _stack_reserved_zone_size;
static size_t _stack_shadow_zone_size;
public:
inline size_t stack_available(address cur_sp);
address stack_reserved_zone_base() {
return stack_yellow_zone_base(); }
size_t stack_reserved_zone_size() {
return StackReservedPages * os::vm_page_size(); }
address stack_yellow_zone_base() {
return (address)(stack_base() -
(stack_size() -
(stack_red_zone_size() + stack_yellow_zone_size())));
static size_t stack_red_zone_size() {
assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
return _stack_red_zone_size;
}
size_t stack_yellow_zone_size() {
return StackYellowPages * os::vm_page_size() + stack_reserved_zone_size();
static void set_stack_red_zone_size(size_t s) {
assert(is_size_aligned(s, os::vm_page_size()),
"We can not protect if the red zone size is not page aligned.");
assert(_stack_red_zone_size == 0, "This should be called only once.");
_stack_red_zone_size = s;
}
address stack_red_zone_base() {
return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
}
size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
bool in_stack_reserved_zone(address a) {
return (a <= stack_reserved_zone_base()) && (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
}
bool in_stack_yellow_zone(address a) {
return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
return (address)(stack_end() + stack_red_zone_size());
}
bool in_stack_red_zone(address a) {
return (a <= stack_red_zone_base()) &&
(a >= (address)((intptr_t)stack_base() - stack_size()));
return a <= stack_red_zone_base() && a >= stack_end();
}
static size_t stack_yellow_zone_size() {
assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
return _stack_yellow_zone_size;
}
static void set_stack_yellow_zone_size(size_t s) {
assert(is_size_aligned(s, os::vm_page_size()),
"We can not protect if the yellow zone size is not page aligned.");
assert(_stack_yellow_zone_size == 0, "This should be called only once.");
_stack_yellow_zone_size = s;
}
static size_t stack_reserved_zone_size() {
// _stack_reserved_zone_size may be 0. This indicates the feature is off.
return _stack_reserved_zone_size;
}
static void set_stack_reserved_zone_size(size_t s) {
assert(is_size_aligned(s, os::vm_page_size()),
"We can not protect if the reserved zone size is not page aligned.");
assert(_stack_reserved_zone_size == 0, "This should be called only once.");
_stack_reserved_zone_size = s;
}
address stack_reserved_zone_base() {
return (address)(stack_end() +
(stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
}
bool in_stack_reserved_zone(address a) {
return (a <= stack_reserved_zone_base()) &&
(a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
}
static size_t stack_yellow_reserved_zone_size() {
return _stack_yellow_zone_size + _stack_reserved_zone_size;
}
bool in_stack_yellow_reserved_zone(address a) {
return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
}
// Size of red + yellow + reserved zones.
static size_t stack_guard_zone_size() {
return stack_red_zone_size() + stack_yellow_reserved_zone_size();
}
static size_t stack_shadow_zone_size() {
assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
return _stack_shadow_zone_size;
}
static void set_stack_shadow_zone_size(size_t s) {
// The shadow area is not allocated or protected, so
// it needs not be page aligned.
// But the stack bang currently assumes that it is a
// multiple of page size. This guarantees that the bang
// loop touches all pages in the shadow zone.
// This can be guaranteed differently, as well. E.g., if
// the page size is a multiple of 4K, banging in 4K steps
// suffices to touch all pages. (Some pages are banged
// several times, though.)
assert(is_size_aligned(s, os::vm_page_size()),
"Stack bang assumes multiple of page size.");
assert(_stack_shadow_zone_size == 0, "This should be called only once.");
_stack_shadow_zone_size = s;
}
void create_stack_guard_pages();
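With the zone sizes cached as static byte counts, the zone bases fall out of stack_end() by plain addition, in the order of the diagram above. A toy computation with one 4K page per zone (hypothetical; real defaults vary by platform):

    #include <cstdio>
    int main() {
      const unsigned page = 4096;  // hypothetical: one page per zone
      unsigned red = page, yellow = page, reserved = page;
      unsigned red_base_off      = red;                      // stack_red_zone_base() - stack_end()
      unsigned reserved_base_off = red + yellow + reserved;  // stack_reserved_zone_base() - stack_end()
      unsigned guard_size        = red + yellow + reserved;  // stack_guard_zone_size()
      printf("%u %u %u\n", red_base_off, reserved_base_off, guard_size);  // 4096 12288 12288
      return 0;
    }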
@@ -1379,18 +1485,18 @@ class JavaThread: public Thread {
void enable_stack_reserved_zone();
void disable_stack_reserved_zone();
void enable_stack_yellow_zone();
void disable_stack_yellow_zone();
void enable_stack_yellow_reserved_zone();
void disable_stack_yellow_reserved_zone();
void enable_stack_red_zone();
void disable_stack_red_zone();
inline bool stack_guard_zone_unused();
inline bool stack_yellow_zone_disabled();
inline bool stack_yellow_reserved_zone_disabled();
inline bool stack_reserved_zone_disabled();
inline bool stack_guards_enabled();
address reserved_stack_activation() const { return _reserved_stack_activation; }
void set_reserved_stack_activation(address addr) {
void set_reserved_stack_activation(address addr) {
assert(_reserved_stack_activation == stack_base()
|| _reserved_stack_activation == NULL
|| addr == stack_base(), "Must not be set twice");
@@ -1410,11 +1516,9 @@ class JavaThread: public Thread {
address stack_overflow_limit() { return _stack_overflow_limit; }
void set_stack_overflow_limit() {
_stack_overflow_limit = _stack_base - _stack_size +
((StackShadowPages +
StackReservedPages +
StackYellowPages +
StackRedPages) * os::vm_page_size());
_stack_overflow_limit = stack_end() +
(JavaThread::stack_guard_zone_size() +
JavaThread::stack_shadow_zone_size());
}
// Misc. accessors/mutators

View file

@@ -126,8 +126,8 @@ inline bool JavaThread::stack_guard_zone_unused() {
return _stack_guard_state == stack_guard_unused;
}
inline bool JavaThread::stack_yellow_zone_disabled() {
return _stack_guard_state == stack_guard_yellow_disabled;
inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
return _stack_guard_state == stack_guard_yellow_reserved_disabled;
}
inline bool JavaThread::stack_reserved_zone_disabled() {
@@ -138,9 +138,9 @@ inline size_t JavaThread::stack_available(address cur_sp) {
// This code assumes java stacks grow down
address low_addr; // Limit on the address for deepest stack depth
if (_stack_guard_state == stack_guard_unused) {
low_addr = stack_base() - stack_size();
low_addr = stack_end();
} else {
low_addr = stack_yellow_zone_base();
low_addr = stack_reserved_zone_base();
}
return cur_sp > low_addr ? cur_sp - low_addr : 0;
}
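stack_available now measures headroom down to stack_reserved_zone_base(), the top of the whole red/yellow/reserved guard area, instead of the old yellow zone base: memory inside the guard zones must never be reported as usable. A toy version of the computation, with made-up addresses:

    #include <cstdio>
    int main() {
      unsigned long stack_end = 0x7f0000000000ul;               // hypothetical thread stack
      unsigned long reserved_zone_base = stack_end + 3 * 4096;  // guards enabled: above red+yellow+reserved
      unsigned long cur_sp = stack_end + 512 * 1024;
      unsigned long avail = cur_sp > reserved_zone_base ? cur_sp - reserved_zone_base : 0;
      printf("available: %lu bytes\n", avail);
      return 0;
    }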

View file

@@ -72,16 +72,7 @@ int Abstract_VM_Version::_reserve_for_allocation_prefetch = 0;
#error DEBUG_LEVEL must be defined
#endif
// NOTE: Builds within Visual Studio do not define the build target in
// HOTSPOT_VERSION_STRING, so it must be done here
#if defined(VISUAL_STUDIO_BUILD) && !defined(PRODUCT)
#ifndef HOTSPOT_BUILD_TARGET
#error HOTSPOT_BUILD_TARGET must be defined
#endif
#define VM_RELEASE HOTSPOT_VERSION_STRING "-" HOTSPOT_BUILD_TARGET
#else
#define VM_RELEASE HOTSPOT_VERSION_STRING
#endif
#define VM_RELEASE HOTSPOT_VERSION_STRING
// HOTSPOT_VERSION_STRING equals the JDK VERSION_STRING (unless overridden
// in a standalone build).