8057107: cleanup indent white space issues prior to Contended Locking reorder and cache line bucket

Reviewed-by: fparain, sspitsyn, coleenp
Daniel D. Daugherty 2014-09-10 11:48:20 -06:00
parent 2ad3d66d79
commit e1a36d62a0
15 changed files with 3764 additions and 3764 deletions

src/os/bsd/vm/os_bsd.cpp

@@ -260,11 +260,11 @@ void os::Bsd::initialize_system_info() {
mib[1] = HW_NCPU;
len = sizeof(cpu_val);
if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
assert(len == sizeof(cpu_val), "unexpected data size");
set_processor_count(cpu_val);
}
else {
set_processor_count(1); // fallback
}
/* get physical memory via hw.memsize sysctl (hw.memsize is used
@@ -284,19 +284,19 @@ void os::Bsd::initialize_system_info() {
len = sizeof(mem_val);
if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
assert(len == sizeof(mem_val), "unexpected data size");
_physical_memory = mem_val;
} else {
_physical_memory = 256*1024*1024; // fallback (XXXBSD?)
}
#ifdef __OpenBSD__
{
// limit _physical_memory memory view on OpenBSD since
// datasize rlimit restricts us anyway.
struct rlimit limits;
getrlimit(RLIMIT_DATA, &limits);
_physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
}
#endif
}
@@ -561,14 +561,14 @@ debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
bool os::Bsd::is_sig_ignored(int sig) {
struct sigaction oact;
sigaction(sig, (struct sigaction*)NULL, &oact);
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
: CAST_FROM_FN_PTR(void*, oact.sa_handler);
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
return true;
else
return false;
}
void os::Bsd::signal_sets_init() {
@@ -596,18 +596,18 @@ void os::Bsd::signal_sets_init() {
sigaddset(&unblocked_sigs, SR_signum);
if (!ReduceSignalUsage) {
if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
}
if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
sigemptyset(&vm_sigs);
@@ -846,9 +846,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
// Aborted due to thread limit being reached
if (state == ZOMBIE) {
thread->set_osthread(NULL);
delete osthread;
return false;
}
// The thread is returned suspended (in state INITIALIZED),
@@ -868,7 +868,7 @@ bool os::create_main_thread(JavaThread* thread) {
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
thread->verify_not_published();
#endif
// Allocate the OSThread object
@@ -919,7 +919,7 @@ void os::free_thread(OSThread* osthread) {
// Restore caller's signal mask
sigset_t sigmask = osthread->caller_sigmask();
pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
}
delete osthread;
}
@@ -1023,27 +1023,27 @@ void os::Bsd::clock_init() {
#ifdef __APPLE__
jlong os::javaTimeNanos() {
const uint64_t tm = mach_absolute_time();
const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
const uint64_t prev = Bsd::_max_abstime;
if (now <= prev) {
return prev; // same or retrograde time;
}
const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
// we should return "obsv". If the CAS failed and now > obsv > prv then
// some other thread raced this thread and installed a new value, in which case
// we could either (a) retry the entire operation, (b) retry trying to install now
// or (c) just return obsv. We use (c). No loop is required although in some cases
// we might discard a higher "now" value in deference to a slightly lower but freshly
// installed obsv value. That's entirely benign -- it admits no new orderings compared
// to (a) or (b) -- and greatly reduces coherence traffic.
// We might also condition (c) on the magnitude of the delta between obsv and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv;
}
#else // __APPLE__
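The comment above describes a clamp-with-a-single-CAS idiom: advance a shared maximum, and on a lost race return whatever value another thread installed instead of looping. A minimal standalone sketch of the same idea, with std::atomic standing in for HotSpot's Atomic::cmpxchg (names here are illustrative, not HotSpot's):

#include <atomic>
#include <cstdint>

static std::atomic<uint64_t> g_max_time{0};

// Clamp a raw clock reading so the returned value never goes backwards.
uint64_t monotonic_now(uint64_t raw) {
  uint64_t prev = g_max_time.load();
  if (raw <= prev) {
    return prev;  // same or retrograde time
  }
  // One CAS, no retry loop: if it fails, 'prev' is reloaded with the
  // value another thread installed, which is >= our old 'prev', so
  // returning it still preserves monotonicity.
  if (g_max_time.compare_exchange_strong(prev, raw)) {
    return raw;
  }
  return prev;
}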
@@ -1307,7 +1307,7 @@ bool os::dll_build_name(char* buffer, size_t buflen,
continue; // skip the empty path values
}
snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
pelements[i], fname);
if (file_exists(buffer)) {
retval = true;
break;
@@ -1372,14 +1372,14 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
return true;
}
}
// Handle non-dynamic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
dlinfo.dli_fbase)) {
if (!Decoder::demangle(localbuf, buf, buflen)) {
jio_snprintf(buf, buflen, "%s", localbuf);
}
@@ -1465,7 +1465,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
bool failed_to_read_elf_head=
(sizeof(elf_head)!=
(::read(file_descriptor, &elf_head,sizeof(elf_head))));
::close(file_descriptor);
if (failed_to_read_elf_head) {
@@ -1525,33 +1525,33 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
};
#if (defined IA32)
static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
static Elf32_Half running_arch_code=EM_SPARC;
#elif (defined __powerpc64__)
static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
static Elf32_Half running_arch_code=EM_PPC;
#elif (defined ARM)
static Elf32_Half running_arch_code=EM_ARM;
#elif (defined S390)
static Elf32_Half running_arch_code=EM_S390;
#elif (defined ALPHA)
static Elf32_Half running_arch_code=EM_ALPHA;
#elif (defined MIPSEL)
static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
#elif (defined PARISC)
static Elf32_Half running_arch_code=EM_PARISC;
#elif (defined MIPS)
static Elf32_Half running_arch_code=EM_MIPS;
#elif (defined M68K)
static Elf32_Half running_arch_code=EM_68K;
#else
#error Method os::dll_load requires that one of following is defined:\
IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
@@ -1574,7 +1574,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
}
assert(running_arch_index != -1,
"Didn't find running architecture code (running_arch_code) in arch_array");
if (running_arch_index == -1) {
// Even though running architecture detection failed
// we may still continue with reporting dlerror() message
@@ -1596,13 +1596,13 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
if (lib_arch.name!=NULL) {
::snprintf(diag_msg_buf, diag_msg_max_length-1,
" (Possible cause: can't load %s-bit .so on a %s-bit platform)",
lib_arch.name, arch_array[running_arch_index].name);
} else {
::snprintf(diag_msg_buf, diag_msg_max_length-1,
" (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
lib_arch.code,
arch_array[running_arch_index].name);
}
}
@@ -1630,7 +1630,7 @@ void* os::dll_lookup(void* handle, const char* name) {
static bool _print_ascii_file(const char* filename, outputStream* st) {
int fd = ::open(filename, O_RDONLY);
if (fd == -1) {
return false;
}
char buf[32];
@@ -1785,8 +1785,8 @@ void os::jvm_path(char *buf, jint buflen) {
char dli_fname[MAXPATHLEN];
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret, "cannot locate libjvm");
char *rp = NULL;
if (ret && dli_fname[0] != '\0') {
@@ -1884,12 +1884,12 @@ UserHandler(int sig, void *siginfo, void *context) {
// the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
// don't want to flood the manager thread with sem_post requests.
if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
return;
// Ctrl-C is pressed during error reporting, likely because the error
// handler fails to abort. Let VM die immediately.
if (sig == SIGINT && is_error_reported()) {
os::die();
}
os::signal_notify(sig);
@@ -1952,16 +1952,16 @@ typedef sem_t os_semaphore_t;
#endif
class Semaphore : public StackObj {
public:
Semaphore();
~Semaphore();
void signal();
void wait();
bool trywait();
bool timedwait(unsigned int sec, int nsec);
private:
jlong currenttime() const;
os_semaphore_t _semaphore;
};
Semaphore::Semaphore() : _semaphore(0) {
@@ -1981,9 +1981,9 @@ void Semaphore::wait() {
}
jlong Semaphore::currenttime() const {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
}
#ifdef __APPLE__
@@ -2180,7 +2180,7 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
}
#else
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
if (res != (uintptr_t) MAP_FAILED) {
return true;
}
@@ -2194,7 +2194,7 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
}
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}
@@ -2262,7 +2262,7 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
return ::mprotect(addr, size, PROT_NONE) == 0;
#else
uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
#endif
}
@@ -2323,7 +2323,7 @@ static int anon_munmap(char * addr, size_t size) {
}
char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
size_t alignment_hint) {
return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
}
@@ -2401,24 +2401,24 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
// Currently, size is the total size of the heap
int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
if (shmid == -1) {
// Possible reasons for shmget failure:
// 1. shmmax is too small for Java heap.
// > check shmmax value: cat /proc/sys/kernel/shmmax
// > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
// 2. not enough large page memory.
// > check available large pages: cat /proc/meminfo
// > increase amount of large pages:
// echo new_value > /proc/sys/vm/nr_hugepages
// Note 1: different Bsd may use different name for this property,
// e.g. on Redhat AS-3 it is "hugetlb_pool".
// Note 2: it's possible there's enough physical memory available but
// they are so fragmented after a long run that they can't
// coalesce into large pages. Try to reserve large pages when
// the system is still "fresh".
if (warn_on_failure) {
warning("Failed to reserve shared memory (errno = %d).", errno);
}
return NULL;
}
// attach to the region
@@ -2432,10 +2432,10 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
shmctl(shmid, IPC_RMID, NULL);
if ((intptr_t)addr == -1) {
if (warn_on_failure) {
warning("Failed to attach shared memory (errno = %d).", err);
}
return NULL;
}
// The memory is committed
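The shmget/shmat sequence above follows the usual System V pattern: create the segment, attach it, then immediately mark it IPC_RMID so it is destroyed once the last attachment goes away even if the process dies. A hedged standalone sketch of that lifecycle (error handling reduced to perror; not HotSpot's actual helper):

#include <cstddef>
#include <cstdio>
#include <sys/ipc.h>
#include <sys/shm.h>

char* reserve_shm(size_t bytes) {
  int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_R | SHM_W);
  if (shmid == -1) {
    perror("shmget");
    return nullptr;
  }
  char* addr = (char*)shmat(shmid, nullptr, 0);
  // Remove the id right away; the segment lives until the last detach.
  shmctl(shmid, IPC_RMID, nullptr);
  if (addr == (char*)-1) {
    perror("shmat");
    return nullptr;
  }
  return addr;
}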
@@ -2506,12 +2506,12 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
// if kernel honors the hint then we can return immediately.
char * addr = anon_mmap(requested_addr, bytes, false);
if (addr == requested_addr) {
return requested_addr;
}
if (addr != NULL) {
// mmap() is successful but it fails to reserve at the requested address
anon_munmap(addr, bytes);
}
int i;
@@ -2839,12 +2839,12 @@ static int SR_initialize() {
if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
int sig = ::strtol(s, 0, 10);
if (sig > 0 || sig < NSIG) {
SR_signum = sig;
}
}
assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
"SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
sigemptyset(&SR_sigset);
sigaddset(&SR_sigset, SR_signum);
@@ -2977,7 +2977,7 @@ static void do_resume(OSThread* osthread) {
//
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
void* ucontext, int abort_if_unrecognized);
void signalHandler(int sig, siginfo_t* info, void* uc) {
assert(info != NULL && uc != NULL, "it must be old kernel");
@@ -3168,12 +3168,12 @@ void os::Bsd::install_signal_handlers() {
signal_setting_t begin_signal_setting = NULL;
signal_setting_t end_signal_setting = NULL;
begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
if (begin_signal_setting != NULL) {
end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
get_signal_action = CAST_TO_FN_PTR(get_signal_t,
dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
libjsig_is_loaded = true;
assert(UseSignalChaining, "should enable signal-chaining");
}
@@ -3203,10 +3203,10 @@ void os::Bsd::install_signal_handlers() {
// exception handling, while leaving the standard BSD signal handlers functional.
kern_return_t kr;
kr = task_set_exception_ports(mach_task_self(),
EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
MACH_PORT_NULL,
EXCEPTION_STATE_IDENTITY,
MACHINE_THREAD_STATE);
assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
#endif
@@ -3302,7 +3302,7 @@ static void print_signal_handler(outputStream* st, int sig,
// Check: is it our handler?
if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
// It is our signal handler
// check for flags, reset system-used one!
if ((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
@@ -3542,22 +3542,22 @@ jint os::init_2(void)
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
(size_t)(StackYellowPages+StackRedPages+StackShadowPages+
2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
os::Bsd::min_stack_allowed/ K);
return JNI_ERR;
}
// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));
if (MaxFDLimit) {
// set the number of file descriptors to max. print out error
@@ -3670,12 +3670,12 @@ void os::SuspendedThreadTask::internal_do_task() {
///
class PcFetcher : public os::SuspendedThreadTask {
public:
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
ExtendedPC result();
protected:
void do_task(const os::SuspendedThreadTaskContext& context);
private:
ExtendedPC _epc;
};
@@ -3722,7 +3722,7 @@ bool os::find(address addr, outputStream* st) {
st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fbase != NULL) {
st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
@@ -3892,11 +3892,11 @@ int os::open(const char *path, int oflag, int mode) {
* 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
*/
#ifdef FD_CLOEXEC
{
int flags = ::fcntl(fd, F_GETFD);
if (flags != -1)
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif
if (o_delete != 0) {
@@ -3960,23 +3960,23 @@ int os::available(int fd, jlong *bytes) {
}
int os::socket_available(int fd, jint *pbytes) {
if (fd < 0)
return OS_OK;
int ret;
RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
//%% note ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret == OS_ERR) ? 0 : 1;
}
// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
int prot;
int flags;
@@ -4007,8 +4007,8 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
// same as map_memory() on this OS
return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
allow_exec);
@@ -4127,7 +4127,7 @@ void os::pause() {
}
} else {
jio_fprintf(stderr,
"Could not open pause file '%s', continuing immediately.\n", filename);
}
}
@@ -4223,28 +4223,28 @@ void os::PlatformEvent::park() { // AKA "down()"
int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee(v >= 0, "invariant");
if (v == 0) {
// Do this the hard way by blocking ...
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_Event < 0) {
status = pthread_cond_wait(_cond, _mutex);
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
// Treat this the same as if the wait was interrupted
if (status == ETIMEDOUT) { status = EINTR; }
assert_status(status == 0 || status == EINTR, status, "cond_wait");
}
--_nParked;
_Event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
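park() above implements a one-permit event: _Event is 1 when a permit is available, 0 when idle, and negative while a thread is blocked, with a lock-free CAS fast path in front of the mutex/condvar slow path. A simplified sketch of just the handshake, dropping the CAS fast path and the memory-barrier subtleties (Event is an illustrative name, not HotSpot's class):

#include <pthread.h>

struct Event {
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
  int event = 0;  // 1 = permit available, 0 = idle, < 0 = parked

  void park() {
    pthread_mutex_lock(&mu);
    --event;                   // consume a permit or go negative
    while (event < 0) {
      pthread_cond_wait(&cv, &mu);
    }
    event = 0;                 // reset after waking
    pthread_mutex_unlock(&mu);
  }

  void unpark() {
    pthread_mutex_lock(&mu);
    if (event < 1) event = 1;  // permits never accumulate past one
    pthread_cond_signal(&cv);
    pthread_mutex_unlock(&mu);
  }
};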
@@ -4257,8 +4257,8 @@ int os::PlatformEvent::park(jlong millis) {
int v;
for (;;) {
v = _Event;
if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
}
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;
@@ -4302,7 +4302,7 @@ int os::PlatformEvent::park(jlong millis) {
}
--_nParked;
if (_Event >= 0) {
ret = OS_OK;
}
_Event = 0;
status = pthread_mutex_unlock(_mutex);
@@ -4532,17 +4532,17 @@ void Parker::unpark() {
const int s = _counter;
_counter = 1;
if (s < 1) {
if (WorkAroundNPTLTimedWaitHang) {
status = pthread_cond_signal(_cond);
assert(status == 0, "invariant");
status = pthread_mutex_unlock(_mutex);
assert(status == 0, "invariant");
} else {
status = pthread_mutex_unlock(_mutex);
assert(status == 0, "invariant");
status = pthread_cond_signal(_cond);
assert(status == 0, "invariant");
}
} else {
pthread_mutex_unlock(_mutex);
assert(status == 0, "invariant");
@@ -4600,26 +4600,26 @@ int os::fork_and_exec(char* cmd) {
// Wait for the child process to exit. This returns immediately if
// the child has already exited. */
while (waitpid(pid, &status, 0) < 0) {
switch (errno) {
case ECHILD: return 0;
case EINTR: break;
default: return -1;
}
}
if (WIFEXITED(status)) {
// The child exited normally; get its exit code.
return WEXITSTATUS(status);
} else if (WIFSIGNALED(status)) {
// The child exited because of a signal
// The best value to return is 0x80 + signal number,
// because that is what all Unix shells do, and because
// it allows callers to distinguish between process exit and
// process death by signal.
return 0x80 + WTERMSIG(status);
} else {
// Unknown exit code; pass it through
return status;
}
}
}
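fork_and_exec() folds the two waitpid() outcomes into one integer the way shells do: a normal exit yields the exit code, death by signal yields 0x80 plus the signal number, so callers can tell the two cases apart. A small sketch of just that decoding step:

#include <sys/wait.h>

// Decode a waitpid() status using the same convention as above.
int decode_status(int status) {
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);      // normal exit: 0..255
  } else if (WIFSIGNALED(status)) {
    return 0x80 + WTERMSIG(status);  // killed by signal: 128 + signo
  }
  return status;                     // unknown; pass it through
}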
@@ -4634,40 +4634,40 @@ int os::fork_and_exec(char* cmd) {
//
bool os::is_headless_jre() {
#ifdef __APPLE__
// We no longer build headless-only on Mac OS X
return false;
#else
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt" JNI_LIB_SUFFIX;
const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
char *p;
// Get path to libjvm.so
os::jvm_path(buf, sizeof(buf));
// Get rid of libjvm.so
p = strrchr(buf, '/');
if (p == NULL) return false;
else *p = '\0';
// Get rid of client or server
p = strrchr(buf, '/');
if (p == NULL) return false;
else *p = '\0';
// check xawt/libmawt.so
strcpy(libmawtpath, buf);
strcat(libmawtpath, xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
// check libawt_xawt.so
strcpy(libmawtpath, buf);
strcat(libmawtpath, new_xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
return true;
#endif
}

src/os/bsd/vm/os_bsd.hpp

@@ -108,7 +108,7 @@ class Bsd {
// that file provides extensions to the os class and not the
// Bsd class.
static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
intptr_t** ret_sp, intptr_t** ret_fp);
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_bsd_signal, harmlessly.
@@ -147,7 +147,7 @@ class Bsd {
// BsdThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
typedef int (*numa_max_node_func_t)(void);
@@ -170,7 +170,7 @@ private:
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
public:
static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -190,55 +190,55 @@ public:
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
volatile int _nParked;
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
double PostPad[2];
Thread * _Assoc;
public: // TODO-FIXME: make dtor private
~PlatformEvent() { guarantee(0, "invariant"); }
public:
PlatformEvent() {
int status;
status = pthread_cond_init (_cond, NULL);
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_Event = 0;
_nParked = 0;
_Assoc = NULL;
}
// Use caution with reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0; }
int fired() { return _Event; }
void park();
void unpark();
int park(jlong millis);
void SetAssociation(Thread * a) { _Assoc = a; }
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {
int status;
status = pthread_cond_init (_cond, NULL);
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
}
};
#endif // OS_BSD_VM_OS_BSD_HPP
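The CachePad/PostPad arrays in PlatformEvent exist to reduce false sharing: the hot _Event/_mutex fields should not share a cache line with unrelated data, which is the concern behind the "cache line bucket" work this cleanup prepares for. A sketch of the same hand-rolled padding idea, assuming a 64-byte line (an assumption; alignas is the modern alternative):

#include <pthread.h>

struct PaddedEvent {
  char pre[64];           // pad in front of the hot fields
  volatile int event;
  pthread_mutex_t mutex;
  pthread_cond_t cond;
  char post[64];          // pad behind, so neighbors cannot false-share
};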

File diff suppressed because it is too large.

src/os/linux/vm/os_linux.hpp

@@ -151,7 +151,7 @@ class Linux {
// that file provides extensions to the os class and not the
// Linux class.
static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
intptr_t** ret_sp, intptr_t** ret_fp);
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_linux_signal, harmlessly.
@@ -222,10 +222,10 @@ class Linux {
static jlong fast_thread_cpu_time(clockid_t clockid);
// pthread_cond clock suppport
private:
static pthread_condattr_t _condattr[1];
public:
static pthread_condattr_t* condAttr() { return _condattr; }
// Stack repair handling
@@ -235,7 +235,7 @@ class Linux {
// LinuxThreads work-around for 6292965
static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
private:
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
typedef int (*numa_max_node_func_t)(void);
@@ -262,7 +262,7 @@ private:
static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
static int sched_getcpu_syscall(void);
public:
static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
@@ -287,63 +287,63 @@ public:
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
volatile int _nParked;
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[1];
double PostPad[2];
Thread * _Assoc;
public: // TODO-FIXME: make dtor private
~PlatformEvent() { guarantee(0, "invariant"); }
public:
PlatformEvent() {
int status;
status = pthread_cond_init (_cond, os::Linux::condAttr());
assert_status(status == 0, status, "cond_init");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_Event = 0;
_nParked = 0;
_Assoc = NULL;
}
// Use caution with reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0; }
int fired() { return _Event; }
void park();
void unpark();
int park(jlong millis); // relative timed-wait only
void SetAssociation(Thread * a) { _Assoc = a; }
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
enum {
REL_INDEX = 0,
ABS_INDEX = 1
};
int _cur_index; // which cond is in use: -1, 0, 1
pthread_mutex_t _mutex[1];
pthread_cond_t _cond[2]; // one for relative times and one for abs.
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {
int status;
status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
assert_status(status == 0, status, "cond_init rel");
status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
assert_status(status == 0, status, "cond_init abs");
status = pthread_mutex_init (_mutex, NULL);
assert_status(status == 0, status, "mutex_init");
_cur_index = -1; // mark as unused
}
};
#endif // OS_LINUX_VM_OS_LINUX_HPP
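The Linux PlatformParker keeps two condvars because a condvar's timedwait clock is fixed at init time: _cond[REL_INDEX] is initialized with os::Linux::condAttr(), which (where supported) selects CLOCK_MONOTONIC for relative timeouts, while _cond[ABS_INDEX] keeps the default CLOCK_REALTIME so absolute deadlines still work. A sketch of initializing a monotonic-clock condvar:

#include <pthread.h>
#include <time.h>

int init_monotonic_cond(pthread_cond_t* cond) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  // Relative waits measured against CLOCK_MONOTONIC are immune to
  // wall-clock adjustments; absolute deadlines still need REALTIME.
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  int status = pthread_cond_init(cond, &attr);
  pthread_condattr_destroy(&attr);
  return status;
}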

File diff suppressed because it is too large.

src/os/solaris/vm/os_solaris.hpp

@@ -61,8 +61,8 @@ class Solaris {
typedef id_t lgrp_id_t;
typedef int lgrp_rsrc_t;
typedef enum lgrp_view {
LGRP_VIEW_CALLER, /* what's available to the caller */
LGRP_VIEW_OS /* what's available to operating system */
} lgrp_view_t;
typedef uint_t (*getisax_func_t)(uint32_t* array, uint_t n);
@@ -74,8 +74,8 @@ class Solaris {
typedef int (*lgrp_children_func_t)(lgrp_cookie_t cookie, lgrp_id_t parent,
lgrp_id_t *lgrp_array, uint_t lgrp_array_size);
typedef int (*lgrp_resources_func_t)(lgrp_cookie_t cookie, lgrp_id_t lgrp,
lgrp_id_t *lgrp_array, uint_t lgrp_array_size,
lgrp_rsrc_t type);
typedef int (*lgrp_nlgrps_func_t)(lgrp_cookie_t cookie);
typedef int (*lgrp_cookie_stale_func_t)(lgrp_cookie_t cookie);
typedef int (*meminfo_func_t)(const uint64_t inaddr[], int addr_count,
@@ -128,7 +128,7 @@ class Solaris {
static bool valid_stack_address(Thread* thread, address sp);
static bool valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect);
static ucontext_t* get_valid_uc_in_signal_handler(Thread* thread,
ucontext_t* uc);
static ExtendedPC ucontext_get_ExtendedPC(ucontext_t* uc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
@@ -143,7 +143,7 @@ class Solaris {
// os_solaris_i486.hpp and os_solaris_sparc.hpp, but that file
// provides extensions to the os class and not the Solaris class.
static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
intptr_t** ret_sp, intptr_t** ret_fp);
static void hotspot_sigmask(Thread* thread);
@@ -249,7 +249,7 @@ class Solaris {
static int lgrp_fini(lgrp_cookie_t cookie) { return _lgrp_fini != NULL ? _lgrp_fini(cookie) : -1; }
static lgrp_id_t lgrp_root(lgrp_cookie_t cookie) { return _lgrp_root != NULL ? _lgrp_root(cookie) : -1; };
static int lgrp_children(lgrp_cookie_t cookie, lgrp_id_t parent,
lgrp_id_t *lgrp_array, uint_t lgrp_array_size) {
return _lgrp_children != NULL ? _lgrp_children(cookie, parent, lgrp_array, lgrp_array_size) : -1;
}
static int lgrp_resources(lgrp_cookie_t cookie, lgrp_id_t lgrp,
@@ -269,8 +269,8 @@ class Solaris {
static void set_meminfo(meminfo_func_t func) { _meminfo = func; }
static int meminfo (const uint64_t inaddr[], int addr_count,
const uint_t info_req[], int info_count,
uint64_t outdata[], uint_t validity[]) {
return _meminfo != NULL ? _meminfo(inaddr, addr_count, info_req, info_count,
outdata, validity) : -1;
}
@@ -300,57 +300,57 @@ };
class PlatformEvent : public CHeapObj<mtInternal> {
private:
double CachePad[4]; // increase odds that _mutex is sole occupant of cache line
volatile int _Event;
int _nParked;
int _pipev[2];
mutex_t _mutex[1];
cond_t _cond[1];
double PostPad[2];
protected:
// Defining a protected ctor effectively gives us an abstract base class.
// That is, a PlatformEvent can never be instantiated "naked" but only
// as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
// TODO-FIXME: make dtor private
~PlatformEvent() { guarantee(0, "invariant"); }
PlatformEvent() {
int status;
status = os::Solaris::cond_init(_cond);
assert_status(status == 0, status, "cond_init");
status = os::Solaris::mutex_init(_mutex);
assert_status(status == 0, status, "mutex_init");
_Event = 0;
_nParked = 0;
_pipev[0] = _pipev[1] = -1;
}
public:
// Exercise caution using reset() and fired() -- they may require MEMBARs
void reset() { _Event = 0; }
int fired() { return _Event; }
void park();
int park(jlong millis);
void unpark();
};
class PlatformParker : public CHeapObj<mtInternal> {
protected:
mutex_t _mutex[1];
cond_t _cond[1];
public: // TODO-FIXME: make dtor private
~PlatformParker() { guarantee(0, "invariant"); }
public:
PlatformParker() {
int status;
status = os::Solaris::cond_init(_cond);
assert_status(status == 0, status, "cond_init");
status = os::Solaris::mutex_init(_mutex);
assert_status(status == 0, status, "mutex_init");
}
};
#endif // OS_SOLARIS_VM_OS_SOLARIS_HPP

File diff suppressed because it is too large.

src/share/vm/runtime/atomic.hpp

@@ -74,12 +74,12 @@ class Atomic : AllStatic {
inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
inline static void* add_ptr(intptr_t add_value, volatile void* dest);
// See comment above about using jlong atomics on 32-bit platforms
static jlong add (jlong add_value, volatile jlong* dest);
// Atomically increment location. inc*() provide:
// <fence> increment-dest <membar StoreLoad|StoreStore>
inline static void inc (volatile jint* dest);
static void inc (volatile jshort* dest);
inline static void inc (volatile size_t* dest);
inline static void inc_ptr(volatile intptr_t* dest);
inline static void inc_ptr(volatile void* dest);
@@ -87,7 +87,7 @@ class Atomic : AllStatic {
// Atomically decrement a location. dec*() provide:
// <fence> decrement-dest <membar StoreLoad|StoreStore>
inline static void dec (volatile jint* dest);
static void dec (volatile jshort* dest);
inline static void dec (volatile size_t* dest);
inline static void dec_ptr(volatile intptr_t* dest);
inline static void dec_ptr(volatile void* dest);
@@ -96,7 +96,7 @@ class Atomic : AllStatic {
// prior value of *dest. xchg*() provide:
// <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
inline static jint xchg(jint exchange_value, volatile jint* dest);
static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
inline static void* xchg_ptr(void* exchange_value, volatile void* dest);
@@ -105,14 +105,14 @@ class Atomic : AllStatic {
// *dest with exchange_value if the comparison succeeded. Returns prior
// value of *dest. cmpxchg*() provide:
// <fence> compare-and-exchange <membar StoreLoad|StoreStore>
static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
inline static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
// See comment above about using jlong atomics on 32-bit platforms
inline static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
static unsigned int cmpxchg(unsigned int exchange_value,
volatile unsigned int* dest,
unsigned int compare_value);
inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
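All of the cmpxchg overloads return the prior value of *dest, so the usual calling pattern is a retry loop that recomputes the desired value until the CAS wins. A sketch of that pattern with std::atomic standing in for Atomic::cmpxchg (the clamped add is an invented example):

#include <atomic>

int add_clamped(std::atomic<int>& dest, int add, int max) {
  int old = dest.load();
  for (;;) {
    int next = (old + add > max) ? max : old + add;
    if (dest.compare_exchange_weak(old, next)) {
      return next;  // our update won
    }
    // on failure 'old' was reloaded with the current value; retry
  }
}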

src/share/vm/runtime/mutex.cpp

@@ -407,7 +407,7 @@ static int ParkCommon (ParkEvent * ev, jlong timo) {
// Diagnostic support - periodically unwedge blocked threads
intx nmt = NativeMonitorTimeout;
if (nmt > 0 && (nmt < timo || timo <= 0)) {
timo = nmt;
}
int err = OS_OK;
if (0 == timo) {
@@ -590,7 +590,7 @@ void Monitor::IUnlock (bool RelaxAssert) {
// as a diagnostic measure consider setting w->_ListNext = BAD
assert(UNS(_OnDeck) == _LBIT, "invariant");
_OnDeck = w; // pass OnDeck to w.
// w will clear OnDeck once it acquires the outer lock
// Another optional optimization ...
// For heavily contended locks it's not uncommon that some other
@@ -1082,14 +1082,14 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");
#ifdef ASSERT
Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
assert(least != this, "Specification of get_least_... call above");
if (least != NULL && least->rank() <= special) {
tty->print("Attempting to wait on monitor %s/%d while holding"
" lock %s/%d -- possible deadlock",
name(), rank(), least->name(), least->rank());
assert(false, "Shouldn't block(wait) while holding a lock of rank special");
}
#endif // ASSERT
int wait_status;
@@ -1173,8 +1173,8 @@ Mutex::~Mutex() {
Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
_allow_vm_block = allow_vm_block;
_rank = Rank;
#endif
}
@@ -1280,38 +1280,38 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
// link "this" into the owned locks list
#ifdef ASSERT // Thread::_owned_locks is under the same ifdef
Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
// Mutex::set_owner_implementation is a friend of Thread
assert(this->rank() >= 0, "bad lock rank");
// Deadlock avoidance rules require us to acquire Mutexes only in
// a global total order. For example m1 is the lowest ranked mutex
// that the thread holds and m2 is the mutex the thread is trying
// to acquire, then deadlock avoidance rules require that the rank
// of m2 be less than the rank of m1.
// The rank Mutex::native is an exception in that it is not subject
// to the verification rules.
// Here are some further notes relating to mutex acquisition anomalies:
// . under Solaris, the interrupt lock gets acquired when doing
// profiling, so any lock could be held.
// . it is also ok to acquire Safepoint_lock at the very end while we
// already hold Terminator_lock - may happen because of periodic safepoints
if (this->rank() != Mutex::native &&
this->rank() != Mutex::suspend_resume &&
locks != NULL && locks->rank() <= this->rank() &&
!SafepointSynchronize::is_at_safepoint() &&
this != Interrupt_lock && this != ProfileVM_lock &&
!(this == Safepoint_lock && contains(locks, Terminator_lock) &&
SafepointSynchronize::is_synchronizing())) {
new_owner->print_owned_locks();
fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
"possible deadlock", this->name(), this->rank(),
locks->name(), locks->rank()));
}
this->_next = new_owner->_owned_locks;
new_owner->_owned_locks = this;
#endif
} else {
@@ -1326,25 +1326,25 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
_owner = NULL; // set the owner
#ifdef ASSERT
Monitor *locks = old_owner->owned_locks();
// remove "this" from the owned locks list
Monitor *prev = NULL;
bool found = false;
for (; locks != NULL; prev = locks, locks = locks->next()) {
if (locks == this) {
found = true;
break;
}
}
assert(found, "Removing a lock not owned");
if (prev == NULL) {
old_owner->_owned_locks = _next;
} else {
prev->_next = _next;
}
_next = NULL;
#endif
}
}
@@ -1360,11 +1360,11 @@ void Monitor::check_prelock_state(Thread *thread) {
name()));
}
debug_only(if (rank() != Mutex::special) \
thread->check_for_valid_safepoint_state(false);)
}
if (thread->is_Watcher_thread()) {
assert(!WatcherThread::watcher_thread()->has_crash_protection(),
"locking not allowed when crash protection is set");
}
}
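The rank checks above enforce one global acquisition order: a thread may only acquire a lock ranked strictly below every lock it already holds (modulo the listed exceptions), which makes cyclic waits impossible. A toy sketch of the rule, with RankedMutex and held_head as illustrative names rather than HotSpot's:

#include <cassert>

struct RankedMutex {
  int rank;
  RankedMutex* next_held = nullptr;  // intrusive list of owned locks
};

static thread_local RankedMutex* held_head = nullptr;

void check_acquire_order(RankedMutex* m) {
  // Every lock already held must outrank the one being acquired.
  for (RankedMutex* cur = held_head; cur != nullptr; cur = cur->next_held) {
    assert(m->rank < cur->rank && "acquiring lock out of rank order");
  }
  m->next_held = held_head;          // record as most recently acquired
  held_head = m;
}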

File diff suppressed because it is too large.

src/share/vm/runtime/objectMonitor.hpp

@@ -141,7 +141,7 @@ class ObjectMonitor {
_header = NULL;
_count = 0;
_waiters = 0,
_recursions = 0;
_object = NULL;
_owner = NULL;
_WaitSet = NULL;
@@ -158,12 +158,12 @@ class ObjectMonitor {
}
~ObjectMonitor() {
// TODO: Add asserts ...
// _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
// _count == 0 _EntryList == NULL etc
}
private:
void Recycle() {
// TODO: add stronger asserts ...
// _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
@@ -180,7 +180,7 @@ private:
OwnerIsThread = 0;
}
public:
void* object() const;
void* object_addr();
@@ -225,9 +225,9 @@ public:
void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
bool ExitSuspendEquivalent(JavaThread * Self);
void post_monitor_wait_event(EventJavaMonitorWait * event,
jlong notifier_tid,
jlong timeout,
bool timedout);
private:
friend class ObjectSynchronizer;
@@ -253,8 +253,8 @@ public:
private:
int OwnerIsThread; // _owner is (Thread *) vs SP/BasicLock
ObjectWaiter * volatile _cxq; // LL of recently-arrived threads blocked on entry.
// The list is actually composed of WaitNodes, acting
// as proxies for Threads.
protected:
ObjectWaiter * volatile _EntryList; // Threads blocked on entry or reentry.
private:
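_cxq is a lock-free LIFO of arriving waiters: a blocking thread enqueues its own node by publishing it with a CAS on the list head, which is why the comment stresses that the list holds proxies rather than Threads. A sketch of that push, with Node standing in for ObjectWaiter:

#include <atomic>

struct Node {
  Node* next = nullptr;
};

void cxq_push(std::atomic<Node*>& cxq, Node* self) {
  Node* head = cxq.load();
  do {
    self->next = head;  // link before publishing ourselves
  } while (!cxq.compare_exchange_weak(head, self));
}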

src/share/vm/runtime/sharedRuntime.hpp

@@ -50,8 +50,8 @@ class SharedRuntime: AllStatic {
private:
static methodHandle resolve_sub_helper(JavaThread *thread,
bool is_virtual,
bool is_optimized, TRAPS);
// Shared stub locations
@@ -309,11 +309,11 @@ class SharedRuntime: AllStatic {
bool is_virtual,
bool is_optimized, TRAPS);
private:
// deopt blob
static void generate_deopt_blob(void);
public:
static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }
// Resets a call-site in compiled code so it will get resolved again.

File diff suppressed because it is too large.

src/share/vm/runtime/thread.cpp

@@ -148,7 +148,7 @@ void* Thread::allocate(size_t size, bool throw_excpt, MEMFLAGS flags) {
size_t aligned_size = size + (alignment - sizeof(intptr_t));
void* real_malloc_addr = throw_excpt? AllocateHeap(aligned_size, flags, CURRENT_PC)
: AllocateHeap(aligned_size, flags, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
void* aligned_addr = (void*) align_size_up((intptr_t) real_malloc_addr, alignment);
assert(((uintptr_t) aligned_addr + (uintptr_t) size) <=
((uintptr_t) real_malloc_addr + (uintptr_t) aligned_size),
@@ -365,7 +365,7 @@ void Thread::run() {
#ifdef ASSERT
// Private method to check for dangling thread pointer
void check_for_dangling_thread_pointer(Thread *thread) {
assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
"possibility of dangling Thread pointer");
}
#endif
@@ -517,8 +517,8 @@ class TraceSuspendDebugBits : public StackObj {
ResourceMark rm;
tty->print_cr(
"Failed wait_for_ext_suspend_completion(thread=%s, debug_bits=%x)",
jt->get_thread_name(), *bits);
guarantee(!AssertOnSuspendWaitFailure, "external suspend wait failed");
}
@@ -654,7 +654,7 @@ bool JavaThread::is_ext_suspend_completed(bool called_by_wait, int delay, uint32
// Returns true if the thread is externally suspended and false otherwise.
//
bool JavaThread::wait_for_ext_suspend_completion(int retries, int delay,
uint32_t *bits) {
TraceSuspendDebugBits tsdb(this, true /* is_wait */,
false /* !called_by_wait */, bits);
@@ -759,8 +759,8 @@ bool JavaThread::profile_last_Java_frame(frame* _fr) {
bool gotframe = false;
// self suspension saves needed state.
if (has_last_Java_frame() && _anchor.walkable()) {
*_fr = pd_last_frame();
gotframe = true;
}
return gotframe;
}
@@ -790,7 +790,7 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
} else {
guarantee(res == strong_roots_parity, "Or else what?");
assert(SharedHeap::heap()->workers()->active_workers() > 0,
"Should only fail when parallel.");
"Should only fail when parallel.");
return false;
}
}
@@ -882,38 +882,38 @@ bool Thread::owns_locks_but_compiled_lock() const {
// invoke the vm-thread (i.e., and oop allocation). In that case, we also have to make sure that
// no threads which allow_vm_block's are held
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
// Check if current thread is allowed to block at a safepoint
if (!(_allow_safepoint_count == 0))
fatal("Possible safepoint reached by thread that does not allow it");
if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
fatal("LEAF method calling lock?");
}
// Check if current thread is allowed to block at a safepoint
if (!(_allow_safepoint_count == 0))
fatal("Possible safepoint reached by thread that does not allow it");
if (is_Java_thread() && ((JavaThread*)this)->thread_state() != _thread_in_vm) {
fatal("LEAF method calling lock?");
}
#ifdef ASSERT
if (potential_vm_operation && is_Java_thread()
&& !Universe::is_bootstrapping()) {
// Make sure we do not hold any locks that the VM thread also uses.
// This could potentially lead to deadlocks
for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
// Threads_lock is special, since the safepoint synchronization will not start before this is
// acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
// since it is used to transfer control between JavaThreads and the VMThread
// Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
if ((cur->allow_vm_block() &&
cur != Threads_lock &&
cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation
cur != VMOperationRequest_lock &&
cur != VMOperationQueue_lock) ||
cur->rank() == Mutex::special) {
fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
}
if (potential_vm_operation && is_Java_thread()
&& !Universe::is_bootstrapping()) {
// Make sure we do not hold any locks that the VM thread also uses.
// This could potentially lead to deadlocks
for (Monitor *cur = _owned_locks; cur; cur = cur->next()) {
// Threads_lock is special, since the safepoint synchronization will not start before this is
// acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
// since it is used to transfer control between JavaThreads and the VMThread
// Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
if ((cur->allow_vm_block() &&
cur != Threads_lock &&
cur != Compile_lock && // Temporary: should not be necessary when we get separate compilation
cur != VMOperationRequest_lock &&
cur != VMOperationQueue_lock) ||
cur->rank() == Mutex::special) {
fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
}
}
}
if (GCALotAtAllSafepoints) {
// We could enter a safepoint here and thus have a gc
InterfaceSupport::check_gc_alot();
}
if (GCALotAtAllSafepoints) {
// We could enter a safepoint here and thus have a gc
InterfaceSupport::check_gc_alot();
}
#endif
}
#endif
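The block being re-indented here walks the thread's owned-lock list and aborts if any lock that the VM thread might also block on is still held when a VM operation could be requested. A stripped-down sketch of that walk, with a hypothetical Lock type standing in for HotSpot's Monitor/Mutex:

    #include <cstddef>

    // Hypothetical minimal model of the owned-lock walk above.
    struct Lock {
      const char* name;
      bool        allow_vm_block;  // the VM thread may block on this lock
      Lock*       next;            // intrusive list of locks this thread owns
    };

    // Returns the first held lock that could deadlock a VM operation, or
    // NULL if it is safe to request one. 'exempt' mirrors the special cases
    // (Threads_lock and friends) that the real check excludes.
    Lock* find_unsafe_lock(Lock* owned, Lock* const exempt[], int n_exempt) {
      for (Lock* cur = owned; cur != NULL; cur = cur->next) {
        bool exempted = false;
        for (int i = 0; i < n_exempt; i++) {
          if (cur == exempt[i]) { exempted = true; break; }
        }
        if (cur->allow_vm_block && !exempted) return cur;
      }
      return NULL;
    }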
@@ -947,7 +947,7 @@ bool Thread::is_lock_owned(address adr) const {
}
bool Thread::set_as_starting_thread() {
// NOTE: this must be called inside the main thread.
// NOTE: this must be called inside the main thread.
return os::create_main_thread((JavaThread*)this);
}
@@ -1004,12 +1004,12 @@ static oop create_initial_thread(Handle thread_group, JavaThread* thread, TRAPS)
JavaValue result(T_VOID);
JavaCalls::call_special(&result, thread_oop,
klass,
vmSymbols::object_initializer_name(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
CHECK_NULL);
klass,
vmSymbols::object_initializer_name(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
CHECK_NULL);
return thread_oop();
}
@@ -1019,7 +1019,7 @@ static void call_initializeSystemClass(TRAPS) {
JavaValue result(T_VOID);
JavaCalls::call_static(&result, klass, vmSymbols::initializeSystemClass_name(),
vmSymbols::void_method_signature(), CHECK);
vmSymbols::void_method_signature(), CHECK);
}
char java_runtime_name[128] = "";
@@ -1028,7 +1028,7 @@ char java_runtime_version[128] = "";
// extract the JRE name from sun.misc.Version.java_runtime_name
static const char* get_java_runtime_name(TRAPS) {
Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
Handle(), Handle(), CHECK_AND_CLEAR_NULL);
Handle(), Handle(), CHECK_AND_CLEAR_NULL);
fieldDescriptor fd;
bool found = k != NULL &&
InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_name_name(),
@@ -1049,7 +1049,7 @@ static const char* get_java_runtime_name(TRAPS) {
// extract the JRE version from sun.misc.Version.java_runtime_version
static const char* get_java_runtime_version(TRAPS) {
Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
Handle(), Handle(), CHECK_AND_CLEAR_NULL);
Handle(), Handle(), CHECK_AND_CLEAR_NULL);
fieldDescriptor fd;
bool found = k != NULL &&
InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(),
@@ -1075,8 +1075,8 @@ static void call_postVMInitHook(TRAPS) {
if (klass.not_null()) {
JavaValue result(T_VOID);
JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
vmSymbols::void_method_signature(),
CHECK);
vmSymbols::void_method_signature(),
CHECK);
}
}
@@ -1146,7 +1146,7 @@ void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool
if (daemon) {
java_lang_Thread::set_daemon(thread_oop());
java_lang_Thread::set_daemon(thread_oop());
}
if (HAS_PENDING_EXCEPTION) {
@@ -1157,12 +1157,12 @@ void JavaThread::allocate_threadObj(Handle thread_group, char* thread_name, bool
Handle threadObj(this, this->threadObj());
JavaCalls::call_special(&result,
thread_group,
group,
vmSymbols::add_method_name(),
vmSymbols::thread_void_signature(),
threadObj, // Arg 1
THREAD);
thread_group,
group,
vmSymbols::add_method_name(),
vmSymbols::thread_void_signature(),
threadObj, // Arg 1
THREAD);
}
@@ -1246,25 +1246,25 @@ int WatcherThread::sleep() const {
jlong now = os::javaTimeNanos();
if (remaining == 0) {
// if we didn't have any tasks we could have waited for a long time
// consider the time_slept zero and reset time_before_loop
time_slept = 0;
time_before_loop = now;
// if we didn't have any tasks we could have waited for a long time
// consider the time_slept zero and reset time_before_loop
time_slept = 0;
time_before_loop = now;
} else {
// need to recalculate since we might have new tasks in _tasks
time_slept = (int) ((now - time_before_loop) / 1000000);
// need to recalculate since we might have new tasks in _tasks
time_slept = (int) ((now - time_before_loop) / 1000000);
}
// Change to task list or spurious wakeup of some kind
if (timedout || _should_terminate) {
break;
break;
}
remaining = PeriodicTask::time_to_wait();
if (remaining == 0) {
// Last task was just disenrolled so loop around and wait until
// another task gets enrolled
continue;
// Last task was just disenrolled so loop around and wait until
// another task gets enrolled
continue;
}
remaining -= time_slept;
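The sleep accounting in this hunk has one subtlety worth spelling out: time spent blocked only counts against the remaining wait if at least one periodic task was enrolled before the wait; otherwise the clock is reset, because with no tasks the wait could have been arbitrarily long. A hedged sketch of that loop, with now_ms/interval_ms as hypothetical stand-ins for os::javaTimeNanos() and PeriodicTask::time_to_wait():

    // Returns the time actually slept once a full interval has elapsed.
    long watcher_sleep_sketch(long (*now_ms)(), long (*interval_ms)()) {
      long remaining = interval_ms();
      long before = now_ms();
      long slept = 0;
      for (;;) {
        // ... block on a timed condition wait for 'remaining' ms (elided) ...
        long now = now_ms();
        if (remaining == 0) {
          // No tasks were enrolled: treat the sleep as zero and restart.
          slept = 0;
          before = now;
        } else {
          // Tasks may have been added while waiting; recompute elapsed time.
          slept = now - before;
        }
        remaining = interval_ms();
        if (remaining == 0) continue;  // last task disenrolled; wait again
        remaining -= slept;
        if (remaining <= 0) return slept;
      }
    }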
@@ -1302,13 +1302,13 @@ void WatcherThread::run() {
for (;;) {
if (!ShowMessageBoxOnError
&& (OnError == NULL || OnError[0] == '\0')
&& Arguments::abort_hook() == NULL) {
os::sleep(this, 2 * 60 * 1000, false);
fdStream err(defaultStream::output_fd());
err.print_raw_cr("# [ timer expired, abort... ]");
// skip atexit/vm_exit/vm_abort hooks
os::die();
&& (OnError == NULL || OnError[0] == '\0')
&& Arguments::abort_hook() == NULL) {
os::sleep(this, 2 * 60 * 1000, false);
fdStream err(defaultStream::output_fd());
err.print_raw_cr("# [ timer expired, abort... ]");
// skip atexit/vm_exit/vm_abort hooks
os::die();
}
// Wake up 5 seconds later, the fatal handler may reset OnError or
@@ -1486,10 +1486,10 @@ DirtyCardQueueSet JavaThread::_dirty_card_queue_set;
#endif // INCLUDE_ALL_GCS
JavaThread::JavaThread(bool is_attaching_via_jni) :
Thread()
Thread()
#if INCLUDE_ALL_GCS
, _satb_mark_queue(&_satb_mark_queue_set),
_dirty_card_queue(&_dirty_card_queue_set)
, _satb_mark_queue(&_satb_mark_queue_set),
_dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
initialize();
@@ -1543,10 +1543,10 @@ void JavaThread::block_if_vm_exited() {
static void compiler_thread_entry(JavaThread* thread, TRAPS);
JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
Thread()
Thread()
#if INCLUDE_ALL_GCS
, _satb_mark_queue(&_satb_mark_queue_set),
_dirty_card_queue(&_dirty_card_queue_set)
, _satb_mark_queue(&_satb_mark_queue_set),
_dirty_card_queue(&_dirty_card_queue_set)
#endif // INCLUDE_ALL_GCS
{
if (TraceThreadEvents) {
@@ -1575,7 +1575,7 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
JavaThread::~JavaThread() {
if (TraceThreadEvents) {
tty->print_cr("terminate thread %p", this);
tty->print_cr("terminate thread %p", this);
}
// JSR166 -- return the parker to the free list
@@ -1649,8 +1649,8 @@ void JavaThread::run() {
EventThreadStart event;
if (event.should_commit()) {
event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
event.commit();
event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
event.commit();
}
// We call another function to do the rest so we are sure that the stack addresses used
@@ -1742,10 +1742,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm(this);
jio_fprintf(defaultStream::error_stream(),
"\nException: %s thrown from the UncaughtExceptionHandler"
" in thread \"%s\"\n",
pending_exception()->klass()->external_name(),
get_thread_name());
"\nException: %s thrown from the UncaughtExceptionHandler"
" in thread \"%s\"\n",
pending_exception()->klass()->external_name(),
get_thread_name());
CLEAR_PENDING_EXCEPTION;
}
}
@@ -1754,8 +1754,8 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// from java_lang_Thread object
EventThreadEnd event;
if (event.should_commit()) {
event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
event.commit();
event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
event.commit();
}
// Call after last event on thread
@@ -1771,10 +1771,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
JavaValue result(T_VOID);
KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbols::exit_method_name(),
vmSymbols::void_method_signature(),
THREAD);
threadObj, thread_klass,
vmSymbols::exit_method_name(),
vmSymbols::void_method_signature(),
THREAD);
CLEAR_PENDING_EXCEPTION;
}
}
@@ -2062,22 +2062,22 @@ void JavaThread::check_and_handle_async_exceptions(bool check_unsafe_error) {
condition = _no_async_condition; // done
switch (thread_state()) {
case _thread_in_vm:
{
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
{
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
case _thread_in_native:
{
ThreadInVMfromNative tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
{
ThreadInVMfromNative tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in an unsafe memory access operation");
}
case _thread_in_Java:
{
ThreadInVMfromJava tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
}
{
ThreadInVMfromJava tiv(this);
JavaThread* THREAD = this;
THROW_MSG(vmSymbols::java_lang_InternalError(), "a fault occurred in a recent unsafe memory access operation in compiled Java code");
}
default:
ShouldNotReachHere();
}
@@ -2170,8 +2170,8 @@ void JavaThread::send_thread_stop(oop java_throwable) {
set_pending_async_exception(java_throwable);
if (TraceExceptions) {
ResourceMark rm;
tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
ResourceMark rm;
tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
}
// for AbortVMOnException flag
NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
@@ -2198,7 +2198,7 @@ void JavaThread::send_thread_stop(oop java_throwable) {
void JavaThread::java_suspend() {
{ MutexLocker mu(Threads_lock);
if (!Threads::includes(this) || is_exiting() || this->threadObj() == NULL) {
return;
return;
}
}
@@ -2241,18 +2241,18 @@ int JavaThread::java_suspend_self() {
// we are in the process of exiting so don't suspend
if (is_exiting()) {
clear_external_suspend();
return ret;
clear_external_suspend();
return ret;
}
assert(_anchor.walkable() ||
(is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
"must have walkable stack");
(is_Java_thread() && !((JavaThread*)this)->has_last_Java_frame()),
"must have walkable stack");
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
assert(!this->is_ext_suspended(),
"a thread trying to self-suspend should not already be suspended");
"a thread trying to self-suspend should not already be suspended");
if (this->is_suspend_equivalent()) {
// If we are self-suspending as a result of the lifting of a
@@ -2289,12 +2289,12 @@ int JavaThread::java_suspend_self() {
// hence doesn't need protection from concurrent access at this stage
void JavaThread::verify_not_published() {
if (!Threads_lock->owned_by_self()) {
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
assert(!Threads::includes(this),
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
assert(!Threads::includes(this),
"java thread shouldn't have been published yet!");
}
else {
assert(!Threads::includes(this),
assert(!Threads::includes(this),
"java thread shouldn't have been published yet!");
}
}
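verify_not_published() is an instance of a common pattern: acquire a lock only when the caller does not already own it, and run the same assertion on both paths. A generic sketch of the pattern using a hypothetical ownership-tracking mutex (not HotSpot's Mutex API, which tracks ownership internally):

    #include <cassert>
    #include <mutex>
    #include <thread>

    struct OwnedMutex {
      std::mutex      m;
      std::thread::id owner;  // unsynchronized read; fine for a sketch

      bool owned_by_self() const { return owner == std::this_thread::get_id(); }
      void lock()   { m.lock(); owner = std::this_thread::get_id(); }
      void unlock() { owner = std::thread::id(); m.unlock(); }
    };

    // Run 'check' under 'lock', acquiring it only if not already held.
    void verify_with_lock(OwnedMutex& lock, bool (*check)()) {
      if (!lock.owned_by_self()) {
        lock.lock();
        assert(check() && "check failed under freshly acquired lock");
        lock.unlock();
      } else {
        assert(check() && "check failed under already-held lock");
      }
    }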
@@ -2474,7 +2474,7 @@ void JavaThread::remove_stack_guard_pages() {
if (os::unguard_memory((char *) low_addr, len)) {
_stack_guard_state = stack_guard_unused;
} else {
warning("Attempt to unprotect stack guard pages failed.");
warning("Attempt to unprotect stack guard pages failed.");
}
}
}
@@ -2640,7 +2640,7 @@ void JavaThread::deoptimized_wrt_marked_nmethods() {
// the given JavaThread in its _processed_thread field.
class RememberProcessedThread: public StackObj {
NamedThread* _cur_thr;
public:
public:
RememberProcessedThread(JavaThread* jthr) {
Thread* thread = Thread::current();
if (thread->is_Named_thread()) {
@@ -2669,7 +2669,7 @@ void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf)
Thread::oops_do(f, cld_f, cf);
assert((!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
// Record JavaThread to GC thread
@@ -2729,7 +2729,7 @@ void JavaThread::nmethods_do(CodeBlobClosure* cf) {
Thread::nmethods_do(cf); // (super method is a no-op)
assert((!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
if (has_last_Java_frame()) {
// Traverse the execution stack
@@ -2809,7 +2809,7 @@ void JavaThread::print_on_error(outputStream* st, char *buf, int buflen) const {
st->print("JavaThread \"%s\"", get_thread_name_string(buf, buflen));
oop thread_obj = threadObj();
if (thread_obj != NULL) {
if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
if (java_lang_Thread::is_daemon(thread_obj)) st->print(" daemon");
}
st->print(" [");
st->print("%s", _get_thread_state_name(_thread_state));
@@ -2853,7 +2853,7 @@ const char* JavaThread::get_thread_name() const {
}
}
#endif // ASSERT
return get_thread_name_string();
return get_thread_name_string();
}
// Returns a non-NULL representation of this thread's name, or a suitable
@@ -2950,7 +2950,7 @@ void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
Handle thread_oop(Thread::current(),
JNIHandles::resolve_non_null(jni_thread));
assert(InstanceKlass::cast(thread_oop->klass())->is_linked(),
"must be initialized");
"must be initialized");
set_threadObj(thread_oop());
java_lang_Thread::set_thread(thread_oop(), this);
@@ -3383,7 +3383,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(
"Failed necessary internal allocation. Out of swap space");
"Failed necessary internal allocation. Out of swap space");
delete main_thread;
*canTryAgain = false; // don't let caller call JNI_CreateJavaVM again
return JNI_ENOMEM;
@@ -3583,17 +3583,17 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
{
MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
// Make sure the watcher thread can be started by WatcherThread::start()
// or by dynamic enrollment.
WatcherThread::make_startable();
// Start up the WatcherThread if there are any periodic tasks
// NOTE: All PeriodicTasks should be registered by now. If they
// aren't, late joiners might appear to start slowly (we might
// take a while to process their first tick).
if (PeriodicTask::num_tasks() > 0) {
WatcherThread::start();
}
MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
// Make sure the watcher thread can be started by WatcherThread::start()
// or by dynamic enrollment.
WatcherThread::make_startable();
// Start up the WatcherThread if there are any periodic tasks
// NOTE: All PeriodicTasks should be registered by now. If they
// aren't, late joiners might appear to start slowly (we might
// take a while to process their first tick).
if (PeriodicTask::num_tasks() > 0) {
WatcherThread::start();
}
}
// Give os specific code one last chance to start
@@ -3749,10 +3749,10 @@ void Threads::shutdown_vm_agents() {
// Find the Agent_OnUnload function.
Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
os::find_agent_function(agent,
false,
on_unload_symbols,
num_symbol_entries));
os::find_agent_function(agent,
false,
on_unload_symbols,
num_symbol_entries));
// Invoke the Agent_OnUnload function
if (unload_entry != NULL) {
@@ -4060,7 +4060,7 @@ void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBl
bool is_par = sh->n_par_threads() > 0;
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
int cp = SharedHeap::heap()->strong_roots_parity();
ALL_JAVA_THREADS(p) {
if (p->claim_oops_do(is_par, cp)) {
@@ -4113,9 +4113,9 @@ void Threads::deoptimized_wrt_marked_nmethods() {
// Get count Java threads that are waiting to enter the specified monitor.
GrowableArray<JavaThread*>* Threads::get_pending_threads(int count,
address monitor, bool doLock) {
address monitor, bool doLock) {
assert(doLock || SafepointSynchronize::is_at_safepoint(),
"must grab Threads_lock or be at safepoint");
"must grab Threads_lock or be at safepoint");
GrowableArray<JavaThread*>* result = new GrowableArray<JavaThread*>(count);
int i = 0;
@@ -4181,10 +4181,10 @@ void Threads::print_on(outputStream* st, bool print_stacks, bool internal_format
st->print_cr("%s", os::local_time_string(buf, sizeof(buf)));
st->print_cr("Full thread dump %s (%s %s):",
Abstract_VM_Version::vm_name(),
Abstract_VM_Version::vm_release(),
Abstract_VM_Version::vm_info_string()
);
Abstract_VM_Version::vm_name(),
Abstract_VM_Version::vm_release(),
Abstract_VM_Version::vm_info_string()
);
st->cr();
#if INCLUDE_ALL_GCS
@@ -4303,7 +4303,7 @@ typedef volatile int SpinLockT;
void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
if (Atomic::cmpxchg (1, adr, 0) == 0) {
return; // normal fast-path return
return; // normal fast-path return
}
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
@@ -4311,20 +4311,20 @@ void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
int ctr = 0;
int Yields = 0;
for (;;) {
while (*adr != 0) {
++ctr;
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
os::naked_short_sleep(1);
} else {
os::naked_yield();
++Yields;
}
while (*adr != 0) {
++ctr;
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
if (Yields > 5) {
os::naked_short_sleep(1);
} else {
SpinPause();
os::naked_yield();
++Yields;
}
}
if (Atomic::cmpxchg(1, adr, 0) == 0) return;
} else {
SpinPause();
}
}
if (Atomic::cmpxchg(1, adr, 0) == 0) return;
}
}
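Both sides of the SpinAcquire hunk implement the same escalation policy: a CAS fast path, then a spin loop that mostly executes SpinPause(), periodically yields, and finally falls back to a 1 ms sleep once yielding stops helping. A portable sketch of that policy, with std::atomic standing in for HotSpot's Atomic/os layer:

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Hedged sketch of the Spin/Yield/Block escalation in SpinAcquire.
    // 'lock' is 0 when free, 1 when held.
    void spin_acquire_sketch(std::atomic<int>& lock) {
      int expected = 0;
      if (lock.compare_exchange_strong(expected, 1)) return;  // fast path

      int ctr = 0, yields = 0;
      for (;;) {
        while (lock.load(std::memory_order_relaxed) != 0) {
          ++ctr;
          if ((ctr & 0xFFF) == 0) {           // periodically escalate
            if (yields > 5) {
              std::this_thread::sleep_for(std::chrono::milliseconds(1));
            } else {
              std::this_thread::yield();
              ++yields;
            }
          }
          // else: keep spinning (HotSpot issues SpinPause() here)
        }
        expected = 0;
        if (lock.compare_exchange_strong(expected, 1)) return;
      }
    }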
@@ -4401,45 +4401,45 @@ void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
intptr_t w = Atomic::cmpxchg_ptr(LOCKBIT, Lock, 0);
if (w == 0) return;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return;
return;
}
TEVENT(muxAcquire - Contention);
ParkEvent * const Self = Thread::current()->_MuxEvent;
assert((intptr_t(Self) & LOCKBIT) == 0, "invariant");
for (;;) {
int its = (os::is_MP() ? 100 : 0) + 1;
int its = (os::is_MP() ? 100 : 0) + 1;
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock;
if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
return;
}
}
Self->reset();
Self->OnList = intptr_t(Lock);
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
Self->OnList = 0; // hygiene - allows stronger asserts
return;
}
}
Self->reset();
Self->OnList = intptr_t(Lock);
// The following fence() isn't _strictly necessary as the subsequent
// CAS() both serializes execution and ratifies the fetched *Lock value.
OrderAccess::fence();
for (;;) {
w = *Lock;
if ((w & LOCKBIT) == 0) {
if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
Self->OnList = 0; // hygiene - allows stronger asserts
return;
}
continue; // Interference -- *Lock changed -- Just retry
}
assert(w & LOCKBIT, "invariant");
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
}
continue; // Interference -- *Lock changed -- Just retry
}
assert(w & LOCKBIT, "invariant");
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
if (Atomic::cmpxchg_ptr(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
}
while (Self->OnList != 0) {
Self->park();
}
while (Self->OnList != 0) {
Self->park();
}
}
}
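muxAcquire packs the entire lock state into a single word: bit 0 is the lock, and the remaining bits hold a LIFO list of parked waiters. A condensed sketch of that spin-then-park protocol, assuming C++20 and using std::binary_semaphore as a stand-in for ParkEvent (the matching release, which pops a waiter and signals it, is elided):

    #include <atomic>
    #include <cstdint>
    #include <semaphore>  // C++20

    struct Waiter {  // object alignment keeps bit 0 of &Waiter clear
      std::binary_semaphore sem{0};
      Waiter* next = nullptr;
    };
    constexpr uintptr_t LOCKBIT = 1;

    void mux_acquire_sketch(std::atomic<uintptr_t>& word, Waiter& self) {
      uintptr_t w = 0;
      if (word.compare_exchange_strong(w, LOCKBIT)) return;  // uncontended
      for (;;) {
        // Optional bounded spin before parking.
        for (int its = 100; its >= 0; --its) {
          w = word.load();
          if ((w & LOCKBIT) == 0 &&
              word.compare_exchange_strong(w, w | LOCKBIT)) return;
        }
        // Push ourselves onto the waiter stack, then park.
        for (;;) {
          w = word.load();
          if ((w & LOCKBIT) == 0) {
            if (word.compare_exchange_strong(w, w | LOCKBIT)) return;
            continue;  // interference -- *word changed -- just retry
          }
          self.next = (Waiter*)(w & ~LOCKBIT);
          if (word.compare_exchange_strong(w, (uintptr_t)&self | LOCKBIT)) break;
        }
        self.sem.acquire();  // parked until the releaser signals us; then retry
      }
    }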

View file

@@ -115,7 +115,7 @@ class Thread: public ThreadShadow {
void operator delete(void* p);
protected:
static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
static void* allocate(size_t size, bool throw_excpt, MEMFLAGS flags = mtThread);
private:
// ***************************************************************
@@ -225,10 +225,10 @@ class Thread: public ThreadShadow {
// claimed as a task.
jint _oops_do_parity;
public:
void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
HandleMark* last_handle_mark() const { return _last_handle_mark; }
private:
public:
void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; }
HandleMark* last_handle_mark() const { return _last_handle_mark; }
private:
// debug support for checking if code does allow safepoints or not
// GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on
@@ -445,9 +445,9 @@ class Thread: public ThreadShadow {
virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
// Handles the parallel case for the method below.
private:
private:
bool claim_oops_do_par_case(int collection_parity);
public:
public:
// Requires that "collection_parity" is that of the current roots
// iteration. If "is_par" is false, sets the parity of "this" to
// "collection_parity", and returns "true". If "is_par" is true,
@@ -664,9 +664,9 @@ class NamedThread: public Thread {
// Worker threads are named and have an id of an assigned work.
class WorkerThread: public NamedThread {
private:
private:
uint _id;
public:
public:
WorkerThread() : _id(0) { }
virtual bool is_Worker_thread() const { return true; }
@@ -844,7 +844,7 @@ class JavaThread: public Thread {
// handlers thread is in
volatile bool _doing_unsafe_access; // Thread may fault due to unsafe access
bool _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
// never locked) when throwing an exception. Used by interpreter only.
// never locked) when throwing an exception. Used by interpreter only.
// JNI attach states:
enum JNIAttachStates {
@@ -898,11 +898,11 @@ class JavaThread: public Thread {
#ifndef PRODUCT
int _jmp_ring_index;
struct {
// We use intptr_t instead of address so debugger doesn't try and display strings
intptr_t _target;
intptr_t _instruction;
const char* _file;
int _line;
// We use intptr_t instead of address so debugger doesn't try and display strings
intptr_t _target;
intptr_t _instruction;
const char* _file;
int _line;
} _jmp_ring[jump_ring_buffer_size];
#endif /* PRODUCT */
@@ -1113,7 +1113,7 @@ class JavaThread: public Thread {
// when a suspend equivalent condition lifts.
bool handle_special_suspend_equivalent_condition() {
assert(is_suspend_equivalent(),
"should only be called in a suspend equivalence condition");
"should only be called in a suspend equivalence condition");
MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
bool ret = is_external_suspend();
if (!ret) {
@@ -1339,10 +1339,10 @@ class JavaThread: public Thread {
// Only return NULL if thread is off the thread list; starting to
// exit should not return NULL.
if (thread_from_jni_env->is_terminated()) {
thread_from_jni_env->block_if_vm_exited();
return NULL;
thread_from_jni_env->block_if_vm_exited();
return NULL;
} else {
return thread_from_jni_env;
return thread_from_jni_env;
}
}
@@ -1352,12 +1352,12 @@ class JavaThread: public Thread {
void enter_critical() { assert(Thread::current() == this ||
Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
"this must be current thread or synchronizing");
_jni_active_critical++; }
_jni_active_critical++; }
void exit_critical() { assert(Thread::current() == this,
"this must be current thread");
_jni_active_critical--;
assert(_jni_active_critical >= 0,
"JNI critical nesting problem?"); }
_jni_active_critical--;
assert(_jni_active_critical >= 0,
"JNI critical nesting problem?"); }
// Checked JNI, is the programmer required to check for exceptions, specify which function name
bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
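The enter_critical/exit_critical pair above is just a nesting counter with sanity asserts around JNI critical regions. A tiny sketch of the invariant it maintains (hypothetical model, not the JavaThread field itself):

    #include <cassert>

    // Hypothetical model of the _jni_active_critical counter above.
    struct CriticalNesting {
      int depth = 0;
      void enter() { ++depth; }
      void exit()  { --depth; assert(depth >= 0 && "JNI critical nesting problem?"); }
      bool in_critical() const { return depth > 0; }
    };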
@@ -1411,10 +1411,10 @@ class JavaThread: public Thread {
void print_on_error(outputStream* st, char* buf, int buflen) const;
void verify();
const char* get_thread_name() const;
private:
private:
// factor out low-level mechanics for use in both normal and error cases
const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
public:
public:
const char* get_threadgroup_name() const;
const char* get_parent_name() const;
@@ -1456,20 +1456,20 @@ public:
// Profiling operation (see fprofile.cpp)
public:
bool profile_last_Java_frame(frame* fr);
bool profile_last_Java_frame(frame* fr);
private:
ThreadProfiler* _thread_profiler;
ThreadProfiler* _thread_profiler;
private:
friend class FlatProfiler; // uses both [gs]et_thread_profiler.
friend class FlatProfilerTask; // uses get_thread_profiler.
friend class ThreadProfilerMark; // uses get_thread_profiler.
ThreadProfiler* get_thread_profiler() { return _thread_profiler; }
ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
ThreadProfiler* result = _thread_profiler;
_thread_profiler = tp;
return result;
}
friend class FlatProfiler; // uses both [gs]et_thread_profiler.
friend class FlatProfilerTask; // uses get_thread_profiler.
friend class ThreadProfilerMark; // uses get_thread_profiler.
ThreadProfiler* get_thread_profiler() { return _thread_profiler; }
ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
ThreadProfiler* result = _thread_profiler;
_thread_profiler = tp;
return result;
}
public:
// Returns the running thread as a JavaThread
@@ -1692,15 +1692,15 @@ public:
// JSR166 per-thread parker
private:
private:
Parker* _parker;
public:
public:
Parker* parker() { return _parker; }
// Biased locking support
private:
private:
GrowableArray<MonitorInfo*>* _cached_monitor_info;
public:
public:
GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; }
void set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; }
@@ -1708,12 +1708,12 @@ public:
bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
inline void set_done_attaching_via_jni();
private:
private:
// This field is used to determine if a thread has claimed
// a par_id: it is UINT_MAX if the thread has not claimed a par_id;
// otherwise its value is the par_id that has been claimed.
uint _claimed_par_id;
public:
public:
uint get_claimed_par_id() { return _claimed_par_id; }
void set_claimed_par_id(uint id) { _claimed_par_id = id; }
};
@@ -1782,9 +1782,9 @@ class CompilerThread : public JavaThread {
void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
#ifndef PRODUCT
private:
private:
IdealGraphPrinter *_ideal_graph_printer;
public:
public:
IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; }
void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
#endif
@@ -1885,13 +1885,13 @@ class Threads: AllStatic {
// is true, then Threads_lock is grabbed as needed. Otherwise, the
// VM needs to be at a safepoint.
static GrowableArray<JavaThread*>* get_pending_threads(int count,
address monitor, bool doLock);
address monitor, bool doLock);
// Get owning Java thread from the monitor's owner field. If doLock
// is true, then Threads_lock is grabbed as needed. Otherwise, the
// VM needs to be at a safepoint.
static JavaThread *owning_thread_from_monitor_owner(address owner,
bool doLock);
bool doLock);
// Number of threads on the active threads list
static int number_of_threads() { return _number_of_threads; }
@@ -1911,9 +1911,9 @@ class ThreadClosure: public StackObj {
};
class SignalHandlerMark: public StackObj {
private:
private:
Thread* _thread;
public:
public:
SignalHandlerMark(Thread* t) {
_thread = t;
if (_thread) _thread->enter_signal_handler();