Jesper Wilhelmsson 2016-02-29 15:24:52 +01:00
commit 506a90ff7b
52 changed files with 808 additions and 772 deletions

View file

@ -36,6 +36,7 @@
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "logging/log.hpp"
#include "libo4.hpp"
#include "libperfstat_aix.hpp"
#include "libodm_aix.hpp"
@ -791,13 +792,8 @@ static void *java_start(Thread *thread) {
const pthread_t pthread_id = ::pthread_self();
const tid_t kernel_thread_id = ::thread_self();
trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
", stack %p ... %p, stacksize 0x%IX (%IB)",
pthread_id, kernel_thread_id,
thread->stack_end(),
thread->stack_base(),
thread->stack_size(),
thread->stack_size());
log_info(os, thread)("Thread is alive (pthread id " UINTX_FORMAT ", tid " UINTX_FORMAT ")",
(uintx) pthread_id, (uintx) kernel_thread_id);
// Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
// by the pthread library). In rare cases, this may not be the case, e.g. when third-party
@ -805,7 +801,7 @@ static void *java_start(Thread *thread) {
// guard pages on those stacks, because the stacks may reside in memory which is not
// protectable (shmated).
if (thread->stack_base() > ::sbrk(0)) {
trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
log_warning(os, thread)("Thread " UINTX_FORMAT ": stack not in data segment.", (uintx)pthread_id);
}
// Try to randomize the cache line index of hot stack frames.
@ -839,8 +835,8 @@ static void *java_start(Thread *thread) {
// Call one more level start routine.
thread->run();
trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
pthread_id, kernel_thread_id);
log_info(os, thread)("Thread finished (pthread id " UINTX_FORMAT ", tid " UINTX_FORMAT ").",
(uintx) pthread_id, (uintx) kernel_thread_id);
return 0;
}
@ -908,20 +904,19 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
char buf[64];
if (ret == 0) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
if (ret == 0) {
trcVerbose("Created New Thread : pthread-id %u", tid);
} else {
if (os::Aix::on_pase()) {
// QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
// using QSH. Otherwise pthread_create fails with errno=11.
trcVerbose("(Please make sure you set the environment variable "
"QIBM_MULTI_THREADED=Y before running this program.)");
}
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("pthread_create()");
}
if (ret != 0) {
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
@ -958,13 +953,6 @@ bool os::create_attached_thread(JavaThread* thread) {
const pthread_t pthread_id = ::pthread_self();
const tid_t kernel_thread_id = ::thread_self();
trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
pthread_id, kernel_thread_id,
thread->stack_end(),
thread->stack_base(),
thread->stack_size(),
thread->stack_size());
// OSThread::thread_id is the pthread id.
osthread->set_thread_id(pthread_id);
@ -990,6 +978,9 @@ bool os::create_attached_thread(JavaThread* thread) {
// and save the caller's signal mask
os::Aix::hotspot_sigmask(thread);
log_info(os, thread)("Thread attached (pthread id " UINTX_FORMAT ", tid " UINTX_FORMAT ")",
(uintx) pthread_id, (uintx) kernel_thread_id);
return true;
}
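Taken together, these call sites route thread lifecycle events through the os+thread tag set of unified logging. As an illustration only (not output from this commit), a VM started with -Xlog:os+thread=info could be expected to emit lines shaped roughly like the following; timestamps and ids are made up:

  [0.105s][info][os,thread] Thread started (pthread id: 12345, attributes: stacksize: 1024k, guardsize: 4k, joinable).
  [0.106s][info][os,thread] Thread is alive (pthread id 12345, tid 67890)
  [2.412s][info][os,thread] Thread finished (pthread id 12345, tid 67890).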

View file

@ -32,6 +32,7 @@
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_bsd.inline.hpp"
@ -681,6 +682,9 @@ static void *java_start(Thread *thread) {
osthread->set_thread_id(os::Bsd::gettid());
log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ".",
os::current_thread_id(), (uintx) pthread_self());
#ifdef __APPLE__
uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
guarantee(unique_thread_id != 0, "unique thread id was not found");
@ -716,6 +720,9 @@ static void *java_start(Thread *thread) {
// call one more level start routine
thread->run();
log_info(os, thread)("Thread finished (tid " UINTX_FORMAT ", pthread id " UINTX_FORMAT ").",
os::current_thread_id(), (uintx) pthread_self());
return 0;
}
@ -776,12 +783,18 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
char buf[64];
if (ret == 0) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
if (ret != 0) {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("pthread_create()");
}
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
@ -858,6 +871,9 @@ bool os::create_attached_thread(JavaThread* thread) {
// and save the caller's signal mask
os::Bsd::hotspot_sigmask(thread);
log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ".",
os::current_thread_id(), (uintx) pthread_self());
return true;
}

View file

@ -662,6 +662,9 @@ static void *java_start(Thread *thread) {
osthread->set_thread_id(os::current_thread_id());
log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
os::current_thread_id(), (uintx) pthread_self());
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
@ -691,6 +694,9 @@ static void *java_start(Thread *thread) {
// call one more level start routine
thread->run();
log_info(os, thread)("Thread finished (tid " UINTX_FORMAT ", pthread id " UINTX_FORMAT ").",
os::current_thread_id(), (uintx) pthread_self());
return 0;
}
@ -756,12 +762,18 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
char buf[64];
if (ret == 0) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
if (ret != 0) {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("pthread_create()");
}
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
@ -858,6 +870,9 @@ bool os::create_attached_thread(JavaThread* thread) {
// and save the caller's signal mask
os::Linux::hotspot_sigmask(thread);
log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
os::current_thread_id(), (uintx) pthread_self());
return true;
}

View file

@ -1071,6 +1071,19 @@ void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
#endif
}
char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
size_t stack_size = 0;
size_t guard_size = 0;
int detachstate = 0;
pthread_attr_getstacksize(attr, &stack_size);
pthread_attr_getguardsize(attr, &guard_size);
pthread_attr_getdetachstate(attr, &detachstate);
jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
stack_size / 1024, guard_size / 1024,
(detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
return buf;
}
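As a standalone illustration of the helper above (not part of this change), the same formatting can be reproduced outside HotSpot, with plain snprintf standing in for jio_snprintf and SIZE_FORMAT:

  #include <pthread.h>
  #include <stdio.h>

  // Minimal sketch of describe_pthread_attr(): render stack size, guard size
  // and detach state of a pthread_attr_t as a short one-line string.
  static char* describe_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
    size_t stack_size = 0;
    size_t guard_size = 0;
    int detachstate = 0;
    pthread_attr_getstacksize(attr, &stack_size);
    pthread_attr_getguardsize(attr, &guard_size);
    pthread_attr_getdetachstate(attr, &detachstate);
    snprintf(buf, buflen, "stacksize: %zuk, guardsize: %zuk, %s",
             stack_size / 1024, guard_size / 1024,
             (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
    return buf;
  }

  int main() {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 512 * 1024);
    char buf[64];
    printf("%s\n", describe_attr(buf, sizeof(buf), &attr));  // e.g. "stacksize: 512k, guardsize: 4k, joinable"
    pthread_attr_destroy(&attr);
    return 0;
  }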
os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");

View file

@ -76,6 +76,11 @@ public:
static address ucontext_get_pc(const ucontext_t* ctx);
// Set PC into context. Needed for continuation after signal.
static void ucontext_set_pc(ucontext_t* ctx, address pc);
// Helper function; describes pthread attributes as a short string. The string is
// written to buf with length buflen; buf is returned.
static char* describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr);
};
/*

View file

@ -32,6 +32,7 @@
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_solaris.inline.hpp"
@ -68,6 +69,7 @@
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
@ -736,6 +738,9 @@ extern "C" void* java_start(void* thread_addr) {
osthr->set_lwp_id(_lwp_self()); // Store lwp in case we are bound
thread->_schedctl = (void *) schedctl_init();
log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").",
os::current_thread_id());
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
@ -781,6 +786,8 @@ extern "C" void* java_start(void* thread_addr) {
Atomic::dec(&os::Solaris::_os_thread_count);
}
log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
if (UseDetachedThreads) {
thr_exit(NULL);
ShouldNotReachHere();
@ -853,6 +860,9 @@ bool os::create_attached_thread(JavaThread* thread) {
// and save the caller's signal mask
os::Solaris::hotspot_sigmask(thread);
log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
os::current_thread_id());
return true;
}
@ -879,6 +889,25 @@ bool os::create_main_thread(JavaThread* thread) {
return true;
}
// Helper function to trace thread attributes, similar to os::Posix::describe_pthread_attr()
static char* describe_thr_create_attributes(char* buf, size_t buflen,
size_t stacksize, long flags)
{
stringStream ss(buf, buflen);
ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
ss.print("flags: ");
#define PRINT_FLAG(f) if (flags & f) ss.print( XSTR(f) " ");
#define ALL(X) \
X(THR_SUSPENDED) \
X(THR_DETACHED) \
X(THR_BOUND) \
X(THR_NEW_LWP) \
X(THR_DAEMON)
ALL(PRINT_FLAG)
#undef ALL
#undef PRINT_FLAG
return buf;
}
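The PRINT_FLAG/ALL pair above is the X-macro idiom: ALL(X) applies X once per listed flag, and XSTR (from utilities/macros.hpp, included by this change) stringifies the flag name. A standalone sketch of the pattern, using hypothetical flag values in place of the Solaris THR_* bits:

  #include <stdio.h>

  #define STR(a)  #a
  #define XSTR(a) STR(a)   // expand the argument first, then stringify

  enum { DEMO_SUSPENDED = 0x1, DEMO_DETACHED = 0x40 };  // made-up flag bits

  static void print_flags(long flags) {
  #define PRINT_FLAG(f) if (flags & f) printf(XSTR(f) " ");
  #define ALL(X) \
    X(DEMO_SUSPENDED) \
    X(DEMO_DETACHED)
    ALL(PRINT_FLAG)  // expands to one 'if' per listed flag
  #undef ALL
  #undef PRINT_FLAG
  }

  int main() {
    print_flags(DEMO_SUSPENDED | DEMO_DETACHED);  // prints: DEMO_SUSPENDED DEMO_DETACHED
    printf("\n");
    return 0;
  }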
bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t stack_size) {
@ -974,10 +1003,17 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
osthread->set_thread_id(-1);
status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
if (status != 0) {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("os::create_thread");
char buf[64];
if (status == 0) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
} else {
log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
strerror(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
}
if (status != 0) {
thread->set_osthread(NULL);
// Need to clean up stuff we've allocated so far
delete osthread;

View file

@ -35,6 +35,7 @@
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_windows.inline.hpp"
@ -71,6 +72,7 @@
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#ifdef _DEBUG
@ -436,6 +438,8 @@ static unsigned __stdcall java_start(Thread* thread) {
res = 20115; // java thread
}
log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
// Install a win32 structured exception handler around every thread created
// by the VM, so the VM can generate an error dump when an exception occurs in a
// non-Java thread (e.g. the VM thread).
@ -446,6 +450,8 @@ static unsigned __stdcall java_start(Thread* thread) {
// Nothing to do.
}
log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
// One less thread is executing
// When the VMThread gets here, the main thread may have already exited
// which frees the CodeHeap containing the Atomic::add code
@ -509,6 +515,10 @@ bool os::create_attached_thread(JavaThread* thread) {
osthread->set_state(RUNNABLE);
thread->set_osthread(osthread);
log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
os::current_thread_id());
return true;
}
@ -530,6 +540,28 @@ bool os::create_main_thread(JavaThread* thread) {
return true;
}
// Helper function to trace _beginthreadex attributes,
// similar to os::Posix::describe_pthread_attr()
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
size_t stacksize, unsigned initflag)
{
stringStream ss(buf, buflen);
if (stacksize == 0) {
ss.print("stacksize: default, ");
} else {
ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
}
ss.print("flags: ");
#define PRINT_FLAG(f) if (initflag & f) ss.print( XSTR(f) " ");
#define ALL(X) \
X(CREATE_SUSPENDED) \
X(STACK_SIZE_PARAM_IS_A_RESERVATION)
ALL(PRINT_FLAG)
#undef ALL
#undef PRINT_FLAG
return buf;
}
// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type,
size_t stack_size) {
@ -596,14 +628,24 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// document because the JVM uses the C runtime library. The good news is that the
// flag appears to work with _beginthreadex() as well.
const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
HANDLE thread_handle =
(HANDLE)_beginthreadex(NULL,
(unsigned)stack_size,
(unsigned (__stdcall *)(void*)) java_start,
thread,
CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
initflag,
&thread_id);
char buf[64];
if (thread_handle != NULL) {
log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
} else {
log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
strerror(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
}
if (thread_handle == NULL) {
// Need to clean up stuff we've allocated so far
CloseHandle(osthread->interrupt_event());
@ -1668,8 +1710,7 @@ void os::win32::print_windows_version(outputStream* st) {
if (is_workstation) {
st->print("10");
} else {
// The server version name of Windows 10 is not known at this time
st->print("%d.%d", major_version, minor_version);
st->print("Server 2016");
}
break;

View file

@ -37,6 +37,7 @@
#include "gc/shared/generation.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/oopFactory.hpp"
@ -417,16 +418,15 @@ bool ClassPathImageEntry::is_jrt() {
#if INCLUDE_CDS
void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
assert(DumpSharedSpaces, "only called at dump time");
tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
tty->print_cr("Hint: enable -Xlog:classpath=info to diagnose the failure");
vm_exit_during_initialization(error, message);
}
#endif
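For reference, the old -XX:+TraceClassPaths output moves to the classpath tag of unified logging, so the hint above corresponds to a command line of the form (illustrative class path and main class):

  java -Xlog:classpath=info -cp app.jar Main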
void ClassLoader::trace_class_path(outputStream* out, const char* msg, const char* name) {
if (!TraceClassPaths) {
return;
}
void ClassLoader::trace_class_path(const char* msg, const char* name) {
if (log_is_enabled(Info, classpath)) {
ResourceMark rm;
outputStream* out = LogHandle(classpath)::info_stream();
if (msg) {
out->print("%s", msg);
}
@ -442,9 +442,6 @@ void ClassLoader::trace_class_path(outputStream* out, const char* msg, const cha
}
}
}
if (msg && msg[0] == '[') {
out->print_cr("]");
} else {
out->cr();
}
}
@ -470,11 +467,13 @@ void ClassLoader::check_shared_classpath(const char *path) {
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
const char* sys_class_path = Arguments::get_sysclasspath();
const char* java_class_path = Arguments::get_appclasspath();
if (PrintSharedArchiveAndExit) {
// Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
// the same as the bootcp of the shared archive.
} else {
trace_class_path(tty, "[Bootstrap loader class path=", sys_class_path);
trace_class_path("bootstrap loader class path=", sys_class_path);
trace_class_path("classpath: ", java_class_path);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
@ -578,9 +577,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
}
}
}
if (TraceClassPaths) {
tty->print_cr("[Opened %s]", path);
}
log_info(classpath)("opened: %s", path);
log_info(classload)("opened: %s", path);
} else {
// Directory

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -331,7 +331,7 @@ class ClassLoader: AllStatic {
static void exit_with_path_failure(const char* error, const char* message);
#endif
static void trace_class_path(outputStream* out, const char* msg, const char* name = NULL);
static void trace_class_path(const char* msg, const char* name = NULL);
// VM monitoring and management support
static jlong classloader_time_ms();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,15 +26,15 @@
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedPathsMiscInfo.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
#include "utilities/ostream.hpp"
void SharedPathsMiscInfo::add_path(const char* path, int type) {
if (TraceClassPaths) {
tty->print("[type=%s] ", type_name(type));
trace_class_path("[Add misc shared path ", path);
}
log_info(classpath)("type=%s ", type_name(type));
ClassLoader::trace_class_path("add misc shared path ", path);
write(path, strlen(path) + 1);
write_jint(jint(type));
}
@ -67,11 +67,29 @@ bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
}
bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
ClassLoader::trace_class_path(tty, msg, name);
ClassLoader::trace_class_path(msg, name);
MetaspaceShared::set_archive_loading_failed();
return false;
}
void SharedPathsMiscInfo::print_path(int type, const char* path) {
ResourceMark rm;
outputStream* out = LogHandle(classpath)::info_stream();
switch (type) {
case BOOT:
out->print("Expecting -Dsun.boot.class.path=%s", path);
break;
case NON_EXIST:
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
out->print("Expecting that file %s must exist and is not altered", path);
break;
default:
ShouldNotReachHere();
}
}
bool SharedPathsMiscInfo::check() {
// The whole buffer must be 0 terminated so that we can use strlen and strcmp
// without fear.
@ -90,17 +108,14 @@ bool SharedPathsMiscInfo::check() {
if (!read_jint(&type)) {
return fail("Corrupted archive file header");
}
if (TraceClassPaths) {
tty->print("[type=%s ", type_name(type));
print_path(tty, type, path);
tty->print_cr("]");
}
log_info(classpath)("type=%s ", type_name(type));
print_path(type, path);
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
}
} else {
trace_class_path("[ok");
ClassLoader::trace_class_path("ok");
}
}

View file

@ -64,9 +64,6 @@ protected:
void write(const void* ptr, size_t size);
bool read(void* ptr, size_t size);
static void trace_class_path(const char* msg, const char* name = NULL) {
ClassLoader::trace_class_path(tty, msg, name);
}
protected:
static bool fail(const char* msg, const char* name = NULL);
virtual bool check(jint type, const char* path);
@ -144,21 +141,7 @@ public:
}
}
virtual void print_path(outputStream* out, int type, const char* path) {
switch (type) {
case BOOT:
out->print("Expecting -Dsun.boot.class.path=%s", path);
break;
case NON_EXIST:
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
out->print("Expecting that file %s must exist and is not altered", path);
break;
default:
ShouldNotReachHere();
}
}
virtual void print_path(int type, const char* path);
bool check();
bool read_jint(jint *ptr) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,19 +36,19 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
{
// Ergonomically select initial concurrent refinement parameters
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
}
set_green_zone(G1ConcRefinementGreenZone);
if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
}
set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));
if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
}
set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
}
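Dropping the explicit MAX2<int> instantiations works because green_zone() and the compared values now agree on size_t, so template argument deduction succeeds without a narrowing cast. A minimal sketch of the mechanics, with MAX2's shape as in HotSpot's globalDefinitions.hpp and stand-in values:

  #include <cstdio>

  template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

  int main() {
    size_t flag_value = 8;   // stand-in for G1ConcRefinementYellowZone
    size_t zone       = 4;   // stand-in for green_zone()
    // Both operands are size_t, so T deduces to size_t; with mixed int/size_t
    // operands this call would not compile, which is what MAX2<int> papered over.
    size_t r = MAX2(flag_value, zone);
    printf("%zu\n", r);  // 8
    return 0;
  }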
ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,11 +61,11 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
* 2) green = 0. Means no caching. Can be a good way to minimize the
* amount of time spent updating rsets during a collection.
*/
int _green_zone;
int _yellow_zone;
int _red_zone;
size_t _green_zone;
size_t _yellow_zone;
size_t _red_zone;
int _thread_threshold_step;
size_t _thread_threshold_step;
// We delay the refinement of 'hot' cards using the hot card cache.
G1HotCardCache _hot_card_cache;
@ -100,17 +100,17 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
void print_worker_threads_on(outputStream* st) const;
void set_green_zone(int x) { _green_zone = x; }
void set_yellow_zone(int x) { _yellow_zone = x; }
void set_red_zone(int x) { _red_zone = x; }
void set_green_zone(size_t x) { _green_zone = x; }
void set_yellow_zone(size_t x) { _yellow_zone = x; }
void set_red_zone(size_t x) { _red_zone = x; }
int green_zone() const { return _green_zone; }
int yellow_zone() const { return _yellow_zone; }
int red_zone() const { return _red_zone; }
size_t green_zone() const { return _green_zone; }
size_t yellow_zone() const { return _yellow_zone; }
size_t red_zone() const { return _red_zone; }
uint worker_thread_num() const { return _n_worker_threads; }
int thread_threshold_step() const { return _thread_threshold_step; }
size_t thread_threshold_step() const { return _thread_threshold_step; }
G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }

View file

@ -67,10 +67,12 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
void ConcurrentG1RefineThread::initialize() {
// Current thread activation threshold
_threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
_threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
cg1r()->yellow_zone());
// A thread deactivates once the number of buffers reaches a deactivation threshold
_deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
_deactivation_threshold =
MAX2(_threshold - MIN2(_threshold, cg1r()->thread_threshold_step()),
cg1r()->green_zone());
}
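The extra MIN2 in the new _deactivation_threshold guards against unsigned wrap-around: now that these fields are size_t, a plain _threshold - thread_threshold_step() would wrap to a huge value whenever the step exceeds the threshold. A standalone sketch of the hazard and the clamp, with illustrative numbers:

  #include <algorithm>
  #include <cstdio>

  int main() {
    size_t threshold = 2, step = 5, green = 0;
    // Naive form: the size_t subtraction wraps around before max() can help.
    size_t naive = std::max(threshold - step, green);                      // huge value
    // Clamped form, as in the patch: subtract at most 'threshold'.
    size_t safe  = std::max(threshold - std::min(threshold, step), green); // 0
    printf("naive=%zu safe=%zu\n", naive, safe);
    return 0;
  }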
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
@ -127,14 +129,14 @@ void ConcurrentG1RefineThread::run_service() {
}
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
log_debug(gc, refine)("Activated %d, on threshold: %d, current: %d",
log_debug(gc, refine)("Activated %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _threshold, dcqs.completed_buffers_num());
{
SuspendibleThreadSetJoiner sts_join;
do {
int curr_buffer_num = (int)dcqs.completed_buffers_num();
size_t curr_buffer_num = dcqs.completed_buffers_num();
// If the number of buffers falls into the yellow zone,
// that means the transition period after the evacuation pause has ended.
if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
@ -151,7 +153,7 @@ void ConcurrentG1RefineThread::run_service() {
false /* during_pause */));
deactivate();
log_debug(gc, refine)("Deactivated %d, off threshold: %d, current: %d",
log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
_worker_id, _deactivation_threshold,
dcqs.completed_buffers_num());
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,11 +53,11 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
// The closure applied to completed log buffers.
CardTableEntryClosure* _refine_closure;
int _thread_threshold_step;
size_t _thread_threshold_step;
// This thread activation threshold
int _threshold;
size_t _threshold;
// This thread deactivation threshold
int _deactivation_threshold;
size_t _deactivation_threshold;
void wait_for_completed_buffers();

View file

@ -207,22 +207,24 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
}
BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
BufferNode* nd = NULL;
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if ((int)_n_completed_buffers <= stop_at) {
if (_n_completed_buffers <= stop_at) {
_process_completed = false;
return NULL;
}
if (_completed_buffers_head != NULL) {
nd = _completed_buffers_head;
assert(_n_completed_buffers > 0, "Invariant");
_completed_buffers_head = nd->next();
if (_completed_buffers_head == NULL)
_completed_buffers_tail = NULL;
_n_completed_buffers--;
assert(_n_completed_buffers >= 0, "Invariant");
if (_completed_buffers_head == NULL) {
assert(_n_completed_buffers == 0, "Invariant");
_completed_buffers_tail = NULL;
}
}
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
return nd;
@ -230,7 +232,7 @@ BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
int stop_at,
size_t stop_at,
bool during_pause) {
assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
BufferNode* nd = get_completed_buffer(stop_at);

View file

@ -134,10 +134,10 @@ public:
// is returned to the completed buffer set, and this call returns false.
bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
int stop_at,
size_t stop_at,
bool during_pause);
BufferNode* get_completed_buffer(int stop_at);
BufferNode* get_completed_buffer(size_t stop_at);
// Applies the current closure to all completed buffers,
// non-consumptively.

View file

@ -1400,7 +1400,6 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
JavaThread::dirty_card_queue_set().abandon_logs();
assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
_young_list->reset_sampled_info();
// At this point there should be no regions in the
// entire heap tagged as young.
assert(check_young_list_empty(true /* check_heap */),
@ -1985,8 +1984,8 @@ jint G1CollectedHeap::initialize() {
JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
concurrent_g1_refine()->yellow_zone(),
concurrent_g1_refine()->red_zone(),
(int)concurrent_g1_refine()->yellow_zone(),
(int)concurrent_g1_refine()->red_zone(),
Shared_DirtyCardQ_lock,
NULL, // fl_owner
true); // init_free_ids
@ -3390,8 +3389,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
clear_cset_fast_test();
_young_list->reset_sampled_info();
// Don't check the whole heap at this point as the
// GC alloc regions from this pause have been tagged
// as survivors and moved on to the survivor list.
@ -4398,6 +4395,8 @@ public:
{ }
void work(uint worker_id) {
G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
ResourceMark rm;
HandleMark hm;
@ -4461,13 +4460,8 @@ void G1CollectedHeap::process_weak_jni_handles() {
g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}
// Weak Reference processing during an evacuation pause (part 1).
void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
double ref_proc_start = os::elapsedTime();
ReferenceProcessor* rp = _ref_processor_stw;
assert(rp->discovery_enabled(), "should have been enabled");
void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
double preserve_cm_referents_start = os::elapsedTime();
// Any reference objects, in the collection set, that were 'discovered'
// by the CM ref processor should have already been copied (either by
// applying the external root copy closure to the discovered lists, or
@ -4495,9 +4489,18 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
per_thread_states,
no_of_gc_workers,
_task_queues);
workers()->run_task(&keep_cm_referents);
g1_policy()->phase_times()->record_preserve_cm_referents_time_ms((os::elapsedTime() - preserve_cm_referents_start) * 1000.0);
}
// Weak Reference processing during an evacuation pause (part 1).
void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
double ref_proc_start = os::elapsedTime();
ReferenceProcessor* rp = _ref_processor_stw;
assert(rp->discovery_enabled(), "should have been enabled");
// Closure to test whether a referent is alive.
G1STWIsAliveClosure is_alive(this);
@ -4529,6 +4532,8 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
NULL,
_gc_timer_stw);
} else {
uint no_of_gc_workers = workers()->active_workers();
// Parallel reference processing
assert(rp->num_q() == no_of_gc_workers, "sanity");
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
@ -4586,6 +4591,12 @@ void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per
g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
}
void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
double merge_pss_time_start = os::elapsedTime();
per_thread_states->flush();
g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
}
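merge_per_thread_state_info() follows the usual G1 phase-timing pattern: sample os::elapsedTime() around the work and record the delta scaled to milliseconds. The same shape outside HotSpot, with std::chrono standing in for os::elapsedTime():

  #include <chrono>
  #include <cstdio>

  // Run 'work' and return how long it took in milliseconds.
  template <class Work>
  double timed_ms(Work work) {
    auto start = std::chrono::steady_clock::now();
    work();
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start;
    return elapsed.count();
  }

  int main() {
    double ms = timed_ms([] {
      for (volatile int i = 0; i < 1000000; i++) {}  // stand-in for flushing per-thread state
    });
    printf("Merge Per-Thread State: %.3fms\n", ms);
    return 0;
  }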
void G1CollectedHeap::pre_evacuate_collection_set() {
_expand_heap_after_alloc_failure = true;
_evacuation_failed = false;
@ -4644,6 +4655,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
// objects (and their reachable sub-graphs) that were
// not copied during the pause.
if (g1_policy()->should_process_references()) {
preserve_cm_referents(per_thread_states);
process_discovered_references(per_thread_states);
} else {
ref_processor_stw()->verify_no_references_recorded();
@ -4687,7 +4699,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
_allocator->release_gc_alloc_regions(evacuation_info);
per_thread_states->flush();
merge_per_thread_state_info(per_thread_states);
record_obj_copy_mem_stats();
@ -5188,8 +5200,8 @@ public:
bool success() { return _success; }
};
bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
bool ret = _young_list->check_list_empty(check_sample);
bool G1CollectedHeap::check_young_list_empty(bool check_heap) {
bool ret = _young_list->check_list_empty();
if (check_heap) {
NoYoungRegionsClosure closure;

View file

@ -511,6 +511,9 @@ protected:
// allocated block, or else "NULL".
HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
// Preserve any referents discovered by concurrent marking that have not yet been
// copied by the STW pause.
void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
// Process any reference objects discovered during
// an incremental evacuation pause.
void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
@ -519,6 +522,9 @@ protected:
// after processing.
void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
// Merges the information gathered on a per-thread basis for all worker threads
// during GC into global variables.
void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
public:
WorkGang* workers() const { return _workers; }
@ -1333,8 +1339,7 @@ public:
return _young_list->check_list_well_formed();
}
bool check_young_list_empty(bool check_heap,
bool check_sample = true);
bool check_young_list_empty(bool check_heap);
// *** Stuff related to concurrent marking. It's not clear to me that so
// many of these need to be public.

View file

@ -787,10 +787,9 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
return survivor_regions_evac_time;
}
void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
guarantee( adaptive_young_list_length(), "should not call this otherwise" );
size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
if (rs_lengths > _rs_lengths_prediction) {
// add 10% to avoid having to recalculate often
size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
@ -1118,14 +1117,15 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
_short_lived_surv_rate_group->start_adding_regions();
// Do that for any other surv rate groups
double scan_hcc_time_ms = ConcurrentG1Refine::hot_card_cache_enabled() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
if (update_stats) {
double cost_per_card_ms = 0.0;
double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
if (_pending_cards > 0) {
cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
_cost_scan_hcc_seq->add(cost_scan_hcc);
_cost_scan_hcc_seq->add(scan_hcc_time_ms);
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
@ -1215,8 +1215,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
if (update_rs_time_goal_ms < scan_hcc_time_ms) {
log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
"Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
@ -1302,12 +1300,12 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
const int k_gy = 3, k_gr = 6;
const double inc_k = 1.1, dec_k = 0.9;
int g = cg1r->green_zone();
size_t g = cg1r->green_zone();
if (update_rs_time > goal_ms) {
g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
} else {
if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
g = (int)MAX2(g * inc_k, g + 1.0);
g = (size_t)MAX2(g * inc_k, g + 1.0);
}
}
// Change the refinement threads params
@ -1316,15 +1314,15 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
cg1r->set_red_zone(g * k_gr);
cg1r->reinitialize_threads();
int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
size_t processing_threshold_delta = MAX2<size_t>(cg1r->green_zone() * _predictor.sigma(), 1);
size_t processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
cg1r->yellow_zone());
// Change the barrier params
dcqs.set_process_completed_threshold(processing_threshold);
dcqs.set_max_completed_queue(cg1r->red_zone());
dcqs.set_process_completed_threshold((int)processing_threshold);
dcqs.set_max_completed_queue((int)cg1r->red_zone());
}
int curr_queue_size = dcqs.completed_buffers_num();
size_t curr_queue_size = dcqs.completed_buffers_num();
if (curr_queue_size >= cg1r->yellow_zone()) {
dcqs.set_completed_queue_padding(curr_queue_size);
} else {

View file

@ -471,7 +471,7 @@ public:
// Check the current value of the young list RSet lengths and
// compare it against the last prediction. If the current value is
// higher, recalculate the young list target length prediction.
void revise_young_list_target_length_if_necessary();
void revise_young_list_target_length_if_necessary(size_t rs_lengths);
// This should be called after the heap is resized.
void record_new_heap_size(uint new_number_of_regions);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2271,7 +2271,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
guarantee(has_overflown() ||
satb_mq_set.completed_buffers_num() == 0,
"Invariant: has_overflown = %s, num buffers = %d",
"Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
BOOL_TO_STR(has_overflown()),
satb_mq_set.completed_buffers_num());
@ -2702,11 +2702,8 @@ public:
};
static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
ReferenceProcessor* result = NULL;
if (G1UseConcMarkReferenceProcessing) {
result = g1h->ref_processor_cm();
assert(result != NULL, "should not be NULL");
}
ReferenceProcessor* result = g1h->ref_processor_cm();
assert(result != NULL, "CM reference processor should not be NULL");
return result;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,107 +28,70 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/workerDataArray.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {
private:
static const int BUFFER_LEN = 1024;
static const int INDENT_CHARS = 3;
char _buffer[BUFFER_LEN];
int _indent_level;
int _cur;
void vappend(const char* format, va_list ap) ATTRIBUTE_PRINTF(2, 0) {
int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
if (res != -1) {
_cur += res;
} else {
DEBUG_ONLY(warning("buffer too small in LineBuffer");)
_buffer[BUFFER_LEN -1] = 0;
_cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
}
}
public:
explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
_buffer[_cur] = ' ';
}
}
#ifndef PRODUCT
~LineBuffer() {
assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
}
#endif
void append(const char* format, ...) ATTRIBUTE_PRINTF(2, 3) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
}
const char* to_string() {
_cur = _indent_level * INDENT_CHARS;
return _buffer;
}
};
static const char* Indents[4] = {"", " ", " ", " "};
static const char* Indents[5] = {"", " ", " ", " ", " "};
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads)
{
assert(max_gc_threads > 0, "Must have some GC threads");
_gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start:", false, 2);
_gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning:", true, 2);
_gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
_gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");
// Root scanning phases
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots:", true, 3);
_gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots:", true, 3);
_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots:", true, 3);
_gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots:", true, 3);
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots:", true, 3);
_gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots:", true, 3);
_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots:", true, 3);
_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots:", true, 3);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots:", true, 3);
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots:", true, 3);
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots:", true, 3);
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD:", true, 3);
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots:", true, 3);
_gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering:", true, 3);
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
_gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms):");
_gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
_gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
_gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
_gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms):");
_gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
_gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms):");
_gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS:", true, 2);
_gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC:", true, 3);
_gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS:", true, 2);
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning:", true, 2);
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy:", true, 2);
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination:", true, 2);
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total:", true, 2);
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End:", false, 2);
_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other:", true, 2);
_gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms):");
if (ConcurrentG1Refine::hot_card_cache_enabled()) {
_gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms):");
} else {
_gc_par_phases[ScanHCC] = NULL;
}
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:", true, 3);
_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);
_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:", true, 3);
_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
_gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup:", true, 2);
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup:", true, 2);
if (UseStringDeduplication) {
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
} else {
_gc_par_phases[StringDedupQueueFixup] = NULL;
_gc_par_phases[StringDedupTableFixup] = NULL;
}
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty:", true, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:", true, 3);
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
_gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
}
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@ -140,11 +103,10 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
_external_accounted_time_ms = 0.0;
for (int i = 0; i < GCParPhasesSentinel; i++) {
if (_gc_par_phases[i] != NULL) {
_gc_par_phases[i]->reset();
}
_gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
_gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
}
}
void G1GCPhaseTimes::note_gc_end() {
@ -166,43 +128,10 @@ void G1GCPhaseTimes::note_gc_end() {
}
for (int i = 0; i < GCParPhasesSentinel; i++) {
if (_gc_par_phases[i] != NULL) {
_gc_par_phases[i]->verify(_active_gc_threads);
}
}
void G1GCPhaseTimes::print_stats(const char* indent, const char* str, double value) {
log_debug(gc, phases)("%s%s: %.1lf ms", indent, str, value);
}
double G1GCPhaseTimes::accounted_time_ms() {
// First subtract any externally accounted time
double misc_time_ms = _external_accounted_time_ms;
// Subtract the root region scanning wait time. It's initialized to
// zero at the start of the pause.
misc_time_ms += _root_region_scan_wait_time_ms;
misc_time_ms += _cur_collection_par_time_ms;
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
// Strong code root purge time
misc_time_ms += _cur_strong_code_root_purge_time_ms;
if (G1StringDedup::is_enabled()) {
// String dedup fixup time
misc_time_ms += _cur_string_dedup_fixup_time_ms;
}
// Subtract the time taken to clean the card table from the
// current value of "other time"
misc_time_ms += _cur_clear_ct_time_ms;
// Remove expand heap time from "other time"
misc_time_ms += _cur_expand_heap_time_ms;
return misc_time_ms;
}
// record the time a phase took in seconds
@ -224,193 +153,144 @@ double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;
}
double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
return _gc_par_phases[phase]->get(worker_i) * 1000.0;
}
double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;
}
double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;
}
double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;
}
size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
}
size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);
}
double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);
}
size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);
}
size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);
}
class G1GCParPhasePrinter : public StackObj {
G1GCPhaseTimes* _phase_times;
public:
G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
void print(G1GCPhaseTimes::GCParPhases phase_id) {
WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
if (phase->_length == 1) {
print_single_length(phase_id, phase);
} else {
print_multi_length(phase_id, phase);
template <class T>
void G1GCPhaseTimes::details(T* phase, const char* indent) {
LogHandle(gc, phases, task) log;
if (log.is_level(LogLevel::Trace)) {
outputStream* trace_out = log.trace_stream();
trace_out->print("%s", indent);
phase->print_details_on(trace_out, _active_gc_threads);
}
}
void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) {
out->print("%s", Indents[indent]);
phase->print_summary_on(out, _active_gc_threads, print_sum);
details(phase, Indents[indent]);
private:
void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
// No need for min, max, average and sum for only one worker
log_debug(gc, phases)("%s%s: %.1lf", Indents[phase->_indent_level], phase->_title, _phase_times->get_time_ms(phase_id, 0));
WorkerDataArray<size_t>* work_items = phase->_thread_work_items;
WorkerDataArray<size_t>* work_items = phase->thread_work_items();
if (work_items != NULL) {
log_debug(gc, phases)("%s%s: " SIZE_FORMAT, Indents[work_items->_indent_level], work_items->_title, _phase_times->sum_thread_work_items(phase_id));
out->print("%s", Indents[indent + 1]);
work_items->print_summary_on(out, _active_gc_threads, true);
details(work_items, Indents[indent + 1]);
}
}
void print_time_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id) {
if (log_is_enabled(Trace, gc)) {
LineBuffer buf(0);
uint active_length = _phase_times->_active_gc_threads;
for (uint i = 0; i < active_length; ++i) {
buf.append(" %4.1lf", _phase_times->get_time_ms(phase_id, i));
}
const char* line = buf.to_string();
log_trace(gc, phases)("%s%-25s%s", indent, "", line);
void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) {
LogHandle(gc, phases) log;
if (log.is_level(LogLevel::Debug)) {
ResourceMark rm;
log_phase(phase, 2, log.debug_stream(), true);
}
}
void print_count_values(const char* indent, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
if (log_is_enabled(Trace, gc)) {
LineBuffer buf(0);
uint active_length = _phase_times->_active_gc_threads;
for (uint i = 0; i < active_length; ++i) {
buf.append(" " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
}
const char* line = buf.to_string();
log_trace(gc, phases)("%s%-25s%s", indent, "", line);
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) {
LogHandle(gc, phases) log;
if (log.is_level(LogLevel::Trace)) {
ResourceMark rm;
log_phase(phase, 3, log.trace_stream(), print_sum);
}
}
  void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
    const char* indent = Indents[thread_work_items->_indent_level];
    assert(thread_work_items->_print_sum, "%s does not have print sum true even though it is a count", thread_work_items->_title);

    log_debug(gc, phases)("%s%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT,
        indent, thread_work_items->_title,
        _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
        _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));

    print_count_values(indent, phase_id, thread_work_items);
  }

  void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    const char* indent = Indents[phase->_indent_level];

    if (phase->_print_sum) {
      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf, Sum: %4.1lf",
          indent, phase->_title,
          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id), _phase_times->sum_time_ms(phase_id));
    } else {
      log_debug(gc, phases)("%s%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf",
          indent, phase->_title,
          _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
          _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));
    }

    print_time_values(indent, phase_id);

    if (phase->_thread_work_items != NULL) {
      print_thread_work_items(phase_id, phase->_thread_work_items);
    }
  }
};

#define PHASE_DOUBLE_FORMAT "%s%s: %.1lfms"
#define PHASE_SIZE_FORMAT "%s%s: " SIZE_FORMAT

#define info_line(str, value) \
  log_info(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[1], str, value);

#define debug_line(str, value) \
  log_debug(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[2], str, value);

#define trace_line(str, value) \
  log_trace(gc, phases)(PHASE_DOUBLE_FORMAT, Indents[3], str, value);

#define trace_line_sz(str, value) \
  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);

#define trace_line_ms(str, value) \
  log_trace(gc, phases)(PHASE_SIZE_FORMAT, Indents[3], str, value);

#define info_line_and_account(str, value) \
  info_line(str, value);                  \
  accounted_time_ms += value;
void G1GCPhaseTimes::print() {
note_gc_end();
G1GCParPhasePrinter par_phase_printer(this);
double accounted_time_ms = _external_accounted_time_ms;
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(Indents[1], "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
info_line_and_account("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
print_stats(Indents[1], "Parallel Time", _cur_collection_par_time_ms);
for (int i = 0; i <= GCMainParPhasesLast; i++) {
par_phase_printer.print((GCParPhases) i);
info_line_and_account("Evacuate Collection Set", _cur_collection_par_time_ms);
trace_phase(_gc_par_phases[GCWorkerStart], false);
debug_phase(_gc_par_phases[ExtRootScan]);
for (int i = ThreadRoots; i <= SATBFiltering; i++) {
trace_phase(_gc_par_phases[i]);
}
debug_phase(_gc_par_phases[UpdateRS]);
if (ConcurrentG1Refine::hot_card_cache_enabled()) {
trace_phase(_gc_par_phases[ScanHCC]);
}
debug_phase(_gc_par_phases[ScanRS]);
debug_phase(_gc_par_phases[CodeRoots]);
debug_phase(_gc_par_phases[ObjCopy]);
debug_phase(_gc_par_phases[Termination]);
debug_phase(_gc_par_phases[Other]);
debug_phase(_gc_par_phases[GCWorkerTotal]);
trace_phase(_gc_par_phases[GCWorkerEnd], false);
info_line_and_account("Code Roots", _cur_collection_code_root_fixup_time_ms + _cur_strong_code_root_purge_time_ms);
debug_line("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
debug_line("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
print_stats(Indents[1], "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(Indents[1], "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(Indents[1], "String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
par_phase_printer.print((GCParPhases) i);
info_line_and_account("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
debug_phase(_gc_par_phases[StringDedupQueueFixup]);
debug_phase(_gc_par_phases[StringDedupTableFixup]);
}
}
print_stats(Indents[1], "Clear CT", _cur_clear_ct_time_ms);
print_stats(Indents[1], "Expand Heap After Collection", _cur_expand_heap_time_ms);
double misc_time_ms = _gc_pause_time_ms - accounted_time_ms();
print_stats(Indents[1], "Other", misc_time_ms);
info_line_and_account("Clear Card Table", _cur_clear_ct_time_ms);
info_line_and_account("Expand Heap After Collection", _cur_expand_heap_time_ms);
double free_cset_time = _recorded_young_free_cset_time_ms + _recorded_non_young_free_cset_time_ms;
info_line_and_account("Free Collection Set", free_cset_time);
debug_line("Young Free Collection Set", _recorded_young_free_cset_time_ms);
debug_line("Non-Young Free Collection Set", _recorded_non_young_free_cset_time_ms);
info_line_and_account("Merge Per-Thread State", _recorded_merge_pss_time_ms);
info_line("Other", _gc_pause_time_ms - accounted_time_ms);
if (_cur_verify_before_time_ms > 0.0) {
print_stats(Indents[2], "Verify Before", _cur_verify_before_time_ms);
debug_line("Verify Before", _cur_verify_before_time_ms);
}
if (G1CollectedHeap::heap()->evacuation_failed()) {
double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
_cur_evac_fail_restore_remsets;
print_stats(Indents[2], "Evacuation Failure", evac_fail_handling);
log_trace(gc, phases)("%sRecalculate Used: %.1lf ms", Indents[3], _cur_evac_fail_recalc_used);
log_trace(gc, phases)("%sRemove Self Forwards: %.1lf ms", Indents[3], _cur_evac_fail_remove_self_forwards);
log_trace(gc, phases)("%sRestore RemSet: %.1lf ms", Indents[3], _cur_evac_fail_restore_remsets);
debug_line("Evacuation Failure", evac_fail_handling);
trace_line("Recalculate Used", _cur_evac_fail_recalc_used);
trace_line("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
trace_line("Restore RemSet", _cur_evac_fail_restore_remsets);
}
print_stats(Indents[2], "Choose CSet",
(_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms));
print_stats(Indents[2], "Ref Proc", _cur_ref_proc_time_ms);
print_stats(Indents[2], "Ref Enq", _cur_ref_enq_time_ms);
print_stats(Indents[2], "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
par_phase_printer.print(RedirtyCards);
debug_line("Choose CSet", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
debug_line("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
debug_line("Ref Proc", _cur_ref_proc_time_ms);
debug_line("Ref Enq", _cur_ref_enq_time_ms);
debug_line("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
trace_phase(_gc_par_phases[RedirtyCards]);
trace_phase(_gc_par_phases[PreserveCMReferents]);
if (G1EagerReclaimHumongousObjects) {
print_stats(Indents[2], "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
log_trace(gc, phases)("%sHumongous Total: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_total);
log_trace(gc, phases)("%sHumongous Candidate: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_candidates);
print_stats(Indents[2], "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
log_trace(gc, phases)("%sHumongous Reclaimed: " SIZE_FORMAT, Indents[3], _cur_fast_reclaim_humongous_reclaimed);
debug_line("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
trace_line_sz("Humongous Total", _cur_fast_reclaim_humongous_total);
trace_line_sz("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
debug_line("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
trace_line_sz("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
}
print_stats(Indents[2], "Free CSet",
(_recorded_young_free_cset_time_ms +
_recorded_non_young_free_cset_time_ms));
log_trace(gc, phases)("%sYoung Free CSet: %.1lf ms", Indents[3], _recorded_young_free_cset_time_ms);
log_trace(gc, phases)("%sNon-Young Free CSet: %.1lf ms", Indents[3], _recorded_non_young_free_cset_time_ms);
if (_cur_verify_after_time_ms > 0.0) {
print_stats(Indents[2], "Verify After", _cur_verify_after_time_ms);
debug_line("Verify After", _cur_verify_after_time_ms);
}
}
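Taken together, the new print() builds its final "Other" line from a running total: each info_line_and_account both logs a value and adds it to accounted_time_ms. A minimal standalone sketch of the same idiom, with printf standing in for log_info(gc, phases) (illustrative names only, not HotSpot code):

#include <cstdio>

// Stand-in for log_info(gc, phases)(...).
#define info_line(str, value) \
  printf("  %s: %.1lfms\n", str, value);

#define info_line_and_account(str, value) \
  info_line(str, value);                  \
  accounted_time_ms += value;

int main() {
  double accounted_time_ms = 0.0;
  const double gc_pause_time_ms = 12.5;

  info_line_and_account("Evacuate Collection Set", 10.0);
  info_line_and_account("Clear Card Table", 1.5);

  // Whatever part of the pause was not explicitly accounted shows up as "Other".
  info_line("Other", gc_pause_time_ms - accounted_time_ms); // prints 1.0ms
  return 0;
}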


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,8 +32,6 @@ class LineBuffer;
template <class T> class WorkerDataArray;
class G1GCPhaseTimes : public CHeapObj<mtGC> {
friend class G1GCParPhasePrinter;
uint _active_gc_threads;
uint _max_gc_threads;
jlong _gc_start_counter;
@ -69,6 +67,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
StringDedupQueueFixup,
StringDedupTableFixup,
RedirtyCards,
PreserveCMReferents,
GCParPhasesSentinel
};
@ -108,6 +107,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_redirty_logged_cards_time_ms;
double _recorded_preserve_cm_referents_time_ms;
double _recorded_merge_pss_time_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
@ -120,11 +123,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_verify_before_time_ms;
double _cur_verify_after_time_ms;
// Helper methods for detailed logging
void print_stats(const char*, const char* str, double value);
void note_gc_end();
template <class T>
void details(T* phase, const char* indent);
void log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum);
void debug_phase(WorkerDataArray<double>* phase);
void trace_phase(WorkerDataArray<double>* phase, bool print_sum = true);
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(uint active_gc_threads);
@ -143,16 +149,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
size_t sum_thread_work_items(GCParPhases phase);
private:
double get_time_ms(GCParPhases phase, uint worker_i);
double sum_time_ms(GCParPhases phase);
double min_time_ms(GCParPhases phase);
double max_time_ms(GCParPhases phase);
size_t get_thread_work_item(GCParPhases phase, uint worker_i);
double average_thread_work_items(GCParPhases phase);
size_t min_thread_work_items(GCParPhases phase);
size_t max_thread_work_items(GCParPhases phase);
public:
void record_clear_ct_time(double ms) {
@ -234,6 +230,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_recorded_redirty_logged_cards_time_ms = time_ms;
}
void record_preserve_cm_referents_time_ms(double time_ms) {
_recorded_preserve_cm_referents_time_ms = time_ms;
}
void record_merge_pss_time_ms(double time_ms) {
_recorded_merge_pss_time_ms = time_ms;
}
void record_cur_collection_start_sec(double time_ms) {
_cur_collection_start_sec = time_ms;
}
@ -250,8 +254,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_external_accounted_time_ms += time_ms;
}
double accounted_time_ms();
double cur_collection_start_sec() {
return _cur_collection_start_sec;
}


@ -81,10 +81,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
}
void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
if (!default_use_cache()) {
assert(_hot_cache == NULL, "Logic");
return;
}
assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
assert(_hot_cache != NULL, "Logic");
assert(!use_cache(), "cache should be disabled");


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,13 +48,15 @@ void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t allo
void G1IHOPControl::print() {
size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
log_debug(gc, ihop)("Basic information (value update), threshold: " SIZE_FORMAT "B (%1.2f), target occupancy: " SIZE_FORMAT "B, current occupancy: " SIZE_FORMAT "B, "
" recent old gen allocation rate: %1.2f, recent marking phase length: %1.2f",
"recent allocation size: " SIZE_FORMAT "B, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
cur_conc_mark_start_threshold,
cur_conc_mark_start_threshold * 100.0 / _target_occupancy,
_target_occupancy,
G1CollectedHeap::heap()->used(),
_last_allocated_bytes,
_last_allocation_time_s * 1000.0,
_last_allocation_time_s > 0.0 ? _last_allocated_bytes / _last_allocation_time_s : 0.0,
last_marking_length_s());
last_marking_length_s() * 1000.0);
}
void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
@ -192,12 +194,15 @@ void G1AdaptiveIHOPControl::print() {
G1IHOPControl::print();
size_t actual_target = actual_target_threshold();
log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: " SIZE_FORMAT "B (%1.2f), internal target occupancy: " SIZE_FORMAT "B, "
" predicted old gen allocation rate: %1.2f, predicted marking phase length: %1.2f, prediction active: %s",
"occupancy: " SIZE_FORMAT "B, additional buffer size: " SIZE_FORMAT "B, predicted old gen allocation rate: %1.2fB/s, "
"predicted marking phase length: %1.2fms, prediction active: %s",
get_conc_mark_start_threshold(),
percent_of(get_conc_mark_start_threshold(), actual_target),
actual_target,
G1CollectedHeap::heap()->used(),
_last_unrestrained_young_size,
_predictor->get_new_prediction(&_allocation_rate_s),
_predictor->get_new_prediction(&_marking_times_s),
_predictor->get_new_prediction(&_marking_times_s) * 1000.0,
have_enough_data_for_prediction() ? "true" : "false");
}
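The reworked messages report the recent old gen allocation rate in B/s, with a guard so a zero-length measurement interval yields 0.0 rather than a division by zero. A one-function sketch of that guarded computation (assumed shape, not the HotSpot API):

#include <cstddef>

// Bytes per second, with the same zero-interval guard as the log line above.
double alloc_rate_bytes_per_s(size_t allocated_bytes, double allocation_time_s) {
  return allocation_time_s > 0.0 ? allocated_bytes / allocation_time_s : 0.0;
}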


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -327,6 +327,9 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
assert(worker_id < _n_workers, "out of bounds access");
if (_states[worker_id] == NULL) {
_states[worker_id] = new_par_scan_state(worker_id, _young_cset_length);
}
return _states[worker_id];
}
@ -352,6 +355,10 @@ void G1ParScanThreadStateSet::flush() {
for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
G1ParScanThreadState* pss = _states[worker_index];
if (pss == NULL) {
continue;
}
_total_cards_scanned += _cards_scanned[worker_index];
pss->flush(_surviving_young_words_total);
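The two hunks above switch G1ParScanThreadStateSet to lazy creation: a worker's state is built on first request, and flush() skips slots that were never touched. A rough standalone sketch of the pattern, with plain C++ containers standing in for the HotSpot types (all names illustrative):

#include <cstddef>
#include <vector>

struct WorkerState {
  // per-worker scratch data would live here
};

class WorkerStateSet {
  std::vector<WorkerState*> _states;
public:
  explicit WorkerStateSet(size_t n_workers) : _states(n_workers, nullptr) {}

  // Created lazily, so workers that never participate allocate nothing.
  WorkerState* state_for_worker(size_t worker_id) {
    if (_states[worker_id] == nullptr) {
      _states[worker_id] = new WorkerState();
    }
    return _states[worker_id];
  }

  // Flushing must tolerate slots that were never initialized.
  void flush() {
    for (WorkerState*& s : _states) {
      if (s == nullptr) {
        continue;
      }
      // ... accumulate this worker's results here ...
      delete s;
      s = nullptr;
    }
  }
};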


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -200,6 +200,7 @@ class G1ParScanThreadStateSet : public StackObj {
size_t* _surviving_young_words_total;
size_t* _cards_scanned;
size_t _total_cards_scanned;
size_t _young_cset_length;
uint _n_workers;
bool _flushed;
@ -210,10 +211,11 @@ class G1ParScanThreadStateSet : public StackObj {
_surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
_cards_scanned(NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC)),
_total_cards_scanned(0),
_young_cset_length(young_cset_length),
_n_workers(n_workers),
_flushed(false) {
for (uint i = 0; i < n_workers; ++i) {
_states[i] = new_par_scan_state(i, young_cset_length);
_states[i] = NULL;
}
memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
memset(_cards_scanned, 0, n_workers * sizeof(size_t));


@ -238,7 +238,7 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
{
if (ConcurrentG1Refine::hot_card_cache_enabled()) {
// Apply the closure to the entries of the hot card cache.
G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
_g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
@ -291,7 +291,6 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
_g1->cleanUpCardTable();
DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;
int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
if (_g1->evacuation_failed()) {
double restore_remembered_set_start = os::elapsedTime();


@ -26,6 +26,8 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "runtime/mutexLocker.hpp"
@ -100,22 +102,35 @@ void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
G1CollectorPolicy* g1p = g1h->g1_policy();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
g1h->young_list()->rs_length_sampling_init();
while (g1h->young_list()->rs_length_sampling_more()) {
g1h->young_list()->rs_length_sampling_next();
HeapRegion* hr = g1h->young_list()->first_region();
size_t sampled_rs_lengths = 0;
while (hr != NULL) {
size_t rs_length = hr->rem_set()->occupied();
sampled_rs_lengths += rs_length;
// The current region may not yet have been added to the
// incremental collection set (it gets added when it is
// retired as the current allocation region).
if (hr->in_collection_set()) {
// Update the collection set policy information for this region
g1p->update_incremental_cset_info(hr, rs_length);
}
++regions_visited;
// we try to yield every time we visit 10 regions
if (regions_visited == 10) {
if (sts.should_yield()) {
sts.yield();
// we just abandon the iteration
break;
// A gc may have occurred and our sampling data is stale and further
// traversal of the young list is unsafe
return;
}
regions_visited = 0;
}
hr = hr->get_next_young_region();
}
g1p->revise_young_list_target_length_if_necessary();
g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
}
}
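The relocated sampling loop yields to a pending safepoint every ten regions and then abandons the walk entirely, since a GC may have rearranged the young list. A simplified sketch of that yield-and-abandon shape (types and helpers are stand-ins, not HotSpot code):

#include <cstddef>

struct Region {
  Region* next;
  size_t rs_length() const { return 0; } // stand-in for rem_set()->occupied()
};

// Stand-ins for the SuspendibleThreadSet calls.
static bool should_yield() { return false; }
static void yield() {}
static void publish(size_t /*sampled*/) {}

static void sample_young_list(Region* head) {
  size_t sampled_rs_lengths = 0;
  int regions_visited = 0;
  for (Region* r = head; r != nullptr; r = r->next) {
    sampled_rs_lengths += r->rs_length();
    if (++regions_visited == 10) {   // check for a pending safepoint every 10 regions
      if (should_yield()) {
        yield();
        return;                      // the list may have changed; drop the stale sample
      }
      regions_visited = 0;
    }
  }
  publish(sampled_rs_lengths);       // only a complete walk updates the policy
}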


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,10 +71,6 @@
"draining concurrent marking work queues.") \
range(1, max_intx) \
\
experimental(bool, G1UseConcMarkReferenceProcessing, true, \
"If true, enable reference discovery during concurrent " \
"marking and reference processing at the end of remark.") \
\
experimental(double, G1LastPLABAverageOccupancy, 50.0, \
"The expected average occupancy of the last PLAB in " \
"percent.") \
@ -107,35 +103,35 @@
"Size of an update buffer") \
range(1, NOT_LP64(32*M) LP64_ONLY(1*G)) \
\
product(intx, G1ConcRefinementYellowZone, 0, \
product(size_t, G1ConcRefinementYellowZone, 0, \
"Number of enqueued update buffers that will " \
"trigger concurrent processing. Will be selected ergonomically " \
"by default.") \
range(0, max_intx) \
range(0, SIZE_MAX) \
\
product(intx, G1ConcRefinementRedZone, 0, \
product(size_t, G1ConcRefinementRedZone, 0, \
"Maximum number of enqueued update buffers before mutator " \
"threads start processing new ones instead of enqueueing them. " \
"Will be selected ergonomically by default. Zero will disable " \
"concurrent processing.") \
range(0, max_intx) \
range(0, SIZE_MAX) \
\
product(intx, G1ConcRefinementGreenZone, 0, \
product(size_t, G1ConcRefinementGreenZone, 0, \
"The number of update buffers that are left in the queue by the " \
"concurrent processing threads. Will be selected ergonomically " \
"by default.") \
range(0, max_intx) \
range(0, SIZE_MAX) \
\
product(intx, G1ConcRefinementServiceIntervalMillis, 300, \
product(uintx, G1ConcRefinementServiceIntervalMillis, 300, \
"The last concurrent refinement thread wakes up every " \
"specified number of milliseconds to do miscellaneous work.") \
range(0, max_jint) \
range(0, max_uintx) \
\
product(intx, G1ConcRefinementThresholdStep, 0, \
product(size_t, G1ConcRefinementThresholdStep, 0, \
"Each time the rset update queue increases by this amount " \
"activate the next refinement thread if available. " \
"Will be selected ergonomically by default.") \
range(0, max_jint) \
range(0, SIZE_MAX) \
\
product(intx, G1RSetUpdatingPauseTimePercent, 10, \
"A target percentage of time that is allowed to be spend on " \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include <new>
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
_qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
_permanent(permanent), _lock(NULL)
@ -87,6 +89,19 @@ void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
}
BufferNode* BufferNode::allocate(size_t byte_size) {
assert(byte_size > 0, "precondition");
assert(is_size_aligned(byte_size, sizeof(void**)),
"Invalid buffer size " SIZE_FORMAT, byte_size);
void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
return new (data) BufferNode;
}
void BufferNode::deallocate(BufferNode* node) {
node->~BufferNode();
FREE_C_HEAP_ARRAY(char, node);
}
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
_max_completed_queue(0),
_cbl_mon(NULL), _fl_lock(NULL),
@ -123,18 +138,24 @@ void PtrQueueSet::initialize(Monitor* cbl_mon,
void** PtrQueueSet::allocate_buffer() {
assert(_sz > 0, "Didn't set a buffer size.");
BufferNode* node = NULL;
{
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    if (_fl_owner->_buf_free_list != NULL) {
      void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
      _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
      _fl_owner->_buf_free_list_sz--;
      return res;
    } else {
      // Allocate space for the BufferNode in front of the buffer.
      char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC);
      return BufferNode::make_buffer_from_block(b);
    }

    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
}
if (node == NULL) {
node = BufferNode::allocate(_sz);
} else {
// Reinitialize buffer obtained from free list.
node->set_index(0);
node->set_next(NULL);
}
return BufferNode::make_buffer_from_node(node);
}
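Note how the rewritten allocate_buffer holds the free-list lock only while unlinking a node; allocation and reinitialization happen outside the critical section. A standalone sketch of that structure using std::mutex in place of MutexLockerEx (illustrative names):

#include <cstddef>
#include <mutex>

struct Node {
  Node* next;
  size_t index;
};

static Node* g_free_list = nullptr;
static std::mutex g_fl_lock;

Node* take_or_allocate() {
  Node* node = nullptr;
  {
    std::lock_guard<std::mutex> guard(g_fl_lock); // lock only the list unlink
    node = g_free_list;
    if (node != nullptr) {
      g_free_list = node->next;
    }
  }
  if (node == nullptr) {
    node = new Node();  // heap allocation happens outside the lock
  } else {
    node->index = 0;    // a node reused from the free list must be reinitialized
    node->next = nullptr;
  }
  return node;
}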
void PtrQueueSet::deallocate_buffer(void** buf) {
assert(_sz > 0, "Didn't set a buffer size.");
@ -150,13 +171,13 @@ void PtrQueueSet::reduce_free_list() {
// For now we'll adopt the strategy of deleting half.
MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
size_t n = _buf_free_list_sz / 2;
    while (n > 0) {
      assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
      void* b = BufferNode::make_block_from_node(_buf_free_list);
      _buf_free_list = _buf_free_list->next();
      _buf_free_list_sz--;
      n--;
      FREE_C_HEAP_ARRAY(char, b);
    }

    for (size_t i = 0; i < n; ++i) {
      assert(_buf_free_list != NULL,
             "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
      BufferNode* node = _buf_free_list;
      _buf_free_list = node->next();
      _buf_free_list_sz--;
      BufferNode::deallocate(node);
BufferNode::deallocate(node);
}
}
@ -236,8 +257,9 @@ bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
BufferNode* cbn = BufferNode::new_from_buffer(buf);
BufferNode* cbn = BufferNode::make_node_from_buffer(buf);
cbn->set_index(index);
cbn->set_next(NULL);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");
_completed_buffers_head = cbn;
@ -249,16 +271,17 @@ void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
_n_completed_buffers++;
if (!_process_completed && _process_completed_threshold >= 0 &&
_n_completed_buffers >= _process_completed_threshold) {
_n_completed_buffers >= (size_t)_process_completed_threshold) {
_process_completed = true;
if (_notify_when_complete)
if (_notify_when_complete) {
_cbl_mon->notify();
}
}
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
int PtrQueueSet::completed_buffers_list_length() {
int n = 0;
size_t PtrQueueSet::completed_buffers_list_length() {
size_t n = 0;
BufferNode* cbn = _completed_buffers_head;
while (cbn != NULL) {
n++;
@ -312,7 +335,8 @@ void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
void PtrQueueSet::notify_if_necessary() {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
assert(_process_completed_threshold >= 0, "_process_completed is negative");
if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
_process_completed = true;
if (_notify_when_complete)
_cbl_mon->notify();


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,9 +33,6 @@
// the addresses of modified old-generation objects. This type supports
// this operation.
// The definition of placement operator new(size_t, void*) in the <new>.
#include <new>
class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
@ -168,42 +165,38 @@ protected:
class BufferNode {
size_t _index;
BufferNode* _next;
public:
void* _buffer[1]; // Pseudo flexible array member.
BufferNode() : _index(0), _next(NULL) { }
~BufferNode() { }
static size_t buffer_offset() {
return offset_of(BufferNode, _buffer);
}
public:
BufferNode* next() const { return _next; }
void set_next(BufferNode* n) { _next = n; }
size_t index() const { return _index; }
void set_index(size_t i) { _index = i; }
  // Align the size of the structure to the size of the pointer
  static size_t aligned_size() {
    static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
    return alignment;
  }

  // BufferNode is allocated before the buffer.
  // The chunk of memory that holds both of them is a block.
  // Produce a new BufferNode given a buffer.
  static BufferNode* new_from_buffer(void** buf) {
    return new (make_block_from_buffer(buf)) BufferNode;
  }

  // The following are the required conversion routines:
  static BufferNode* make_node_from_buffer(void** buf) {
    return (BufferNode*)make_block_from_buffer(buf);
  }
  static void** make_buffer_from_node(BufferNode *node) {
    return make_buffer_from_block(node);
  }
  static void* make_block_from_node(BufferNode *node) {
    return (void*)node;
  }
  static void** make_buffer_from_block(void* p) {
    return (void**)((char*)p + aligned_size());
  }
  static void* make_block_from_buffer(void** p) {
    return (void*)((char*)p - aligned_size());
  }

  // Allocate a new BufferNode with the "buffer" having size bytes.
  static BufferNode* allocate(size_t byte_size);

  // Free a BufferNode.
  static void deallocate(BufferNode* node);

  // Return the BufferNode containing the buffer.
  static BufferNode* make_node_from_buffer(void** buffer) {
    return reinterpret_cast<BufferNode*>(
      reinterpret_cast<char*>(buffer) - buffer_offset());
  }

  // Return the buffer for node.
  static void** make_buffer_from_node(BufferNode *node) {
    // &_buffer[0] might lead to index out of bounds warnings.
    return reinterpret_cast<void**>(
      reinterpret_cast<char*>(node) + buffer_offset());
  }
};
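The new BufferNode keeps the node header and its buffer in a single heap block: _buffer[1] acts as a pseudo flexible array member, buffer_offset() is its offsetof, and placement new constructs the header at the front of the raw block. A self-contained sketch of the same trick with plain malloc/free in place of NEW_C_HEAP_ARRAY (illustrative, not the HotSpot class):

#include <cstddef>
#include <cstdlib>
#include <new>

struct Node {
  size_t _index;
  Node*  _next;
  void*  _buffer[1];                  // pseudo flexible array member

  Node() : _index(0), _next(nullptr) {}

  static size_t buffer_offset() {
    return offsetof(Node, _buffer);
  }
  // One block holds the header followed by byte_size bytes of buffer.
  static Node* allocate(size_t byte_size) {
    void* block = ::malloc(buffer_offset() + byte_size);
    return new (block) Node;          // placement new builds the header in front
  }
  static void deallocate(Node* node) {
    node->~Node();
    ::free(node);
  }
  // Convert between the two views of the same block.
  static void** buffer_of(Node* node) {
    return reinterpret_cast<void**>(
        reinterpret_cast<char*>(node) + buffer_offset());
  }
  static Node* node_of(void** buffer) {
    return reinterpret_cast<Node*>(
        reinterpret_cast<char*>(buffer) - buffer_offset());
  }
};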
@ -216,7 +209,7 @@ protected:
Monitor* _cbl_mon; // Protects the fields below.
BufferNode* _completed_buffers_head;
BufferNode* _completed_buffers_tail;
int _n_completed_buffers;
size_t _n_completed_buffers;
int _process_completed_threshold;
volatile bool _process_completed;
@ -240,9 +233,9 @@ protected:
// Maximum number of elements allowed on completed queue: after that,
// enqueuer does the work itself. Zero indicates no maximum.
int _max_completed_queue;
int _completed_queue_padding;
size_t _completed_queue_padding;
int completed_buffers_list_length();
size_t completed_buffers_list_length();
void assert_completed_buffer_list_len_correct_locked();
void assert_completed_buffer_list_len_correct();
@ -306,15 +299,15 @@ public:
// list size may be reduced, if that is deemed desirable.
void reduce_free_list();
int completed_buffers_num() { return _n_completed_buffers; }
size_t completed_buffers_num() { return _n_completed_buffers; }
void merge_bufferlists(PtrQueueSet* src);
void set_max_completed_queue(int m) { _max_completed_queue = m; }
int max_completed_queue() { return _max_completed_queue; }
void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
int completed_queue_padding() { return _completed_queue_padding; }
void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
size_t completed_queue_padding() { return _completed_queue_padding; }
// Notify the consumer if the number of buffers crossed the threshold
void notify_if_necessary();


@ -24,18 +24,53 @@
#include "precompiled.hpp"
#include "gc/g1/workerDataArray.inline.hpp"
#include "utilities/ostream.hpp"
template <>
void WorkerDataArray<double>::WDAPrinter::summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum) {
out->print("%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", title, min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff* MILLIUNITS);
if (print_sum) {
out->print_cr(", Sum: %4.1lf", sum * MILLIUNITS);
} else {
out->cr();
}
}
template <>
void WorkerDataArray<size_t>::WDAPrinter::summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) {
out->print("%-25s Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, title, min, avg, max, diff);
if (print_sum) {
out->print_cr(", Sum: " SIZE_FORMAT, sum);
} else {
out->cr();
}
}
template <>
void WorkerDataArray<double>::WDAPrinter::details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads) {
out->print("%-25s", "");
for (uint i = 0; i < active_threads; ++i) {
out->print(" %4.1lf", phase->get(i) * 1000.0);
}
out->cr();
}
template <>
void WorkerDataArray<size_t>::WDAPrinter::details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads) {
out->print("%-25s", "");
for (uint i = 0; i < active_threads; ++i) {
out->print(" " SIZE_FORMAT, phase->get(i));
}
out->cr();
}
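WDAPrinter is specialized once per payload type, so the shared summary logic stays generic while the format strings differ for double (milliseconds) and size_t (counts). A compact sketch of that explicit-specialization pattern, with printf standing in for outputStream (illustrative names):

#include <cstddef>
#include <cstdio>

template <typename T>
struct Printer {
  static void summary(const char* title, T min, double avg, T max);
};

// One explicit specialization per payload type, as with WDAPrinter above.
template <>
void Printer<double>::summary(const char* title, double min, double avg, double max) {
  printf("%-25s Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf\n", title, min, avg, max);
}

template <>
void Printer<size_t>::summary(const char* title, size_t min, double avg, size_t max) {
  printf("%-25s Min: %zu, Avg: %4.1lf, Max: %zu\n", title, min, avg, max);
}

int main() {
  Printer<double>::summary("Object Copy", 1.2, 1.5, 1.9);
  Printer<size_t>::summary("Processed Buffers", 3, 5.0, 7);
  return 0;
}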
#ifndef PRODUCT
void WorkerDataArray_test() {
const uint length = 3;
const char* title = "Test array";
const bool print_sum = false;
const uint indent_level = 2;
WorkerDataArray<size_t> array(length, title, print_sum, indent_level);
WorkerDataArray<size_t> array(length, title);
assert(strncmp(array.title(), title, strlen(title)) == 0 , "Expected titles to match");
assert(array.should_print_sum() == print_sum, "Expected should_print_sum to match print_sum");
assert(array.indentation() == indent_level, "Expected indentation to match");
const size_t expected[length] = {5, 3, 7};
for (uint i = 0; i < length; i++) {
@ -46,10 +81,7 @@ void WorkerDataArray_test() {
}
assert(array.sum(length) == (5 + 3 + 7), "Expected sums to match");
assert(array.minimum(length) == 3, "Expected minimum to match");
assert(array.maximum(length) == 7, "Expected maximum to match");
assert(array.diff(length) == (7 - 3), "Expected diffs to match");
assert(array.average(length) == 5, "Expected averages to match");
assert(array.average(length) == 5.0, "Expected averages to match");
for (uint i = 0; i < length; i++) {
array.add(i, 1);


@ -22,18 +22,19 @@
*
*/
#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
#define SHARE_VM_GC_G1_WORKERDATAARRAY_HPP
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
class outputStream;
template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
friend class G1GCParPhasePrinter;
T* _data;
uint _length;
const char* _title;
bool _print_sum;
uint _indent_level;
bool _enabled;
WorkerDataArray<size_t>* _thread_work_items;
@ -42,11 +43,7 @@ class WorkerDataArray : public CHeapObj<mtGC> {
void set_all(T value);
public:
WorkerDataArray(uint length,
const char* title,
bool print_sum,
uint indent_level);
WorkerDataArray(uint length, const char* title);
~WorkerDataArray();
void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items);
@ -62,27 +59,30 @@ class WorkerDataArray : public CHeapObj<mtGC> {
double average(uint active_threads) const;
T sum(uint active_threads) const;
T minimum(uint active_threads) const;
T maximum(uint active_threads) const;
T diff(uint active_threads) const;
uint indentation() const {
return _indent_level;
}
const char* title() const {
return _title;
}
bool should_print_sum() const {
return _print_sum;
}
void clear();
void set_enabled(bool enabled) {
_enabled = enabled;
}
void reset() PRODUCT_RETURN;
void verify(uint active_threads) const PRODUCT_RETURN;
private:
class WDAPrinter {
public:
static void summary(outputStream* out, const char* title, double min, double avg, double max, double diff, double sum, bool print_sum);
static void summary(outputStream* out, const char* title, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum);
static void details(const WorkerDataArray<double>* phase, outputStream* out, uint active_threads);
static void details(const WorkerDataArray<size_t>* phase, outputStream* out, uint active_threads);
};
public:
void print_summary_on(outputStream* out, uint active_threads, bool print_sum = true) const;
void print_details_on(outputStream* out, uint active_threads) const;
};
#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP


@ -22,20 +22,18 @@
*
*/
#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
#define SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP
#include "gc/g1/workerDataArray.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/ostream.hpp"
template <typename T>
WorkerDataArray<T>::WorkerDataArray(uint length,
const char* title,
bool print_sum,
uint indent_level) :
WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
_title(title),
_length(0),
_print_sum(print_sum),
_indent_level(indent_level),
_thread_work_items(NULL),
_enabled(true) {
_thread_work_items(NULL) {
assert(length > 0, "Must have some workers to store data for");
_length = length;
_data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@ -93,29 +91,6 @@ T WorkerDataArray<T>::sum(uint active_threads) const {
return s;
}
template <typename T>
T WorkerDataArray<T>::minimum(uint active_threads) const {
T min = get(0);
for (uint i = 1; i < active_threads; ++i) {
min = MIN2(min, get(i));
}
return min;
}
template <typename T>
T WorkerDataArray<T>::maximum(uint active_threads) const {
T max = get(0);
for (uint i = 1; i < active_threads; ++i) {
max = MAX2(max, get(i));
}
return max;
}
template <typename T>
T WorkerDataArray<T>::diff(uint active_threads) const {
return maximum(active_threads) - minimum(active_threads);
}
template <typename T>
void WorkerDataArray<T>::clear() {
set_all(0);
@ -128,6 +103,27 @@ void WorkerDataArray<T>::set_all(T value) {
}
}
template <class T>
void WorkerDataArray<T>::print_summary_on(outputStream* out, uint active_threads, bool print_sum) const {
T max = get(0);
T min = max;
T sum = max; // max still equals get(0) here, so worker 0 is counted in the sum
for (uint i = 1; i < active_threads; ++i) {
T value = get(i);
max = MAX2(max, value);
min = MIN2(min, value);
sum += value;
}
T diff = max - min;
double avg = sum / (double) active_threads;
WDAPrinter::summary(out, title(), min, avg, max, diff, sum, print_sum);
}
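print_summary_on computes min, max, sum and the derived diff and average in a single pass over the active workers. An equivalent standalone version of that loop, with worker 0 included in the sum as fixed above (illustrative, not the HotSpot template; assumes n > 0):

#include <algorithm>
#include <cstdio>

void summarize(const double* data, unsigned n) {
  double max = data[0];
  double min = max;
  double sum = max;                 // include element 0 up front
  for (unsigned i = 1; i < n; ++i) {
    max = std::max(max, data[i]);
    min = std::min(min, data[i]);
    sum += data[i];
  }
  printf("Min: %.1f, Avg: %.1f, Max: %.1f, Diff: %.1f, Sum: %.1f\n",
         min, sum / n, max, max - min, sum);
}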
template <class T>
void WorkerDataArray<T>::print_details_on(outputStream* out, uint active_threads) const {
WDAPrinter::details(this, out, active_threads);
}
#ifndef PRODUCT
template <typename T>
void WorkerDataArray<T>::reset() {
@ -139,10 +135,6 @@ void WorkerDataArray<T>::reset() {
template <typename T>
void WorkerDataArray<T>::verify(uint active_threads) const {
if (!_enabled) {
return;
}
assert(active_threads <= _length, "Wrong number of active threads");
for (uint i = 0; i < active_threads; i++) {
assert(_data[i] != uninitialized(),
@ -163,3 +155,5 @@ inline double WorkerDataArray<double>::uninitialized() const {
return -1.0;
}
#endif
#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP


@ -33,9 +33,9 @@
#include "utilities/ostream.hpp"
YoungList::YoungList(G1CollectedHeap* g1h) :
_g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
_g1h(g1h), _head(NULL), _length(0),
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
guarantee(check_list_empty(false), "just making sure...");
guarantee(check_list_empty(), "just making sure...");
}
void YoungList::push_region(HeapRegion *hr) {
@ -86,9 +86,7 @@ void YoungList::empty_list() {
_survivor_tail = NULL;
_survivor_length = 0;
_last_sampled_rs_lengths = 0;
assert(check_list_empty(false), "just making sure...");
assert(check_list_empty(), "just making sure...");
}
bool YoungList::check_list_well_formed() {
@ -119,17 +117,13 @@ bool YoungList::check_list_well_formed() {
return ret;
}
bool YoungList::check_list_empty(bool check_sample) {
bool YoungList::check_list_empty() {
bool ret = true;
if (_length != 0) {
log_error(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
ret = false;
}
if (check_sample && _last_sampled_rs_lengths != 0) {
log_error(gc, verify)("### YOUNG LIST has non-zero last sampled RS lengths");
ret = false;
}
if (_head != NULL) {
log_error(gc, verify)("### YOUNG LIST does not have a NULL head");
ret = false;
@ -141,38 +135,6 @@ bool YoungList::check_list_empty(bool check_sample) {
return ret;
}
void
YoungList::rs_length_sampling_init() {
_sampled_rs_lengths = 0;
_curr = _head;
}
bool
YoungList::rs_length_sampling_more() {
return _curr != NULL;
}
void
YoungList::rs_length_sampling_next() {
assert( _curr != NULL, "invariant" );
size_t rs_length = _curr->rem_set()->occupied();
_sampled_rs_lengths += rs_length;
// The current region may not yet have been added to the
// incremental collection set (it gets added when it is
// retired as the current allocation region).
if (_curr->in_collection_set()) {
// Update the collection set policy information for this region
_g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
}
_curr = _curr->get_next_young_region();
if (_curr == NULL) {
_last_sampled_rs_lengths = _sampled_rs_lengths;
}
}
void
YoungList::reset_auxilary_lists() {
guarantee( is_empty(), "young list should be empty" );


@ -37,14 +37,9 @@ private:
HeapRegion* _survivor_head;
HeapRegion* _survivor_tail;
HeapRegion* _curr;
uint _length;
uint _survivor_length;
size_t _last_sampled_rs_lengths;
size_t _sampled_rs_lengths;
void empty_list(HeapRegion* list);
public:
@ -72,15 +67,6 @@ public:
return (size_t) survivor_length() * HeapRegion::GrainBytes;
}
void rs_length_sampling_init();
bool rs_length_sampling_more();
void rs_length_sampling_next();
void reset_sampled_info() {
_last_sampled_rs_lengths = 0;
}
size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
// for development purposes
void reset_auxilary_lists();
void clear() { _head = NULL; _length = 0; }
@ -97,7 +83,7 @@ public:
// debugging
bool check_list_well_formed();
bool check_list_empty(bool check_sample = true);
bool check_list_empty();
void print();
};


@ -405,7 +405,9 @@ size_t CollectedHeap::max_tlab_size() const {
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
// If a previous card-mark was deferred, flush it now.
flush_deferred_store_barrier(thread);
if (can_elide_initializing_store_barrier(new_obj)) {
if (can_elide_initializing_store_barrier(new_obj) ||
new_obj->is_typeArray()) {
// Arrays of non-references don't need a pre-barrier.
// The deferred_card_mark region should be empty
// following the flush above.
assert(thread->deferred_card_mark().is_empty(), "Error");


@ -58,6 +58,7 @@
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, metaspace)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, start)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, phases, task)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, plab)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, region)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, remset)) \


@ -43,6 +43,7 @@
LOG_TAG(classload) /* Trace all classes loaded */ \
LOG_TAG(classloaderdata) /* class loader loader_data lifetime */ \
LOG_TAG(classunload) /* Trace unloading of classes */ \
LOG_TAG(classpath) \
LOG_TAG(compaction) \
LOG_TAG(cpu) \
LOG_TAG(cset) \
@ -81,6 +82,7 @@
LOG_TAG(survivor) \
LOG_TAG(sweep) \
LOG_TAG(task) \
LOG_TAG(thread) \
LOG_TAG(tlab) \
LOG_TAG(time) \
LOG_TAG(verify) \


@ -208,9 +208,7 @@ void FileMapInfo::allocate_classpath_entry_table() {
count ++;
bytes += (int)entry_size;
bytes += name_bytes;
if (TraceClassPaths) {
tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
}
log_info(classpath)("add main shared path (%s) %s", (cpe->is_jar_file() ? "jar" : "dir"), name);
} else {
SharedClassPathEntry* ent = shared_classpath(cur_entry);
if (cpe->is_jar_file()) {
@ -275,9 +273,7 @@ bool FileMapInfo::validate_classpath_entry_table() {
struct stat st;
const char* name = ent->_name;
bool ok = true;
if (TraceClassPaths) {
tty->print_cr("[Checking shared classpath entry: %s]", name);
}
log_info(classpath)("checking shared classpath entry: %s", name);
if (os::stat(name, &st) != 0) {
fail_continue("Required classpath entry does not exist: %s", name);
ok = false;
@ -301,9 +297,7 @@ bool FileMapInfo::validate_classpath_entry_table() {
}
}
if (ok) {
if (TraceClassPaths) {
tty->print_cr("[ok]");
}
log_info(classpath)("ok");
} else if (!PrintSharedArchiveAndExit) {
_validating_classpath_entry_table = false;
return false;
@ -888,10 +882,8 @@ bool FileMapInfo::FileMapHeader::validate() {
char header_version[JVM_IDENT_MAX];
get_header_version(header_version);
if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
if (TraceClassPaths) {
tty->print_cr("Expected: %s", header_version);
tty->print_cr("Actual: %s", _jvm_ident);
}
log_info(classpath)("expected: %s", header_version);
log_info(classpath)("actual: %s", _jvm_ident);
FileMapInfo::fail_continue("The shared archive file was created by a different"
" version or build of HotSpot");
return false;
@ -919,7 +911,7 @@ bool FileMapInfo::validate_header() {
if (status) {
if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
if (!PrintSharedArchiveAndExit) {
fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
fail_continue("shared class paths mismatch (hint: enable -Xlog:classpath=info to diagnose the failure)");
status = false;
}
}


@ -129,7 +129,7 @@ class typeArrayOopDesc : public arrayOopDesc {
Metadata* metadata_at(int which) const {
return (Metadata*)*long_at_addr(which); }
void metadata_at_put(int which, Metadata* contents) {
*long_at_addr(which) = (long)contents;
*long_at_addr(which) = (jlong)contents;
}
#else
Metadata* metadata_at(int which) const {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3940,6 +3940,10 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods,
// and to be able to undo operation easily.
Array<int>* old_ordering = the_class->method_ordering();
the_class->set_method_ordering(scratch_class->method_ordering());
scratch_class->set_method_ordering(old_ordering);
ConstantPool* old_constants = the_class->constants();
the_class->set_constants(scratch_class->constants());
scratch_class->set_constants(old_constants); // See the previous comment.


@ -405,8 +405,9 @@ static AliasedFlag const aliased_jvm_flags[] = {
static AliasedLoggingFlag const aliased_logging_flags[] = {
{ "TraceClassLoading", LogLevel::Info, true, LogTag::_classload },
{ "TraceClassUnloading", LogLevel::Info, true, LogTag::_classunload },
{ "TraceClassPaths", LogLevel::Info, true, LogTag::_classpath },
{ "TraceClassResolution", LogLevel::Info, true, LogTag::_classresolve },
{ "TraceClassUnloading", LogLevel::Info, true, LogTag::_classunload },
{ "TraceExceptions", LogLevel::Info, true, LogTag::_exceptions },
{ "TraceMonitorInflation", LogLevel::Debug, true, LogTag::_monitorinflation },
{ "TraceBiasedLocking", LogLevel::Info, true, LogTag::_biasedlocking },
@ -3255,7 +3256,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// PrintSharedArchiveAndExit will turn on
// -Xshare:on
// -XX:+TraceClassPaths
// -Xlog:classpath=info
if (PrintSharedArchiveAndExit) {
if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != Flag::SUCCESS) {
return JNI_EINVAL;
@ -3263,9 +3264,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != Flag::SUCCESS) {
return JNI_EINVAL;
}
if (FLAG_SET_CMDLINE(bool, TraceClassPaths, true) != Flag::SUCCESS) {
return JNI_EINVAL;
}
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(classpath));
}
// Change the default value for flags which have different default values
@ -3318,10 +3317,6 @@ void Arguments::fix_appclasspath() {
_java_class_path->set_value(copy);
FreeHeap(copy); // a copy was made by set_value, so don't need this anymore
}
if (!PrintSharedArchiveAndExit) {
ClassLoader::trace_class_path(tty, "[classpath: ", _java_class_path->value());
}
}
static bool has_jar_files(const char* directory) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -836,8 +836,7 @@ void FlatProfiler::record_vm_tick() {
vm_thread_profiler->inc_thread_ticks();
// Get a snapshot of a current VMThread pc (and leave it running!)
// The call may fail if, for instance the VM thread is interrupted while
// holding the Interrupt_lock or for other reasons.
// The call may fail in some circumstances
epc = os::get_thread_pc(VMThread::vm_thread());
if(epc.pc() != NULL) {
if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {


@ -2403,9 +2403,6 @@ public:
product(bool, IgnoreEmptyClassPaths, false, \
"Ignore empty path elements in -classpath") \
\
product(bool, TraceClassPaths, false, \
"Trace processing of class paths") \
\
product(bool, TraceClassLoadingPreorder, false, \
"Trace all classes loaded in order referenced (not loaded)") \
\


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1320,15 +1320,12 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
// The rank Mutex::native is an exception in that it is not subject
// to the verification rules.
// Here are some further notes relating to mutex acquisition anomalies:
// . under Solaris, the interrupt lock gets acquired when doing
// profiling, so any lock could be held.
// . it is also ok to acquire Safepoint_lock at the very end while we
// already hold Terminator_lock - may happen because of periodic safepoints
if (this->rank() != Mutex::native &&
this->rank() != Mutex::suspend_resume &&
locks != NULL && locks->rank() <= this->rank() &&
!SafepointSynchronize::is_at_safepoint() &&
this != Interrupt_lock && this != ProfileVM_lock &&
!(this == Safepoint_lock && contains(locks, Terminator_lock) &&
SafepointSynchronize::is_synchronizing())) {
new_owner->print_owned_locks();


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,6 @@ Mutex* JmethodIdCreation_lock = NULL;
Mutex* JfieldIdCreation_lock = NULL;
Monitor* JNICritical_lock = NULL;
Mutex* JvmtiThreadState_lock = NULL;
Monitor* JvmtiPendingEvent_lock = NULL;
Monitor* Heap_lock = NULL;
Mutex* ExpandHeap_lock = NULL;
Mutex* AdapterHandlerLibrary_lock = NULL;
@ -73,8 +72,6 @@ Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL;
Monitor* SLT_lock = NULL;
Monitor* FullGCCount_lock = NULL;
Monitor* CMark_lock = NULL;
Mutex* CMRegionStack_lock = NULL;
Mutex* SATB_Q_FL_lock = NULL;
Monitor* SATB_Q_CBL_mon = NULL;
Mutex* Shared_SATB_Q_lock = NULL;
@ -94,11 +91,8 @@ Mutex* MultiArray_lock = NULL;
Monitor* Terminator_lock = NULL;
Monitor* BeforeExit_lock = NULL;
Monitor* Notify_lock = NULL;
Monitor* Interrupt_lock = NULL;
Monitor* ProfileVM_lock = NULL;
Mutex* ProfilePrint_lock = NULL;
Mutex* ExceptionCache_lock = NULL;
Monitor* ObjAllocPost_lock = NULL;
Mutex* OsrList_lock = NULL;
#ifndef PRODUCT
@ -184,8 +178,6 @@ void mutex_init() {
}
if (UseG1GC) {
def(CMark_lock , Monitor, nonleaf, true, Monitor::_safepoint_check_never); // coordinate concurrent mark thread
def(CMRegionStack_lock , Mutex, leaf, true, Monitor::_safepoint_check_never);
def(SATB_Q_FL_lock , Mutex , special, true, Monitor::_safepoint_check_never);
def(SATB_Q_CBL_mon , Monitor, nonleaf, true, Monitor::_safepoint_check_never);
def(Shared_SATB_Q_lock , Mutex, nonleaf, true, Monitor::_safepoint_check_never);
@ -206,12 +198,10 @@ void mutex_init() {
def(ParGCRareEvent_lock , Mutex , leaf , true, Monitor::_safepoint_check_sometimes);
def(DerivedPointerTableGC_lock , Mutex, leaf, true, Monitor::_safepoint_check_never);
def(CodeCache_lock , Mutex , special, true, Monitor::_safepoint_check_never);
def(Interrupt_lock , Monitor, special, true, Monitor::_safepoint_check_never); // used for interrupt processing
def(RawMonitor_lock , Mutex, special, true, Monitor::_safepoint_check_never);
def(OopMapCacheAlloc_lock , Mutex, leaf, true, Monitor::_safepoint_check_always); // used for oop_map_cache allocation.
def(Patching_lock , Mutex , special, true, Monitor::_safepoint_check_never); // used for safepointing and code patching.
def(ObjAllocPost_lock , Monitor, special, false, Monitor::_safepoint_check_never);
def(Service_lock , Monitor, special, true, Monitor::_safepoint_check_never); // used for service thread operations
def(JmethodIdCreation_lock , Mutex , leaf, true, Monitor::_safepoint_check_always); // used for creating jmethodIDs.
@ -267,7 +257,6 @@ void mutex_init() {
def(MultiArray_lock , Mutex , nonleaf+2, false, Monitor::_safepoint_check_always); // locks SymbolTable_lock
def(JvmtiThreadState_lock , Mutex , nonleaf+2, false, Monitor::_safepoint_check_always); // Used by JvmtiThreadState/JvmtiEventController
def(JvmtiPendingEvent_lock , Monitor, nonleaf, false, Monitor::_safepoint_check_never); // Used by JvmtiCodeBlobEvents
def(Management_lock , Mutex , nonleaf+2, false, Monitor::_safepoint_check_always); // used for JVM management
def(Compile_lock , Mutex , nonleaf+3, true, Monitor::_safepoint_check_sometimes);
@ -277,7 +266,6 @@ void mutex_init() {
def(MethodCompileQueue_lock , Monitor, nonleaf+4, true, Monitor::_safepoint_check_always);
def(Debug2_lock , Mutex , nonleaf+4, true, Monitor::_safepoint_check_never);
def(Debug3_lock , Mutex , nonleaf+4, true, Monitor::_safepoint_check_never);
def(ProfileVM_lock , Monitor, special, false, Monitor::_safepoint_check_never); // used for profiling of the VMThread
def(CompileThread_lock , Monitor, nonleaf+5, false, Monitor::_safepoint_check_always);
def(PeriodicTask_lock , Monitor, nonleaf+5, true, Monitor::_safepoint_check_sometimes);
if (WhiteBoxAPI) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,6 @@ extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI metho
extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers
extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in
extern Mutex* JvmtiThreadState_lock; // a lock on modification of JVMTI thread data
extern Monitor* JvmtiPendingEvent_lock; // a lock on the JVMTI pending events list
extern Monitor* Heap_lock; // a lock on the heap
extern Mutex* ExpandHeap_lock; // a lock on expanding the heap
extern Mutex* AdapterHandlerLibrary_lock; // a lock on the AdapterHandlerLibrary
@ -68,8 +67,6 @@ extern Monitor* CGC_lock; // used for coordination betwee
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL
extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
extern Monitor* CMark_lock; // used for concurrent mark thread coordination
extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack
extern Mutex* SATB_Q_FL_lock; // Protects SATB Q
// buffer free list.
extern Monitor* SATB_Q_CBL_mon; // Protects SATB Q
@ -98,8 +95,6 @@ extern Mutex* MultiArray_lock; // a lock used to guard allocat
extern Monitor* Terminator_lock; // a lock used to guard termination of the vm
extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks
extern Monitor* Notify_lock; // a lock used to synchronize the start-up of the vm
extern Monitor* Interrupt_lock; // a lock used for condition variable mediated interrupt processing
extern Monitor* ProfileVM_lock; // a lock used for profiling the VMThread
extern Mutex* ProfilePrint_lock; // a lock used to serialize the printing of profiles
extern Mutex* ExceptionCache_lock; // a lock used to synchronize exception cache updates
extern Mutex* OsrList_lock; // a lock used to serialize access to OSR queues


@ -324,6 +324,10 @@ void Thread::record_stack_base_and_size() {
// record thread's native stack, stack grows downward
MemTracker::record_thread_stack(stack_end(), stack_size());
#endif // INCLUDE_NMT
log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
os::current_thread_id(), p2i(stack_base() - stack_size()),
p2i(stack_base()), stack_size()/1024);
}
@ -1802,6 +1806,10 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// Call after last event on thread
EVENT_THREAD_EXIT(this);
log_info(os, thread)("Thread " UINTX_FORMAT " %s.",
os::current_thread_id(),
exit_type == JavaThread::normal_exit ? "exiting" : "detaching");
// Call Thread.exit(). We try 3 times in case we got another Thread.stop during
// the execution of the method. If that is not enough, then we don't really care. Thread.stop
// is deprecated anyhow.
@ -2491,18 +2499,25 @@ void JavaThread::create_stack_guard_pages() {
// warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
if (allocate && !os::create_stack_guard_pages((char *) low_addr, len)) {
warning("Attempt to allocate stack guard pages failed.");
log_warning(os, thread)("Attempt to allocate stack guard pages failed.");
return;
}
if (os::guard_memory((char *) low_addr, len)) {
_stack_guard_state = stack_guard_enabled;
} else {
warning("Attempt to protect stack guard pages failed.");
log_warning(os, thread)("Attempt to protect stack guard pages failed ("
PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
if (os::uncommit_memory((char *) low_addr, len)) {
warning("Attempt to deallocate stack guard pages failed.");
log_warning(os, thread)("Attempt to deallocate stack guard pages failed.");
}
return;
}
log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages activated: "
PTR_FORMAT "-" PTR_FORMAT ".",
os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
}
void JavaThread::remove_stack_guard_pages() {
@ -2515,16 +2530,25 @@ void JavaThread::remove_stack_guard_pages() {
if (os::remove_stack_guard_pages((char *) low_addr, len)) {
_stack_guard_state = stack_guard_unused;
} else {
warning("Attempt to deallocate stack guard pages failed.");
log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
return;
}
} else {
if (_stack_guard_state == stack_guard_unused) return;
if (os::unguard_memory((char *) low_addr, len)) {
_stack_guard_state = stack_guard_unused;
} else {
warning("Attempt to unprotect stack guard pages failed.");
log_warning(os, thread)("Attempt to unprotect stack guard pages failed ("
PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
return;
}
}
log_debug(os, thread)("Thread " UINTX_FORMAT " stack guard pages removed: "
PTR_FORMAT "-" PTR_FORMAT ".",
os::current_thread_id(), p2i(low_addr), p2i(low_addr + len));
}
void JavaThread::enable_stack_reserved_zone() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test TestGCLogMessages
* @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330
* @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330 8076463 8150630
* @summary Ensure the output for a minor GC with G1
* includes the expected necessary messages.
* @key gc
@@ -38,10 +38,24 @@ import jdk.test.lib.OutputAnalyzer;
public class TestGCLogMessages {
private enum Level {
OFF, DEBUG, TRACE;
public boolean lessOrEqualTo(Level other) {
return this.compareTo(other) <= 0;
OFF(""),
INFO("info"),
DEBUG("debug"),
TRACE("trace");
private String logName;
Level(String logName) {
this.logName = logName;
}
public boolean lessThan(Level other) {
return this.compareTo(other) < 0;
}
public String toString() {
return logName;
}
}
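The new helper compares declaration order (OFF < INFO < DEBUG < TRACE), so a message tagged with a given level must be absent whenever the VM logs at a lower level. A standalone illustration (not part of the test itself):

    // DEBUG-only messages are hidden at info level but visible at trace level:
    assert Level.INFO.lessThan(Level.DEBUG);
    assert !Level.TRACE.lessThan(Level.DEBUG);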
private class LogMessageWithLevel {
@@ -56,44 +70,48 @@ public class TestGCLogMessages {
private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
// Update RS
new LogMessageWithLevel("Scan HCC", Level.DEBUG),
new LogMessageWithLevel("Scan HCC", Level.TRACE),
// Ext Root Scan
new LogMessageWithLevel("Thread Roots:", Level.DEBUG),
new LogMessageWithLevel("StringTable Roots:", Level.DEBUG),
new LogMessageWithLevel("Universe Roots:", Level.DEBUG),
new LogMessageWithLevel("JNI Handles Roots:", Level.DEBUG),
new LogMessageWithLevel("ObjectSynchronizer Roots:", Level.DEBUG),
new LogMessageWithLevel("FlatProfiler Roots", Level.DEBUG),
new LogMessageWithLevel("Management Roots", Level.DEBUG),
new LogMessageWithLevel("SystemDictionary Roots", Level.DEBUG),
new LogMessageWithLevel("CLDG Roots", Level.DEBUG),
new LogMessageWithLevel("JVMTI Roots", Level.DEBUG),
new LogMessageWithLevel("SATB Filtering", Level.DEBUG),
new LogMessageWithLevel("CM RefProcessor Roots", Level.DEBUG),
new LogMessageWithLevel("Wait For Strong CLD", Level.DEBUG),
new LogMessageWithLevel("Weak CLD Roots", Level.DEBUG),
new LogMessageWithLevel("Thread Roots", Level.TRACE),
new LogMessageWithLevel("StringTable Roots", Level.TRACE),
new LogMessageWithLevel("Universe Roots", Level.TRACE),
new LogMessageWithLevel("JNI Handles Roots", Level.TRACE),
new LogMessageWithLevel("ObjectSynchronizer Roots", Level.TRACE),
new LogMessageWithLevel("FlatProfiler Roots", Level.TRACE),
new LogMessageWithLevel("Management Roots", Level.TRACE),
new LogMessageWithLevel("SystemDictionary Roots", Level.TRACE),
new LogMessageWithLevel("CLDG Roots", Level.TRACE),
new LogMessageWithLevel("JVMTI Roots", Level.TRACE),
new LogMessageWithLevel("SATB Filtering", Level.TRACE),
new LogMessageWithLevel("CM RefProcessor Roots", Level.TRACE),
new LogMessageWithLevel("Wait For Strong CLD", Level.TRACE),
new LogMessageWithLevel("Weak CLD Roots", Level.TRACE),
// Redirty Cards
new LogMessageWithLevel("Redirty Cards", Level.DEBUG),
new LogMessageWithLevel("Parallel Redirty", Level.DEBUG),
new LogMessageWithLevel("Redirtied Cards", Level.DEBUG),
new LogMessageWithLevel("Parallel Redirty", Level.TRACE),
new LogMessageWithLevel("Redirtied Cards", Level.TRACE),
// Misc Top-level
new LogMessageWithLevel("Code Root Purge", Level.DEBUG),
new LogMessageWithLevel("String Dedup Fixup", Level.DEBUG),
new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
new LogMessageWithLevel("Code Roots Purge", Level.DEBUG),
new LogMessageWithLevel("String Dedup Fixup", Level.INFO),
new LogMessageWithLevel("Expand Heap After Collection", Level.INFO),
// Free CSet
new LogMessageWithLevel("Young Free CSet", Level.TRACE),
new LogMessageWithLevel("Non-Young Free CSet", Level.TRACE),
new LogMessageWithLevel("Young Free Collection Set", Level.DEBUG),
new LogMessageWithLevel("Non-Young Free Collection Set", Level.DEBUG),
// Humongous Eager Reclaim
new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
new LogMessageWithLevel("Humongous Register", Level.DEBUG),
// Preserve CM Referents
new LogMessageWithLevel("Preserve CM Refs", Level.DEBUG),
// Merge PSS
new LogMessageWithLevel("Merge Per-Thread State", Level.INFO),
};
void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception {
for (LogMessageWithLevel l : messages) {
if (level.lessOrEqualTo(l.level)) {
if (level.lessThan(l.level)) {
output.shouldNotContain(l.message);
} else {
output.shouldContain(l.message);
output.shouldMatch("\\[" + l.level + ".*" + l.message);
}
}
}
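For context, a hedged sketch of how this checker is typically driven from the test's main flow (the GCTest worker class and the exact -Xlog selector are assumptions modeled on the surrounding test, not quoted from this commit):

    // Inside an instance method of TestGCLogMessages:
    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
        "-Xlog:gc+phases=debug", "-XX:+UseG1GC", GCTest.class.getName());
    OutputAnalyzer output = new OutputAnalyzer(pb.start());
    checkMessagesAtLevel(output, allLogMessages, Level.DEBUG);
    output.shouldHaveExitValue(0);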