8046809: vm/mlvm/meth/stress/compiler/deoptimize CodeCache is full

Use separate sweeper thread; enables more aggressive sweeping.
Reviewed-by: kvn, jrose

This commit is contained in: parent 2597d484c6, commit 6520320d1a
30 changed files with 319 additions and 334 deletions
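Before the per-file hunks, a note on the design: this change moves sweeping out of the compiler threads (which used to sweep the code cache in fractions, governed by the now-obsolete NmethodSweepFraction/NmethodSweepCheckInterval flags, from within CompileQueue::get()) and into a dedicated "Sweeper thread" (a CodeCacheSweeperThread) started by init_compiler_sweeper_threads() and notified from CodeCache::allocate() via NMethodSweeper::notify(); CodeCache_lock becomes a Monitor so it can be waited on. Since the tail of sweeper.cpp is truncated at the end of this page, the following is only an illustrative sketch of that notify-and-sweep pattern in standard C++ (std::mutex/std::condition_variable stand in for HotSpot's Monitor; every name in the sketch is invented for illustration, none is HotSpot API):

// Sketch only -- models the control flow this commit introduces:
// allocation pressure notifies a dedicated sweeper thread instead of
// compiler threads sweeping inline.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class SweeperModel {
  std::mutex              lock_;          // stands in for CodeCache_lock (now a Monitor)
  std::condition_variable cv_;
  bool                    should_sweep_ = false;
  std::atomic<bool>       done_{false};

 public:
  // Called from the allocation path, like NMethodSweeper::notify().
  void notify_allocation_pressure() {
    { std::lock_guard<std::mutex> g(lock_); should_sweep_ = true; }
    cv_.notify_one();
  }

  void shutdown() { done_ = true; cv_.notify_one(); }

  // Body of the dedicated sweeper thread, like NMethodSweeper::sweeper_loop().
  void sweeper_loop() {
    while (!done_) {
      bool sweep_now;
      {
        std::unique_lock<std::mutex> g(lock_);
        // Wake up periodically even without a notification, so cold nmethods
        // still age out under low allocation pressure.
        cv_.wait_for(g, std::chrono::milliseconds(100),
                     [&] { return should_sweep_ || done_.load(); });
        sweep_now = should_sweep_;
        should_sweep_ = false;
      }
      if (sweep_now) {
        std::puts("scan stacks, then sweep nmethods");  // cf. do_stack_scanning()
      }
    }
  }
};

int main() {
  SweeperModel m;
  std::thread sweeper(&SweeperModel::sweeper_loop, &m);
  m.notify_allocation_pressure();   // e.g. a full code heap in CodeCache::allocate()
  m.shutdown();
  sweeper.join();
}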
@@ -1093,9 +1093,8 @@ void ciEnv::register_method(ciMethod* target,
       // JVMTI -- compiled method notification (must be done outside lock)
       nm->post_compiled_method_load_event();
     } else {
-      // The CodeCache is full. Print out warning and disable compilation.
+      // The CodeCache is full.
       record_failure("code cache is full");
-      CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
     }
   }

@@ -229,8 +229,8 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
   return blob;
 }

-void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
-  return CodeCache::allocate(size, CodeBlobType::NonNMethod, is_critical);
+void* BufferBlob::operator new(size_t s, unsigned size) throw() {
+  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
 }

 void BufferBlob::free(BufferBlob *blob) {
@@ -260,10 +260,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
   unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    // The parameter 'true' indicates a critical memory allocation.
-    // This means that CodeCacheMinimumFreeSpace is used, if necessary
-    const bool is_critical = true;
-    blob = new (size, is_critical) AdapterBlob(size, cb);
+    blob = new (size) AdapterBlob(size, cb);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();

@@ -285,10 +282,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
   size += round_to(buffer_size, oopSize);
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    // The parameter 'true' indicates a critical memory allocation.
-    // This means that CodeCacheMinimumFreeSpace is used, if necessary
-    const bool is_critical = true;
-    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
+    blob = new (size) MethodHandlesAdapterBlob(size);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -336,14 +330,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,


 void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }

 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }

@@ -221,7 +221,7 @@ class BufferBlob: public CodeBlob {
   BufferBlob(const char* name, int size);
   BufferBlob(const char* name, int size, CodeBuffer* cb);

-  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
+  void* operator new(size_t s, unsigned size) throw();

  public:
   // Creation
@@ -44,6 +44,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/sweeper.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"

@@ -192,7 +193,7 @@ void CodeCache::initialize_heaps() {
   }

   // Make sure we have enough space for VM internal code
-  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
   }
@@ -348,14 +349,18 @@ CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
   return next_blob(get_code_heap(cb), cb);
 }

-CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
-  // Do not seize the CodeCache lock here--if the caller has not
-  // already done so, we are going to lose bigtime, since the code
-  // cache will contain a garbage CodeBlob until the caller can
-  // run the constructor for the CodeBlob subclass he is busy
-  // instantiating.
+/**
+ * Do not seize the CodeCache lock here--if the caller has not
+ * already done so, we are going to lose bigtime, since the code
+ * cache will contain a garbage CodeBlob until the caller can
+ * run the constructor for the CodeBlob subclass he is busy
+ * instantiating.
+ */
+CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
+  // Possibly wakes up the sweeper thread.
+  NMethodSweeper::notify(code_blob_type);
   assert_locked_or_safepoint(CodeCache_lock);
-  assert(size > 0, "allocation request must be reasonable");
+  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
   if (size <= 0) {
     return NULL;
   }

@@ -366,14 +371,18 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
   assert(heap != NULL, "heap is null");

   while (true) {
-    cb = (CodeBlob*)heap->allocate(size, is_critical);
+    cb = (CodeBlob*)heap->allocate(size);
     if (cb != NULL) break;
     if (!heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
-        // Fallback solution: Store non-nmethod code in the non-profiled code heap
-        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
+        // Fallback solution: Store non-nmethod code in the non-profiled code heap.
+        // Note that at in the sweeper, we check the reverse_free_ratio of the non-profiled
+        // code heap and force stack scanning if less than 10% if the code heap are free.
+        return allocate(size, CodeBlobType::MethodNonProfiled);
       }
+      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      CompileBroker::handle_full_code_cache(code_blob_type);
       return NULL;
     }
     if (PrintCodeCacheExtension) {
@@ -770,19 +779,6 @@ size_t CodeCache::max_capacity() {
   return max_cap;
 }

-/**
- * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
- */
-bool CodeCache::is_full(int* code_blob_type) {
-  FOR_ALL_HEAPS(heap) {
-    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-      *code_blob_type = (*heap)->code_blob_type();
-      return true;
-    }
-  }
-  return false;
-}
-
 /**
  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
  * is free, reverse_free_ratio() returns 4.
@@ -792,9 +788,13 @@ double CodeCache::reverse_free_ratio(int code_blob_type) {
   if (heap == NULL) {
     return 0;
   }
-  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+
+  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
   double max_capacity = (double)heap->max_capacity();
-  return max_capacity / unallocated_capacity;
+  double result = max_capacity / unallocated_capacity;
+  assert (max_capacity >= unallocated_capacity, "Must be");
+  assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
+  return result;
 }

 size_t CodeCache::bytes_allocated_in_freelists() {
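A quick sanity check of the arithmetic in the hunk above (a toy recomputation in plain C++, not HotSpot code): with max_capacity = 240 MB and 60 MB unallocated, i.e. 25% free, the ratio is 240/60 = 4, matching the doc comment, and it grows toward max_capacity as free space approaches zero; the MAX2-style clamp keeps the division defined when the heap is completely full.

#include <algorithm>
#include <cassert>

// Mirrors the patched reverse_free_ratio() on plain numbers instead of CodeHeap state.
double reverse_free_ratio(double max_capacity, double unallocated_capacity) {
  unallocated_capacity = std::max(unallocated_capacity, 1.0); // avoid division by 0
  double result = max_capacity / unallocated_capacity;
  assert(result >= 1.0 && "reverse_free_ratio must be at least 1");
  return result;
}
// reverse_free_ratio(240.0, 60.0) == 4.0   (25% free -> ratio 4)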
@@ -120,7 +120,7 @@ class CodeCache : AllStatic {
   static void initialize();

   // Allocation/administration
-  static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, int code_blob_type);  // allocates a new CodeBlob
   static void commit(CodeBlob* cb);    // called when the allocated CodeBlob has been filled
   static int alignment_unit();         // guaranteed alignment of all CodeBlobs
   static int alignment_offset();       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)

@@ -182,7 +182,6 @@ class CodeCache : AllStatic {
   static size_t unallocated_capacity();
   static size_t max_capacity();

-  static bool is_full(int* code_blob_type);
   static double reverse_free_ratio(int code_blob_type);

   static bool needs_cache_clean() { return _needs_cache_clean; }
@@ -804,10 +804,7 @@ nmethod::nmethod(
 #endif // def HAVE_DTRACE_H

 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
-  // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
-  // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
-  bool is_critical = SegmentedCodeCache;
-  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
+  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
 }

 nmethod::nmethod(

@@ -63,7 +63,6 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
   // If changing the name, update the other file accordingly.
   BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
   if (blob == NULL) {
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return NULL;
   }
   _chunk = blob->content_begin();
@@ -156,8 +156,6 @@ long CompileBroker::_peak_compilation_time = 0;
 CompileQueue* CompileBroker::_c2_compile_queue = NULL;
 CompileQueue* CompileBroker::_c1_compile_queue = NULL;

-GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
-

 class CompilationLog : public StringEventLog {
  public:

@@ -649,13 +647,10 @@ void CompileQueue::free_all() {
   lock()->notify_all();
 }

-// ------------------------------------------------------------------
-// CompileQueue::get
-//
-// Get the next CompileTask from a CompileQueue
+/**
+ * Get the next CompileTask from a CompileQueue
+ */
 CompileTask* CompileQueue::get() {
-  NMethodSweeper::possibly_sweep();
-
   MutexLocker locker(lock());
   // If _first is NULL we have no more compile jobs. There are two reasons for
   // having no compile jobs: First, we compiled everything we wanted. Second,
@@ -668,24 +663,6 @@ CompileTask* CompileQueue::get() {
       return NULL;
     }

-    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
-      // Wait a certain amount of time to possibly do another sweep.
-      // We must wait until stack scanning has happened so that we can
-      // transition a method's state from 'not_entrant' to 'zombie'.
-      long wait_time = NmethodSweepCheckInterval * 1000;
-      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
-        // Only one thread at a time can do sweeping. Scale the
-        // wait time according to the number of compiler threads.
-        // As a result, the next sweep is likely to happen every 100ms
-        // with an arbitrary number of threads that do sweeping.
-        wait_time = 100 * CICompilerCount;
-      }
-      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
-      if (timeout) {
-        MutexUnlocker ul(lock());
-        NMethodSweeper::possibly_sweep();
-      }
-    } else {
       // If there are no compilation tasks and we can compile new jobs
       // (i.e., there is enough free space in the code cache) there is
       // no need to invoke the sweeper. As a result, the hotness of methods

@@ -697,7 +674,6 @@ CompileTask* CompileQueue::get() {
       // is not critical and we do not want idle compiler threads to wake up too often.
       lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
     }
-    }

   if (CompileBroker::is_compilation_disabled_forever()) {
     return NULL;
@@ -886,8 +862,8 @@ void CompileBroker::compilation_init() {
   _compilers[1] = new SharkCompiler();
 #endif // SHARK

-  // Start the CompilerThreads
-  init_compiler_threads(c1_count, c2_count);
+  // Start the compiler thread(s) and the sweeper thread
+  init_compiler_sweeper_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
   // by the implementation of java.lang.management.CompilationMBean.
   {

@@ -991,13 +967,10 @@ void CompileBroker::compilation_init() {
 }


-CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
-                                                    AbstractCompiler* comp, TRAPS) {
-  CompilerThread* compiler_thread = NULL;
-
-  Klass* k =
-    SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(),
-                                      true, CHECK_0);
+JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
+                                       AbstractCompiler* comp, bool compiler_thread, TRAPS) {
+  JavaThread* thread = NULL;
+  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
   instanceKlassHandle klass (THREAD, k);
   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
   Handle string = java_lang_String::create_from_str(name, CHECK_0);
@@ -1015,7 +988,11 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue

   {
     MutexLocker mu(Threads_lock, THREAD);
-    compiler_thread = new CompilerThread(queue, counters);
+    if (compiler_thread) {
+      thread = new CompilerThread(queue, counters);
+    } else {
+      thread = new CodeCacheSweeperThread();
+    }
     // At this point the new CompilerThread data-races with this startup
     // thread (which I believe is the primoridal thread and NOT the VM
     // thread). This means Java bytecodes being executed at startup can

@@ -1028,12 +1005,12 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
     // in that case. However, since this must work and we do not allow
     // exceptions anyway, check and abort if this fails.

-    if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
+    if (thread == NULL || thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                     os::native_thread_creation_failed_msg());
     }

-    java_lang_Thread::set_thread(thread_oop(), compiler_thread);
+    java_lang_Thread::set_thread(thread_oop(), thread);

     // Note that this only sets the JavaThread _priority field, which by
     // definition is limited to Java priorities and not OS priorities.
@@ -1054,24 +1031,26 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
         native_prio = os::java_to_os_priority[NearMaxPriority];
       }
     }
-    os::set_native_priority(compiler_thread, native_prio);
+    os::set_native_priority(thread, native_prio);

     java_lang_Thread::set_daemon(thread_oop());

-    compiler_thread->set_threadObj(thread_oop());
-    compiler_thread->set_compiler(comp);
-    Threads::add(compiler_thread);
-    Thread::start(compiler_thread);
+    thread->set_threadObj(thread_oop());
+    if (compiler_thread) {
+      thread->as_CompilerThread()->set_compiler(comp);
+    }
+    Threads::add(thread);
+    Thread::start(thread);
   }

   // Let go of Threads_lock before yielding
   os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

-  return compiler_thread;
+  return thread;
 }


-void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
+void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
 #if !defined(ZERO) && !defined(SHARK)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
@@ -1088,17 +1067,14 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler

   int compiler_count = c1_compiler_count + c2_compiler_count;

-  _compiler_threads =
-    new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
-
   char name_buffer[256];
+  const bool compiler_thread = true;
   for (int i = 0; i < c2_compiler_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // Shark and C2
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
-    _compiler_threads->append(new_thread);
+    make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
   }

   for (int i = c2_compiler_count; i < compiler_count; i++) {
@@ -1106,13 +1082,17 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // C1
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
-    _compiler_threads->append(new_thread);
+    make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
   }

   if (UsePerfData) {
     PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
   }
+
+  if (MethodFlushing) {
+    // Initialize the sweeper thread
+    make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
+  }
 }

@@ -1759,13 +1739,6 @@ void CompileBroker::compiler_thread_loop() {
     // We need this HandleMark to avoid leaking VM handles.
     HandleMark hm(thread);

-    // Check if the CodeCache is full
-    int code_blob_type = 0;
-    if (CodeCache::is_full(&code_blob_type)) {
-      // The CodeHeap for code_blob_type is really full
-      handle_full_code_cache(code_blob_type);
-    }
-
     CompileTask* task = queue->get();
     if (task == NULL) {
       continue;

@@ -1773,8 +1746,9 @@ void CompileBroker::compiler_thread_loop() {

     // Give compiler threads an extra quanta. They tend to be bursty and
     // this helps the compiler to finish up the job.
-    if( CompilerThreadHintNoPreempt )
+    if (CompilerThreadHintNoPreempt) {
       os::hint_no_preempt();
+    }

     // trace per thread time and compile statistics
     CompilerCounters* counters = ((CompilerThread*)thread)->counters();
@@ -2074,8 +2048,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
 }

 /**
- * The CodeCache is full. Print out warning and disable compilation
- * or try code cache cleaning so compilation can continue later.
+ * The CodeCache is full. Print warning and disable compilation.
+ * Schedule code cache cleaning so compilation can continue later.
+ * This function needs to be called only from CodeCache::allocate(),
+ * since we currently handle a full code cache uniformly.
  */
 void CompileBroker::handle_full_code_cache(int code_blob_type) {
   UseInterpreter = true;

@@ -2107,10 +2083,6 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
     if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
       NMethodSweeper::log_sweep("disable_compiler");
     }
-    // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
-    // without having to consider the state in which the current thread is.
-    ThreadInVMfromUnknown in_vm;
-    NMethodSweeper::possibly_sweep();
   } else {
     disable_compilation_forever();
   }
@@ -290,8 +290,6 @@ class CompileBroker: AllStatic {
   static CompileQueue* _c2_compile_queue;
   static CompileQueue* _c1_compile_queue;

-  static GrowableArray<CompilerThread*>* _compiler_threads;
-
   // performance counters
   static PerfCounter* _perf_total_compilation;
   static PerfCounter* _perf_native_compilation;

@@ -339,8 +337,8 @@ class CompileBroker: AllStatic {

   static volatile jint _print_compilation_warning;

-  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
-  static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
+  static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
+  static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   static bool is_compile_blocking();
@@ -1077,7 +1077,6 @@ IRT_END
 address SignatureHandlerLibrary::set_handler_blob() {
   BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
   if (handler_blob == NULL) {
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return NULL;
   }
   address handler = handler_blob->code_begin();
@@ -171,13 +171,13 @@ void CodeHeap::clear() {
 }


-void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
+void* CodeHeap::allocate(size_t instance_size) {
   size_t number_of_segments = size_to_segments(instance_size + header_size());
   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

   // First check if we can satisfy request from freelist
   NOT_PRODUCT(verify());
-  HeapBlock* block = search_freelist(number_of_segments, is_critical);
+  HeapBlock* block = search_freelist(number_of_segments);
   NOT_PRODUCT(verify());

   if (block != NULL) {

@@ -191,15 +191,6 @@ void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
   // Ensure minimum size for allocation to the heap.
   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

-  if (!is_critical) {
-    // Make sure the allocation fits in the unallocated heap without using
-    // the CodeCacheMimimumFreeSpace that is reserved for critical allocations.
-    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
-      // Fail allocation
-      return NULL;
-    }
-  }
-
   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
     HeapBlock* b = block_at(_next_segment);
@@ -427,24 +418,17 @@ void CodeHeap::add_to_freelist(HeapBlock* a) {
  * Search freelist for an entry on the list with the best fit.
  * @return NULL, if no one was found
  */
-FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
+FreeBlock* CodeHeap::search_freelist(size_t length) {
   FreeBlock* found_block = NULL;
   FreeBlock* found_prev  = NULL;
   size_t     found_length = 0;

   FreeBlock* prev = NULL;
   FreeBlock* cur  = _freelist;
-  const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;

   // Search for first block that fits
   while(cur != NULL) {
     if (cur->length() >= length) {
-      // Non critical allocations are not allowed to use the last part of the code heap.
-      // Make sure the end of the allocation doesn't cross into the last part of the code heap.
-      if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
-        // The freelist is sorted by address - if one fails, all consecutive will also fail.
-        break;
-      }
       // Remember block, its previous element, and its length
       found_block = cur;
       found_prev  = prev;
@@ -120,7 +120,7 @@ class CodeHeap : public CHeapObj<mtCode> {

   // Toplevel freelist management
   void add_to_freelist(HeapBlock* b);
-  FreeBlock* search_freelist(size_t length, bool is_critical);
+  FreeBlock* search_freelist(size_t length);

   // Iteration helpers
   void* next_free(HeapBlock* b) const;

@@ -140,8 +140,8 @@ class CodeHeap : public CHeapObj<mtCode> {
   bool expand_by(size_t size);          // expands committed memory by size

   // Memory allocation
-  void* allocate  (size_t size, bool is_critical); // allocates a block of size or returns NULL
-  void  deallocate(void* p);                       // deallocates a block
+  void* allocate  (size_t size);        // Allocate 'size' bytes in the code cache or return NULL
+  void  deallocate(void* p);            // Deallocate memory

   // Attributes
   char* low_boundary() const            { return _memory.low_boundary (); }
@@ -535,7 +535,6 @@ void Compile::init_scratch_buffer_blob(int const_size) {
   if (scratch_buffer_blob() == NULL) {
     // Let CompilerBroker disable further compilations.
     record_failure("Not enough space for scratch buffer in CodeCache");
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return;
   }
 }

@@ -1166,7 +1166,6 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return NULL;
   }
   // Configure the code buffer.

@@ -1491,7 +1490,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         C->record_failure("CodeCache is full");
-        CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
         return;
       }

@@ -1648,7 +1646,6 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return;
   }

@@ -36,6 +36,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/exceptions.hpp"


 /*

@@ -55,26 +56,30 @@
 bool MethodHandles::_enabled = false; // set true after successful native linkage
 MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;

-//------------------------------------------------------------------------------
-// MethodHandles::generate_adapters
-//
-void MethodHandles::generate_adapters() {
-  if (SystemDictionary::MethodHandle_klass() == NULL) return;
+/**
+ * Generates method handle adapters. Returns 'false' if memory allocation
+ * failed and true otherwise.
+ */
+bool MethodHandles::generate_adapters() {
+  if (SystemDictionary::MethodHandle_klass() == NULL) {
+    return true;
+  }

   assert(_adapter_code == NULL, "generate only once");

   ResourceMark rm;
   TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
   _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
-  if (_adapter_code == NULL)
-    vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR,
-                          "CodeCache: no room for MethodHandles adapters");
-  {
-    CodeBuffer code(_adapter_code);
-    MethodHandlesAdapterGenerator g(&code);
-    g.generate();
-    code.log_section_sizes("MethodHandlesAdapterBlob");
-  }
+  if (_adapter_code == NULL) {
+    return false;
+  }
+
+  CodeBuffer code(_adapter_code);
+  MethodHandlesAdapterGenerator g(&code);
+  g.generate();
+  code.log_section_sizes("MethodHandlesAdapterBlob");
+  return true;
 }

 //------------------------------------------------------------------------------
@@ -1401,7 +1406,9 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
   }

   if (enable_MH) {
-    MethodHandles::generate_adapters();
+    if (MethodHandles::generate_adapters() == false) {
+      THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
+    }
     MethodHandles::set_enabled(true);
   }
 }

@@ -69,7 +69,7 @@ class MethodHandles: AllStatic {
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };

   // Generate MethodHandles adapters.
-  static void generate_adapters();
+  static bool generate_adapters();

   // Called from MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);
@@ -306,6 +306,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
   { "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "VerifyReflectionBytecodes",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NmethodSweepFraction",          JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NmethodSweepCheckInterval",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "CodeCacheMinimumFreeSpace",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
 #ifndef ZERO
   { "UseFastAccessorMethods",        JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "UseFastEmptyMethods",           JDK_Version::jdk(9), JDK_Version::jdk(10) },

@@ -2528,7 +2531,7 @@ bool Arguments::check_vm_args_consistency() {

   // Check lower bounds of the code cache
   // Template Interpreter code is approximately 3X larger in debug builds.
-  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
   if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
     jio_fprintf(defaultStream::error_stream(),
                 "Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,

@@ -2564,10 +2567,11 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }

-  status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
   status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
   status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
   status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
+  status &= verify_interval(StartAggressiveSweepingAt, 0, 100, "StartAggressiveSweepingAt");
+

   int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
   // The default CICompilerCount's value is CI_COMPILER_COUNT.
@@ -3985,12 +3989,6 @@ jint Arguments::apply_ergo() {
 #endif
 #endif

-  // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
-  if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
-    FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
-  }
-
   // Set heap size based on available physical memory
   set_heap_size();

@@ -4058,13 +4056,6 @@ jint Arguments::apply_ergo() {
   }

 #ifndef PRODUCT
-  if (CompileTheWorld) {
-    // Force NmethodSweeper to sweep whole CodeCache each time.
-    if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
-      NmethodSweepFraction = 1;
-    }
-  }
-
   if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
     if (use_vm_log()) {
       LogVMOutput = true;
@@ -2984,12 +2984,6 @@ class CommandLineFlags {
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
                                                                             \
-  product(intx, NmethodSweepFraction, 16,                                   \
-          "Number of invocations of sweeper to cover all nmethods")         \
-                                                                            \
-  product(intx, NmethodSweepCheckInterval, 5,                               \
-          "Compilers wake up every n seconds to possibly sweep nmethods")   \
-                                                                            \
   product(intx, NmethodSweepActivity, 10,                                   \
           "Removes cold nmethods from code cache if > 0. Higher values "    \
           "result in more aggressive sweeping")                             \

@@ -3378,9 +3372,6 @@ class CommandLineFlags {
   product_pd(uintx, NonNMethodCodeHeapSize,                                 \
           "Size of code heap with non-nmethods (in bytes)")                 \
                                                                             \
-  product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
-          "When less than X space left, we stop compiling")                 \
-                                                                            \
   product_pd(uintx, CodeCacheExpansionSize,                                 \
           "Code cache expansion size (in bytes)")                           \
                                                                             \
@@ -3393,6 +3384,11 @@ class CommandLineFlags {
   product(bool, UseCodeCacheFlushing, true,                                 \
           "Remove cold/old nmethods from the code cache")                   \
                                                                             \
+  product(uintx, StartAggressiveSweepingAt, 10,                             \
+          "Start aggressive sweeping if X[%] of the code cache is free."    \
+          "Segmented code cache: X[%] of the non-profiled heap."            \
+          "Non-segmented code cache: X[%] of the total code cache")         \
+                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
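The StartAggressiveSweepingAt flag added above (default 10) is the knob behind the "more aggressive sweeping" in the commit message: sweeping turns aggressive once free space in the relevant code heap drops below the given percentage. A usage example (the flag name and 0..100 range come from this diff; the application name is a placeholder):

java -XX:+UseCodeCacheFlushing -XX:StartAggressiveSweepingAt=25 MyApp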
@@ -61,7 +61,7 @@ Mutex* SymbolTable_lock = NULL;
 Mutex*   StringTable_lock             = NULL;
 Monitor* StringDedupQueue_lock        = NULL;
 Mutex*   StringDedupTable_lock        = NULL;
-Mutex*   CodeCache_lock               = NULL;
+Monitor* CodeCache_lock               = NULL;
 Mutex*   MethodData_lock              = NULL;
 Mutex*   RetData_lock                 = NULL;
 Monitor* VMOperationQueue_lock        = NULL;

@@ -205,7 +205,7 @@ void mutex_init() {
   }
   def(ParGCRareEvent_lock          , Mutex  , leaf     ,   true );
   def(DerivedPointerTableGC_lock   , Mutex,   leaf,        true );
-  def(CodeCache_lock               , Mutex  , special,     true );
+  def(CodeCache_lock               , Monitor, special,     true );
   def(Interrupt_lock               , Monitor, special,     true ); // used for interrupt processing
   def(RawMonitor_lock              , Mutex,   special,     true );
   def(OopMapCacheAlloc_lock        , Mutex,   leaf,        true ); // used for oop_map_cache allocation.

@@ -53,7 +53,7 @@ extern Mutex* SymbolTable_lock; // a lock on the symbol table
 extern Mutex*   StringTable_lock;                // a lock on the interned string table
 extern Monitor* StringDedupQueue_lock;           // a lock on the string deduplication queue
 extern Mutex*   StringDedupTable_lock;           // a lock on the string deduplication table
-extern Mutex*   CodeCache_lock;                  // a lock on the CodeCache, rank is special, use MutexLockerEx
+extern Monitor* CodeCache_lock;                  // a lock on the CodeCache, rank is special, use MutexLockerEx
 extern Mutex*   MethodData_lock;                 // a lock on installation of method data
 extern Mutex*   RetData_lock;                    // a lock on installation of RetData inside method data
 extern Mutex*   DerivedPointerTableGC_lock;      // a lock to protect the derived pointer table
@@ -2421,8 +2421,6 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
-      MutexUnlocker mu(AdapterHandlerLibrary_lock);
-      CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
       return NULL; // Out of CodeCache space
     }
     entry->relocate(new_adapter->content_begin());

@@ -2594,9 +2592,6 @@ void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
         CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
       }
       nm->post_compiled_method_load_event();
-    } else {
-      // CodeCache is full, disable compilation
-      CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
     }
   }

@@ -52,7 +52,6 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 class SweeperRecord {
  public:
   int traversal;
-  int invocation;
   int compile_id;
   long traversal_mark;
   int state;

@@ -62,10 +61,9 @@ class SweeperRecord {
   int line;

   void print() {
-      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                     PTR_FORMAT " state = %d traversal_mark %d line = %d",
                     traversal,
-                    invocation,
                     compile_id,
                     kind == NULL ? "" : kind,
                     uep,

@@ -117,7 +115,6 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
   if (_records != NULL) {
     _records[_sweep_index].traversal = _traversals;
     _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
-    _records[_sweep_index].invocation = _sweep_fractions_left;
     _records[_sweep_index].compile_id = nm->compile_id();
     _records[_sweep_index].kind = nm->compile_kind();
     _records[_sweep_index].state = nm->_state;
@@ -127,6 +124,14 @@ void NMethodSweeper::record_sweep(nmethod* nm, int line) {
     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
   }
 }
+
+void NMethodSweeper::init_sweeper_log() {
+  if (LogSweeper && _records == NULL) {
+    // Create the ring buffer for the logging code
+    _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
+    memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+  }
+}
 #else
 #define SWEEP(nm)
 #endif

@@ -142,8 +147,6 @@ int NMethodSweeper::_zombified_count = 0; // Nof. nmethods
 int      NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep

 volatile bool NMethodSweeper::_should_sweep         = true; // Indicates if we should invoke the sweeper
-volatile int  NMethodSweeper::_sweep_fractions_left = 0;    // Nof. invocations left until we are completed with this pass
-volatile int  NMethodSweeper::_sweep_started        = 0;    // Flag to control conc sweeper
 volatile int  NMethodSweeper::_bytes_changed        = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                             // 1) alive -> not_entrant
                                                             // 2) not_entrant -> zombie
@ -190,13 +193,15 @@ int NMethodSweeper::hotness_counter_reset_val() {
|
||||||
}
|
}
|
||||||
return _hotness_counter_reset_val;
|
return _hotness_counter_reset_val;
|
||||||
}
|
}
|
||||||
bool NMethodSweeper::sweep_in_progress() {
|
bool NMethodSweeper::wait_for_stack_scanning() {
|
||||||
return !_current.end();
|
return _current.end();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scans the stacks of all Java threads and marks activations of not-entrant methods.
|
/**
|
||||||
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
|
* Scans the stacks of all Java threads and marks activations of not-entrant methods.
|
||||||
// safepoint.
|
* No need to synchronize access, since 'mark_active_nmethods' is always executed at a
|
||||||
|
* safepoint.
|
||||||
|
*/
|
||||||
void NMethodSweeper::mark_active_nmethods() {
|
void NMethodSweeper::mark_active_nmethods() {
|
||||||
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
|
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
|
||||||
// If we do not want to reclaim not-entrant or zombie methods there is no need
|
// If we do not want to reclaim not-entrant or zombie methods there is no need
|
||||||
|
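Note that the renamed predicate flips its sense: sweep_in_progress() returned !_current.end(), while wait_for_stack_scanning() returns _current.end(), i.e. wait_for_stack_scanning() == !sweep_in_progress(). An exhausted iterator now reads as "nothing left to sweep, wait for the next stack scan", which is why the restart check in the next hunk changes from !sweep_in_progress() to wait_for_stack_scanning() with no behavioral difference.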
@@ -210,9 +215,8 @@ void NMethodSweeper::mark_active_nmethods() {

   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress()) {
+  if (wait_for_stack_scanning()) {
     _seen = 0;
-    _sweep_fractions_left = NmethodSweepFraction;
     _current = NMethodIterator();
     // Initialize to first nmethod
     _current.next();
@@ -231,6 +235,64 @@ void NMethodSweeper::mark_active_nmethods() {

   OrderAccess::storestore();
 }
+
+/**
+ * This function triggers a VM operation that does stack scanning of active
+ * methods. Stack scanning is mandatory for the sweeper to make progress.
+ */
+void NMethodSweeper::do_stack_scanning() {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+  if (wait_for_stack_scanning()) {
+    VM_MarkActiveNMethods op;
+    VMThread::execute(&op);
+    _should_sweep = true;
+  }
+}
+
+void NMethodSweeper::sweeper_loop() {
+  bool timeout;
+  while (true) {
+    {
+      ThreadBlockInVM tbivm(JavaThread::current());
+      MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      const long wait_time = 60*60*24 * 1000;
+      timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
+    }
+    if (!timeout) {
+      possibly_sweep();
+    }
+  }
+}
+
+/**
+ * Wakes up the sweeper thread to possibly sweep.
+ */
+void NMethodSweeper::notify(int code_blob_type) {
+  // Makes sure that we do not invoke the sweeper too often during startup.
+  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
+  double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
+  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
+    assert_locked_or_safepoint(CodeCache_lock);
+    CodeCache_lock->notify();
+  }
+}
+
+/**
+ * Handle a safepoint request
+ */
+void NMethodSweeper::handle_safepoint_request() {
+  if (SafepointSynchronize::is_synchronizing()) {
+    if (PrintMethodFlushing && Verbose) {
+      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nof_nmethods());
+    }
+    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

+    JavaThread* thread = JavaThread::current();
+    ThreadBlockInVM tbivm(thread);
+    thread->java_suspend_self();
+  }
+}
+
 /**
  * This function invokes the sweeper if at least one of the three conditions is met:
  * (1) The code cache is getting full
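sweeper_loop() parks the new thread on CodeCache_lock and sweeps only when notify() wakes it (the variable name suggests Monitor::wait() returns true on timeout, so a timeout skips the sweep); the 24-hour wait is just a periodic fallback. A rough stand-alone analogue of this pattern in portable C++, not HotSpot code (all names illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex              cache_mutex;      // stands in for CodeCache_lock
    std::condition_variable cache_cv;
    bool                    wakeup_posted = false;

    void sweep() { /* analogue of possibly_sweep() */ }

    void sweeper_loop() {
      for (;;) {
        bool notified;
        {
          std::unique_lock<std::mutex> lock(cache_mutex);
          // Sleep until notified, or at the latest for 24 hours.
          notified = cache_cv.wait_for(lock, std::chrono::hours(24),
                                       [] { return wakeup_posted; });
          wakeup_posted = false;
        }
        if (notified) {
          sweep();   // only sweep on an explicit wakeup, as in the patch
        }
      }
    }

    void notify_sweeper() {
      std::lock_guard<std::mutex> lock(cache_mutex);
      wakeup_posted = true;
      cache_cv.notify_one();
    }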
@@ -239,11 +301,6 @@ void NMethodSweeper::mark_active_nmethods() {
  */
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  // Only compiler threads are allowed to sweep
-  if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
-    return;
-  }
-
   // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
   // This is one of the two places where should_sweep can be set to true. The general
   // idea is as follows: If there is enough free space in the code cache, there is no
@@ -280,27 +337,21 @@ void NMethodSweeper::possibly_sweep() {
     }
   }

-  if (_should_sweep && _sweep_fractions_left > 0) {
-    // Only one thread at a time will sweep
-    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
-    if (old != 0) {
-      return;
-    }
+  // Force stack scanning if there is only 10% free space in the code cache.
+  // We force stack scanning only non-profiled code heap gets full, since critical
+  // allocation go to the non-profiled heap and we must be make sure that there is
+  // enough space.
+  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
+  if (free_percent <= StartAggressiveSweepingAt) {
+    do_stack_scanning();
   }
-#ifdef ASSERT
-  if (LogSweeper && _records == NULL) {
-    // Create the ring buffer for the logging code
-    _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
-    memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
-  }
-#endif

-  if (_sweep_fractions_left > 0) {
+  if (_should_sweep) {
+    init_sweeper_log();
     sweep_code_cache();
-    _sweep_fractions_left--;
   }

   // We are done with sweeping the code cache once.
-  if (_sweep_fractions_left == 0) {
   _total_nof_code_cache_sweeps++;
   _last_sweep = _time_counter;
   // Reset flag; temporarily disables sweeper
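The threshold arithmetic here and in notify() above is compact, so here it is spelled out. This assumes, as the formula free_percent = 1/reverse_free_ratio * 100 suggests, that reverse_free_ratio() is capacity divided by free space, and it takes StartAggressiveSweepingAt = 10 as an example value; neither assumption is stated in the patch itself:

    #include <algorithm>

    // Worked example only; values and semantics are assumptions, not patch facts.
    const int    StartAggressiveSweepingAt  = 10;                               // example
    const double start_threshold            = 100.0 / StartAggressiveSweepingAt; // 10.0
    const double aggressive_sweep_threshold = std::min(start_threshold, 1.1);    // capped at 1.1
    // notify(): wake the sweeper once capacity/free >= 1.1, i.e. once free space
    // falls below 100/1.1 ~= 91% of capacity -- nearly always after early startup,
    // which is what the startup cap is for.
    // possibly_sweep(): force a stack scan once free_percent <= 10, i.e. when less
    // than 10% of the non-profiled code heap is free.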
@@ -314,12 +365,9 @@ void NMethodSweeper::possibly_sweep() {
       _bytes_changed = 0;
     }
   }
-  // Release work, because another compiler thread could continue.
-  OrderAccess::release_store((int*)&_sweep_started, 0);
-  }
-}

 void NMethodSweeper::sweep_code_cache() {
+  ResourceMark rm;
   Ticks sweep_start_counter = Ticks::now();

   _flushed_count = 0;
@@ -327,25 +375,10 @@ void NMethodSweeper::sweep_code_cache() {
   _marked_for_reclamation_count = 0;

   if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
+    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
   }

-  if (!CompileBroker::should_compile_new_jobs()) {
-    // If we have turned off compilations we might as well do full sweeps
-    // in order to reach the clean state faster. Otherwise the sleeping compiler
-    // threads will slow down sweeping.
-    _sweep_fractions_left = 1;
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction
-  // invocations so divide the remaining number of nmethods by the
-  // remaining number of invocations. This is only an estimate since
-  // the number of nmethods changes during the sweep so the final
-  // stage must iterate until it there are no more nmethods.
-  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
   int swept_count = 0;

   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
@@ -354,19 +387,9 @@ void NMethodSweeper::sweep_code_cache() {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

     // The last invocation iterates until there are no more nmethods
-    while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
+    while (!_current.end()) {
       swept_count++;
-      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
-        if (PrintMethodFlushing && Verbose) {
-          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
-        }
-        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-
-        assert(Thread::current()->is_Java_thread(), "should be java thread");
-        JavaThread* thread = (JavaThread*)Thread::current();
-        ThreadBlockInVM tbivm(thread);
-        thread->java_suspend_self();
-      }
+      handle_safepoint_request();
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod. Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
@@ -382,7 +405,7 @@ void NMethodSweeper::sweep_code_cache() {
     }
   }

-  assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
+  assert(_current.end(), "must have scanned the whole cache");

   const Ticks sweep_end_counter = Ticks::now();
   const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@@ -397,7 +420,6 @@ void NMethodSweeper::sweep_code_cache() {
     event.set_starttime(sweep_start_counter);
     event.set_endtime(sweep_end_counter);
     event.set_sweepIndex(_traversals);
-    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
     event.set_sweptCount(swept_count);
     event.set_flushedCount(_flushed_count);
     event.set_markedCount(_marked_for_reclamation_count);
@@ -407,15 +429,12 @@ void NMethodSweeper::sweep_code_cache() {

 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    tty->print_cr("### sweeper:      sweep time(%d): "
-                  INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
+    tty->print_cr("### sweeper:      sweep time(%d): ", (jlong)sweep_time.value());
   }
 #endif

-  if (_sweep_fractions_left == 1) {
   _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
   log_sweep("finished");
-  }

   // Sweeper is the only case where memory is released, check here if it
   // is time to restart the compiler. Only checking if there is a certain
@@ -459,10 +478,12 @@ void NMethodSweeper::possibly_enable_sweeper() {

 class NMethodMarker: public StackObj {
  private:
-  CompilerThread* _thread;
+  CodeCacheSweeperThread* _thread;
  public:
   NMethodMarker(nmethod* nm) {
-    _thread = CompilerThread::current();
+    JavaThread* current = JavaThread::current();
+    assert (current->is_Code_cache_sweeper_thread(), "Must be");
+    _thread = (CodeCacheSweeperThread*)JavaThread::current();
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
       _thread->set_scanned_nmethod(nm);
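NMethodMarker is an RAII guard: the constructor publishes the nmethod being scanned so that a safepoint during the scan still reaches it through the thread's oops_do(); the destructor (not shown in this hunk, but implied by the reset-to-NULL assert in set_scanned_nmethod()) presumably clears the slot again. A stand-alone illustration of the pattern in plain C++, with illustrative names:

    #include <cassert>

    struct Sweeper {
      void* scanned = nullptr;
      void set_scanned(void* p) {
        assert(scanned == nullptr || p == nullptr); // reset before reuse
        scanned = p;
      }
    };

    class ScanMarker {            // publish-on-enter, clear-on-exit guard
      Sweeper& _sweeper;
     public:
      ScanMarker(Sweeper& s, void* obj) : _sweeper(s) { _sweeper.set_scanned(obj); }
      ~ScanMarker()                                   { _sweeper.set_scanned(nullptr); }
    };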

--- a/hotspot/src/share/vm/runtime/sweeper.hpp
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp
@@ -49,9 +49,7 @@
 // remove the nmethod, all inline caches (IC) that point to the the nmethod must be
 // cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
 // state change happens during separate sweeps. It may take at least 3 sweeps before an
-// nmethod's space is freed. Sweeping is currently done by compiler threads between
-// compilations or at least each 5 sec (NmethodSweepCheckInterval) when the code cache
-// is full.
+// nmethod's space is freed.

 class NMethodSweeper : public AllStatic {
   static long _traversals; // Stack scan count, also sweep ID.
@@ -64,7 +62,6 @@ class NMethodSweeper : public AllStatic {
   static int _zombified_count;              // Nof. nmethods made zombie in current sweep
   static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep

-  static volatile int  _sweep_fractions_left; // Nof. invocations left until we are completed with this pass
   static volatile int  _sweep_started;        // Flag to control conc sweeper
   static volatile bool _should_sweep;         // Indicates if we should invoke the sweeper
   static volatile int  _bytes_changed;        // Counts the total nmethod size if the nmethod changed from:
@@ -85,8 +82,12 @@ class NMethodSweeper : public AllStatic {
   static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);

-  static bool sweep_in_progress();
+  static void init_sweeper_log() NOT_DEBUG_RETURN;
+  static bool wait_for_stack_scanning();
   static void sweep_code_cache();
+  static void handle_safepoint_request();
+  static void do_stack_scanning();
+  static void possibly_sweep();

  public:
   static long traversal_count() { return _traversals; }
@@ -106,7 +107,8 @@ class NMethodSweeper : public AllStatic {
 #endif

   static void mark_active_nmethods();     // Invoked at the end of each safepoint
-  static void possibly_sweep();           // Compiler threads call this to sweep
+  static void sweeper_loop();
+  static void notify(int code_blob_type); // Possibly start the sweeper thread.

   static int  hotness_counter_reset_val();
   static void report_state_change(nmethod* nm);
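possibly_sweep() moves to the private section above; the public surface is now the thread body (sweeper_loop()) plus a wakeup hook. A hypothetical call site for the hook, inferred from the assert inside notify() rather than shown in this patch:

    // Hypothetical: wake the sweeper after a failed code-cache allocation.
    // notify() asserts CodeCache_lock is held (or that we are at a safepoint).
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      NMethodSweeper::notify(CodeBlobType::MethodNonProfiled);
    }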

--- a/hotspot/src/share/vm/runtime/thread.cpp
+++ b/hotspot/src/share/vm/runtime/thread.cpp
@@ -66,6 +66,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/statSampler.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/sweeper.hpp"
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
@@ -1551,6 +1552,7 @@ void JavaThread::block_if_vm_exited() {

 // Remove this ifdef when C1 is ported to the compiler interface.
 static void compiler_thread_entry(JavaThread* thread, TRAPS);
+static void sweeper_thread_entry(JavaThread* thread, TRAPS);

 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
   Thread()
@@ -3170,6 +3172,10 @@ static void compiler_thread_entry(JavaThread* thread, TRAPS) {
   CompileBroker::compiler_thread_loop();
 }

+static void sweeper_thread_entry(JavaThread* thread, TRAPS) {
+  NMethodSweeper::sweeper_loop();
+}
+
 // Create a CompilerThread
 CompilerThread::CompilerThread(CompileQueue* queue,
                                CompilerCounters* counters)
@@ -3180,7 +3186,6 @@ CompilerThread::CompilerThread(CompileQueue* queue,
   _queue = queue;
   _counters = counters;
   _buffer_blob = NULL;
-  _scanned_nmethod = NULL;
   _compiler = NULL;

 #ifndef PRODUCT
@@ -3188,7 +3193,12 @@ CompilerThread::CompilerThread(CompileQueue* queue,
 #endif
 }

-void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
+// Create sweeper thread
+CodeCacheSweeperThread::CodeCacheSweeperThread()
+  : JavaThread(&sweeper_thread_entry) {
+  _scanned_nmethod = NULL;
+}
+void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
   if (_scanned_nmethod != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so

--- a/hotspot/src/share/vm/runtime/thread.hpp
+++ b/hotspot/src/share/vm/runtime/thread.hpp
@@ -305,6 +305,7 @@ class Thread: public ThreadShadow {
   virtual bool is_VM_thread()       const { return false; }
   virtual bool is_Java_thread()     const { return false; }
   virtual bool is_Compiler_thread() const { return false; }
+  virtual bool is_Code_cache_sweeper_thread() const { return false; }
   virtual bool is_hidden_from_external_view() const { return false; }
   virtual bool is_jvmti_agent_thread() const { return false; }
   // True iff the thread can perform GC operations at a safepoint.
@@ -1746,6 +1747,24 @@ inline CompilerThread* JavaThread::as_CompilerThread() {
   return (CompilerThread*)this;
 }

+// Dedicated thread to sweep the code cache
+class CodeCacheSweeperThread : public JavaThread {
+  nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+ public:
+  CodeCacheSweeperThread();
+  // Track the nmethod currently being scanned by the sweeper
+  void set_scanned_nmethod(nmethod* nm) {
+    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+    _scanned_nmethod = nm;
+  }
+
+  bool is_Code_cache_sweeper_thread() const { return true; }
+  // GC support
+  // Apply "f->do_oop" to all root oops in "this".
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
+};
+
 // A thread used for Compilation.
 class CompilerThread : public JavaThread {
   friend class VMStructs;
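The assert in set_scanned_nmethod() encodes a strict set/clear protocol. A hypothetical scan sequence (the surrounding names are illustrative; the real call sites are NMethodMarker and the sweeper itself):

    // sweeper_thread is a CodeCacheSweeperThread*; nm is the nmethod being swept.
    sweeper_thread->set_scanned_nmethod(nm);    // publish: a safepoint during the
                                                // scan still reaches nm via oops_do()
    // ... scan/process nm ...
    sweeper_thread->set_scanned_nmethod(NULL);  // clear before the next nmethod,
                                                // or the assert above fires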
@@ -1758,7 +1777,6 @@ class CompilerThread : public JavaThread {
   CompileQueue* _queue;
   BufferBlob*   _buffer_blob;

-  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
   AbstractCompiler* _compiler;

  public:
@@ -1792,11 +1810,6 @@ class CompilerThread : public JavaThread {
     _log = log;
   }

-  // GC support
-  // Apply "f->do_oop" to all root oops in "this".
-  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
-
 #ifndef PRODUCT
  private:
   IdealGraphPrinter *_ideal_graph_printer;
@@ -1808,12 +1821,6 @@ class CompilerThread : public JavaThread {
   // Get/set the thread's current task
   CompileTask* task()                      { return _task; }
   void         set_task(CompileTask* task) { _task = task; }
-
-  // Track the nmethod currently being scanned by the sweeper
-  void set_scanned_nmethod(nmethod* nm) {
-    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
-    _scanned_nmethod = nm;
-  }
 };

 inline CompilerThread* CompilerThread::current() {

--- a/hotspot/src/share/vm/runtime/vm_operations.cpp
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp
@@ -111,6 +111,9 @@ void VM_Deoptimize::doit() {
   CodeCache::make_marked_nmethods_zombies();
 }

+void VM_MarkActiveNMethods::doit() {
+  NMethodSweeper::mark_active_nmethods();
+}

 VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id) {
   _thread = thread;

--- a/hotspot/src/share/vm/runtime/vm_operations.hpp
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp
@@ -100,6 +100,7 @@
   template(RotateGCLog)                           \
   template(WhiteBoxOperation)                     \
   template(ClassLoaderStatsOperation)             \
+  template(MarkActiveNMethods)                    \
   template(PrintCompileQueue)                     \
   template(PrintCodeList)                         \
   template(PrintCodeCache)                        \
@@ -252,6 +253,13 @@ class VM_Deoptimize: public VM_Operation {
   bool allow_nested_vm_operations() const        { return true; }
 };

+class VM_MarkActiveNMethods: public VM_Operation {
+ public:
+  VM_MarkActiveNMethods() {}
+  VMOp_Type type() const                         { return VMOp_MarkActiveNMethods; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};

 // Deopt helper that can deoptimize frames in threads other than the
 // current thread. Only used through Deoptimization::deoptimize_frame.
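This is the standard VM-operation pattern: mark_active_nmethods() must run at a safepoint (it asserts as much in sweeper.cpp above), so the sweeper funnels the stack scan through the VM thread instead of doing it directly. The patch's own call site, from do_stack_scanning() earlier:

    VM_MarkActiveNMethods op;
    VMThread::execute(&op);   // runs VM_MarkActiveNMethods::doit() in the VM thread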

--- a/hotspot/src/share/vm/trace/trace.xml
+++ b/hotspot/src/share/vm/trace/trace.xml
@@ -383,7 +383,6 @@ Declares a structure type that can be used in other events.
     <event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
            has_thread="true" is_requestable="false" is_constant="false">
       <value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
-      <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
       <value type="UINT" field="sweptCount" label="Methods Swept"/>
       <value type="UINT" field="flushedCount" label="Methods Flushed"/>
       <value type="UINT" field="markedCount" label="Methods Reclaimed"/>

--- a/hotspot/test/compiler/startup/SmallCodeCacheStartup.java
+++ b/hotspot/test/compiler/startup/SmallCodeCacheStartup.java
@@ -27,10 +27,20 @@
  * @summary Test ensures that there is no crash if there is not enough ReservedCodeacacheSize
  *          to initialize all compiler threads. The option -Xcomp gives the VM more time to
  *          to trigger the old bug.
- * @run main/othervm -XX:ReservedCodeCacheSize=3m -XX:CICompilerCount=64 -Xcomp SmallCodeCacheStartup
+ * @library /testlibrary
  */
+import com.oracle.java.testlibrary.*;
+
 public class SmallCodeCacheStartup {
   public static void main(String[] args) throws Exception {
+    try {
+      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m",
+                                                                "-XX:CICompilerCount=64",
+                                                                "-Xcomp",
+                                                                "SmallCodeCacheStartup");
+      pb.start();
+    } catch (VirtualMachineError e) {}
+
     System.out.println("TEST PASSED");
   }
 }

--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java
@@ -135,7 +135,6 @@ public class TestHumongousCodeCacheRoots {
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking
       "-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run
-      "-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1", // make the code cache sweep more predictable
     };

     runTest("-client", baseArguments);
     runTest("-server", baseArguments);