Mirror of https://github.com/openjdk/jdk.git
8006952: Slow VM due to excessive code cache freelist iteration
Remove continuous free block requirement

Reviewed-by: kvn
parent 33e3f6b59b
commit 92ef5fe748
8 changed files with 123 additions and 154 deletions
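The change replaces the old pre-flight space check, which scanned the freelist for one contiguous block of sufficient size, with a two-argument CodeCache::allocate(int size, bool is_critical). Below is a minimal sketch of the scheme, assuming a simple bump allocator; CodeHeapModel, critical_reserve, and all sizes are illustrative stand-ins, not HotSpot code. Critical blobs such as adapters and runtime stubs may consume a reserved tail of the heap, while ordinary nmethod allocations return NULL once they would eat into that reserve.

// Minimal sketch, not the HotSpot CodeHeap: a bump allocator whose tail
// region (critical_reserve, standing in for CodeCacheMinimumFreeSpace)
// is only available to critical blobs.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class CodeHeapModel {
  std::vector<uint8_t> storage_;
  size_t used_;
  size_t critical_reserve_;

public:
  CodeHeapModel(size_t capacity, size_t reserve)
      : storage_(capacity), used_(0), critical_reserve_(reserve) {}

  // Critical allocations (adapters, runtime stubs) may dip into the
  // reserved tail; ordinary nmethods may not, and get NULL instead.
  void* allocate(size_t size, bool is_critical) {
    size_t reserve = is_critical ? 0 : critical_reserve_;
    if (used_ + size + reserve > storage_.size()) {
      return NULL;  // caller must cope with a failed allocation
    }
    void* p = &storage_[used_];
    used_ += size;
    return p;
  }
};

int main() {
  CodeHeapModel heap(1024, 128);
  void* nm   = heap.allocate(900, false);  // NULL: would eat the reserve
  void* stub = heap.allocate(900, true);   // succeeds: critical request
  std::printf("nmethod=%p stub=%p\n", nm, stub);
  return 0;
}

The real CodeHeap still uses a freelist internally; the point of the critical/non-critical split is that "always leave room for I2C/C2I adapters" becomes a bound enforced inside the allocator instead of a freelist scan before every allocation.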
@@ -348,14 +348,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
 
 void* RuntimeStub::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
@@ -172,7 +172,7 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
 
 static size_t maxCodeCacheUsed = 0;
 
-CodeBlob* CodeCache::allocate(int size) {
+CodeBlob* CodeCache::allocate(int size, bool is_critical) {
   // Do not seize the CodeCache lock here--if the caller has not
   // already done so, we are going to lose bigtime, since the code
   // cache will contain a garbage CodeBlob until the caller can
@@ -183,7 +183,7 @@ CodeBlob* CodeCache::allocate(int size) {
   CodeBlob* cb = NULL;
   _number_of_blobs++;
   while (true) {
-    cb = (CodeBlob*)_heap->allocate(size);
+    cb = (CodeBlob*)_heap->allocate(size, is_critical);
     if (cb != NULL) break;
     if (!_heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
@@ -192,8 +192,8 @@ CodeBlob* CodeCache::allocate(int size) {
     if (PrintCodeCacheExtension) {
       ResourceMark rm;
       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
-                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
-                    (address)_heap->end() - (address)_heap->begin());
+                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
+                    (address)_heap->high() - (address)_heap->low_boundary());
     }
   }
   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
@@ -608,13 +608,13 @@ void CodeCache::verify_oops() {
 
 address CodeCache::first_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->begin();
+  return (address)_heap->low_boundary();
 }
 
 address CodeCache::last_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->end();
+  return (address)_heap->high();
 }
 
@@ -996,10 +996,9 @@ void CodeCache::print() {
 void CodeCache::print_summary(outputStream* st, bool detailed) {
   size_t total = (_heap->high_boundary() - _heap->low_boundary());
   st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT
-               "Kb max_free_chunk=" SIZE_FORMAT "Kb",
+               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                total/K, (total - unallocated_capacity())/K,
-               maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K);
+               maxCodeCacheUsed/K, unallocated_capacity()/K);
 
   if (detailed) {
     st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
@@ -1018,19 +1017,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
 
 void CodeCache::log_state(outputStream* st) {
   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
-            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
-            " largest_free_block='" SIZE_FORMAT "'",
+            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
             nof_blobs(), nof_nmethods(), nof_adapters(),
-            unallocated_capacity(), largest_free_block());
-}
-
-size_t CodeCache::largest_free_block() {
-  // This is called both with and without CodeCache_lock held so
-  // handle both cases.
-  if (CodeCache_lock->owned_by_self()) {
-    return _heap->largest_free_block();
-  } else {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    return _heap->largest_free_block();
-  }
+            unallocated_capacity());
 }
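The deleted largest_free_block() is the hot path named in the bug title: every call walked the entire freelist, taking CodeCache_lock first when the caller did not already hold it, so a fragmented cache with thousands of free chunks paid an O(n) scan per query. A sketch of the cost difference, with illustrative stand-in types rather than HotSpot's FreeBlock/CodeHeap classes:

// Illustrative stand-ins, not the HotSpot freelist implementation.
#include <cstddef>
#include <cstdio>

struct FreeChunk {
  std::size_t length;
  FreeChunk* link;
};

// Old cost model: each query visited every freelist node, O(n) in the
// number of free chunks (and normally under CodeCache_lock).
static std::size_t largest_free_chunk(const FreeChunk* head) {
  std::size_t max = 0;
  for (const FreeChunk* b = head; b != NULL; b = b->link) {
    if (b->length > max) max = b->length;
  }
  return max;
}

// New cost model: unallocated_capacity() reads a counter maintained on
// allocate/deallocate, so the same query becomes O(1).
static std::size_t unallocated(std::size_t capacity, std::size_t used) {
  return capacity - used;
}

int main() {
  FreeChunk c2 = { 64, NULL };
  FreeChunk c1 = { 128, &c2 };
  std::printf("largest=%zu total_free=%zu\n",
              largest_free_chunk(&c1), unallocated(1024, 832));
  return 0;
}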
@@ -70,7 +70,7 @@ class CodeCache : AllStatic {
   static void initialize();
 
   // Allocation/administration
-  static CodeBlob* allocate(int size);                           // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
   static void commit(CodeBlob* cb);                              // called when the allocated CodeBlob has been filled
   static int alignment_unit();                                   // guaranteed alignment of all CodeBlobs
   static int alignment_offset();                                 // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
@@ -156,19 +156,13 @@ class CodeCache : AllStatic {
   static address low_bound()  { return (address) _heap->low_boundary(); }
   static address high_bound() { return (address) _heap->high_boundary(); }
 
-  static bool has_space(int size) {
-    // Always leave some room in the CodeCache for I2C/C2I adapters
-    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
-  }
-
   // Profiling
   static address first_address();                // first address used for CodeBlobs
   static address last_address();                 // last  address used for CodeBlobs
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static size_t  largest_free_block();
-  static bool    needs_flushing()                { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
+  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
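Note that the new needs_flushing() changes meaning as well as cost: the old predicate could start flushing a fragmented cache that still had ample total free space, because no single chunk cleared the threshold. A worked example with hypothetical numbers (a 200K threshold, kFlushingMin below, and ten free 100K chunks):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const std::size_t kFlushingMin = 200 * 1024;           // stand-in threshold
  std::vector<std::size_t> free_chunks(10, 100 * 1024);  // fragmented: 10 x 100K

  std::size_t largest = *std::max_element(free_chunks.begin(), free_chunks.end());
  std::size_t total = 0;
  for (std::size_t i = 0; i < free_chunks.size(); i++) total += free_chunks[i];

  // Old test flushes (largest 100K < 200K) despite 1M of free space;
  // the new test does not (total 1M >= 200K).
  std::printf("old needs_flushing: %d\n", (int)(largest < kFlushingMin));
  std::printf("new needs_flushing: %d\n", (int)(total < kFlushingMin));
  return 0;
}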
@@ -501,18 +501,17 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(native_nmethod_size)) {
-      CodeOffsets offsets;
-      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-      nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
-                                             compile_id, &offsets,
-                                             code_buffer, frame_size,
-                                             basic_lock_owner_sp_offset,
-                                             basic_lock_sp_offset, oop_maps);
-      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
-      if (PrintAssembly && nm != NULL)
-        Disassembler::decode(nm);
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+                                           compile_id, &offsets,
+                                           code_buffer, frame_size,
+                                           basic_lock_owner_sp_offset,
+                                           basic_lock_sp_offset, oop_maps);
+    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
+    if (PrintAssembly && nm != NULL) {
+      Disassembler::decode(nm);
     }
   }
   // verify nmethod
@@ -538,18 +537,17 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(nmethod_size)) {
-      CodeOffsets offsets;
-      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-      offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
-      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 
-      nm = new (nmethod_size) nmethod(method(), nmethod_size,
-                                      &offsets, code_buffer, frame_size);
+    nm = new (nmethod_size) nmethod(method(), nmethod_size,
+                                    &offsets, code_buffer, frame_size);
 
-      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
-      if (PrintAssembly && nm != NULL)
-        Disassembler::decode(nm);
+    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
+    if (PrintAssembly && nm != NULL) {
+      Disassembler::decode(nm);
     }
   }
   // verify nmethod
@@ -591,16 +589,16 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       + round_to(handler_table->size_in_bytes(), oopSize)
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
-    if (CodeCache::has_space(nmethod_size)) {
-      nm = new (nmethod_size)
-        nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
-                orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
-                oop_maps,
-                handler_table,
-                nul_chk_table,
-                compiler,
-                comp_level);
-    }
+
+    nm = new (nmethod_size)
+      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
+              oop_maps,
+              handler_table,
+              nul_chk_table,
+              compiler,
+              comp_level);
+
     if (nm != NULL) {
       // To make dependency checking during class loading fast, record
       // the nmethod dependencies in the classes it is dependent on.
@@ -612,15 +610,18 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       // classes the slow way is too slow.
       for (Dependencies::DepStream deps(nm); deps.next(); ) {
         Klass* klass = deps.context_type();
-        if (klass == NULL)  continue;  // ignore things like evol_method
+        if (klass == NULL) {
+          continue;  // ignore things like evol_method
+        }
 
         // record this nmethod as dependent on this klass
         InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
       }
     }
     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
+    if (PrintAssembly && nm != NULL) {
       Disassembler::decode(nm);
+    }
   }
 
   // verify nmethod
@@ -798,13 +799,11 @@ nmethod::nmethod(
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) {
-  void* alloc = CodeCache::allocate(nmethod_size);
-  guarantee(alloc != NULL, "CodeCache should have enough space");
-  return alloc;
+void* nmethod::operator new(size_t size, int nmethod_size) throw () {
+  // Not critical, may return null if there is too little continuous memory
+  return CodeCache::allocate(nmethod_size);
 }
-
 
 nmethod::nmethod(
   Method* method,
   int nmethod_size,
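The throw () added to nmethod::operator new is what makes the NULL return above well defined: a new-expression only checks the pointer coming back from operator new when the allocation function is declared non-throwing, so without the empty exception specification the compiler would be entitled to run the constructor on a NULL block. A self-contained illustration (Blob is a hypothetical stand-in, not an nmethod):

#include <cstddef>
#include <cstdio>

struct Blob {
  int id;
  Blob() : id(42) {}

  // Non-throwing placement-style operator new, mirroring the patch:
  // declared throw() so a NULL return is legal and skips construction.
  static void* operator new(std::size_t, std::size_t blob_size) throw () {
    (void)blob_size;
    return NULL;  // simulate a full code cache
  }
};

int main() {
  Blob* b = new (static_cast<std::size_t>(512)) Blob;
  // b is NULL and Blob::Blob() never ran; without throw() a NULL
  // return from operator new would make this undefined behavior.
  std::printf("b=%p\n", static_cast<void*>(b));
  return 0;
}

This is why the callers above lost their CodeCache::has_space() guards: they now simply attempt the allocation and test nm != NULL afterwards.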