mirror of https://github.com/openjdk/jdk.git
synced 2025-08-25 13:54:38 +02:00

commit 92ef5fe748 (parent 33e3f6b59b)

8006952: Slow VM due to excessive code cache freelist iteration
Remove continuous free block requirement
Reviewed-by: kvn

8 changed files with 123 additions and 154 deletions
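
Before this change, code cache fullness checks were gated on
CodeCache::largest_free_block(), which walks the entire freelist under the
CodeCache_lock; on a fragmented cache that walk ran on every compiler-thread
loop iteration and slowed the whole VM down (hence the bug title). The commit
drops the "largest continuous free block" requirement and instead reserves
CodeCacheMinimumFreeSpace for critical allocations, turning the fullness check
into constant-time arithmetic. A minimal before/after sketch, condensed from
the hunks below (member names as in the real CodeHeap):

    // Before: O(n) walk over the freelist on every fullness query.
    size_t len = 0;
    for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      if (b->length() > len) len = b->length();   // inspect every free block
    }

    // After: O(1) arithmetic on two segment counters.
    return segments_to_size(_number_of_reserved_segments - _next_segment);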

src/share/vm/code/codeBlob.cpp

@@ -348,14 +348,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,

 void* RuntimeStub::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }


 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }

src/share/vm/code/codeCache.cpp

@@ -172,7 +172,7 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) {

 static size_t maxCodeCacheUsed = 0;

-CodeBlob* CodeCache::allocate(int size) {
+CodeBlob* CodeCache::allocate(int size, bool is_critical) {
   // Do not seize the CodeCache lock here--if the caller has not
   // already done so, we are going to lose bigtime, since the code
   // cache will contain a garbage CodeBlob until the caller can

@@ -183,7 +183,7 @@ CodeBlob* CodeCache::allocate(int size) {
   CodeBlob* cb = NULL;
   _number_of_blobs++;
   while (true) {
-    cb = (CodeBlob*)_heap->allocate(size);
+    cb = (CodeBlob*)_heap->allocate(size, is_critical);
     if (cb != NULL) break;
     if (!_heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed

@@ -192,8 +192,8 @@ CodeBlob* CodeCache::allocate(int size) {
     if (PrintCodeCacheExtension) {
       ResourceMark rm;
       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
-                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
-                    (address)_heap->end() - (address)_heap->begin());
+                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
+                    (address)_heap->high() - (address)_heap->low_boundary());
     }
   }
   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -

@@ -608,13 +608,13 @@ void CodeCache::verify_oops() {

 address CodeCache::first_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->begin();
+  return (address)_heap->low_boundary();
 }


 address CodeCache::last_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->end();
+  return (address)_heap->high();
 }


@@ -996,10 +996,9 @@ void CodeCache::print() {
 void CodeCache::print_summary(outputStream* st, bool detailed) {
   size_t total = (_heap->high_boundary() - _heap->low_boundary());
   st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT
-               "Kb max_free_chunk=" SIZE_FORMAT "Kb",
+               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                total/K, (total - unallocated_capacity())/K,
-               maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K);
+               maxCodeCacheUsed/K, unallocated_capacity()/K);

   if (detailed) {
     st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",

@@ -1018,19 +1017,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {

 void CodeCache::log_state(outputStream* st) {
   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
-            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
-            " largest_free_block='" SIZE_FORMAT "'",
+            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
             nof_blobs(), nof_nmethods(), nof_adapters(),
-            unallocated_capacity(), largest_free_block());
+            unallocated_capacity());
 }

-size_t CodeCache::largest_free_block() {
-  // This is called both with and without CodeCache_lock held so
-  // handle both cases.
-  if (CodeCache_lock->owned_by_self()) {
-    return _heap->largest_free_block();
-  } else {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    return _heap->largest_free_block();
-  }
-}

src/share/vm/code/codeCache.hpp

@@ -70,7 +70,7 @@ class CodeCache : AllStatic {
   static void initialize();

   // Allocation/administration
-  static CodeBlob* allocate(int size);                           // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
   static void commit(CodeBlob* cb);  // called when the allocated CodeBlob has been filled
   static int alignment_unit();       // guaranteed alignment of all CodeBlobs
   static int alignment_offset();     // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)

@@ -156,19 +156,13 @@ class CodeCache : AllStatic {
   static address low_bound()  { return (address) _heap->low_boundary(); }
   static address high_bound() { return (address) _heap->high_boundary(); }

-  static bool has_space(int size) {
-    // Always leave some room in the CodeCache for I2C/C2I adapters
-    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
-  }
-
   // Profiling
   static address first_address();    // first address used for CodeBlobs
   static address last_address();     // last address used for CodeBlobs
   static size_t capacity()              { return _heap->capacity(); }
   static size_t max_capacity()          { return _heap->max_capacity(); }
   static size_t unallocated_capacity()  { return _heap->unallocated_capacity(); }
-  static size_t largest_free_block();
-  static bool needs_flushing()          { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
+  static bool needs_flushing()          { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }

   static bool needs_cache_clean()       { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
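
With has_space() gone, callers no longer pre-check for a large enough
continuous block; instead the is_critical flag decides who may consume the
reserved headroom. A condensed view of the call sites as changed in this
commit:

    // RuntimeStub / SingletonBlob (vital VM blobs): may use the reserve.
    void* p = CodeCache::allocate(size, true);

    // nmethod: is_critical defaults to false, so JIT-compiled methods
    // cannot exhaust the headroom kept for adapters and stubs.
    return CodeCache::allocate(nmethod_size);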

src/share/vm/code/nmethod.cpp

@@ -501,7 +501,6 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(native_nmethod_size)) {
     CodeOffsets offsets;
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);

@@ -511,7 +510,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
                                        basic_lock_owner_sp_offset,
                                        basic_lock_sp_offset, oop_maps);
     NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
+    if (PrintAssembly && nm != NULL) {
       Disassembler::decode(nm);
     }
   }

@@ -538,7 +537,6 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(nmethod_size)) {
     CodeOffsets offsets;
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);

@@ -548,7 +546,7 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
                            &offsets, code_buffer, frame_size);

     NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
+    if (PrintAssembly && nm != NULL) {
       Disassembler::decode(nm);
     }
   }

@@ -591,7 +589,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       + round_to(handler_table->size_in_bytes(), oopSize)
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
-    if (CodeCache::has_space(nmethod_size)) {
     nm = new (nmethod_size)
       nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
               orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,

@@ -600,7 +598,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
               nul_chk_table,
               compiler,
               comp_level);
-    }
     if (nm != NULL) {
       // To make dependency checking during class loading fast, record
       // the nmethod dependencies in the classes it is dependent on.

@@ -612,16 +610,19 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       // classes the slow way is too slow.
       for (Dependencies::DepStream deps(nm); deps.next(); ) {
         Klass* klass = deps.context_type();
-        if (klass == NULL) continue; // ignore things like evol_method
+        if (klass == NULL) {
+          continue; // ignore things like evol_method
+        }

         // record this nmethod as dependent on this klass
         InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
       }
     }
     NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
+    if (PrintAssembly && nm != NULL) {
       Disassembler::decode(nm);
     }
+  }

   // verify nmethod
   debug_only(if (nm) nm->verify();) // might block

@@ -798,13 +799,11 @@ nmethod::nmethod(
 }
 #endif // def HAVE_DTRACE_H

-void* nmethod::operator new(size_t size, int nmethod_size) {
-  void* alloc = CodeCache::allocate(nmethod_size);
-  guarantee(alloc != NULL, "CodeCache should have enough space");
-  return alloc;
+void* nmethod::operator new(size_t size, int nmethod_size) throw () {
+  // Not critical, may return null if there is too little continuous memory
+  return CodeCache::allocate(nmethod_size);
 }


 nmethod::nmethod(
   Method* method,
   int nmethod_size,
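
The new operator new deserves a second look: an allocator that can return NULL
must carry a non-throwing exception specification, otherwise the compiler is
entitled to assume an ordinary operator new never returns NULL and callers'
"if (nm != NULL)" bailout paths could be optimized away. The pattern, verbatim
from the hunk above with one added comment:

    void* nmethod::operator new(size_t size, int nmethod_size) throw () {
      // Not critical, may return null if there is too little continuous memory
      return CodeCache::allocate(nmethod_size);  // NULL on failure, no guarantee()
    }

Since every call site already tests nm for NULL before use, what used to be a
guarantee() crash on a full code cache becomes an ordinary compilation bailout.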

src/share/vm/compiler/compileBroker.cpp

@@ -1581,7 +1581,7 @@ void CompileBroker::compiler_thread_loop() {
     // We need this HandleMark to avoid leaking VM handles.
     HandleMark hm(thread);

-    if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) {
+    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      // the code cache is really full
      handle_full_code_cache();
    } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
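
This check runs once per compile-queue iteration, so it was the main driver of
the freelist walks named in the bug title; in the new form its cost no longer
depends on fragmentation. A worked example under assumed values (a 48 MB
ReservedCodeCacheSize and the usual 500K default for CodeCacheMinimumFreeSpace;
both are tunable flags):

    // unallocated_capacity() = reserved - allocated, e.g. 48*M - 47*M = 1*M
    // 1*M >= 500*K                      -> keep compiling
    // unallocated_capacity() < 500*K    -> handle_full_code_cache()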

src/share/vm/memory/heap.cpp

@@ -42,7 +42,7 @@ CodeHeap::CodeHeap() {
   _log2_segment_size            = 0;
   _next_segment                 = 0;
   _freelist                     = NULL;
-  _free_segments                = 0;
+  _freelist_segments            = 0;
 }


@@ -115,8 +115,8 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
   }

   on_code_mapping(_memory.low(), _memory.committed_size());
-  _number_of_committed_segments = number_of_segments(_memory.committed_size());
-  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
+  _number_of_committed_segments = size_to_segments(_memory.committed_size());
+  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");

   // reserve space for _segmap

@@ -149,8 +149,8 @@ bool CodeHeap::expand_by(size_t size) {
   if (!_memory.expand_by(dm)) return false;
   on_code_mapping(base, dm);
   size_t i = _number_of_committed_segments;
-  _number_of_committed_segments = number_of_segments(_memory.committed_size());
-  assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
+  _number_of_committed_segments = size_to_segments(_memory.committed_size());
+  assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
   // expand _segmap space
   size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();

@@ -176,33 +176,44 @@ void CodeHeap::clear() {
 }


-void* CodeHeap::allocate(size_t size) {
-  size_t length = number_of_segments(size + sizeof(HeapBlock));
-  assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList");
+void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
+  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
+  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

   // First check if we can satify request from freelist
   debug_only(verify());
-  HeapBlock* block = search_freelist(length);
+  HeapBlock* block = search_freelist(number_of_segments, is_critical);
   debug_only(if (VerifyCodeCacheOften) verify());
   if (block != NULL) {
-    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
+    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
     assert(!block->free(), "must be marked free");
 #ifdef ASSERT
-    memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
+    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
 #endif
     return block->allocated_space();
   }

-  if (length < CodeCacheMinBlockLength) {
-    length = CodeCacheMinBlockLength;
+  // Ensure minimum size for allocation to the heap.
+  if (number_of_segments < CodeCacheMinBlockLength) {
+    number_of_segments = CodeCacheMinBlockLength;
   }
-  if (_next_segment + length <= _number_of_committed_segments) {
-    mark_segmap_as_used(_next_segment, _next_segment + length);
+
+  if (!is_critical) {
+    // Make sure the allocation fits in the unallocated heap without using
+    // the CodeCacheMimimumFreeSpace that is reserved for critical allocations.
+    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
+      // Fail allocation
+      return NULL;
+    }
+  }
+
+  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
+    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
     HeapBlock* b = block_at(_next_segment);
-    b->initialize(length);
-    _next_segment += length;
+    b->initialize(number_of_segments);
+    _next_segment += number_of_segments;
 #ifdef ASSERT
-    memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
+    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
 #endif
     return b->allocated_space();
   } else {

@@ -219,7 +230,7 @@ void CodeHeap::deallocate(void* p) {
 #ifdef ASSERT
   memset((void *)b->allocated_space(),
          badCodeHeapFreeVal,
-         size(b->length()) - sizeof(HeapBlock));
+         segments_to_size(b->length()) - sizeof(HeapBlock));
 #endif
   add_to_freelist(b);

@@ -299,32 +310,14 @@ size_t CodeHeap::max_capacity() const {
 }

 size_t CodeHeap::allocated_capacity() const {
-  // Start with the committed size in _memory;
-  size_t l = _memory.committed_size();
-
-  // Subtract the committed, but unused, segments
-  l -= size(_number_of_committed_segments - _next_segment);
-
-  // Subtract the size of the freelist
-  l -= size(_free_segments);
-
-  return l;
+  // size of used heap - size on freelist
+  return segments_to_size(_next_segment - _freelist_segments);
 }

-size_t CodeHeap::largest_free_block() const {
-  // First check unused space excluding free blocks.
-  size_t free_sz = size(_free_segments);
-  size_t unused  = max_capacity() - allocated_capacity() - free_sz;
-  if (unused >= free_sz)
-    return unused;
-
-  // Now check largest free block.
-  size_t len = 0;
-  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
-    if (b->length() > len)
-      len = b->length();
-  }
-  return MAX2(unused, size(len));
+// Returns size of the unallocated heap block
+size_t CodeHeap::heap_unallocated_capacity() const {
+  // Total number of segments - number currently used
+  return segments_to_size(_number_of_reserved_segments - _next_segment);
 }

 // Free list management

@@ -365,7 +358,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {
   assert(b != _freelist, "cannot be removed twice");

   // Mark as free and update free space count
-  _free_segments += b->length();
+  _freelist_segments += b->length();
   b->set_free();

   // First element in list?

@@ -400,7 +393,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {

 // Search freelist for an entry on the list with the best fit
 // Return NULL if no one was found
-FreeBlock* CodeHeap::search_freelist(size_t length) {
+FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
   FreeBlock *best_block = NULL;
   FreeBlock *best_prev  = NULL;
   size_t best_length = 0;

@@ -411,6 +404,16 @@ FreeBlock* CodeHeap::search_freelist(size_t length) {
   while(cur != NULL) {
     size_t l = cur->length();
     if (l >= length && (best_block == NULL || best_length > l)) {
+
+      // Non critical allocations are not allowed to use the last part of the code heap.
+      if (!is_critical) {
+        // Make sure the end of the allocation doesn't cross into the last part of the code heap
+        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
+          // the freelist is sorted by address - if one fails, all consecutive will also fail.
+          break;
+        }
+      }
+
       // Remember best block, its previous element, and its length
       best_block = cur;
       best_prev  = prev;

@@ -452,7 +455,7 @@ FreeBlock* CodeHeap::search_freelist(size_t length) {
   }

   best_block->set_used();
-  _free_segments -= length;
+  _freelist_segments -= length;
   return best_block;
 }


@@ -478,7 +481,7 @@ void CodeHeap::verify() {
   }

   // Verify that freelist contains the right amount of free space
-  //  guarantee(len == _free_segments, "wrong freelist");
+  //  guarantee(len == _freelist_segments, "wrong freelist");

   // Verify that the number of free blocks is not out of hand.
   static int free_block_threshold = 10000;
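
Two details in CodeHeap::allocate() and search_freelist() carry the whole
scheme. First, a non-critical request must fit without touching the reserved
tail; a worked example under assumed numbers (sizes already in bytes, and the
usual 500K default for CodeCacheMinimumFreeSpace):

    // heap_unallocated_capacity() = 768K, CodeCacheMinimumFreeSpace = 500K
    // non-critical budget         = 768K - 500K = 268K
    // a 300K non-critical allocation returns NULL here; the same request
    // with is_critical == true may still succeed.

Second, because the freelist is kept sorted by address, the first candidate in
search_freelist() whose end would cross high_boundary() -
CodeCacheMinimumFreeSpace proves that every later (higher-address) candidate
would cross it too, so the scan breaks instead of walking the rest of the list.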

src/share/vm/memory/heap.hpp

@@ -91,11 +91,11 @@ class CodeHeap : public CHeapObj<mtCode> {
   size_t       _next_segment;

   FreeBlock*   _freelist;
-  size_t       _free_segments;      // No. of segments in freelist
+  size_t       _freelist_segments;  // No. of segments in freelist

   // Helper functions
-  size_t number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
-  size_t size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
+  size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
+  size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }

   size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; }
   HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }

@@ -110,7 +110,7 @@ class CodeHeap : public CHeapObj<mtCode> {

   // Toplevel freelist management
   void add_to_freelist(HeapBlock *b);
-  FreeBlock* search_freelist(size_t length);
+  FreeBlock* search_freelist(size_t length, bool is_critical);

   // Iteration helpers
   void* next_free(HeapBlock* b) const;

@@ -132,23 +132,20 @@ class CodeHeap : public CHeapObj<mtCode> {
   void clear();                     // clears all heap contents

   // Memory allocation
-  void* allocate (size_t size);     // allocates a block of size or returns NULL
+  void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL
   void deallocate(void* p);         // deallocates a block

   // Attributes
-  void* begin() const               { return _memory.low (); }
-  void* end() const                 { return _memory.high(); }
-  bool  contains(void* p) const     { return begin() <= p && p < end(); }
+  char* low_boundary() const        { return _memory.low_boundary (); }
+  char* high() const                { return _memory.high(); }
+  char* high_boundary() const       { return _memory.high_boundary(); }

+  bool  contains(const void* p) const { return low_boundary() <= p && p < high(); }
   void* find_start(void* p) const;  // returns the block containing p or NULL
   size_t alignment_unit() const;    // alignment of any block
   size_t alignment_offset() const;  // offset of first byte of any block, within the enclosing alignment unit
   static size_t header_size();      // returns the header size for each heap block

-  // Returns reserved area high and low addresses
-  char *low_boundary() const        { return _memory.low_boundary (); }
-  char *high() const                { return _memory.high(); }
-  char *high_boundary() const       { return _memory.high_boundary(); }
-
   // Iteration

   // returns the first block or NULL

@@ -161,8 +158,11 @@ class CodeHeap : public CHeapObj<mtCode> {
   size_t max_capacity() const;
   size_t allocated_capacity() const;
   size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); }
-  size_t largest_free_block() const;

+ private:
+  size_t heap_unallocated_capacity() const;
+
+ public:
   // Debugging
   void verify();
   void print() PRODUCT_RETURN;
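
The renamed helpers make the segment arithmetic explicit: size_to_segments()
rounds a byte size up to whole segments, and segments_to_size() converts back,
so a round trip yields the allocation granularity. A small worked example,
assuming the common 64-byte code cache segment (_log2_segment_size == 6;
CodeCacheSegmentSize is a tunable):

    // size_to_segments(100) == (100 + 63) >> 6 == 2 segments
    // segments_to_size(2)   == 2 << 6          == 128 bytes
    // i.e. a 100-byte request occupies 128 bytes of the code heap.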

src/share/vm/opto/output.cpp

@@ -1044,21 +1044,6 @@ void NonSafepointEmitter::emit_non_safepoint() {
     debug_info->end_non_safepoint(pc_offset);
 }


-
-// helper for fill_buffer bailout logic
-static void turn_off_compiler(Compile* C) {
-  if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
-    // Do not turn off compilation if a single giant method has
-    // blown the code cache size.
-    C->record_failure("excessive request to CodeCache");
-  } else {
-    // Let CompilerBroker disable further compilations.
-    C->record_failure("CodeCache is full");
-  }
-}
-
-
 //------------------------------init_buffer------------------------------------
 CodeBuffer* Compile::init_buffer(uint* blk_starts) {

@@ -1158,7 +1143,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {

   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-    turn_off_compiler(this);
+    C->record_failure("CodeCache is full");
     return NULL;
   }
   // Configure the code buffer.

@@ -1476,7 +1461,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   // Verify that there is sufficient space remaining
   cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-    turn_off_compiler(this);
+    C->record_failure("CodeCache is full");
     return;
   }

@@ -1633,7 +1618,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {

   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-    turn_off_compiler(this);
+    C->record_failure("CodeCache is full");
     return;
   }