mirror of
https://github.com/openjdk/jdk.git
synced 2025-09-21 03:24:38 +02:00
Initial load
This commit is contained in:
parent
686d76f772
commit
8153779ad3
2894 changed files with 911801 additions and 0 deletions
703
hotspot/src/share/vm/code/codeBlob.cpp
Normal file
703
hotspot/src/share/vm/code/codeBlob.cpp
Normal file
|
@ -0,0 +1,703 @@
|
|||
/*
|
||||
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_codeBlob.cpp.incl"
|
||||
|
||||
unsigned int align_code_offset(int offset) {
|
||||
// align the size to CodeEntryAlignment
|
||||
return
|
||||
((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
|
||||
- (int)CodeHeap::header_size();
|
||||
}
|
||||
|
||||
|
||||
// This must be consistent with the CodeBlob constructor's layout actions.
|
||||
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
|
||||
unsigned int size = header_size;
|
||||
size += round_to(cb->total_relocation_size(), oopSize);
|
||||
// align the size to CodeEntryAlignment
|
||||
size = align_code_offset(size);
|
||||
size += round_to(cb->total_code_size(), oopSize);
|
||||
size += round_to(cb->total_oop_size(), oopSize);
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
// Creates a simple CodeBlob. Sets up the size of the different regions.
// No CodeBuffer is involved: the blob has header + relocation space only,
// so the data/oops regions are empty (both offsets equal the total size).
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  // extra word for the relocation information, containing the reloc
  // index table length. Unfortunately, the reloc index table imple-
  // mentation is not easily understandable and thus it is not clear
  // what exactly the format is supposed to be. For now, we just turn
  // off the use of this table (gri 7/6/2000).

  _name = name;
  _size = size;
  _frame_complete_offset = frame_complete;
  _header_size = header_size;
  _relocation_size = locs_size;
  // instructions begin right after the (aligned) header + relocation info
  _instructions_offset = align_code_offset(header_size + locs_size);
  // empty data and oops regions: both start at the end of the blob
  _data_offset = size;
  _oops_offset = size;
  _oops_length = 0;
  _frame_size = 0;
  set_oop_maps(NULL);
}
|
||||
|
||||
|
||||
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copy code and relocation info.
// Layout (must agree with allocation_size()):
//   [header | relocation | (aligned) instructions | data ... oops]
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int header_size,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps
) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name = name;
  _size = size;
  _frame_complete_offset = frame_complete;
  _header_size = header_size;
  _relocation_size = round_to(cb->total_relocation_size(), oopSize);
  _instructions_offset = align_code_offset(header_size + _relocation_size);
  _data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize);
  // embedded oops live at the very end of the blob
  _oops_offset = _size - round_to(cb->total_oop_size(), oopSize);
  _oops_length = 0; // temporary, until the copy_oops handshake
  assert(_oops_offset >= _data_offset, "codeBlob is too small");
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
|
||||
|
||||
|
||||
// Installs a private C-heap copy of the given OopMapSet (or clears it if p is
// NULL).  The copy is released by flush().
void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, its your job to free it.
  if (p != NULL) {
    // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
    _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size());
    p->copy_to((address)_oop_maps);
  } else {
    _oop_maps = NULL;
  }
}
|
||||
|
||||
|
||||
void CodeBlob::flush() {
|
||||
if (_oop_maps) {
|
||||
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
|
||||
_oop_maps = NULL;
|
||||
}
|
||||
_comments.free();
|
||||
}
|
||||
|
||||
|
||||
// Promote one word from an assembly-time handle to a live embedded oop.
// NULL and the non-oop sentinel pass through unchanged (stored as-is);
// everything else is resolved through its JNI handle.
inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop)handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}
|
||||
|
||||
|
||||
// One-time handshake that fills the blob's embedded oop table from the
// assembler's jobject handles, then patches every oop-bearing instruction.
void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
  assert(_oops_length == 0, "do this handshake just once, please");
  int length = array->length();
  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
  oop* dest = oops_begin();
  // resolve each handle into a live oop in the embedded table
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }
  _oops_length = length;

  // Now we can fix up all the oops in the code.
  // We need to do this in the code because
  // the assembler uses jobjects as placeholders.
  // The code and relocations have already been
  // initialized by the CodeBlob constructor,
  // so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
|
||||
|
||||
|
||||
// Returns the type of the first relocation record covering pc.
// Callers only use this where relocation info is known to exist; reaching
// the end of the iterator without a record is a fatal error.
relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
  RelocIterator iter(this, pc, pc+1);
  if (iter.next()) {
    return (relocInfo::relocType) iter.type();
  }
  // No relocation info found for pc
  ShouldNotReachHere();
  return relocInfo::none; // dummy return value
}
|
||||
|
||||
|
||||
// True if any relocation record at pc marks a safepoint poll on return.
bool CodeBlob::is_at_poll_return(address pc) {
  bool found = false;
  RelocIterator iter(this, pc, pc+1);
  while (!found && iter.next()) {
    found = (iter.type() == relocInfo::poll_return_type);
  }
  return found;
}
|
||||
|
||||
|
||||
// True if any relocation record at pc marks a safepoint poll, either an
// ordinary poll or a poll on return.
bool CodeBlob::is_at_poll_or_poll_return(address pc) {
  bool found = false;
  RelocIterator iter(this, pc, pc+1);
  while (!found && iter.next()) {
    relocInfo::relocType t = iter.type();
    found = (t == relocInfo::poll_return_type || t == relocInfo::poll_type);
  }
  return found;
}
|
||||
|
||||
|
||||
// Re-writes the oop-bearing instructions in [begin, end) (NULL bounds mean
// the whole blob).  With initialize_immediates set, immediate oops still
// holding assembler jobject placeholders are first resolved in place.
void CodeBlob::fix_oop_relocations(address begin, address end,
                                   bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        // the instruction still holds a jobject placeholder; resolve it
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    }

    // There must not be any interfering patches or breakpoints.
    assert(!(iter.type() == relocInfo::breakpoint_type
             && iter.breakpoint_reloc()->active()),
           "no active breakpoint");
  }
}
|
||||
|
||||
// Class-unloading hook.  Only nmethods participate in unloading; reaching
// this base implementation on any other blob kind is a bug.
void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
                            OopClosure* keep_alive,
                            bool unloading_occurred) {
  ShouldNotReachHere();
}
|
||||
|
||||
// Looks up the OopMap recorded for the instruction offset corresponding to
// the given return address.  The blob must have oop maps.
OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  address pc = return_address ;
  assert (oop_maps() != NULL, "nope");
  // oop maps are keyed by offset from the start of the instructions region
  return oop_maps()->find_map_at_offset ((intptr_t) pc - (intptr_t) instructions_begin());
}
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of BufferBlob
|
||||
|
||||
|
||||
// Simple BufferBlob: raw code space with no relocation info and no frame.
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
|
||||
|
||||
// Allocates an empty BufferBlob with room for buffer_size bytes of code.
// Returns NULL if the CodeCache allocation fails (no fatal here, unlike the
// singleton blobs — presumably callers handle the NULL; verify at call sites).
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += round_to(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    // allocation must happen under the CodeCache lock
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// BufferBlob initialized from a CodeBuffer (code and relocations copied in);
// still frameless and with no oop maps.
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}
|
||||
|
||||
// Allocates a BufferBlob sized for and filled from the given CodeBuffer.
// Returns NULL if the CodeCache allocation fails.
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    // allocation must happen under the CodeCache lock
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache.  Note: unlike the singleton blobs'
// operator new, allocation failure is NOT fatal here — NULL is returned
// (presumably BufferBlob creation is allowed to fail; verify at call sites).
void* BufferBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  return p;
}
|
||||
|
||||
|
||||
// Returns a BufferBlob's storage to the CodeCache.  The blob must not be
// used after this call.
void BufferBlob::free( BufferBlob *blob ) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    // deallocation must happen under the CodeCache lock
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((CodeBlob*)blob);
  }
  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
|
||||
|
||||
bool BufferBlob::is_adapter_blob() const {
|
||||
return (strcmp(AdapterHandlerEntry::name, name()) == 0);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of RuntimeStub
|
||||
|
||||
// Full CodeBlob plus the flag telling stack walkers whether the caller's
// argument area must be treated as GC roots.
RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
{
  _caller_must_gc_arguments = caller_must_gc_arguments;
}
|
||||
|
||||
|
||||
// Allocates and registers a new RuntimeStub built from cb.  Allocation is
// done under the CodeCache lock; profiler/JVMTI notification and memory
// accounting happen after the lock is released.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (stub != NULL) {
    char stub_id[256];
    jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name);
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
      Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
    }
    // announce the new code to the profilers
    VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
    Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return stub;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache; failure is fatal because the VM
// cannot run without its runtime stubs.
void* RuntimeStub::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  if (p == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of DeoptimizationBlob
|
||||
|
||||
// Singleton blob holding the deoptimization entry code.  Records the offsets
// of the various unpack entry points inside the generated code.
DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  // not set here; filled in later by the C1-specific setup if needed
  _unpack_with_exception_in_tls   = -1;
#endif
}
|
||||
|
||||
|
||||
// Allocates and registers the DeoptimizationBlob singleton.  Allocation is
// done under the CodeCache lock; profiler/JVMTI notification and memory
// accounting happen after the lock is released.
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    // announce the new code to the profilers
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache; failure is fatal because the VM
// cannot run without the deoptimization blob.
void* DeoptimizationBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  if (p == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of UncommonTrapBlob
|
||||
|
||||
#ifdef COMPILER2
|
||||
// Singleton blob holding the C2 uncommon-trap entry code.
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}
|
||||
|
||||
|
||||
// Allocates and registers the UncommonTrapBlob singleton.  Allocation is
// done under the CodeCache lock; profiler/JVMTI notification and memory
// accounting happen after the lock is released.
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    // announce the new code to the profilers
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache; failure is fatal because C2
// cannot run without the uncommon-trap blob.
void* UncommonTrapBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  if (p == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}
|
||||
#endif // COMPILER2
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of ExceptionBlob
|
||||
|
||||
#ifdef COMPILER2
|
||||
// Singleton blob holding the C2 exception-handling (stack unrolling) code.
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}
|
||||
|
||||
|
||||
// Allocates and registers the ExceptionBlob singleton.  Allocation is done
// under the CodeCache lock; profiler/JVMTI notification and memory
// accounting happen after the lock is released.
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  // We do not need to hold the CodeCache lock during name formatting
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    // announce the new code to the profilers
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("ExceptionBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache; failure is fatal because C2
// cannot run without the exception blob.
void* ExceptionBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  if (p == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}
|
||||
#endif // COMPILER2
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of SafepointBlob
|
||||
|
||||
// Singleton blob holding the safepoint (illegal-instruction) handler code.
SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}
|
||||
|
||||
|
||||
// Allocates and registers the SafepointBlob singleton.  Allocation is done
// under the CodeCache lock; profiler/JVMTI notification and memory
// accounting happen after the lock is released.
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  // We do not need to hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    // announce the new code to the profilers
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("SafepointBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
|
||||
|
||||
|
||||
// Placement allocation in the CodeCache; failure is fatal because the VM
// cannot run without the safepoint blob.
void* SafepointBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  if (p == NULL) {
    fatal("Initial size of CodeCache is too small");
  }
  return p;
}
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Verification and printing
|
||||
|
||||
// Base-class verification hook: subclasses are expected to override;
// reaching this implementation is a bug.
void CodeBlob::verify() {
  ShouldNotReachHere();
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Debug printout of the blob's address and frame size.
void CodeBlob::print() const {
  tty->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
  tty->print_cr("Framesize: %d", _frame_size);
}
|
||||
|
||||
|
||||
// One-line value printout; subclasses print something more descriptive.
void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
|
||||
|
||||
#endif
|
||||
|
||||
// No verification defined for buffer blobs.
void BufferBlob::verify() {
  // unimplemented
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Debug printout: generic blob info followed by the one-line value form.
void BufferBlob::print() const {
  CodeBlob::print();
  print_value_on(tty);
}
|
||||
|
||||
|
||||
// One-line value printout: address and the blob's purpose (its name).
void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", this, name());
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
// No verification defined for runtime stubs.
void RuntimeStub::verify() {
  // unimplemented
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Debug printout: generic blob info, the stub's name, and a disassembly.
void RuntimeStub::print() const {
  CodeBlob::print();
  tty->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
  // print the name via "%s": passing it as the format string would
  // misinterpret any '%' characters it contains
  tty->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this);
}
|
||||
|
||||
|
||||
// One-line value printout: address plus stub name.
void RuntimeStub::print_value_on(outputStream* st) const {
  // print the name via "%s": passing it as the format string would
  // misinterpret any '%' characters it contains
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print("%s", name());
}
|
||||
|
||||
#endif
|
||||
|
||||
// No verification defined for singleton blobs.
void SingletonBlob::verify() {
  // unimplemented
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Debug printout: generic blob info, the blob's name, and a disassembly.
void SingletonBlob::print() const {
  CodeBlob::print();
  // print the name via "%s": passing it as the format string would
  // misinterpret any '%' characters it contains
  tty->print_cr("%s", name());
  Disassembler::decode((CodeBlob*)this);
}
|
||||
|
||||
|
||||
// One-line value printout: just the blob's name.
void SingletonBlob::print_value_on(outputStream* st) const {
  // print the name via "%s": passing it as the format string would
  // misinterpret any '%' characters it contains
  st->print_cr("%s", name());
}
|
||||
|
||||
// One-line value printout; deopt frames cannot be described further here.
void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}
|
||||
|
||||
#endif // PRODUCT
|
511
hotspot/src/share/vm/code/codeBlob.hpp
Normal file
511
hotspot/src/share/vm/code/codeBlob.hpp
Normal file
|
@ -0,0 +1,511 @@
|
|||
/*
|
||||
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// CodeBlob - superclass for all entries in the CodeCache.
|
||||
//
|
||||
// Subtypes are:
|
||||
// nmethod : Compiled Java methods (include method that calls to native code)
|
||||
// RuntimeStub : Call to VM runtime methods
|
||||
// DeoptimizationBlob : Used for deoptimization
|
||||
// ExceptionBlob : Used for stack unrolling
|
||||
// SafepointBlob : Used to handle illegal instruction exceptions
|
||||
//
|
||||
//
|
||||
// Layout:
|
||||
// - header
|
||||
// - relocation
|
||||
// - instruction space
|
||||
// - data space
|
||||
class DeoptimizationBlob;
|
||||
|
||||
class CodeBlob VALUE_OBJ_CLASS_SPEC {
|
||||
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
const char* _name;
|
||||
int _size; // total size of CodeBlob in bytes
|
||||
int _header_size; // size of header (depends on subclass)
|
||||
int _relocation_size; // size of relocation
|
||||
int _instructions_offset; // offset to where instructions region begins
|
||||
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
|
||||
// not finished setting up their frame. Beware of pc's in
|
||||
// that range. There is a similar range(s) on returns
|
||||
// which we don't detect.
|
||||
int _data_offset; // offset to where data region begins
|
||||
int _oops_offset; // offset to where embedded oop table begins (inside data)
|
||||
int _oops_length; // number of embedded oops
|
||||
int _frame_size; // size of stack frame
|
||||
OopMapSet* _oop_maps; // OopMap for this CodeBlob
|
||||
CodeComments _comments;
|
||||
|
||||
friend class OopRecorder;
|
||||
|
||||
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
|
||||
inline void initialize_immediate_oop(oop* dest, jobject handle);
|
||||
|
||||
public:
|
||||
// Returns the space needed for CodeBlob
|
||||
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
|
||||
|
||||
// Creation
|
||||
// a) simple CodeBlob
|
||||
// frame_complete is the offset from the beginning of the instructions
|
||||
// to where the frame setup (from stackwalk viewpoint) is complete.
|
||||
CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
|
||||
|
||||
// b) full CodeBlob
|
||||
CodeBlob(
|
||||
const char* name,
|
||||
CodeBuffer* cb,
|
||||
int header_size,
|
||||
int size,
|
||||
int frame_complete,
|
||||
int frame_size,
|
||||
OopMapSet* oop_maps
|
||||
);
|
||||
|
||||
// Deletion
|
||||
void flush();
|
||||
|
||||
// Typing
|
||||
virtual bool is_buffer_blob() const { return false; }
|
||||
virtual bool is_nmethod() const { return false; }
|
||||
virtual bool is_runtime_stub() const { return false; }
|
||||
virtual bool is_deoptimization_stub() const { return false; }
|
||||
  // Subtype tests; each is overridden to return true by the matching subclass.
  virtual bool is_uncommon_trap_stub() const     { return false; }
  virtual bool is_exception_stub() const         { return false; }
  virtual bool is_safepoint_stub() const         { return false; }
  virtual bool is_adapter_blob() const           { return false; }

  // Which compiler produced this blob (both false for non-compiled blobs).
  virtual bool is_compiled_by_c2() const         { return false; }
  virtual bool is_compiled_by_c1() const         { return false; }

  // Boundaries
  // Layout in memory: [header | relocation | instructions | data],
  // with the oops section located inside via _oops_offset.
  address    header_begin() const                { return (address) this; }
  address    header_end() const                  { return ((address) this) + _header_size; };
  relocInfo* relocation_begin() const            { return (relocInfo*) header_end(); };
  relocInfo* relocation_end() const              { return (relocInfo*)(header_end() + _relocation_size); }
  address    instructions_begin() const          { return (address) header_begin() + _instructions_offset; }
  address    instructions_end() const            { return (address) header_begin() + _data_offset; }
  address    data_begin() const                  { return (address) header_begin() + _data_offset; }
  address    data_end() const                    { return (address) header_begin() + _size; }
  oop*       oops_begin() const                  { return (oop*) (header_begin() + _oops_offset); }
  oop*       oops_end() const                    { return oops_begin() + _oops_length; }

  // Offsets
  // Byte offsets of each section, relative to header_begin().
  int relocation_offset() const                  { return _header_size; }
  int instructions_offset() const                { return _instructions_offset; }
  int data_offset() const                        { return _data_offset; }
  int oops_offset() const                        { return _oops_offset; }

  // Sizes
  // Section sizes in bytes, derived from the begin/end accessors above.
  int size() const                               { return _size; }
  int header_size() const                        { return _header_size; }
  int relocation_size() const                    { return (address) relocation_end() - (address) relocation_begin(); }
  int instructions_size() const                  { return instructions_end() - instructions_begin(); }
  int data_size() const                          { return data_end() - data_begin(); }
  int oops_size() const                          { return (address) oops_end() - (address) oops_begin(); }

  // Containment
  // Half-open range tests: begin <= addr < end.
  bool blob_contains(address addr) const         { return header_begin() <= addr && addr < data_end(); }
  bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
  bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); }
  bool data_contains(address addr) const         { return data_begin() <= addr && addr < data_end(); }
  bool oops_contains(oop* addr) const            { return oops_begin() <= addr && addr < oops_end(); }
  bool contains(address addr) const              { return instructions_contains(addr); }
  // True once pc is past the point where the frame has been fully set up.
  bool is_frame_complete_at(address addr) const  { return instructions_contains(addr) &&
                                                          addr >= instructions_begin() + _frame_complete_offset; }

  // Relocation support
  // Convenience wrappers that forward to the three-argument overload
  // (declared elsewhere in this class) with initialize_immediates == false.
  void fix_oop_relocations(address begin, address end) {
    fix_oop_relocations(begin, end, false);
  }
  void fix_oop_relocations() {
    fix_oop_relocations(NULL, NULL, false);
  }
  relocInfo::relocType reloc_type_for_address(address pc);
  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const                   { return index == 0? (oop)NULL: *oop_addr_at(index); }
  oop* oop_addr_at(int index) const{             // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= _oops_length, "must be a valid non-zero index");
    return &oops_begin()[index-1];
  }

  // Copies the given oops into this blob's oops section.
  void copy_oops(GrowableArray<jobject>* oops);

  // CodeCache support: really only used by the nmethods, but in order to get
  // asserts and certain bookkeeping to work in the CodeCache they are defined
  // virtual here.
  virtual bool is_zombie() const                 { return false; }
  virtual bool is_locked_by_vm() const           { return false; }

  virtual bool is_unloaded() const               { return false; }
  virtual bool is_not_entrant() const            { return false; }

  // GC support
  virtual bool is_alive() const = 0;
  virtual void do_unloading(BoolObjectClosure* is_alive,
                            OopClosure* keep_alive,
                            bool unloading_occurred);
  virtual void oops_do(OopClosure* f) = 0;

  // OopMap for frame
  OopMapSet* oop_maps() const                    { return _oop_maps; }
  void set_oop_maps(OopMapSet* p);
  OopMap* oop_map_for_return_address(address return_address);
  virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }

  // Frame support
  // Frame size in words (as recorded by the blob's creator).
  int  frame_size() const                        { return _frame_size; }
  void set_frame_size(int size)                  { _frame_size = size; }

  // Returns true, if the next frame is responsible for GC'ing oops passed as arguments
  virtual bool caller_must_gc_arguments(JavaThread* thread) const { return false; }

  // Naming
  const char* name() const                       { return _name; }
  void set_name(const char* name)                { _name = name; }

  // Debugging
  virtual void verify();
  virtual void print() const                     PRODUCT_RETURN;
  virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;

  // Print the comment associated with offset on stream, if there is one
  void print_block_comment(outputStream* stream, intptr_t offset) {
    _comments.print_block_comment(stream, offset);
  }

  // Transfer ownership of comments to this CodeBlob
  void set_comments(CodeComments& comments) {
    _comments.assign(comments);
  }
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.

class BufferBlob: public CodeBlob {
  friend class VMStructs;
 private:
  // Creation support
  BufferBlob(const char* name, int size);
  BufferBlob(const char* name, int size, CodeBuffer* cb);

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static BufferBlob* create(const char* name, int buffer_size);
  static BufferBlob* create(const char* name, CodeBuffer* cb);

  // Returns the blob's storage to the CodeCache.
  static void free(BufferBlob* buf);

  // Typing
  bool is_buffer_blob() const                    { return true; }
  bool is_adapter_blob() const;

  // GC/Verification support
  // BufferBlobs hold no oops and are always alive, so these are no-ops.
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
  bool is_alive() const                          { return true; }
  void do_unloading(BoolObjectClosure* is_alive,
                    OopClosure* keep_alive,
                    bool unloading_occurred)     { /* do nothing */ }

  void oops_do(OopClosure* f)                    { /* do nothing*/ }

  void verify();
  void print() const                             PRODUCT_RETURN;
  void print_value_on(outputStream* st) const    PRODUCT_RETURN;
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine

class RuntimeStub: public CodeBlob {
  friend class VMStructs;
 private:
  // Whether the caller's frame must keep argument oops alive across this stub.
  bool _caller_must_gc_arguments;

  // Creation support
  RuntimeStub(
    const char* name,
    CodeBuffer* cb,
    int         size,
    int         frame_complete,
    int         frame_size,
    OopMapSet*  oop_maps,
    bool        caller_must_gc_arguments
  );

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static RuntimeStub* new_runtime_stub(
    const char* stub_name,
    CodeBuffer* cb,
    int         frame_complete,
    int         frame_size,
    OopMapSet*  oop_maps,
    bool        caller_must_gc_arguments
  );

  // Typing
  bool is_runtime_stub() const                   { return true; }

  // GC support
  bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }

  // The stub's single entry point is the start of its instructions.
  address entry_point()                          { return instructions_begin(); }

  // GC/Verification support
  // RuntimeStubs hold no oops and are always alive, so these are no-ops.
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }
  bool is_alive() const                          { return true; }
  void do_unloading(BoolObjectClosure* is_alive,
                    OopClosure* keep_alive,
                    bool unloading_occurred)     { /* do nothing */ }
  void oops_do(OopClosure* f)                    { /* do-nothing*/ }

  void verify();
  void print() const                             PRODUCT_RETURN;
  void print_value_on(outputStream* st) const    PRODUCT_RETURN;
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// Super-class for all blobs that exist in only one instance. Implements default behaviour.

class SingletonBlob: public CodeBlob {
  friend class VMStructs;
 public:
  SingletonBlob(
    const char* name,
    CodeBuffer* cb,
    int         header_size,
    int         size,
    int         frame_size,
    OopMapSet*  oop_maps
  )
  // Singleton blobs never have a "frame complete" point of their own.
  : CodeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
  {};

  // Singleton blobs live for the duration of the VM; unloading is a no-op.
  bool is_alive() const                          { return true; }
  void do_unloading(BoolObjectClosure* is_alive,
                    OopClosure* keep_alive,
                    bool unloading_occurred)     { /* do-nothing*/ }

  void verify(); // does nothing
  void print() const                             PRODUCT_RETURN;
  void print_value_on(outputStream* st) const    PRODUCT_RETURN;
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// DeoptimizationBlob

class DeoptimizationBlob: public SingletonBlob {
  friend class VMStructs;
 private:
  // Offsets (from instructions_begin) of the various unpack entry points.
  int _unpack_offset;
  int _unpack_with_exception;
  int _unpack_with_reexecution;

  int _unpack_with_exception_in_tls;

  // Creation support
  DeoptimizationBlob(
    CodeBuffer* cb,
    int         size,
    OopMapSet*  oop_maps,
    int         unpack_offset,
    int         unpack_with_exception_offset,
    int         unpack_with_reexecution_offset,
    int         frame_size
  );

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static DeoptimizationBlob* create(
    CodeBuffer* cb,
    OopMapSet*  oop_maps,
    int         unpack_offset,
    int         unpack_with_exception_offset,
    int         unpack_with_reexecution_offset,
    int         frame_size
  );

  // Typing
  bool is_deoptimization_stub() const            { return true; }
  const DeoptimizationBlob *as_deoptimization_stub() const { return this; }
  // True if pc is the unpack entry itself, or the return address just before it.
  bool exception_address_is_unpack_entry(address pc) const {
    address unpack_pc = unpack();
    return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
  }

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }

  // Iteration
  void oops_do(OopClosure* f)                    {}

  // Printing
  void print_value_on(outputStream* st) const    PRODUCT_RETURN;

  // Entry points, computed from the recorded offsets.
  address unpack() const                         { return instructions_begin() + _unpack_offset;           }
  address unpack_with_exception() const          { return instructions_begin() + _unpack_with_exception;   }
  address unpack_with_reexecution() const        { return instructions_begin() + _unpack_with_reexecution; }

  // Alternate entry point for C1 where the exception and issuing pc
  // are in JavaThread::_exception_oop and JavaThread::_exception_pc
  // instead of being in registers. This is needed because C1 doesn't
  // model exception paths in a way that keeps these registers free so
  // there may be live values in those registers during deopt.
  void set_unpack_with_exception_in_tls_offset(int offset) {
    _unpack_with_exception_in_tls = offset;
    assert(contains(instructions_begin() + _unpack_with_exception_in_tls), "must be PC inside codeblob");
  }
  address unpack_with_exception_in_tls() const   { return instructions_begin() + _unpack_with_exception_in_tls; }
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// UncommonTrapBlob (currently only used by Compiler 2)

#ifdef COMPILER2

class UncommonTrapBlob: public SingletonBlob {
  friend class VMStructs;
 private:
  // Creation support
  UncommonTrapBlob(
    CodeBuffer* cb,
    int         size,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static UncommonTrapBlob* create(
    CodeBuffer* cb,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f)  { /* nothing to do */ }

  // Typing
  bool is_uncommon_trap_stub() const             { return true; }

  // Iteration
  void oops_do(OopClosure* f)                    {}
};
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// ExceptionBlob: used for exception unwinding in compiled code (currently only used by Compiler 2)

class ExceptionBlob: public SingletonBlob {
  friend class VMStructs;
 private:
  // Creation support
  ExceptionBlob(
    CodeBuffer* cb,
    int         size,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static ExceptionBlob* create(
    CodeBuffer* cb,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }

  // Typing
  bool is_exception_stub() const                 { return true; }

  // Iteration
  void oops_do(OopClosure* f)                    {}
};
#endif // COMPILER2
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
// SafepointBlob: handles illegal_instruction exceptions during a safepoint

class SafepointBlob: public SingletonBlob {
  friend class VMStructs;
 private:
  // Creation support
  SafepointBlob(
    CodeBuffer* cb,
    int         size,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // Placement allocation: storage is carved out of the CodeCache.
  void* operator new(size_t s, unsigned size);

 public:
  // Creation
  static SafepointBlob* create(
    CodeBuffer* cb,
    OopMapSet*  oop_maps,
    int         frame_size
  );

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }

  // Typing
  bool is_safepoint_stub() const                 { return true; }

  // Iteration
  void oops_do(OopClosure* f)                    {}
};
|
662
hotspot/src/share/vm/code/codeCache.cpp
Normal file
662
hotspot/src/share/vm/code/codeCache.cpp
Normal file
|
@ -0,0 +1,662 @@
|
|||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_codeCache.cpp.incl"
|
||||
|
||||
// Helper class for printing in CodeCache
|
||||
|
||||
class CodeBlob_sizes {
|
||||
private:
|
||||
int count;
|
||||
int total_size;
|
||||
int header_size;
|
||||
int code_size;
|
||||
int stub_size;
|
||||
int relocation_size;
|
||||
int scopes_oop_size;
|
||||
int scopes_data_size;
|
||||
int scopes_pcs_size;
|
||||
|
||||
public:
|
||||
CodeBlob_sizes() {
|
||||
count = 0;
|
||||
total_size = 0;
|
||||
header_size = 0;
|
||||
code_size = 0;
|
||||
stub_size = 0;
|
||||
relocation_size = 0;
|
||||
scopes_oop_size = 0;
|
||||
scopes_data_size = 0;
|
||||
scopes_pcs_size = 0;
|
||||
}
|
||||
|
||||
int total() { return total_size; }
|
||||
bool is_empty() { return count == 0; }
|
||||
|
||||
void print(const char* title) {
|
||||
tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
|
||||
count,
|
||||
title,
|
||||
total() / K,
|
||||
header_size * 100 / total_size,
|
||||
relocation_size * 100 / total_size,
|
||||
code_size * 100 / total_size,
|
||||
stub_size * 100 / total_size,
|
||||
scopes_oop_size * 100 / total_size,
|
||||
scopes_data_size * 100 / total_size,
|
||||
scopes_pcs_size * 100 / total_size);
|
||||
}
|
||||
|
||||
void add(CodeBlob* cb) {
|
||||
count++;
|
||||
total_size += cb->size();
|
||||
header_size += cb->header_size();
|
||||
relocation_size += cb->relocation_size();
|
||||
scopes_oop_size += cb->oops_size();
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod *nm = (nmethod*)cb;
|
||||
code_size += nm->code_size();
|
||||
stub_size += nm->stub_size();
|
||||
|
||||
scopes_data_size += nm->scopes_data_size();
|
||||
scopes_pcs_size += nm->scopes_pcs_size();
|
||||
} else {
|
||||
code_size += cb->instructions_size();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// CodeCache implementation

// Static state of the CodeCache.  The backing CodeHeap is allocated eagerly
// at static-initialization time; CodeCache::initialize() reserves its memory.
CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
// Set when inline caches must be cleaned at the next GC epilogue.
bool CodeCache::_needs_cache_clean = false;
|
||||
CodeBlob* CodeCache::first() {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
return (CodeBlob*)_heap->first();
|
||||
}
|
||||
|
||||
|
||||
CodeBlob* CodeCache::next(CodeBlob* cb) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
return (CodeBlob*)_heap->next(cb);
|
||||
}
|
||||
|
||||
|
||||
CodeBlob* CodeCache::alive(CodeBlob *cb) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
while (cb != NULL && !cb->is_alive()) cb = next(cb);
|
||||
return cb;
|
||||
}
|
||||
|
||||
|
||||
nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
|
||||
return (nmethod*)cb;
|
||||
}
|
||||
|
||||
|
||||
// Allocates 'size' bytes from the CodeCache heap, expanding the heap as
// needed.  Returns NULL if allocation fails even after expansion.  The
// returned memory is uninitialized; the caller must immediately run a
// CodeBlob subclass constructor on it.
CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed; give up without having changed any bookkeeping.
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  // Count the blob only once allocation has actually succeeded; previously
  // the counter was incremented up front and leaked on the failure path.
  _number_of_blobs++;
  verify_if_often();
  if (PrintCodeCache2) {        // Need to add a new flag
    ResourceMark rm;
    tty->print_cr("CodeCache allocation:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size);
  }
  return cb;
}
|
||||
|
||||
// Returns cb's storage to the CodeCache heap and updates the blob and
// dependency counters.  Caller must hold CodeCache_lock or be at a safepoint.
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  if (PrintCodeCache2) {        // Need to add a new flag
    ResourceMark rm;
    tty->print_cr("CodeCache free:  addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size());
  }
  // Keep the dependency counter in sync with commit(), which incremented it.
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}
|
||||
|
||||
|
||||
// Finalizes a freshly constructed blob: updates dependency bookkeeping and
// flushes the processor instruction cache over the blob's code.
void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies++;
  }
  // flush the hardware I-cache
  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}


// Flushing the whole code cache is not supported in this VM version.
void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}
|
||||
|
||||
|
||||
// Iteration over CodeBlobs

// Loop macros over the CodeCache: all blobs, only alive blobs, and only
// alive nmethods.  All require CodeCache_lock to be held (or a safepoint).
#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
|
||||
|
||||
|
||||
// Returns true if p lies within the CodeCache's reserved range.
bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potientially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Like find_blob, but asserts the result (if any) is an nmethod.
nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}
|
||||
|
||||
|
||||
// Applies f to every blob in the cache (alive or not).
void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


// Applies f to every nmethod in the cache (alive or not).
void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


// Allocation alignment of the underlying CodeHeap, in bytes.
int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}
|
||||
|
||||
|
||||
// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

// Applies f to every oop embedded in every alive blob.
void CodeCache::oops_do(OopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->oops_do(f);
  }
}

// Nothing to do before a GC.
void CodeCache::gc_prologue() {
}


// After a GC: clean inline caches if requested, verify nmethods in debug
// builds, and re-patch oop relocations (oops may have moved).
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      debug_only(nm->verify();)
    }
    cb->fix_oop_relocations();
  }
  set_needs_cache_clean(false);
}
|
||||
|
||||
|
||||
// Lowest address of the committed code cache.
address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


// One-past-the-end address of the committed code cache.
address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}
|
||||
|
||||
|
||||
// Forward declaration; defined in the platform-specific ICache code.
void icache_init();

// One-time VM-startup initialization: sanity-checks segment-size flags,
// rounds the cache size flags to page granularity, reserves the heap,
// and registers the code area with the memory service and the OS.
void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure, instead, round
  // the code cache to the page size.  In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


// Global init hook called from VM startup.
void codeCache_init() {
  CodeCache::initialize();
}
|
||||
|
||||
//------------------------------------------------------------------------------------------------

// Number of committed nmethods that registered dependencies (see commit()/free()).
int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

// Clears the inline caches of every alive nmethod.
void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}
|
||||
|
||||
#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
// Number of times mark_for_deoptimization(DepChange&) has been entered.
static int  dependentCheckCount = 0;
#endif // PRODUCT
|
||||
|
||||
|
||||
// Marks for deoptimization every nmethod whose recorded dependencies are
// invalidated by the class-hierarchy change described by 'changes'.
// Returns the number of nmethods marked.
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    // Cross-check: any still-unmarked nmethod that now fails its dependency
    // check should have been caught by the pass above.
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}
|
||||
|
||||
|
||||
#ifdef HOTSWAP
|
||||
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
|
||||
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
|
||||
int number_of_marked_CodeBlobs = 0;
|
||||
|
||||
// Deoptimize all methods of the evolving class itself
|
||||
objArrayOop old_methods = dependee->methods();
|
||||
for (int i = 0; i < old_methods->length(); i++) {
|
||||
ResourceMark rm;
|
||||
methodOop old_method = (methodOop) old_methods->obj_at(i);
|
||||
nmethod *nm = old_method->code();
|
||||
if (nm != NULL) {
|
||||
nm->mark_for_deoptimization();
|
||||
number_of_marked_CodeBlobs++;
|
||||
}
|
||||
}
|
||||
|
||||
FOR_ALL_ALIVE_NMETHODS(nm) {
|
||||
if (nm->is_marked_for_deoptimization()) {
|
||||
// ...Already marked in the previous pass; don't count it again.
|
||||
} else if (nm->is_evol_dependent_on(dependee())) {
|
||||
ResourceMark rm;
|
||||
nm->mark_for_deoptimization();
|
||||
number_of_marked_CodeBlobs++;
|
||||
} else {
|
||||
// flush caches in case they refer to a redefined methodOop
|
||||
nm->clear_inline_caches();
|
||||
}
|
||||
}
|
||||
|
||||
return number_of_marked_CodeBlobs;
|
||||
}
|
||||
#endif // HOTSWAP
|
||||
|
||||
|
||||
// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


// Marks for deoptimization every nmethod that depends on 'dependee'
// (e.g. inlined it).  Returns the number of nmethods marked.
int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
|
||||
|
||||
// Transitions every nmethod previously marked for deoptimization toward
// reclamation: zombie if it is already non-entrant and convertible,
// otherwise non-entrant.  Must run at a safepoint.
void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

// Weaker variant: only blocks new entries into marked nmethods; does not
// zombie them.
void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}
|
||||
|
||||
// Verifies the heap structure and every alive blob.
void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}
|
||||
|
||||
//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

// Verifies the heap on every allocate/free when -XX:+VerifyCodeCacheOften.
void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}
|
||||
|
||||
void CodeCache::print_internals() {
|
||||
int nmethodCount = 0;
|
||||
int runtimeStubCount = 0;
|
||||
int adapterCount = 0;
|
||||
int deoptimizationStubCount = 0;
|
||||
int uncommonTrapStubCount = 0;
|
||||
int bufferBlobCount = 0;
|
||||
int total = 0;
|
||||
int nmethodAlive = 0;
|
||||
int nmethodNotEntrant = 0;
|
||||
int nmethodZombie = 0;
|
||||
int nmethodUnloaded = 0;
|
||||
int nmethodJava = 0;
|
||||
int nmethodNative = 0;
|
||||
int maxCodeSize = 0;
|
||||
ResourceMark rm;
|
||||
|
||||
CodeBlob *cb;
|
||||
for (cb = first(); cb != NULL; cb = next(cb)) {
|
||||
total++;
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
|
||||
if (Verbose && nm->method() != NULL) {
|
||||
ResourceMark rm;
|
||||
char *method_name = nm->method()->name_and_sig_as_C_string();
|
||||
tty->print("%s", method_name);
|
||||
if(nm->is_alive()) { tty->print_cr(" alive"); }
|
||||
if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
|
||||
if(nm->is_zombie()) { tty->print_cr(" zombie"); }
|
||||
}
|
||||
|
||||
nmethodCount++;
|
||||
|
||||
if(nm->is_alive()) { nmethodAlive++; }
|
||||
if(nm->is_not_entrant()) { nmethodNotEntrant++; }
|
||||
if(nm->is_zombie()) { nmethodZombie++; }
|
||||
if(nm->is_unloaded()) { nmethodUnloaded++; }
|
||||
if(nm->is_native_method()) { nmethodNative++; }
|
||||
|
||||
if(nm->method() != NULL && nm->is_java_method()) {
|
||||
nmethodJava++;
|
||||
if(nm->code_size() > maxCodeSize) {
|
||||
maxCodeSize = nm->code_size();
|
||||
}
|
||||
}
|
||||
} else if (cb->is_runtime_stub()) {
|
||||
runtimeStubCount++;
|
||||
} else if (cb->is_deoptimization_stub()) {
|
||||
deoptimizationStubCount++;
|
||||
} else if (cb->is_uncommon_trap_stub()) {
|
||||
uncommonTrapStubCount++;
|
||||
} else if (cb->is_adapter_blob()) {
|
||||
adapterCount++;
|
||||
} else if (cb->is_buffer_blob()) {
|
||||
bufferBlobCount++;
|
||||
}
|
||||
}
|
||||
|
||||
int bucketSize = 512;
|
||||
int bucketLimit = maxCodeSize / bucketSize + 1;
|
||||
int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
|
||||
memset(buckets,0,sizeof(int) * bucketLimit);
|
||||
|
||||
for (cb = first(); cb != NULL; cb = next(cb)) {
|
||||
if (cb->is_nmethod()) {
|
||||
nmethod* nm = (nmethod*)cb;
|
||||
if(nm->is_java_method()) {
|
||||
buckets[nm->code_size() / bucketSize]++;
|
||||
}
|
||||
}
|
||||
}
|
||||
tty->print_cr("Code Cache Entries (total of %d)",total);
|
||||
tty->print_cr("-------------------------------------------------");
|
||||
tty->print_cr("nmethods: %d",nmethodCount);
|
||||
tty->print_cr("\talive: %d",nmethodAlive);
|
||||
tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
|
||||
tty->print_cr("\tzombie: %d",nmethodZombie);
|
||||
tty->print_cr("\tunloaded: %d",nmethodUnloaded);
|
||||
tty->print_cr("\tjava: %d",nmethodJava);
|
||||
tty->print_cr("\tnative: %d",nmethodNative);
|
||||
tty->print_cr("runtime_stubs: %d",runtimeStubCount);
|
||||
tty->print_cr("adapters: %d",adapterCount);
|
||||
tty->print_cr("buffer blobs: %d",bufferBlobCount);
|
||||
tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
|
||||
tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
|
||||
tty->print_cr("\nnmethod size distribution (non-zombie java)");
|
||||
tty->print_cr("-------------------------------------------------");
|
||||
|
||||
for(int i=0; i<bucketLimit; i++) {
|
||||
if(buckets[i] != 0) {
|
||||
tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
|
||||
tty->fill_to(40);
|
||||
tty->print_cr("%d",buckets[i]);
|
||||
}
|
||||
}
|
||||
|
||||
FREE_C_HEAP_ARRAY(int, buckets);
|
||||
}
|
||||
|
||||
void CodeCache::print() {
|
||||
CodeBlob_sizes live;
|
||||
CodeBlob_sizes dead;
|
||||
|
||||
FOR_ALL_BLOBS(p) {
|
||||
if (!p->is_alive()) {
|
||||
dead.add(p);
|
||||
} else {
|
||||
live.add(p);
|
||||
}
|
||||
}
|
||||
|
||||
tty->print_cr("CodeCache:");
|
||||
|
||||
tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
|
||||
dependentCheckTime.seconds() / dependentCheckCount);
|
||||
|
||||
if (!live.is_empty()) {
|
||||
live.print("live");
|
||||
}
|
||||
if (!dead.is_empty()) {
|
||||
dead.print("dead");
|
||||
}
|
||||
|
||||
|
||||
if (Verbose) {
|
||||
// print the oop_map usage
|
||||
int code_size = 0;
|
||||
int number_of_blobs = 0;
|
||||
int number_of_oop_maps = 0;
|
||||
int map_size = 0;
|
||||
FOR_ALL_BLOBS(p) {
|
||||
if (p->is_alive()) {
|
||||
number_of_blobs++;
|
||||
code_size += p->instructions_size();
|
||||
OopMapSet* set = p->oop_maps();
|
||||
if (set != NULL) {
|
||||
number_of_oop_maps += set->size();
|
||||
map_size += set->heap_size();
|
||||
}
|
||||
}
|
||||
}
|
||||
tty->print_cr("OopMaps");
|
||||
tty->print_cr(" #blobs = %d", number_of_blobs);
|
||||
tty->print_cr(" code size = %d", code_size);
|
||||
tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
|
||||
tty->print_cr(" map size = %d", map_size);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif // PRODUCT
|
129
hotspot/src/share/vm/code/codeCache.hpp
Normal file
129
hotspot/src/share/vm/code/codeCache.hpp
Normal file
|
@ -0,0 +1,129 @@
|
|||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// The CodeCache implements the code cache for various pieces of generated
|
||||
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
|
||||
// The entries in the CodeCache are all CodeBlob's.
|
||||
|
||||
// Implementation:
|
||||
// - Each CodeBlob occupies one chunk of memory.
|
||||
// - Like the offset table in oldspace the zone has at table for
|
||||
// locating a method given a addess of an instruction.
|
||||
|
||||
class OopClosure;
|
||||
class DepChange;
|
||||
|
||||
class CodeCache : AllStatic {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// CodeHeap is malloc()'ed at startup and never deleted during shutdown,
|
||||
// so that the generated assembly code is always there when it's needed.
|
||||
// This may cause memory leak, but is necessary, for now. See 4423824,
|
||||
// 4422213 or 4436291 for details.
|
||||
static CodeHeap * _heap;
|
||||
static int _number_of_blobs;
|
||||
static int _number_of_nmethods_with_dependencies;
|
||||
static bool _needs_cache_clean;
|
||||
|
||||
static void verify_if_often() PRODUCT_RETURN;
|
||||
public:
|
||||
|
||||
// Initialization
|
||||
static void initialize();
|
||||
|
||||
// Allocation/administration
|
||||
static CodeBlob* allocate(int size); // allocates a new CodeBlob
|
||||
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
|
||||
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
|
||||
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
|
||||
static void free(CodeBlob* cb); // frees a CodeBlob
|
||||
static void flush(); // flushes all CodeBlobs
|
||||
static bool contains(void *p); // returns whether p is included
|
||||
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
|
||||
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
|
||||
|
||||
// Lookup
|
||||
static CodeBlob* find_blob(void* start);
|
||||
static nmethod* find_nmethod(void* start);
|
||||
|
||||
// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
|
||||
// what you are doing)
|
||||
static CodeBlob* find_blob_unsafe(void* start) {
|
||||
CodeBlob* result = (CodeBlob*)_heap->find_start(start);
|
||||
assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
|
||||
return result;
|
||||
}
|
||||
|
||||
// Iteration
|
||||
static CodeBlob* first();
|
||||
static CodeBlob* next (CodeBlob* cb);
|
||||
static CodeBlob* alive(CodeBlob *cb);
|
||||
static nmethod* alive_nmethod(CodeBlob *cb);
|
||||
static int nof_blobs() { return _number_of_blobs; }
|
||||
|
||||
// GC support
|
||||
static void gc_epilogue();
|
||||
static void gc_prologue();
|
||||
// If "unloading_occurred" is true, then unloads (i.e., breaks root links
|
||||
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
|
||||
// to "true" iff some code got unloaded.
|
||||
static void do_unloading(BoolObjectClosure* is_alive,
|
||||
OopClosure* keep_alive,
|
||||
bool unloading_occurred);
|
||||
static void oops_do(OopClosure* f);
|
||||
|
||||
// Printing/debugging
|
||||
static void print() PRODUCT_RETURN; // prints summary
|
||||
static void print_internals();
|
||||
static void verify(); // verifies the code cache
|
||||
|
||||
// The full limits of the codeCache
|
||||
static address low_bound() { return (address) _heap->low_boundary(); }
|
||||
static address high_bound() { return (address) _heap->high_boundary(); }
|
||||
|
||||
// Profiling
|
||||
static address first_address(); // first address used for CodeBlobs
|
||||
static address last_address(); // last address used for CodeBlobs
|
||||
static size_t capacity() { return _heap->capacity(); }
|
||||
static size_t max_capacity() { return _heap->max_capacity(); }
|
||||
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
|
||||
|
||||
static bool needs_cache_clean() { return _needs_cache_clean; }
|
||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
|
||||
// Deoptimization
|
||||
static int mark_for_deoptimization(DepChange& changes);
|
||||
#ifdef HOTSWAP
|
||||
static int mark_for_evol_deoptimization(instanceKlassHandle dependee);
|
||||
#endif // HOTSWAP
|
||||
|
||||
static void mark_all_nmethods_for_deoptimization();
|
||||
static int mark_for_deoptimization(methodOop dependee);
|
||||
static void make_marked_nmethods_zombies();
|
||||
static void make_marked_nmethods_not_entrant();
|
||||
|
||||
// tells how many nmethods have dependencies
|
||||
static int number_of_nmethods_with_dependencies();
|
||||
};
|
662
hotspot/src/share/vm/code/compiledIC.cpp
Normal file
662
hotspot/src/share/vm/code/compiledIC.cpp
Normal file
|
@ -0,0 +1,662 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_compiledIC.cpp.incl"
|
||||
|
||||
|
||||
// Every time a compiled IC is changed or its type is being accessed,
|
||||
// either the CompiledIC_lock must be set or we must be at a safe point.
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Low-level access to an inline cache. Private, since they might not be
|
||||
// MT-safe to use.
|
||||
|
||||
void CompiledIC::set_cached_oop(oop cache) {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
|
||||
assert (cache == NULL || cache != badOop, "invalid oop");
|
||||
|
||||
if (TraceCompiledIC) {
|
||||
tty->print(" ");
|
||||
print_compiled_ic();
|
||||
tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
|
||||
}
|
||||
|
||||
if (cache == NULL) cache = (oop)Universe::non_oop_word();
|
||||
|
||||
*_oop_addr = cache;
|
||||
// fix up the relocations
|
||||
RelocIterator iter = _oops;
|
||||
while (iter.next()) {
|
||||
if (iter.type() == relocInfo::oop_type) {
|
||||
oop_Relocation* r = iter.oop_reloc();
|
||||
if (r->oop_addr() == _oop_addr)
|
||||
r->fix_oop_relocation();
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
oop CompiledIC::cached_oop() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
|
||||
|
||||
if (!is_in_transition_state()) {
|
||||
oop data = *_oop_addr;
|
||||
// If we let the oop value here be initialized to zero...
|
||||
assert(data != NULL || Universe::non_oop_word() == NULL,
|
||||
"no raw nulls in CompiledIC oops, because of patching races");
|
||||
return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
|
||||
} else {
|
||||
return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void CompiledIC::set_ic_destination(address entry_point) {
|
||||
assert(entry_point != NULL, "must set legal entry point");
|
||||
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
if (TraceCompiledIC) {
|
||||
tty->print(" ");
|
||||
print_compiled_ic();
|
||||
tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
|
||||
}
|
||||
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
#ifdef ASSERT
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
|
||||
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
|
||||
#endif
|
||||
_ic_call->set_destination_mt_safe(entry_point);
|
||||
}
|
||||
|
||||
|
||||
address CompiledIC::ic_destination() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
if (!is_in_transition_state()) {
|
||||
return _ic_call->destination();
|
||||
} else {
|
||||
return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool CompiledIC::is_in_transition_state() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
return InlineCacheBuffer::contains(_ic_call->destination());
|
||||
}
|
||||
|
||||
|
||||
// Returns native address of 'call' instruction in inline-cache. Used by
|
||||
// the InlineCacheBuffer when it needs to find the stub.
|
||||
address CompiledIC::stub_address() const {
|
||||
assert(is_in_transition_state(), "should only be called when we are in a transition state");
|
||||
return _ic_call->destination();
|
||||
}
|
||||
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// High-level access to an inline cache. Guaranteed to be MT-safe.
|
||||
|
||||
|
||||
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
|
||||
methodHandle method = call_info->selected_method();
|
||||
bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
|
||||
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
assert(method->is_oop(), "cannot be NULL and must be oop");
|
||||
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
|
||||
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
|
||||
|
||||
address entry;
|
||||
if (is_invoke_interface) {
|
||||
int index = klassItable::compute_itable_index(call_info->resolved_method()());
|
||||
entry = VtableStubs::create_stub(false, index, method());
|
||||
assert(entry != NULL, "entry not computed");
|
||||
klassOop k = call_info->resolved_method()->method_holder();
|
||||
assert(Klass::cast(k)->is_interface(), "sanity check");
|
||||
InlineCacheBuffer::create_transition_stub(this, k, entry);
|
||||
} else {
|
||||
// Can be different than method->vtable_index(), due to package-private etc.
|
||||
int vtable_index = call_info->vtable_index();
|
||||
entry = VtableStubs::create_stub(true, vtable_index, method());
|
||||
InlineCacheBuffer::create_transition_stub(this, method(), entry);
|
||||
}
|
||||
|
||||
if (TraceICs) {
|
||||
ResourceMark rm;
|
||||
tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
|
||||
instruction_address(), method->print_value_string(), entry);
|
||||
}
|
||||
|
||||
Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
|
||||
// We can't check this anymore. With lazy deopt we could have already
|
||||
// cleaned this IC entry before we even return. This is possible if
|
||||
// we ran out of space in the inline cache buffer trying to do the
|
||||
// set_next and we safepointed to free up space. This is a benign
|
||||
// race because the IC entry was complete when we safepointed so
|
||||
// cleaning it immediately is harmless.
|
||||
// assert(is_megamorphic(), "sanity check");
|
||||
}
|
||||
|
||||
|
||||
// true if destination is megamorphic stub
|
||||
bool CompiledIC::is_megamorphic() const {
|
||||
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
assert(!is_optimized(), "an optimized call cannot be megamorphic");
|
||||
|
||||
// Cannot rely on cached_oop. It is either an interface or a method.
|
||||
return VtableStubs::is_entry_point(ic_destination());
|
||||
}
|
||||
|
||||
bool CompiledIC::is_call_to_compiled() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
|
||||
// Use unsafe, since an inline cache might point to a zombie method. However, the zombie
|
||||
// method is guaranteed to still exist, since we only remove methods after all inline caches
|
||||
// has been cleaned up
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
|
||||
bool is_monomorphic = (cb != NULL && cb->is_nmethod());
|
||||
// Check that the cached_oop is a klass for non-optimized monomorphic calls
|
||||
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
|
||||
// for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
|
||||
#ifdef ASSERT
|
||||
#ifdef TIERED
|
||||
CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
|
||||
bool is_c1_method = caller->is_compiled_by_c1();
|
||||
#else
|
||||
#ifdef COMPILER1
|
||||
bool is_c1_method = true;
|
||||
#else
|
||||
bool is_c1_method = false;
|
||||
#endif // COMPILER1
|
||||
#endif // TIERED
|
||||
assert( is_c1_method ||
|
||||
!is_monomorphic ||
|
||||
is_optimized() ||
|
||||
(cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
|
||||
#endif // ASSERT
|
||||
return is_monomorphic;
|
||||
}
|
||||
|
||||
|
||||
bool CompiledIC::is_call_to_interpreted() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
// Call to interpreter if destination is either calling to a stub (if it
|
||||
// is optimized), or calling to an I2C blob
|
||||
bool is_call_to_interpreted = false;
|
||||
if (!is_optimized()) {
|
||||
// must use unsafe because the destination can be a zombie (and we're cleaning)
|
||||
// and the print_compiled_ic code wants to know if site (in the non-zombie)
|
||||
// is to the interpreter.
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
|
||||
is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
|
||||
assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
|
||||
} else {
|
||||
// Check if we are calling into our own codeblob (i.e., to a stub)
|
||||
CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
|
||||
address dest = ic_destination();
|
||||
#ifdef ASSERT
|
||||
{
|
||||
CodeBlob* db = CodeCache::find_blob_unsafe(dest);
|
||||
assert(!db->is_adapter_blob(), "must use stub!");
|
||||
}
|
||||
#endif /* ASSERT */
|
||||
is_call_to_interpreted = cb->contains(dest);
|
||||
}
|
||||
return is_call_to_interpreted;
|
||||
}
|
||||
|
||||
|
||||
void CompiledIC::set_to_clean() {
|
||||
assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
|
||||
if (TraceInlineCacheClearing || TraceICs) {
|
||||
tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
|
||||
print();
|
||||
}
|
||||
|
||||
address entry;
|
||||
if (is_optimized()) {
|
||||
entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
|
||||
} else {
|
||||
entry = SharedRuntime::get_resolve_virtual_call_stub();
|
||||
}
|
||||
|
||||
// A zombie transition will always be safe, since the oop has already been set to NULL, so
|
||||
// we only need to patch the destination
|
||||
bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
|
||||
|
||||
if (safe_transition) {
|
||||
if (!is_optimized()) set_cached_oop(NULL);
|
||||
// Kill any leftover stub we might have too
|
||||
if (is_in_transition_state()) {
|
||||
ICStub* old_stub = ICStub_from_destination_address(stub_address());
|
||||
old_stub->clear();
|
||||
}
|
||||
set_ic_destination(entry);
|
||||
} else {
|
||||
// Unsafe transition - create stub.
|
||||
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
|
||||
}
|
||||
// We can't check this anymore. With lazy deopt we could have already
|
||||
// cleaned this IC entry before we even return. This is possible if
|
||||
// we ran out of space in the inline cache buffer trying to do the
|
||||
// set_next and we safepointed to free up space. This is a benign
|
||||
// race because the IC entry was complete when we safepointed so
|
||||
// cleaning it immediately is harmless.
|
||||
// assert(is_clean(), "sanity check");
|
||||
}
|
||||
|
||||
|
||||
bool CompiledIC::is_clean() const {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
bool is_clean = false;
|
||||
address dest = ic_destination();
|
||||
is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
|
||||
dest == SharedRuntime::get_resolve_virtual_call_stub();
|
||||
assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
|
||||
return is_clean;
|
||||
}
|
||||
|
||||
|
||||
void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
|
||||
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
|
||||
// Updating a cache to the wrong entry can cause bugs that are very hard
|
||||
// to track down - if cache entry gets invalid - we just clean it. In
|
||||
// this way it is always the same code path that is responsible for
|
||||
// updating and resolving an inline cache
|
||||
//
|
||||
// The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
|
||||
// callsites. In addition ic_miss code will update a site to monomorphic if it determines
|
||||
// that an monomorphic call to the interpreter can now be monomorphic to compiled code.
|
||||
//
|
||||
// In both of these cases the only thing being modifed is the jump/call target and these
|
||||
// transitions are mt_safe
|
||||
|
||||
Thread *thread = Thread::current();
|
||||
if (info._to_interpreter) {
|
||||
// Call to interpreter
|
||||
if (info.is_optimized() && is_optimized()) {
|
||||
assert(is_clean(), "unsafe IC path");
|
||||
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
|
||||
// the call analysis (callee structure) specifies that the call is optimized
|
||||
// (either because of CHA or the static target is final)
|
||||
// At code generation time, this call has been emitted as static call
|
||||
// Call via stub
|
||||
assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
|
||||
CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
|
||||
methodHandle method (thread, (methodOop)info.cached_oop()());
|
||||
csc->set_to_interpreted(method, info.entry());
|
||||
if (TraceICs) {
|
||||
ResourceMark rm(thread);
|
||||
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
|
||||
instruction_address(),
|
||||
method->print_value_string());
|
||||
}
|
||||
} else {
|
||||
// Call via method-klass-holder
|
||||
assert(info.cached_oop().not_null(), "must be set");
|
||||
InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
|
||||
|
||||
if (TraceICs) {
|
||||
ResourceMark rm(thread);
|
||||
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Call to compiled code
|
||||
bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
|
||||
#ifdef ASSERT
|
||||
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
|
||||
assert (cb->is_nmethod(), "must be compiled!");
|
||||
#endif /* ASSERT */
|
||||
|
||||
// This is MT safe if we come from a clean-cache and go through a
|
||||
// non-verified entry point
|
||||
bool safe = SafepointSynchronize::is_at_safepoint() ||
|
||||
(!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
|
||||
|
||||
if (!safe) {
|
||||
InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
|
||||
} else {
|
||||
set_ic_destination(info.entry());
|
||||
if (!is_optimized()) set_cached_oop(info.cached_oop()());
|
||||
}
|
||||
|
||||
if (TraceICs) {
|
||||
ResourceMark rm(thread);
|
||||
assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
|
||||
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
|
||||
instruction_address(),
|
||||
((klassOop)info.cached_oop()())->print_value_string(),
|
||||
(safe) ? "" : "via stub");
|
||||
}
|
||||
}
|
||||
// We can't check this anymore. With lazy deopt we could have already
|
||||
// cleaned this IC entry before we even return. This is possible if
|
||||
// we ran out of space in the inline cache buffer trying to do the
|
||||
// set_next and we safepointed to free up space. This is a benign
|
||||
// race because the IC entry was complete when we safepointed so
|
||||
// cleaning it immediately is harmless.
|
||||
// assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
|
||||
}
|
||||
|
||||
|
||||
// is_optimized: Compiler has generated an optimized call (i.e., no inline
|
||||
// cache) static_bound: The call can be static bound (i.e, no need to use
|
||||
// inline cache)
|
||||
void CompiledIC::compute_monomorphic_entry(methodHandle method,
|
||||
KlassHandle receiver_klass,
|
||||
bool is_optimized,
|
||||
bool static_bound,
|
||||
CompiledICInfo& info,
|
||||
TRAPS) {
|
||||
info._is_optimized = is_optimized;
|
||||
|
||||
nmethod* method_code = method->code();
|
||||
address entry = NULL;
|
||||
if (method_code != NULL) {
|
||||
// Call to compiled code
|
||||
if (static_bound || is_optimized) {
|
||||
entry = method_code->verified_entry_point();
|
||||
} else {
|
||||
entry = method_code->entry_point();
|
||||
}
|
||||
}
|
||||
if (entry != NULL) {
|
||||
// Call to compiled code
|
||||
info._entry = entry;
|
||||
if (static_bound || is_optimized) {
|
||||
info._cached_oop = Handle(THREAD, (oop)NULL);
|
||||
} else {
|
||||
info._cached_oop = receiver_klass;
|
||||
}
|
||||
info._to_interpreter = false;
|
||||
} else {
|
||||
// Note: the following problem exists with Compiler1:
|
||||
// - at compile time we may or may not know if the destination is final
|
||||
// - if we know that the destination is final, we will emit an optimized
|
||||
// virtual call (no inline cache), and need a methodOop to make a call
|
||||
// to the interpreter
|
||||
// - if we do not know if the destination is final, we emit a standard
|
||||
// virtual call, and use CompiledICHolder to call interpreted code
|
||||
// (no static call stub has been generated)
|
||||
// However in that case we will now notice it is static_bound
|
||||
// and convert the call into what looks to be an optimized
|
||||
// virtual call. This causes problems in verifying the IC because
|
||||
// it look vanilla but is optimized. Code in is_call_to_interpreted
|
||||
// is aware of this and weakens its asserts.
|
||||
|
||||
info._to_interpreter = true;
|
||||
// static_bound should imply is_optimized -- otherwise we have a
|
||||
// performance bug (statically-bindable method is called via
|
||||
// dynamically-dispatched call note: the reverse implication isn't
|
||||
// necessarily true -- the call may have been optimized based on compiler
|
||||
// analysis (static_bound is only based on "final" etc.)
|
||||
#ifdef COMPILER2
|
||||
#ifdef TIERED
|
||||
#if defined(ASSERT)
|
||||
// can't check the assert because we don't have the CompiledIC with which to
|
||||
// find the address if the call instruction.
|
||||
//
|
||||
// CodeBlob* cb = find_blob_unsafe(instruction_address());
|
||||
// assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
|
||||
#endif // ASSERT
|
||||
#else
|
||||
assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
|
||||
#endif // TIERED
|
||||
#endif // COMPILER2
|
||||
if (is_optimized) {
|
||||
// Use stub entry
|
||||
info._entry = method()->get_c2i_entry();
|
||||
info._cached_oop = method;
|
||||
} else {
|
||||
// Use mkh entry
|
||||
oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
|
||||
info._cached_oop = Handle(THREAD, holder);
|
||||
info._entry = method()->get_c2i_unverified_entry();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
|
||||
address first_oop = NULL;
|
||||
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
|
||||
CodeBlob *code1 = code;
|
||||
return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
|
||||
}
|
||||
|
||||
CompiledIC::CompiledIC(NativeCall* ic_call)
|
||||
: _ic_call(ic_call),
|
||||
_oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
CompiledIC::CompiledIC(Relocation* ic_reloc)
|
||||
: _ic_call(nativeCall_at(ic_reloc->addr())),
|
||||
_oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
|
||||
{
|
||||
assert(ic_reloc->type() == relocInfo::virtual_call_type ||
|
||||
ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
|
||||
}
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Reset this static call site so the next invocation goes back through the
// shared resolver stub, forcing re-resolution on next use.
void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}
|
||||
|
||||
|
||||
bool CompiledStaticCall::is_clean() const {
|
||||
return destination() == SharedRuntime::get_resolve_static_call_stub();
|
||||
}
|
||||
|
||||
bool CompiledStaticCall::is_call_to_compiled() const {
|
||||
return CodeCache::contains(destination());
|
||||
}
|
||||
|
||||
|
||||
bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted, if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  // NOTE(review): assumes the call site lies inside an nmethod (nm != NULL);
  // presumably guaranteed by callers -- confirm before relying on it.
  return nm->stub_contains(destination());
}
|
||||
|
||||
|
||||
// Redirect this call site to the interpreter: fill in the out-of-line stub
// (methodOop load + jump to the c2i entry) and then MT-safely point the
// call at that stub.
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub=find_stub();
  assert(stub!=NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // The stub may only be patched when clean (data 0 / dest -1) or re-patched
  // to the identical values; anything else indicates a race.
  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}
|
||||
|
||||
|
||||
// Install the resolved target previously computed by compute_entry.
// The site must be clean: stale entries are never patched over directly.
void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}
|
||||
|
||||
|
||||
// Compute settings for a CompiledStaticCall. Since we might have to set
|
||||
// the stub when calling to the interpreter, we need to return arguments.
|
||||
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
|
||||
nmethod* m_code = m->code();
|
||||
info._callee = m;
|
||||
if (m_code != NULL) {
|
||||
info._to_interpreter = false;
|
||||
info._entry = m_code->verified_entry_point();
|
||||
} else {
|
||||
// Callee is interpreted code. In any case entering the interpreter
|
||||
// puts a converter-frame on the stack to save arguments.
|
||||
info._to_interpreter = true;
|
||||
info._entry = m()->get_c2i_entry();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Reset a call stub to its clean state: no cached methodOop (data 0) and
// the sentinel jump target -1.  Counterpart of set_to_interpreted.
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub!=NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}
|
||||
|
||||
|
||||
// Locate the out-of-line stub belonging to this call site by scanning the
// relocation info covering the call address.  Returns NULL if no relocation
// entry matches the call address at all.
address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}
|
||||
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Non-product mode code
|
||||
#ifndef PRODUCT
|
||||
|
||||
void CompiledIC::verify() {
|
||||
// make sure code pattern is actually a call imm32 instruction
|
||||
_ic_call->verify();
|
||||
if (os::is_MP()) {
|
||||
_ic_call->verify_alignment();
|
||||
}
|
||||
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
|
||||
|| is_optimized() || is_megamorphic(), "sanity check");
|
||||
}
|
||||
|
||||
|
||||
// Print a one-line description of this inline cache, newline-terminated.
void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}
|
||||
|
||||
|
||||
// Print a one-line description of this inline cache (no trailing newline).
void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}
|
||||
|
||||
|
||||
void CompiledStaticCall::print() {
|
||||
tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
|
||||
if (is_clean()) {
|
||||
tty->print("clean");
|
||||
} else if (is_call_to_compiled()) {
|
||||
tty->print("compiled");
|
||||
} else if (is_call_to_interpreted()) {
|
||||
tty->print("interpreted");
|
||||
}
|
||||
tty->cr();
|
||||
}
|
||||
|
||||
// Debug-only consistency check of the call instruction, its alignment,
// its out-of-line stub, and the overall call-site state.
void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub (the nativeXXX_at views are unused afterwards; constructing
  // them verifies the raw instruction patterns).
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
|
||||
|
||||
#endif
|
241
hotspot/src/share/vm/code/compiledIC.hpp
Normal file
241
hotspot/src/share/vm/code/compiledIC.hpp
Normal file
|
@ -0,0 +1,241 @@
|
|||
/*
|
||||
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// The CompiledIC represents a compiled inline cache.
|
||||
//
|
||||
// In order to make patching of the inline cache MT-safe, we only allow the following
|
||||
// transitions (when not at a safepoint):
|
||||
//
|
||||
//
|
||||
// [1] --<-- Clean -->--- [1]
|
||||
// / (null) \
|
||||
// / \ /-<-\
|
||||
// / [2] \ / \
|
||||
// Interpreted ---------> Monomorphic | [3]
|
||||
// (compiledICHolderOop) (klassOop) |
|
||||
// \ / \ /
|
||||
// [4] \ / [4] \->-/
|
||||
// \->- Megamorphic -<-/
|
||||
// (methodOop)
|
||||
//
|
||||
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
|
||||
//
|
||||
// The numbers in square brackets refer to the kind of transition:
|
||||
// [1]: Initial fixup. Receiver is found from debug information
|
||||
// [2]: Compilation of a method
|
||||
// [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
|
||||
// [4]: Inline cache miss. We go directly to megamorphic call.
|
||||
//
|
||||
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
|
||||
// transition is made to a stub.
|
||||
//
|
||||
class CompiledIC;
|
||||
|
||||
// Result bundle of CompiledIC::compute_monomorphic_entry: describes the
// entry point and cached oop needed to make an inline cache monomorphic.
class CompiledICInfo {
  friend class CompiledIC;
 private:
  address _entry;              // entry point for call
  Handle  _cached_oop;         // Value of cached_oop (either in stub or inline cache)
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call it to interpreter
 public:
  address entry() const        { return _entry; }
  Handle  cached_oop() const   { return _cached_oop; }
  bool    is_optimized() const { return _is_optimized; }
};
|
||||
|
||||
// Resource-allocated view of one inline-cache call site: the call
// instruction plus the patchable set-oop instruction(s) feeding it.
class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;


 private:
  NativeCall*   _ic_call;       // the call instruction
  oop*          _oop_addr;      // patchable oop cell for this IC
  RelocIterator _oops;          // iteration over any and all set-oop instructions
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(NativeCall* ic_call);
  CompiledIC(Relocation* ic_reloc);    // Must be of virtual_call_type/opt_virtual_call_type

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
  // changes to a transition stub.
  void set_ic_destination(address entry_point);
  void set_cached_oop(oop cache);

  // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(address return_addr);
  friend CompiledIC* CompiledIC_at(address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);

  // Return the cached_oop/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  oop     cached_oop() const;
  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // to manipulate the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean();  // Can only be called during a safepoint operation
  void set_to_monomorphic(const CompiledICInfo& info);
  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};
|
||||
|
||||
// Build (and sanity-check) a CompiledIC for the call preceding return_addr.
inline CompiledIC* CompiledIC_before(address return_addr) {
  CompiledIC* ic = new CompiledIC(nativeCall_before(return_addr));
  ic->verify();
  return ic;
}

// Build (and sanity-check) a CompiledIC for the call at call_site.
inline CompiledIC* CompiledIC_at(address call_site) {
  CompiledIC* ic = new CompiledIC(nativeCall_at(call_site));
  ic->verify();
  return ic;
}

// Build (and sanity-check) a CompiledIC from relocation info.
inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  CompiledIC* ic = new CompiledIC(call_site);
  ic->verify();
  return ic;
}
|
||||
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// The CompiledStaticCall represents a call to a static method in the compiled
|
||||
//
|
||||
// Transition diagram of a static call site is somewhat simpler than for an inlined cache:
|
||||
//
|
||||
//
|
||||
// -----<----- Clean ----->-----
|
||||
// / \
|
||||
// / \
|
||||
// compiled code <------------> interpreted code
|
||||
//
|
||||
// Clean: Calls directly to runtime method for fixup
|
||||
// Compiled code: Calls directly to compiled code
|
||||
// Interpreted code: Calls to stub that set methodOop reference
|
||||
//
|
||||
//
|
||||
class CompiledStaticCall;
|
||||
|
||||
// Result bundle of CompiledStaticCall::compute_entry: the resolved entry
// point, plus the callee method when the target is the interpreter.
class StaticCallInfo {
 private:
  address      _entry;           // Entrypoint
  methodHandle _callee;          // Callee (used when calling interpreter)
  bool         _to_interpreter;  // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const  { return _entry;  }
  methodHandle callee() const { return _callee; }
};
|
||||
|
||||
|
||||
// NativeCall view of a compiled static (or optimized-virtual) call site,
// together with its out-of-line stub used for calls into the interpreter.
class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
  // a OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};
|
||||
|
||||
|
||||
// Interpret the native call preceding return_addr as a CompiledStaticCall.
inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* call = (CompiledStaticCall*)nativeCall_before(return_addr);
  call->verify();
  return call;
}

// Interpret the instruction at native_call as a CompiledStaticCall.
inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* call = (CompiledStaticCall*)native_call;
  call->verify();
  return call;
}

// Same, but the call site is located through relocation info.
inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}
|
281
hotspot/src/share/vm/code/compressedStream.cpp
Normal file
281
hotspot/src/share/vm/code/compressedStream.cpp
Normal file
|
@ -0,0 +1,281 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_compressedStream.cpp.incl"
|
||||
|
||||
// 32-bit one-to-one sign encoding taken from Pack200
// converts leading sign bits into leading zeroes with trailing sign bit
// (zigzag: 0,-1,1,-2,2,... map to 0,1,2,3,4,...)
inline juint CompressedStream::encode_sign(jint value) {
  // relies on arithmetic (sign-extending) >> of jint
  return (value << 1) ^ (value >> 31);
}
// Inverse of encode_sign.
inline jint CompressedStream::decode_sign(juint value) {
  return (value >> 1) ^ -(jint)(value & 1);
}
||||
|
||||
// 32-bit self-inverse encoding of float bits
// converts trailing zeroes (common in floats) to leading zeroes
inline juint CompressedStream::reverse_int(juint i) {
  // Hacker's Delight, Figure 7-1: bit reversal by swapping ever-larger groups
  i = (i & 0x55555555) << 1 | (i >> 1) & 0x55555555;   // swap adjacent bits
  i = (i & 0x33333333) << 2 | (i >> 2) & 0x33333333;   // swap bit pairs
  i = (i & 0x0f0f0f0f) << 4 | (i >> 4) & 0x0f0f0f0f;   // swap nibbles
  i = (i << 24) | ((i & 0xff00) << 8) | ((i >> 8) & 0xff00) | (i >> 24);  // swap bytes
  return i;
}
|
||||
|
||||
|
||||
// Read a SIGNED5-encoded value: UNSIGNED5 payload followed by zigzag decode.
jint CompressedReadStream::read_signed_int() {
  juint encoded = read_int();
  return decode_sign(encoded);
}
|
||||
|
||||
// Compressing floats is simple, because the only common pattern
|
||||
// is trailing zeroes. (Compare leading sign bits on ints.)
|
||||
// Since floats are left-justified, as opposed to right-justified
|
||||
// ints, we can bit-reverse them in order to take advantage of int
|
||||
// compression.
|
||||
|
||||
// Floats are stored bit-reversed (their common trailing zeroes compress
// well as leading zeroes); undo the reversal and reinterpret the bits.
jfloat CompressedReadStream::read_float() {
  jint bits = (jint) reverse_int(read_int());
  return jfloat_cast(bits);
}
|
||||
|
||||
// Doubles are stored as two independently bit-reversed 32-bit halves,
// high half first.
jdouble CompressedReadStream::read_double() {
  jint high_bits = (jint) reverse_int(read_int());
  jint low_bits  = (jint) reverse_int(read_int());
  return jdouble_cast(jlong_from(high_bits, low_bits));
}
|
||||
|
||||
// Longs are stored as two SIGNED5 ints, low half first (see write_long).
jlong CompressedReadStream::read_long() {
  jint lo = read_signed_int();
  jint hi = read_signed_int();
  return jlong_from(hi, lo);
}
|
||||
|
||||
// Construct a growable write stream backed by a resource-area buffer of
// the given initial capacity, positioned at the start.
CompressedWriteStream::CompressedWriteStream(int initial_size) : CompressedStream(NULL, 0) {
  _buffer   = NEW_RESOURCE_ARRAY(u_char, initial_size);
  _size     = initial_size;
  _position = 0;
}
|
||||
|
||||
void CompressedWriteStream::grow() {
|
||||
u_char* _new_buffer = NEW_RESOURCE_ARRAY(u_char, _size * 2);
|
||||
memcpy(_new_buffer, _buffer, _position);
|
||||
_buffer = _new_buffer;
|
||||
_size = _size * 2;
|
||||
}
|
||||
|
||||
// Write a signed value: zigzag-encode, then emit as UNSIGNED5.
void CompressedWriteStream::write_signed_int(jint value) {
  // this encoding, called SIGNED5, is taken from Pack200
  write_int(encode_sign(value));
}
|
||||
|
||||
// Write a float as the bit-reversal of its raw bits: trailing zero bits
// (common in floats) become leading zeroes, which UNSIGNED5 compresses well.
void CompressedWriteStream::write_float(jfloat value) {
  juint f = jint_cast(value);
  juint rf = reverse_int(f);
  assert(f == reverse_int(rf), "can re-read same bits");
  write_int(rf);
}
|
||||
|
||||
// Write a double as two independently bit-reversed 32-bit halves,
// high half first (matches CompressedReadStream::read_double).
void CompressedWriteStream::write_double(jdouble value) {
  juint h  = high(jlong_cast(value));
  juint l  = low( jlong_cast(value));
  juint rh = reverse_int(h);
  juint rl = reverse_int(l);
  assert(h == reverse_int(rh), "can re-read same bits");
  assert(l == reverse_int(rl), "can re-read same bits");
  write_int(rh);
  write_int(rl);
}
|
||||
|
||||
// Write a long as two SIGNED5 ints, low half first
// (matches CompressedReadStream::read_long).
void CompressedWriteStream::write_long(jlong value) {
  write_signed_int(low(value));
  write_signed_int(high(value));
}
|
||||
|
||||
|
||||
/// The remaining details
|
||||
|
||||
#ifndef PRODUCT
|
||||
// set this to trigger unit test
|
||||
void test_compressed_stream(int trace);
|
||||
bool test_compressed_stream_enabled = false;
|
||||
#endif
|
||||
|
||||
// This encoding, called UNSIGNED5, is taken from J2SE Pack200.
|
||||
// It assumes that most values have lots of leading zeroes.
|
||||
// Very small values, in the range [0..191], code in one byte.
|
||||
// Any 32-bit value (including negatives) can be coded, in
|
||||
// up to five bytes. The grammar is:
|
||||
// low_byte = [0..191]
|
||||
// high_byte = [192..255]
|
||||
// any_byte = low_byte | high_byte
|
||||
// coding = low_byte
|
||||
// | high_byte low_byte
|
||||
// | high_byte high_byte low_byte
|
||||
// | high_byte high_byte high_byte low_byte
|
||||
// | high_byte high_byte high_byte high_byte any_byte
|
||||
// Each high_byte contributes six bits of payload.
|
||||
// The encoding is one-to-one (except for integer overflow)
|
||||
// and easy to parse and unparse.
|
||||
|
||||
// Decode the 2..5-byte cases of UNSIGNED5.  b0 is the first byte of the
// encoding (a "high code", >= L) and has already been consumed; the stream
// position is just past it.  Each subsequent high byte contributes lg_H (6)
// more payload bits; a low byte (or the 5th byte) terminates the value.
jint CompressedReadStream::read_int_mb(jint b0) {
  int     pos = position() - 1;   // index of b0 itself
  u_char* buf = buffer() + pos;
  assert(buf[0] == b0 && b0 >= L, "correctly called");
  jint sum = b0;
  // must collect more bytes: b[1]...b[4]
  int lg_H_i = lg_H;
  for (int i = 0; ; ) {
    jint b_i = buf[++i];    // b_i = read(); ++i;
    sum += b_i << lg_H_i;   // sum += b[i]*(64**i)
    if (b_i < L || i == MAX_i) {
      set_position(pos+i+1);
      return sum;
    }
    lg_H_i += lg_H;
  }
}
|
||||
|
||||
// Encode value in UNSIGNED5 (1..5 bytes): emit "high codes" (>= L) that each
// carry lg_H (6) payload bits until the remainder fits a "low code" (< L),
// or until the 5th byte, which may hold any byte value.
void CompressedWriteStream::write_int_mb(jint value) {
  debug_only(int pos1 = position());
  juint sum = value;
  for (int i = 0; ; ) {
    if (sum < L || i == MAX_i) {
      // remainder is either a "low code" or the 5th byte
      assert(sum == (u_char)sum, "valid byte");
      write((u_char)sum);
      break;
    }
    sum -= L;
    int b_i = L + (sum % H);  // this is a "high code"
    sum >>= lg_H;             // extracted 6 bits
    write(b_i); ++i;
  }

#ifndef PRODUCT
  if (test_compressed_stream_enabled) {  // hack to enable this stress test
    test_compressed_stream_enabled = false;
    test_compressed_stream(0);
  }
#endif
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
/// a unit test (can be run by hand from a debugger)
|
||||
|
||||
// Avoid a VS2005 compiler stack overflow w/ fastdebug build.
|
||||
// The following pragma optimize turns off optimization ONLY
|
||||
// for this block (a matching directive turns it back on later).
|
||||
// These directives can be removed once the MS VS.NET 2005
|
||||
// compiler stack overflow is fixed.
|
||||
#if _MSC_VER >=1400 && !defined(_WIN64)
|
||||
#pragma optimize("", off)
|
||||
#endif
|
||||
|
||||
// generator for an "interesting" set of critical values
|
||||
// generator for an "interesting" set of critical values
enum { stretch_limit = (1<<16) * (64-16+1) };
// Map a small test index x onto a value spread across the full width of a
// 'bits'-wide integer, exercising high bits, low bits, and sign extension.
static jlong stretch(jint x, int bits) {
  // put x[high 4] into place
  jlong h = (jlong)((x >> (16-4))) << (bits - 4);
  // put x[low 12] into place, sign extended
  jlong l = ((jlong)x << (64-12)) >> (64-12);
  // move l upwards, maybe
  l <<= (x >> 16);
  return h ^ l;
}
|
||||
|
||||
// Round-trip stress test: write bytes/ints/signed ints/floats/longs/doubles
// for the full "stretch" value set, then read everything back and compare.
// trace > 0 prints every trace-th step; failures are counted and checked by
// guarantee at the end.  Run by hand from a debugger, or via the
// test_compressed_stream_enabled hook in write_int_mb.
void test_compressed_stream(int trace) {
  CompressedWriteStream bytes(stretch_limit * 100);
  jint n;
  int step = 0, fails = 0;
  // Compare one decoded value y against the expected x; tracks the encoded
  // length via pos/lastpos (declared just before the decode loops below).
#define CHECKXY(x, y, fmt) { \
    ++step; \
    int xlen = (pos = decode.position()) - lastpos;  lastpos = pos; \
    if (trace > 0 && (step % trace) == 0) { \
      tty->print_cr("step %d, n=%08x: value=" fmt " (len=%d)", \
                    step, n, x, xlen); } \
    if (x != y) { \
      tty->print_cr("step %d, n=%d: " fmt " != " fmt, step, n, x, y); \
      fails++; \
    } }
  for (n = 0; n < (1<<8); n++) {
    jbyte x = (jbyte)n;
    bytes.write_byte(x); ++step;
  }
  for (n = 0; n < stretch_limit; n++) {
    jint x = (jint)stretch(n, 32);
    bytes.write_int(x); ++step;
    bytes.write_signed_int(x); ++step;
    bytes.write_float(jfloat_cast(x)); ++step;
  }
  for (n = 0; n < stretch_limit; n++) {
    jlong x = stretch(n, 64);
    bytes.write_long(x); ++step;
    bytes.write_double(jdouble_cast(x)); ++step;
  }
  int length = bytes.position();
  if (trace != 0)
    tty->print_cr("set up test of %d stream values, size %d", step, length);
  step = 0;
  // now decode it all
  CompressedReadStream decode(bytes.buffer());
  int pos, lastpos = decode.position();
  for (n = 0; n < (1<<8); n++) {
    jbyte x = (jbyte)n;
    jbyte y = decode.read_byte();
    CHECKXY(x, y, "%db");
  }
  for (n = 0; n < stretch_limit; n++) {
    jint x = (jint)stretch(n, 32);
    jint y1 = decode.read_int();
    CHECKXY(x, y1, "%du");
    jint y2 = decode.read_signed_int();
    CHECKXY(x, y2, "%di");
    jint y3 = jint_cast(decode.read_float());
    CHECKXY(x, y3, "%df");
  }
  for (n = 0; n < stretch_limit; n++) {
    jlong x = stretch(n, 64);
    jlong y1 = decode.read_long();
    CHECKXY(x, y1, INT64_FORMAT "l");
    jlong y2 = jlong_cast(decode.read_double());
    CHECKXY(x, y2, INT64_FORMAT "d");
  }
  int length2 = decode.position();
  if (trace != 0)
    tty->print_cr("finished test of %d stream values, size %d", step, length2);
  guarantee(length == length2, "bad length");
  guarantee(fails == 0, "test failures");
}
|
||||
|
||||
#if _MSC_VER >=1400 && !defined(_WIN64)
|
||||
#pragma optimize("", on)
|
||||
#endif
|
||||
|
||||
#endif // PRODUCT
|
120
hotspot/src/share/vm/code/compressedStream.hpp
Normal file
120
hotspot/src/share/vm/code/compressedStream.hpp
Normal file
|
@ -0,0 +1,120 @@
|
|||
/*
|
||||
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// Simple interface for filing out and filing in basic types
|
||||
// Used for writing out and reading in debugging information.
|
||||
|
||||
// Base of the compressed debug-info streams: a byte buffer plus a cursor,
// and the shared constants/helpers for the Pack200 UNSIGNED5/SIGNED5 and
// bit-reversed float encodings.
class CompressedStream : public ResourceObj {
  friend class VMStructs;
 protected:
  u_char* _buffer;    // backing byte array (resource-allocated, not owned)
  int     _position;  // current read/write index into _buffer

  enum {
    // Constants for UNSIGNED5 coding of Pack200
    lg_H = 6, H = 1<<lg_H,    // number of high codes (64)
    L = (1<<BitsPerByte)-H,   // number of low codes (192)
    MAX_i = 4                 // bytes are numbered in (0..4), max 5 bytes
  };

  // these inlines are defined only in compressedStream.cpp
  static inline juint encode_sign(jint  value);  // for Pack200 SIGNED5
  static inline jint  decode_sign(juint value);  // for Pack200 SIGNED5
  static inline juint reverse_int(juint bits);   // to trim trailing float 0's

 public:
  CompressedStream(u_char* buffer, int position = 0) {
    _buffer   = buffer;
    _position = position;
  }

  u_char* buffer() const          { return _buffer; }

  // Positioning
  int  position() const           { return _position; }
  void set_position(int position) { _position = position; }
};
|
||||
|
||||
|
||||
// Read-side stream: decodes values previously written by
// CompressedWriteStream, advancing _position as it goes.
class CompressedReadStream : public CompressedStream {
 private:
  inline u_char read()        { return _buffer[_position++]; }

  jint read_int_mb(jint b0);  // UNSIGNED5 coding, 2-5 byte cases

 public:
  CompressedReadStream(u_char* buffer, int position = 0)
  : CompressedStream(buffer, position) {}

  jboolean read_bool()        { return (jboolean) read();      }
  jbyte    read_byte()        { return (jbyte   ) read();      }
  jchar    read_char()        { return (jchar   ) read_int();  }
  jshort   read_short()       { return (jshort  ) read_signed_int(); }
  // One-byte fast path for values < L; otherwise the multi-byte decoder.
  jint     read_int()         { jint b0 = read();
                                if (b0 < L)  return b0;
                                else         return read_int_mb(b0);
                              }
  jint     read_signed_int();
  jfloat   read_float();      // jfloat_cast(reverse_int(read_int()))
  jdouble  read_double();     // jdouble_cast(2*reverse_int(read_int))
  jlong    read_long();       // jlong_from(2*read_signed_int())
};
|
||||
|
||||
|
||||
// Write-side stream: encodes values with the UNSIGNED5/SIGNED5 and
// bit-reversed float schemes into a growable resource-area buffer.
class CompressedWriteStream : public CompressedStream {
 private:
  bool full() {
    return _position >= _size;
  }
  void store(u_char b) {
    _buffer[_position++] = b;
  }
  void write(u_char b) {
    if (full()) grow();
    store(b);
  }
  void grow();  // doubles the buffer capacity

  void write_int_mb(jint value);  // UNSIGNED5 coding, 1-5 byte cases

 protected:
  int _size;  // current capacity of _buffer in bytes

 public:
  CompressedWriteStream(int initial_size);
  CompressedWriteStream(u_char* buffer, int initial_size, int position = 0)
  : CompressedStream(buffer, position) { _size = initial_size; }

  void write_bool(jboolean value)   { write(value);            }
  void write_byte(jbyte value)      { write(value);            }
  void write_char(jchar value)      { write_int(value);        }
  void write_short(jshort value)    { write_signed_int(value); }
  // One-byte fast path for small values when the buffer has room.
  void write_int(jint value)        { if ((juint)value < L && !full())
                                        store((u_char)value);
                                      else  write_int_mb(value); }
  void write_signed_int(jint value);  // write_int(encode_sign(value))
  void write_float(jfloat value);     // write_int(reverse_int(jint_cast(v)))
  void write_double(jdouble value);   // write_int(reverse_int(<low,high>))
  void write_long(jlong value);       // write_signed_int(<low,high>)
};
|
252
hotspot/src/share/vm/code/debugInfo.cpp
Normal file
252
hotspot/src/share/vm/code/debugInfo.cpp
Normal file
|
@ -0,0 +1,252 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_debugInfo.cpp.incl"
|
||||
|
||||
// Constructors
|
||||
|
||||
// Construct a write stream bound to the recorder whose OopRecorder is
// used to turn oops into stream indices.
DebugInfoWriteStream::DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size)
: CompressedWriteStream(initial_size) {
  _recorder = recorder;
}

// Serializing oops

// Oops are not written directly: the handle is interned in the
// recorder's OopRecorder and only its index goes into the stream.
void DebugInfoWriteStream::write_handle(jobject h) {
  write_int(recorder()->oop_recorder()->find_index(h));
}
|
||||
|
||||
// Decode a full ObjectValue definition (OBJECT_CODE case) and register
// it in the object pool so later OBJECT_ID_CODE back-references can
// find it.  The id must not already be present in the pool.
ScopeValue* DebugInfoReadStream::read_object_value() {
  int id = read_int();
#ifdef ASSERT
  assert(_obj_pool != NULL, "object pool does not exist");
  for (int i = _obj_pool->length() - 1; i >= 0; i--) {
    assert(((ObjectValue*) _obj_pool->at(i))->id() != id, "should not be read twice");
  }
#endif
  ObjectValue* result = new ObjectValue(id);
  // Append before reading fields: a field may (indirectly) refer back
  // to this object by id.
  _obj_pool->append(result);
  result->read_object(this);
  return result;
}

// Resolve an OBJECT_ID_CODE back-reference: look up a previously read
// ObjectValue by its id.  The id is guaranteed to be in the pool.
ScopeValue* DebugInfoReadStream::get_cached_object() {
  int id = read_int();
  assert(_obj_pool != NULL, "object pool does not exist");
  for (int i = _obj_pool->length() - 1; i >= 0; i--) {
    ObjectValue* sv = (ObjectValue*) _obj_pool->at(i);
    if (sv->id() == id) {
      return sv;
    }
  }
  ShouldNotReachHere();
  return NULL;
}
|
||||
|
||||
// Serializing scope values
|
||||
|
||||
// Tag bytes that prefix each serialized ScopeValue.  These values are
// part of the debug-info wire format and must not be renumbered.
enum { LOCATION_CODE = 0, CONSTANT_INT_CODE = 1,  CONSTANT_OOP_CODE = 2,
                          CONSTANT_LONG_CODE = 3, CONSTANT_DOUBLE_CODE = 4,
                          OBJECT_CODE = 5,        OBJECT_ID_CODE = 6 };

// Factory: read one tag byte and dispatch to the matching ScopeValue
// subclass constructor (or to the object pool for back-references).
ScopeValue* ScopeValue::read_from(DebugInfoReadStream* stream) {
  ScopeValue* result = NULL;
  switch(stream->read_int()) {
   case LOCATION_CODE:        result = new LocationValue(stream);        break;
   case CONSTANT_INT_CODE:    result = new ConstantIntValue(stream);     break;
   case CONSTANT_OOP_CODE:    result = new ConstantOopReadValue(stream); break;
   case CONSTANT_LONG_CODE:   result = new ConstantLongValue(stream);    break;
   case CONSTANT_DOUBLE_CODE: result = new ConstantDoubleValue(stream);  break;
   case OBJECT_CODE:          result = stream->read_object_value();      break;
   case OBJECT_ID_CODE:       result = stream->get_cached_object();      break;
   default: ShouldNotReachHere();
  }
  return result;
}
|
||||
|
||||
// LocationValue
|
||||
|
||||
// Deserialize: the Location itself knows how to read its encoding.
LocationValue::LocationValue(DebugInfoReadStream* stream) {
  _location = Location(stream);
}

// Serialize as the LOCATION_CODE tag followed by the Location encoding.
void LocationValue::write_on(DebugInfoWriteStream* stream) {
  stream->write_int(LOCATION_CODE);
  location().write_on(stream);
}

void LocationValue::print_on(outputStream* st) const {
  location().print_on(st);
}
|
||||
|
||||
// ObjectValue
|
||||
|
||||
// Fill in klass and field values from the stream.  The layout read
// here must mirror the OBJECT_CODE branch of write_on() below
// (klass, field count, then each field value).
void ObjectValue::read_object(DebugInfoReadStream* stream) {
  _klass = read_from(stream);
  assert(_klass->is_constant_oop(), "should be constant klass oop");
  int length = stream->read_int();
  for (int i = 0; i < length; i++) {
    ScopeValue* val = read_from(stream);
    _field_values.append(val);
  }
}

// Serialize this object.  The first occurrence is written in full
// (OBJECT_CODE); any subsequent occurrence within the same stream is
// written as a compact back-reference by id (OBJECT_ID_CODE).
void ObjectValue::write_on(DebugInfoWriteStream* stream) {
  if (_visited) {
    stream->write_int(OBJECT_ID_CODE);
    stream->write_int(_id);
  } else {
    _visited = true;
    stream->write_int(OBJECT_CODE);
    stream->write_int(_id);
    _klass->write_on(stream);
    int length = _field_values.length();
    stream->write_int(length);
    for (int i = 0; i < length; i++) {
      _field_values.at(i)->write_on(stream);
    }
  }
}

void ObjectValue::print_on(outputStream* st) const {
  st->print("obj[%d]", _id);
}

// Print the field values as a comma-separated list (debug builds only).
void ObjectValue::print_fields_on(outputStream* st) const {
#ifndef PRODUCT
  if (_field_values.length() > 0) {
    _field_values.at(0)->print_on(st);
  }
  for (int i = 1; i < _field_values.length(); i++) {
    st->print(", ");
    _field_values.at(i)->print_on(st);
  }
#endif
}
|
||||
|
||||
// ConstantIntValue
|
||||
|
||||
// Deserialize: the value was written with the signed (zigzag) encoding.
ConstantIntValue::ConstantIntValue(DebugInfoReadStream* stream) {
  _value = stream->read_signed_int();
}

// Serialize as the CONSTANT_INT_CODE tag plus the signed value.
void ConstantIntValue::write_on(DebugInfoWriteStream* stream) {
  stream->write_int(CONSTANT_INT_CODE);
  stream->write_signed_int(value());
}

void ConstantIntValue::print_on(outputStream* st) const {
  st->print("%d", value());
}
|
||||
|
||||
// ConstantLongValue
|
||||
|
||||
// Deserialize a 64-bit constant (written as two signed halves).
ConstantLongValue::ConstantLongValue(DebugInfoReadStream* stream) {
  _value = stream->read_long();
}

// Serialize as the CONSTANT_LONG_CODE tag plus the long value.
void ConstantLongValue::write_on(DebugInfoWriteStream* stream) {
  stream->write_int(CONSTANT_LONG_CODE);
  stream->write_long(value());
}

void ConstantLongValue::print_on(outputStream* st) const {
  st->print(INT64_FORMAT, value());
}
|
||||
|
||||
// ConstantDoubleValue
|
||||
|
||||
// Deserialize a double constant (written as its raw bit pattern).
ConstantDoubleValue::ConstantDoubleValue(DebugInfoReadStream* stream) {
  _value = stream->read_double();
}

// Serialize as the CONSTANT_DOUBLE_CODE tag plus the double value.
void ConstantDoubleValue::write_on(DebugInfoWriteStream* stream) {
  stream->write_int(CONSTANT_DOUBLE_CODE);
  stream->write_double(value());
}

void ConstantDoubleValue::print_on(outputStream* st) const {
  st->print("%f", value());
}
|
||||
|
||||
// ConstantOopWriteValue
|
||||
|
||||
// Serialize as the CONSTANT_OOP_CODE tag plus the interned oop index
// (write_handle records the handle in the OopRecorder).
void ConstantOopWriteValue::write_on(DebugInfoWriteStream* stream) {
  stream->write_int(CONSTANT_OOP_CODE);
  stream->write_handle(value());
}

void ConstantOopWriteValue::print_on(outputStream* st) const {
  JNIHandles::resolve(value())->print_value_on(st);
}
|
||||
|
||||
|
||||
// ConstantOopReadValue
|
||||
|
||||
// Deserialize: resolve the oop index written by ConstantOopWriteValue
// against the nmethod's oop table.
ConstantOopReadValue::ConstantOopReadValue(DebugInfoReadStream* stream) {
  _value = Handle(stream->read_oop());
}

// Read-side values are never written back out; writing is the job of
// ConstantOopWriteValue.
void ConstantOopReadValue::write_on(DebugInfoWriteStream* stream) {
  ShouldNotReachHere();
}

void ConstantOopReadValue::print_on(outputStream* st) const {
  value()()->print_value_on(st);
}
|
||||
|
||||
|
||||
// MonitorValue
|
||||
|
||||
// Construct a monitor description from its parts (compiler side).
MonitorValue::MonitorValue(ScopeValue* owner, Location basic_lock, bool eliminated) {
  _owner       = owner;
  _basic_lock  = basic_lock;
  _eliminated  = eliminated;
}

// Deserialize.  Note the read order (basic_lock, owner, eliminated)
// mirrors the write order in write_on() below.
MonitorValue::MonitorValue(DebugInfoReadStream* stream) {
  _basic_lock  = Location(stream);
  _owner       = ScopeValue::read_from(stream);
  _eliminated  = (stream->read_bool() != 0);
}

// Serialize: basic_lock, then owner, then the eliminated flag.
void MonitorValue::write_on(DebugInfoWriteStream* stream) {
  _basic_lock.write_on(stream);
  _owner->write_on(stream);
  stream->write_bool(_eliminated);
}

#ifndef PRODUCT
void MonitorValue::print_on(outputStream* st) const {
  st->print("monitor{");
  owner()->print_on(st);
  st->print(",");
  basic_lock().print_on(st);
  st->print("}");
  if (_eliminated) {
    st->print(" (eliminated)");
  }
}
#endif
|
272
hotspot/src/share/vm/code/debugInfo.hpp
Normal file
272
hotspot/src/share/vm/code/debugInfo.hpp
Normal file
|
@ -0,0 +1,272 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// Classes used for serializing debugging information.
|
||||
// These abstractions are introduced to provide symmetric
|
||||
// read and write operations.
|
||||
|
||||
// ScopeValue describes the value of a variable/expression in a scope
|
||||
// - LocationValue describes a value in a given location (in frame or register)
|
||||
// - ConstantValue describes a constant
|
||||
|
||||
class ScopeValue: public ResourceObj {
 public:
  // Testers -- each subclass overrides exactly the tester(s) that
  // describe it; everything defaults to false here.
  virtual bool is_location() const { return false; }
  virtual bool is_object() const { return false; }
  virtual bool is_constant_int() const { return false; }
  virtual bool is_constant_double() const { return false; }
  virtual bool is_constant_long() const { return false; }
  virtual bool is_constant_oop() const { return false; }
  virtual bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information: write_on emits a tag byte
  // plus payload; read_from is the matching factory that dispatches on
  // the tag.
  virtual void write_on(DebugInfoWriteStream* stream) = 0;
  static ScopeValue* read_from(DebugInfoReadStream* stream);
};
|
||||
|
||||
|
||||
// A Location value describes a value in a given location; i.e. the corresponding
|
||||
// logical entity (e.g., a method temporary) lives in this location.
|
||||
|
||||
class LocationValue: public ScopeValue {
 private:
  Location _location;  // where the value lives (stack slot or register)
 public:
  LocationValue(Location location)           { _location = location; }
  bool      is_location() const              { return true; }
  Location  location() const                 { return _location; }

  // Serialization of debugging information
  LocationValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
|
||||
// An ObjectValue describes an object eliminated by escape analysis.
|
||||
|
||||
class ObjectValue: public ScopeValue {
|
||||
private:
|
||||
int _id;
|
||||
ScopeValue* _klass;
|
||||
GrowableArray<ScopeValue*> _field_values;
|
||||
Handle _value;
|
||||
bool _visited;
|
||||
|
||||
public:
|
||||
ObjectValue(int id, ScopeValue* klass)
|
||||
: _id(id)
|
||||
, _klass(klass)
|
||||
, _field_values()
|
||||
, _value()
|
||||
, _visited(false) {
|
||||
assert(klass->is_constant_oop(), "should be constant klass oop");
|
||||
}
|
||||
|
||||
ObjectValue(int id)
|
||||
: _id(id)
|
||||
, _klass(NULL)
|
||||
, _field_values()
|
||||
, _value()
|
||||
, _visited(false) {}
|
||||
|
||||
// Accessors
|
||||
bool is_object() const { return true; }
|
||||
int id() const { return _id; }
|
||||
ScopeValue* klass() const { return _klass; }
|
||||
GrowableArray<ScopeValue*>* field_values() { return &_field_values; }
|
||||
ScopeValue* field_at(int i) const { return _field_values.at(i); }
|
||||
int field_size() { return _field_values.length(); }
|
||||
Handle value() const { return _value; }
|
||||
bool is_visited() const { return _visited; }
|
||||
|
||||
void set_value(oop value) { _value = Handle(value); }
|
||||
void set_visited(bool visited) { _visited = false; }
|
||||
|
||||
// Serialization of debugging information
|
||||
void read_object(DebugInfoReadStream* stream);
|
||||
void write_on(DebugInfoWriteStream* stream);
|
||||
|
||||
// Printing
|
||||
void print_on(outputStream* st) const;
|
||||
void print_fields_on(outputStream* st) const;
|
||||
};
|
||||
|
||||
|
||||
// A ConstantIntValue describes a constant int; i.e., the corresponding logical entity
|
||||
// is either a source constant or its computation has been constant-folded.
|
||||
|
||||
class ConstantIntValue: public ScopeValue {
 private:
  jint _value;  // the constant itself
 public:
  ConstantIntValue(jint value)         { _value = value; }
  jint value() const                   { return _value; }
  bool is_constant_int() const         { return true; }
  bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information
  ConstantIntValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// A constant-folded 64-bit integer value.
class ConstantLongValue: public ScopeValue {
 private:
  jlong _value;  // the constant itself
 public:
  ConstantLongValue(jlong value)       { _value = value; }
  jlong value() const                  { return _value; }
  bool is_constant_long() const        { return true; }
  bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information
  ConstantLongValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// A constant-folded double value.
class ConstantDoubleValue: public ScopeValue {
 private:
  jdouble _value;  // the constant itself
 public:
  ConstantDoubleValue(jdouble value)   { _value = value; }
  jdouble value() const                { return _value; }
  bool is_constant_double() const      { return true; }
  bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information
  ConstantDoubleValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// A ConstantOopWriteValue is created by the compiler to
|
||||
// be written as debugging information.
|
||||
|
||||
class ConstantOopWriteValue: public ScopeValue {
 private:
  jobject _value;  // JNI handle; interned via the OopRecorder on write
 public:
  ConstantOopWriteValue(jobject value) { _value = value; }
  jobject value() const                { return _value; }
  bool is_constant_oop() const         { return true; }
  bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// A ConstantOopReadValue is created by the VM when reading
|
||||
// debug information
|
||||
|
||||
class ConstantOopReadValue: public ScopeValue {
 private:
  Handle _value;  // resolved from the nmethod's oop table on read
 public:
  Handle value() const                 { return _value; }
  bool is_constant_oop() const         { return true; }
  bool equals(ScopeValue* other) const { return false; }

  // Serialization of debugging information (read side only; write_on
  // is defined but must never be called -- see debugInfo.cpp)
  ConstantOopReadValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// MonitorValue describes the pair used for monitor_enter and monitor_exit.
|
||||
|
||||
class MonitorValue: public ResourceObj {
 private:
  ScopeValue* _owner;       // the object being locked
  Location    _basic_lock;  // where the BasicLock lives in the frame
  bool        _eliminated;  // true if the lock was elided by the compiler
 public:
  // Constructor
  MonitorValue(ScopeValue* owner, Location basic_lock, bool eliminated = false);

  // Accessors
  ScopeValue*  owner()      const { return _owner; }
  Location     basic_lock() const { return _basic_lock; }
  bool         eliminated() const { return _eliminated; }

  // Serialization of debugging information
  MonitorValue(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // Printing
  void print_on(outputStream* st) const;
};
|
||||
|
||||
// DebugInfoReadStream specializes CompressedReadStream for reading
|
||||
// debugging information. Used by ScopeDesc.
|
||||
|
||||
class DebugInfoReadStream : public CompressedReadStream {
 private:
  const nmethod* _code;                  // nmethod whose debug info we are decoding
  const nmethod* code() const { return _code; }
  GrowableArray<ScopeValue*>* _obj_pool; // ObjectValues read so far (for id back-references); may be NULL
 public:
  DebugInfoReadStream(const nmethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) :
    CompressedReadStream(code->scopes_data_begin(), offset) {
    _code = code;
    _obj_pool = obj_pool;

  } ;

  // Resolve an oop index against the nmethod's oop table.
  oop read_oop() {
    return code()->oop_at(read_int());
  }
  ScopeValue* read_object_value();
  ScopeValue* get_cached_object();
  // BCI encoding is mostly unsigned, but -1 is a distinguished value
  int read_bci() { return read_int() + InvocationEntryBci; }
};
|
||||
|
||||
// DebugInfoWriteStream specializes CompressedWriteStream for
|
||||
// writing debugging information. Used by ScopeDescRecorder.
|
||||
|
||||
class DebugInfoWriteStream : public CompressedWriteStream {
 private:
  DebugInformationRecorder* _recorder;  // owner; supplies the OopRecorder for handle interning
  DebugInformationRecorder* recorder() const { return _recorder; }
 public:
  DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size);
  // Write an oop as its OopRecorder index rather than inline.
  void write_handle(jobject h);
  // Shift BCIs so that InvocationEntryBci (-1) encodes as 0 and stays
  // in the cheap unsigned range; read_bci() undoes this.
  void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
};
|
411
hotspot/src/share/vm/code/debugInfoRec.cpp
Normal file
411
hotspot/src/share/vm/code/debugInfoRec.cpp
Normal file
|
@ -0,0 +1,411 @@
|
|||
/*
|
||||
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_debugInfoRec.cpp.incl"
|
||||
|
||||
// Private definition.
|
||||
// There is one DIR_Chunk for each scope and values array.
|
||||
// A chunk can potentially be used more than once.
|
||||
// We keep track of these chunks in order to detect
|
||||
// repetition and enable sharing.
|
||||
class DIR_Chunk {
  friend class DebugInformationRecorder;
  int  _offset; // location in the stream of this scope
  int  _length; // number of bytes in the stream
  int  _hash;   // hash of stream bytes (for quicker reuse)

  // Bump-pointer allocation out of the recorder's resource-allocated
  // chunk array; grows the arena 100 entries at a time.  The recorder
  // can "undo" the most recent allocation by rewinding _next_chunk.
  void* operator new(size_t ignore, DebugInformationRecorder* dir) {
    assert(ignore == sizeof(DIR_Chunk), "");
    if (dir->_next_chunk >= dir->_next_chunk_limit) {
      const int CHUNK = 100;
      dir->_next_chunk = NEW_RESOURCE_ARRAY(DIR_Chunk, CHUNK);
      dir->_next_chunk_limit = dir->_next_chunk + CHUNK;
    }
    return dir->_next_chunk++;
  }

  // Record the span [offset, offset+length) of the recorder's stream
  // and hash its first few bytes (at most 6) for quick mismatch
  // rejection during find_match.
  DIR_Chunk(int offset, int length, DebugInformationRecorder* dir) {
    _offset = offset;
    _length = length;
    unsigned int hash = 0;
    address p = dir->stream()->buffer() + _offset;
    for (int i = 0; i < length; i++) {
      if (i == 6)  break;
      hash *= 127;
      hash += p[i];
    }
    _hash = hash;
  }

  // Scan arr (newest first, from start_index up) for a chunk whose
  // bytes are identical to this one's; returns it, or NULL if none.
  DIR_Chunk* find_match(GrowableArray<DIR_Chunk*>* arr,
                        int start_index,
                        DebugInformationRecorder* dir) {
    int end_index = arr->length();
    int hash = this->_hash, length = this->_length;
    address buf = dir->stream()->buffer();
    for (int i = end_index; --i >= start_index; ) {
      DIR_Chunk* that = arr->at(i);
      if (hash   == that->_hash &&
          length == that->_length &&
          0 == memcmp(buf + this->_offset, buf + that->_offset, length)) {
        return that;
      }
    }
    return NULL;
  }
};
|
||||
|
||||
static inline bool compute_recording_non_safepoints() {
|
||||
if (JvmtiExport::should_post_compiled_method_load()
|
||||
&& FLAG_IS_DEFAULT(DebugNonSafepoints)) {
|
||||
// The default value of this flag is taken to be true,
|
||||
// if JVMTI is looking at nmethod codes.
|
||||
// We anticipate that JVMTI may wish to participate in profiling.
|
||||
return true;
|
||||
}
|
||||
|
||||
// If the flag is set manually, use it, whether true or false.
|
||||
// Otherwise, if JVMTI is not in the picture, use the default setting.
|
||||
// (This is true in debug, just for the exercise, false in product mode.)
|
||||
return DebugNonSafepoints;
|
||||
}
|
||||
|
||||
// Set up an empty recorder: a small PcDesc array, a fresh write
// stream, and the chunk-sharing tables used to de-duplicate scopes.
DebugInformationRecorder::DebugInformationRecorder(OopRecorder* oop_recorder)
  : _recording_non_safepoints(compute_recording_non_safepoints())
{
  _pcs_size   = 100;
  _pcs        = NEW_RESOURCE_ARRAY(PcDesc, _pcs_size);
  _pcs_length = 0;

  _prev_safepoint_pc = PcDesc::lower_offset_limit;

  _stream = new DebugInfoWriteStream(this, 10 * K);
  // make sure that there is no stream_decode_offset that is zero
  _stream->write_byte((jbyte)0xFF);

  // make sure that we can distinguish the value "serialized_null" from offsets
  assert(_stream->position() > serialized_null, "sanity");

  _oop_recorder = oop_recorder;

  _all_chunks    = new GrowableArray<DIR_Chunk*>(300);
  _shared_chunks = new GrowableArray<DIR_Chunk*>(30);
  _next_chunk = _next_chunk_limit = NULL;

  add_new_pc_offset(PcDesc::lower_offset_limit);  // sentinel record

  debug_only(_recording_state = rs_null);
}
|
||||
|
||||
|
||||
// Record an oop map for the given PC offset.
void DebugInformationRecorder::add_oopmap(int pc_offset, OopMap* map) {
  // !!!!! Preserve old style handling of oopmaps for now
  _oopmaps->add_gc_map(pc_offset, map);
}

// Begin recording a safepoint: registers the oop map and a new PcDesc,
// and (in debug builds) marks the recorder as inside a safepoint so
// describe_scope/end_scopes can check call nesting.
void DebugInformationRecorder::add_safepoint(int pc_offset, OopMap* map) {
  assert(!_oop_recorder->is_complete(), "not frozen yet");
  // Store the new safepoint

  // Add the oop map
  add_oopmap(pc_offset, map);

  add_new_pc_offset(pc_offset);

  assert(_recording_state == rs_null, "nesting of recording calls");
  debug_only(_recording_state = rs_safepoint);
}

// Begin recording a non-safepoint PC (no oop map); only legal when
// non-safepoint recording was enabled at construction.
void DebugInformationRecorder::add_non_safepoint(int pc_offset) {
  assert(!_oop_recorder->is_complete(), "not frozen yet");
  assert(_recording_non_safepoints, "must be recording non-safepoints");

  add_new_pc_offset(pc_offset);

  assert(_recording_state == rs_null, "nesting of recording calls");
  debug_only(_recording_state = rs_non_safepoint);
}
|
||||
|
||||
// Append a PcDesc for pc_offset, doubling the array when full.
// Offsets must be added in strictly increasing order; the new entry's
// decode offsets start out as serialized_null until describe_scope /
// dump_object_pool fill them in.
void DebugInformationRecorder::add_new_pc_offset(int pc_offset) {
  assert(_pcs_length == 0 || last_pc()->pc_offset() < pc_offset,
         "must specify a new, larger pc offset");

  // add the pcdesc
  if (_pcs_length == _pcs_size) {
    // Expand
    int     new_pcs_size = _pcs_size * 2;
    PcDesc* new_pcs = NEW_RESOURCE_ARRAY(PcDesc, new_pcs_size);
    for (int index = 0; index < _pcs_length; index++) {
      new_pcs[index] = _pcs[index];
    }
    _pcs_size = new_pcs_size;
    _pcs = new_pcs;
  }
  assert(_pcs_size > _pcs_length, "There must be room for after expanding");

  _pcs[_pcs_length++] = PcDesc(pc_offset, DebugInformationRecorder::serialized_null,
                               DebugInformationRecorder::serialized_null);
}
|
||||
|
||||
|
||||
// Write a monitor list to the stream and return its decode offset,
// or serialized_null for an empty list.  If an identical byte
// sequence was written before, rewind the stream and reuse the old
// offset (see find_sharable_decode_offset).
int DebugInformationRecorder::serialize_monitor_values(GrowableArray<MonitorValue*>* monitors) {
  if (monitors == NULL || monitors->is_empty()) return DebugInformationRecorder::serialized_null;
  assert(_recording_state == rs_safepoint, "must be recording a safepoint");
  int result = stream()->position();
  stream()->write_int(monitors->length());
  for (int index = 0; index < monitors->length(); index++) {
    monitors->at(index)->write_on(stream());
  }
  assert(result != serialized_null, "sanity");

  // (See comment below on DebugInformationRecorder::describe_scope.)
  int shared_result = find_sharable_decode_offset(result);
  if (shared_result != serialized_null) {
    stream()->set_position(result);
    result = shared_result;
  }

  return result;
}


// Same scheme as serialize_monitor_values, but for a list of
// ScopeValues (locals, expressions, or escape-analysis objects).
int DebugInformationRecorder::serialize_scope_values(GrowableArray<ScopeValue*>* values) {
  if (values == NULL || values->is_empty()) return DebugInformationRecorder::serialized_null;
  assert(_recording_state == rs_safepoint, "must be recording a safepoint");
  int result = stream()->position();
  assert(result != serialized_null, "sanity");
  stream()->write_int(values->length());
  for (int index = 0; index < values->length(); index++) {
    values->at(index)->write_on(stream());
  }

  // (See comment below on DebugInformationRecorder::describe_scope.)
  int shared_result = find_sharable_decode_offset(result);
  if (shared_result != serialized_null) {
    stream()->set_position(result);
    result = shared_result;
  }

  return result;
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct dir_stats_struct {
  int chunks_queried;   // total lookups in find_sharable_decode_offset
  int chunks_shared;    // hits found in the recent window of _all_chunks
  int chunks_reshared;  // hits found in _shared_chunks
  int chunks_elided;    // non-safepoint PcDescs coalesced away

  void print() {
    tty->print_cr("Debug Data Chunks: %d, shared %d+%d, non-SP's elided %d",
                  chunks_queried,
                  chunks_shared, chunks_reshared,
                  chunks_elided);
  }
} dir_stats;
#endif //PRODUCT
|
||||
|
||||
|
||||
// Check whether the bytes written at [stream_offset, current position)
// duplicate an earlier chunk.  Returns the earlier chunk's offset on a
// hit (the caller then rewinds the stream), or serialized_null.
int DebugInformationRecorder::find_sharable_decode_offset(int stream_offset) {
  // Only pull this trick if non-safepoint recording
  // is enabled, for now.
  if (!recording_non_safepoints())
    return serialized_null;

  NOT_PRODUCT(++dir_stats.chunks_queried);
  int stream_length = stream()->position() - stream_offset;
  assert(stream_offset != serialized_null, "should not be null");
  assert(stream_length != 0, "should not be empty");

  DIR_Chunk* ns = new(this) DIR_Chunk(stream_offset, stream_length, this);

  // Look in previously shared scopes first:
  DIR_Chunk* ms = ns->find_match(_shared_chunks, 0, this);
  if (ms != NULL) {
    NOT_PRODUCT(++dir_stats.chunks_reshared);
    // ns was the most recent arena allocation; give it back.
    assert(ns+1 == _next_chunk, "");
    _next_chunk = ns;
    return ms->_offset;
  }

  // Look in recently encountered scopes next:
  const int MAX_RECENT = 50;
  int start_index = _all_chunks->length() - MAX_RECENT;
  if (start_index < 0)  start_index = 0;
  ms = ns->find_match(_all_chunks, start_index, this);
  if (ms != NULL) {
    NOT_PRODUCT(++dir_stats.chunks_shared);
    // Searching in _all_chunks is limited to a window,
    // but searching in _shared_chunks is unlimited.
    _shared_chunks->append(ms);
    assert(ns+1 == _next_chunk, "");
    _next_chunk = ns;
    return ms->_offset;
  }

  // No match.  Add this guy to the list, in hopes of future shares.
  _all_chunks->append(ns);
  return serialized_null;
}
|
||||
|
||||
|
||||
// must call add_safepoint before: it sets PcDesc and this routine uses
// the last PcDesc set
//
// Serializes one scope (method, bci, and previously serialized
// locals/expressions/monitors tokens) for the current PcDesc, chaining
// it to the enclosing scope via the sender stream offset.  If the
// resulting bytes duplicate an earlier chunk, the stream is rewound
// and the PcDesc points at the shared copy instead.
void DebugInformationRecorder::describe_scope(int         pc_offset,
                                              ciMethod*   method,
                                              int         bci,
                                              DebugToken* locals,
                                              DebugToken* expressions,
                                              DebugToken* monitors) {
  assert(_recording_state != rs_null, "nesting of recording calls");
  PcDesc* last_pd = last_pc();
  assert(last_pd->pc_offset() == pc_offset, "must be last pc");
  int sender_stream_offset = last_pd->scope_decode_offset();
  // update the stream offset of current pc desc
  int stream_offset = stream()->position();
  last_pd->set_scope_decode_offset(stream_offset);

  // serialize sender stream offset
  stream()->write_int(sender_stream_offset);

  // serialize scope
  jobject method_enc = (method == NULL)? NULL: method->encoding();
  stream()->write_int(oop_recorder()->find_index(method_enc));
  stream()->write_bci(bci);
  assert(method == NULL ||
         (method->is_native() && bci == 0) ||
         (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
         bci == -1, "illegal bci");

  // serialize the locals/expressions/monitors
  // (the tokens are really stream offsets produced by serialize_*)
  stream()->write_int((intptr_t) locals);
  stream()->write_int((intptr_t) expressions);
  stream()->write_int((intptr_t) monitors);

  // Here's a tricky bit.  We just wrote some bytes.
  // Wouldn't it be nice to find that we had already
  // written those same bytes somewhere else?
  // If we get lucky this way, reset the stream
  // and reuse the old bytes.  By the way, this
  // trick not only shares parent scopes, but also
  // compresses equivalent non-safepoint PcDescs.
  int shared_stream_offset = find_sharable_decode_offset(stream_offset);
  if (shared_stream_offset != serialized_null) {
    stream()->set_position(stream_offset);
    last_pd->set_scope_decode_offset(shared_stream_offset);
  }
}
|
||||
|
||||
// Serializes the given object pool and records the resulting decode
// offset on the most recently added PcDesc.  A safepoint must already
// have been declared.  A NULL pool is permitted and is serialized as-is.
void DebugInformationRecorder::dump_object_pool(GrowableArray<ScopeValue*>* objects) {
  guarantee( _pcs_length > 0, "safepoint must exist before describing scopes");
  PcDesc* tail = &_pcs[_pcs_length-1];
  if (objects != NULL) {
    // Reset the visited flag on each entry so serialization starts fresh.
    int i = objects->length();
    while (--i >= 0) {
      ObjectValue* ov = (ObjectValue*) objects->at(i);
      ov->set_visited(false);
    }
  }
  tail->set_obj_decode_offset(serialize_scope_values(objects));
}
|
||||
|
||||
// Closes the set of scopes recorded for the PcDesc at pc_offset.
// Reached via end_safepoint / end_non_safepoint; in debug builds it
// also clears the recording-state used to check call nesting.
// May coalesce the new PcDesc with an equivalent non-safepoint
// predecessor, shrinking the pcs table by one entry.
void DebugInformationRecorder::end_scopes(int pc_offset, bool is_safepoint) {
  assert(_recording_state == (is_safepoint? rs_safepoint: rs_non_safepoint),
         "nesting of recording calls");
  debug_only(_recording_state = rs_null);

  // Try to compress away an equivalent non-safepoint predecessor.
  // (This only works because we have previously recognized redundant
  // scope trees and made them use a common scope_decode_offset.)
  if (_pcs_length >= 2 && recording_non_safepoints()) {
    PcDesc* last = last_pc();
    PcDesc* prev = prev_pc();
    // If prev is (a) not a safepoint and (b) has the same
    // stream pointer, then it can be coalesced into the last.
    // This is valid because non-safepoints are only sought
    // with pc_desc_near, which (when it misses prev) will
    // search forward until it finds last.
    // In addition, it does not matter if the last PcDesc
    // is for a safepoint or not.
    if (_prev_safepoint_pc < prev->pc_offset() &&
        prev->scope_decode_offset() == last->scope_decode_offset()) {
      assert(prev == last-1, "sane");
      // Absorb last into prev: stretch prev's pc to cover it and
      // drop the now-redundant final entry.
      prev->set_pc_offset(pc_offset);
      _pcs_length -= 1;
      NOT_PRODUCT(++dir_stats.chunks_elided);
    }
  }

  // We have just recorded this safepoint.
  // Remember it in case the previous paragraph needs to know.
  if (is_safepoint) {
    _prev_safepoint_pc = pc_offset;
  }
}
|
||||
|
||||
// Serializes a list of scope values (locals or expressions) and hands
// back an opaque DebugToken for later use by describe_scope.
DebugToken* DebugInformationRecorder::create_scope_values(GrowableArray<ScopeValue*>* values) {
  assert(!_oop_recorder->is_complete(), "not frozen yet");
  int decode_offset = serialize_scope_values(values);
  // The token is simply the serialized stream offset in disguise.
  return (DebugToken*) (intptr_t) decode_offset;
}
|
||||
|
||||
|
||||
// Serializes a monitor stack and hands back an opaque DebugToken for
// later use by describe_scope.
DebugToken* DebugInformationRecorder::create_monitor_values(GrowableArray<MonitorValue*>* monitors) {
  assert(!_oop_recorder->is_complete(), "not frozen yet");
  int decode_offset = serialize_monitor_values(monitors);
  // The token is simply the serialized stream offset in disguise.
  return (DebugToken*) (intptr_t) decode_offset;
}
|
||||
|
||||
|
||||
// Returns the number of bytes of serialized scope data produced so far.
// As a side effect (debug builds), touches the oop recorder's size so
// that it is considered frozen and further additions assert.
int DebugInformationRecorder::data_size() {
  debug_only(_oop_recorder->oop_size());  // mark it "frozen" for asserts
  return _stream->position();
}
|
||||
|
||||
|
||||
int DebugInformationRecorder::pcs_size() {
|
||||
debug_only(_oop_recorder->oop_size()); // mark it "frozen" for asserts
|
||||
if (last_pc()->pc_offset() != PcDesc::upper_offset_limit)
|
||||
add_new_pc_offset(PcDesc::upper_offset_limit);
|
||||
return _pcs_length * sizeof(PcDesc);
|
||||
}
|
||||
|
||||
|
||||
// Migrates the collected debug information into the finished nmethod:
// the serialized scope bytes and the PcDesc table.
void DebugInformationRecorder::copy_to(nmethod* nm) {
  DebugInfoWriteStream* s = stream();
  nm->copy_scopes_data(s->buffer(), s->position());
  nm->copy_scopes_pcs(_pcs, _pcs_length);
}
|
||||
|
||||
|
||||
// Verification of the recorded debug information against the nmethod
// is not yet implemented; calling this halts the VM via Unimplemented().
void DebugInformationRecorder::verify(const nmethod* code) {
  Unimplemented();
}
|
||||
|
||||
#ifndef PRODUCT
// Prints the accumulated recorder statistics (non-product builds only;
// product builds get an empty PRODUCT_RETURN stub from the header).
void DebugInformationRecorder::print_statistics() {
  dir_stats.print();
}
#endif //PRODUCT
|
182
hotspot/src/share/vm/code/debugInfoRec.hpp
Normal file
182
hotspot/src/share/vm/code/debugInfoRec.hpp
Normal file
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
//** The DebugInformationRecorder collects debugging information
|
||||
// for a compiled method.
|
||||
// Debugging information is used for:
|
||||
// - garbage collecting compiled frames
|
||||
// - stack tracing across compiled frames
|
||||
// - deoptimizing compiled frames
|
||||
//
|
||||
// The implementation requires the compiler to use the recorder
|
||||
// in the following order:
|
||||
// 1) Describe debug information for safepoints at increasing addresses.
|
||||
// a) Add safepoint entry (use add_safepoint or add_non_safepoint)
|
||||
// b) Describe scopes for that safepoint
|
||||
// - create locals if needed (use create_scope_values)
|
||||
// - create expressions if needed (use create_scope_values)
|
||||
// - create monitor stack if needed (use create_monitor_values)
|
||||
// - describe scope (use describe_scope)
|
||||
// "repeat last four steps for all scopes"
|
||||
// "outer most scope first and inner most scope last"
|
||||
// NB: nodes from create_scope_values and create_locations
|
||||
// can be reused for simple sharing.
|
||||
// - mark the end of the scopes (end_safepoint or end_non_safepoint)
|
||||
// 2) Use oop_size, data_size, pcs_size to create the nmethod and
|
||||
// finally migrate the debugging information into the nmethod
|
||||
// by calling copy_to.
|
||||
|
||||
class DebugToken; // Opaque datatype for stored:
|
||||
// - GrowableArray<ScopeValue*>
|
||||
// - GrowableArray<MonitorValue*>
|
||||
|
||||
// Alias for InvocationEntryBci.
|
||||
// Both constants are used for a pseudo-BCI which refers
|
||||
// to the state just _before_ a method is entered.
|
||||
// SynchronizationEntryBCI is used where the emphasis
|
||||
// is on the implicit monitorenter of a synchronized method.
|
||||
const int SynchronizationEntryBCI = InvocationEntryBci;
|
||||
|
||||
class DIR_Chunk; // private class, a nugget of collected information
|
||||
|
||||
class DebugInformationRecorder: public ResourceObj {
 public:
  // constructor
  DebugInformationRecorder(OopRecorder* oop_recorder);

  // adds an oopmap at a specific offset
  void add_oopmap(int pc_offset, OopMap* map);

  // adds a jvm mapping at pc-offset, for a safepoint only
  void add_safepoint(int pc_offset, OopMap* map);

  // adds a jvm mapping at pc-offset, for a non-safepoint (profile point)
  void add_non_safepoint(int pc_offset);

  // Describes debugging information for a scope at the given pc_offset.
  // Calls must be in non-decreasing order of pc_offset.
  // If there are several calls at a single pc_offset,
  // then they occur in the same order as they were performed by the JVM,
  // with the most recent (innermost) call being described last.
  // For a safepoint, the pc_offset must have been mentioned
  // previously by add_safepoint.
  // Otherwise, the pc_offset must have been mentioned previously
  // by add_non_safepoint, and the locals, expressions, and monitors
  // must all be null.
  void describe_scope(int         pc_offset,
                      ciMethod*   method,
                      int         bci,
                      DebugToken* locals      = NULL,
                      DebugToken* expressions = NULL,
                      DebugToken* monitors    = NULL);

  // Records the object pool for the current safepoint's scopes.
  void dump_object_pool(GrowableArray<ScopeValue*>* objects);

  // This call must follow every add_safepoint,
  // after any intervening describe_scope calls.
  void end_safepoint(int pc_offset)     { end_scopes(pc_offset, true); }
  void end_non_safepoint(int pc_offset) { end_scopes(pc_offset, false); }

  // helper functions for describe_scope to enable sharing
  DebugToken* create_scope_values(GrowableArray<ScopeValue*>* values);
  DebugToken* create_monitor_values(GrowableArray<MonitorValue*>* monitors);

  // returns the size of the generated scopeDescs.
  int data_size();
  int pcs_size();
  int oop_size() { return oop_recorder()->oop_size(); }

  // copy the generated debugging information to nmethod
  void copy_to(nmethod* nm);

  // verifies the debug information
  void verify(const nmethod* code);

  static void print_statistics() PRODUCT_RETURN;

  // Method for setting oopmaps to temporarily preserve old handling of oopmaps
  OopMapSet *_oopmaps;
  void set_oopmaps(OopMapSet *oopmaps) { _oopmaps = oopmaps; }

  OopRecorder* oop_recorder() { return _oop_recorder; }

  int last_pc_offset() { return last_pc()->pc_offset(); }

  bool recording_non_safepoints() { return _recording_non_safepoints; }

 private:
  friend class ScopeDesc;
  friend class vframeStreamCommon;
  friend class DIR_Chunk;

  // True if we are recording non-safepoint scopes.
  // This flag is set if DebugNonSafepoints is true, or if
  // JVMTI post_compiled_method_load events are enabled.
  const bool _recording_non_safepoints;

  // Stream into which all scope information is serialized.
  DebugInfoWriteStream* _stream;

  DebugInfoWriteStream* stream() const { return _stream; }

  OopRecorder* _oop_recorder;

  // Scopes that have been described so far.
  // (_shared_chunks holds entries already found to be shared;
  //  _next_chunk/_next_chunk_limit bound a bump-allocated chunk pool.)
  GrowableArray<DIR_Chunk*>* _all_chunks;
  GrowableArray<DIR_Chunk*>* _shared_chunks;
  DIR_Chunk* _next_chunk;
  DIR_Chunk* _next_chunk_limit;

#ifdef ASSERT
  // Debug-only state machine checking the add_*/describe/end_* nesting.
  enum { rs_null, rs_safepoint, rs_non_safepoint };
  int _recording_state;
#endif

  // Growable array of PcDescs: _pcs_length entries used of _pcs_size.
  PcDesc* _pcs;
  int     _pcs_size;
  int     _pcs_length;
  // Note: Would use GrowableArray<PcDesc>, but structs are not supported.

  // PC of most recent real safepoint before the current one,
  // updated after end_scopes.
  int _prev_safepoint_pc;

  PcDesc* last_pc() {
    guarantee(_pcs_length > 0, "a safepoint must be declared already");
    return &_pcs[_pcs_length-1];
  }
  PcDesc* prev_pc() {
    guarantee(_pcs_length > 1, "a safepoint must be declared already");
    return &_pcs[_pcs_length-2];
  }
  void add_new_pc_offset(int pc_offset);
  void end_scopes(int pc_offset, bool is_safepoint);

  int  serialize_monitor_values(GrowableArray<MonitorValue*>* monitors);
  int  serialize_scope_values(GrowableArray<ScopeValue*>* values);
  int  find_sharable_decode_offset(int stream_offset);

 public:
  // Stream offset used to encode "no scope information".
  enum { serialized_null = 0 };
};
|
1549
hotspot/src/share/vm/code/dependencies.cpp
Normal file
1549
hotspot/src/share/vm/code/dependencies.cpp
Normal file
File diff suppressed because it is too large
Load diff
550
hotspot/src/share/vm/code/dependencies.hpp
Normal file
550
hotspot/src/share/vm/code/dependencies.hpp
Normal file
|
@ -0,0 +1,550 @@
|
|||
/*
|
||||
* Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
//** Dependencies represent assertions (approximate invariants) within
|
||||
// the class hierarchy. An example is an assertion that a given
|
||||
// method is not overridden; another example is that a type has only
|
||||
// one concrete subtype. Compiled code which relies on such
|
||||
// assertions must be discarded if they are overturned by changes in
|
||||
// the class hierarchy. We can think of these assertions as
|
||||
// approximate invariants, because we expect them to be overturned
|
||||
// very infrequently. We are willing to perform expensive recovery
|
||||
// operations when they are overturned. The benefit, of course, is
|
||||
// performing optimistic optimizations (!) on the object code.
|
||||
//
|
||||
// Changes in the class hierarchy due to dynamic linking or
|
||||
// class evolution can violate dependencies. There is enough
|
||||
// indexing between classes and nmethods to make dependency
|
||||
// checking reasonably efficient.
|
||||
|
||||
class ciEnv;
|
||||
class nmethod;
|
||||
class OopRecorder;
|
||||
class xmlStream;
|
||||
class CompileLog;
|
||||
class DepChange;
|
||||
class No_Safepoint_Verifier;
|
||||
|
||||
class Dependencies: public ResourceObj {
 public:
  // Note: In the comments on dependency types, most uses of the terms
  // subtype and supertype are used in a "non-strict" or "inclusive"
  // sense, and are starred to remind the reader of this fact.
  // Strict uses of the terms use the word "proper".
  //
  // Specifically, every class is its own subtype* and supertype*.
  // (This trick is easier than continually saying things like "Y is a
  // subtype of X or X itself".)
  //
  // Sometimes we write X > Y to mean X is a proper supertype of Y.
  // The notation X > {Y, Z} means X has proper subtypes Y, Z.
  // The notation X.m > Y means that Y inherits m from X, while
  // X.m > Y.m means Y overrides X.m.  A star denotes abstractness,
  // as *I > A, meaning (abstract) interface I is a super type of A,
  // or A.*m > B.m, meaning B.m implements abstract method A.m.
  //
  // In this module, the terms "subtype" and "supertype" refer to
  // Java-level reference type conversions, as detected by
  // "instanceof" and performed by "checkcast" operations.  The method
  // Klass::is_subtype_of tests these relations.  Note that "subtype"
  // is richer than "subclass" (as tested by Klass::is_subclass_of),
  // since it takes account of relations involving interface and array
  // types.
  //
  // To avoid needless complexity, dependencies involving array types
  // are not accepted.  If you need to make an assertion about an
  // array type, make the assertion about its corresponding element
  // types.  Any assertion that might change about an array type can
  // be converted to an assertion about its element type.
  //
  // Most dependencies are evaluated over a "context type" CX, which
  // stands for the set Subtypes(CX) of every Java type that is a subtype*
  // of CX.  When the system loads a new class or interface N, it is
  // responsible for re-evaluating changed dependencies whose context
  // type now includes N, that is, all super types of N.
  //
  enum DepType {
    end_marker = 0,

    // An 'evol' dependency simply notes that the contents of the
    // method were used.  If it evolves (is replaced), the nmethod
    // must be recompiled.  No other dependencies are implied.
    evol_method,
    FIRST_TYPE = evol_method,

    // A context type CX is a leaf if it has no proper subtype.
    leaf_type,

    // An abstract class CX has exactly one concrete subtype CC.
    abstract_with_unique_concrete_subtype,

    // The type CX is purely abstract, with no concrete subtype* at all.
    abstract_with_no_concrete_subtype,

    // The concrete CX is free of concrete proper subtypes.
    concrete_with_no_concrete_subtype,

    // Given a method M1 and a context class CX, the set MM(CX, M1) of
    // "concrete matching methods" in CX of M1 is the set of every
    // concrete M2 for which it is possible to create an invokevirtual
    // or invokeinterface call site that can reach either M1 or M2.
    // That is, M1 and M2 share a name, signature, and vtable index.
    // We wish to notice when the set MM(CX, M1) is just {M1}, or
    // perhaps a set of two {M1,M2}, and issue dependencies on this.

    // The set MM(CX, M1) can be computed by starting with any matching
    // concrete M2 that is inherited into CX, and then walking the
    // subtypes* of CX looking for concrete definitions.

    // The parameters to this dependency are the method M1 and the
    // context class CX.  M1 must be either inherited in CX or defined
    // in a subtype* of CX.  It asserts that MM(CX, M1) is no greater
    // than {M1}.
    unique_concrete_method,       // one unique concrete method under CX

    // An "exclusive" assertion concerns two methods or subtypes, and
    // declares that there are at most two (or perhaps later N>2)
    // specific items that jointly satisfy the restriction.
    // We list all items explicitly rather than just giving their
    // count, for robustness in the face of complex schema changes.

    // A context class CX (which may be either abstract or concrete)
    // has two exclusive concrete subtypes* C1, C2 if every concrete
    // subtype* of CX is either C1 or C2.  Note that if neither C1 or C2
    // are equal to CX, then CX itself must be abstract.  But it is
    // also possible (for example) that C1 is CX (a concrete class)
    // and C2 is a proper subtype of C1.
    abstract_with_exclusive_concrete_subtypes_2,

    // This dependency asserts that MM(CX, M1) is no greater than {M1,M2}.
    exclusive_concrete_methods_2,

    // This dependency asserts that no instances of class or its
    // subclasses require finalization registration.
    no_finalizable_subclasses,

    TYPE_LIMIT
  };
  enum {
    LG2_TYPE_LIMIT = 4,  // assert(TYPE_LIMIT <= (1<<LG2_TYPE_LIMIT))

    // handy categorizations of dependency types:
    all_types      = ((1<<TYPE_LIMIT)-1) & ((-1)<<FIRST_TYPE),
    non_ctxk_types = (1<<evol_method),
    ctxk_types     = all_types & ~non_ctxk_types,

    max_arg_count = 3,   // current maximum number of arguments (incl. ctxk)

    // A "context type" is a class or interface that
    // provides context for evaluating a dependency.
    // When present, it is one of the arguments (dep_context_arg).
    //
    // If a dependency does not have a context type, there is a
    // default context, depending on the type of the dependency.
    // This bit signals that a default context has been compressed away.
    default_context_type_bit = (1<<LG2_TYPE_LIMIT)
  };

  static const char* dep_name(DepType dept);
  static int         dep_args(DepType dept);
  // Index of the context-type argument, or -1 if this type has none.
  static int  dep_context_arg(DepType dept) {
    return dept_in_mask(dept, ctxk_types)? 0: -1;
  }

 private:
  // State for writing a new set of dependencies:
  GrowableArray<int>*       _dep_seen;  // (seen[h->ident] & (1<<dept))
  GrowableArray<ciObject*>* _deps[TYPE_LIMIT];

  static const char* _dep_name[TYPE_LIMIT];
  static int         _dep_args[TYPE_LIMIT];

  static bool dept_in_mask(DepType dept, int mask) {
    return (int)dept >= 0 && dept < TYPE_LIMIT && ((1<<dept) & mask) != 0;
  }

  // Records that (dept, x) has been noted; returns whether it was
  // already noted before this call.
  bool note_dep_seen(int dept, ciObject* x) {
    assert(dept < BitsPerInt, "oob");
    int x_id = x->ident();
    assert(_dep_seen != NULL, "deps must be writable");
    int seen = _dep_seen->at_grow(x_id, 0);
    _dep_seen->at_put(x_id, seen | (1<<dept));
    // return true if we've already seen dept/x
    return (seen & (1<<dept)) != 0;
  }

  bool maybe_merge_ctxk(GrowableArray<ciObject*>* deps,
                        int ctxk_i, ciKlass* ctxk);

  void sort_all_deps();
  size_t estimate_size_in_bytes();

  // Initialize _deps, etc.
  void initialize(ciEnv* env);

  // State for making a new set of dependencies:
  OopRecorder* _oop_recorder;

  // Logging support
  CompileLog* _log;

  address  _content_bytes;  // everything but the oop references, encoded
  size_t   _size_in_bytes;

 public:
  // Make a new empty dependencies set.
  Dependencies(ciEnv* env) {
    initialize(env);
  }

 private:
  // Check for a valid context type.
  // Enforce the restriction against array types.
  static void check_ctxk(ciKlass* ctxk) {
    assert(ctxk->is_instance_klass(), "java types only");
  }
  static void check_ctxk_concrete(ciKlass* ctxk) {
    assert(is_concrete_klass(ctxk->as_instance_klass()), "must be concrete");
  }
  static void check_ctxk_abstract(ciKlass* ctxk) {
    check_ctxk(ctxk);
    assert(!is_concrete_klass(ctxk->as_instance_klass()), "must be abstract");
  }

  // Shared recording paths, by argument count (first arg is the ctxk
  // where one exists).
  void assert_common_1(DepType dept, ciObject* x);
  void assert_common_2(DepType dept, ciKlass* ctxk, ciObject* x);
  void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x, ciObject* x2);

 public:
  // Adding assertions to a new dependency set at compile time:
  void assert_evol_method(ciMethod* m);
  void assert_leaf_type(ciKlass* ctxk);
  void assert_abstract_with_unique_concrete_subtype(ciKlass* ctxk, ciKlass* conck);
  void assert_abstract_with_no_concrete_subtype(ciKlass* ctxk);
  void assert_concrete_with_no_concrete_subtype(ciKlass* ctxk);
  void assert_unique_concrete_method(ciKlass* ctxk, ciMethod* uniqm);
  void assert_abstract_with_exclusive_concrete_subtypes(ciKlass* ctxk, ciKlass* k1, ciKlass* k2);
  void assert_exclusive_concrete_methods(ciKlass* ctxk, ciMethod* m1, ciMethod* m2);
  void assert_has_no_finalizable_subclasses(ciKlass* ctxk);

  // Define whether a given method or type is concrete.
  // These methods define the term "concrete" as used in this module.
  // For this module, an "abstract" class is one which is non-concrete.
  //
  // Future optimizations may allow some classes to remain
  // non-concrete until their first instantiation, and allow some
  // methods to remain non-concrete until their first invocation.
  // In that case, there would be a middle ground between concrete
  // and abstract (as defined by the Java language and VM).
  static bool is_concrete_klass(klassOop k);    // k is instantiable
  static bool is_concrete_method(methodOop m);  // m is invocable
  static Klass* find_finalizable_subclass(Klass* k);

  // These versions of the concreteness queries work through the CI.
  // The CI versions are allowed to skew sometimes from the VM
  // (oop-based) versions.  The cost of such a difference is a
  // (safely) aborted compilation, or a deoptimization, or a missed
  // optimization opportunity.
  //
  // In order to prevent spurious assertions, query results must
  // remain stable within any single ciEnv instance.  (I.e., they must
  // not go back into the VM to get their value; they must cache the
  // bit in the CI, either eagerly or lazily.)
  static bool is_concrete_klass(ciInstanceKlass* k); // k appears instantiable
  static bool is_concrete_method(ciMethod* m);       // m appears invocable
  static bool has_finalizable_subclass(ciInstanceKlass* k);

  // As a general rule, it is OK to compile under the assumption that
  // a given type or method is concrete, even if it at some future
  // point becomes abstract.  So dependency checking is one-sided, in
  // that it permits supposedly concrete classes or methods to turn up
  // as really abstract.  (This shouldn't happen, except during class
  // evolution, but that's the logic of the checking.)  However, if a
  // supposedly abstract class or method suddenly becomes concrete, a
  // dependency on it must fail.

  // Checking old assertions at run-time (in the VM only):
  static klassOop check_evol_method(methodOop m);
  static klassOop check_leaf_type(klassOop ctxk);
  static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
                                                              DepChange* changes = NULL);
  static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
                                                          DepChange* changes = NULL);
  static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
                                                          DepChange* changes = NULL);
  static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
                                               DepChange* changes = NULL);
  static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
                                                                  DepChange* changes = NULL);
  static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
                                                   DepChange* changes = NULL);
  static klassOop check_has_no_finalizable_subclasses(klassOop ctxk,
                                                      DepChange* changes = NULL);
  // A returned klassOop is NULL if the dependency assertion is still
  // valid.  A non-NULL klassOop is a 'witness' to the assertion
  // failure, a point in the class hierarchy where the assertion has
  // been proven false.  For example, if check_leaf_type returns
  // non-NULL, the value is a subtype of the supposed leaf type.  This
  // witness value may be useful for logging the dependency failure.
  // Note that, when a dependency fails, there may be several possible
  // witnesses to the failure.  The value returned from the check_foo
  // method is chosen arbitrarily.

  // The 'changes' value, if non-null, requests a limited spot-check
  // near the indicated recent changes in the class hierarchy.
  // It is used by DepStream::spot_check_dependency_at.

  // Detecting possible new assertions:
  static klassOop  find_unique_concrete_subtype(klassOop ctxk);
  static methodOop find_unique_concrete_method(klassOop ctxk, methodOop m);
  static int       find_exclusive_concrete_subtypes(klassOop ctxk, int klen, klassOop k[]);
  static int       find_exclusive_concrete_methods(klassOop ctxk, int mlen, methodOop m[]);

  // Create the encoding which will be stored in an nmethod.
  void encode_content_bytes();

  address content_bytes() {
    assert(_content_bytes != NULL, "encode it first");
    return _content_bytes;
  }
  size_t size_in_bytes() {
    assert(_content_bytes != NULL, "encode it first");
    return _size_in_bytes;
  }

  OopRecorder* oop_recorder() { return _oop_recorder; }
  CompileLog*  log()          { return _log; }

  void copy_to(nmethod* nm);

  void log_all_dependencies();
  void log_dependency(DepType dept, int nargs, ciObject* args[]) {
    write_dependency_to(log(), dept, nargs, args);
  }
  // Convenience overload taking up to three explicit arguments.
  void log_dependency(DepType dept,
                      ciObject* x0,
                      ciObject* x1 = NULL,
                      ciObject* x2 = NULL) {
    if (log() == NULL)  return;
    ciObject* args[max_arg_count];
    args[0] = x0;
    args[1] = x1;
    args[2] = x2;
    assert(2 < max_arg_count, "");
    log_dependency(dept, dep_args(dept), args);
  }

  static void write_dependency_to(CompileLog* log,
                                  DepType dept,
                                  int nargs, ciObject* args[],
                                  klassOop witness = NULL);
  static void write_dependency_to(CompileLog* log,
                                  DepType dept,
                                  int nargs, oop args[],
                                  klassOop witness = NULL);
  static void write_dependency_to(xmlStream* xtty,
                                  DepType dept,
                                  int nargs, oop args[],
                                  klassOop witness = NULL);
  static void print_dependency(DepType dept,
                               int nargs, oop args[],
                               klassOop witness = NULL);

 private:
  // helper for encoding common context types as zero:
  static ciKlass* ctxk_encoded_as_null(DepType dept, ciObject* x);

  static klassOop ctxk_encoded_as_null(DepType dept, oop x);

 public:
  // Use this to iterate over an nmethod's dependency set.
  // Works on new and old dependency sets.
  // Usage:
  //
  // ;
  // Dependencies::DepType dept;
  // for (Dependencies::DepStream deps(nm); deps.next(); ) {
  //   ...
  // }
  //
  // The caller must be in the VM, since oops are not wrapped in handles.
  class DepStream {
  private:
    nmethod*              _code;   // null if in a compiler thread
    Dependencies*         _deps;   // null if not in a compiler thread
    CompressedReadStream  _bytes;
#ifdef ASSERT
    size_t                _byte_limit;
#endif

    // iteration variables:
    DepType               _type;
    int                   _xi[max_arg_count+1];

    void initial_asserts(size_t byte_limit) NOT_DEBUG({});

    inline oop recorded_oop_at(int i);
        // => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)

    klassOop check_dependency_impl(DepChange* changes);

  public:
    // Iterate over dependencies still held by a compiler thread.
    DepStream(Dependencies* deps)
      : _deps(deps),
        _code(NULL),
        _bytes(deps->content_bytes())
    {
      initial_asserts(deps->size_in_bytes());
    }
    // Iterate over dependencies already installed in an nmethod.
    DepStream(nmethod* code)
      : _deps(NULL),
        _code(code),
        _bytes(code->dependencies_begin())
    {
      initial_asserts(code->dependencies_size());
    }

    bool next();

    DepType type()               { return _type; }
    int argument_count()         { return dep_args(type()); }
    int argument_index(int i)    { assert(0 <= i && i < argument_count(), "oob");
                                   return _xi[i]; }
    oop argument(int i);         // => recorded_oop_at(argument_index(i))
    klassOop context_type();

    methodOop method_argument(int i) {
      oop x = argument(i);
      assert(x->is_method(), "type");
      return (methodOop) x;
    }
    klassOop type_argument(int i) {
      oop x = argument(i);
      assert(x->is_klass(), "type");
      return (klassOop) x;
    }

    // The point of the whole exercise:  Is this dep still OK?
    klassOop check_dependency() {
      return check_dependency_impl(NULL);
    }
    // A lighter version:  Checks only around recent changes in a class
    // hierarchy.  (See Universe::flush_dependents_on.)
    klassOop spot_check_dependency_at(DepChange& changes);

    // Log the current dependency to xtty or compilation log.
    void log_dependency(klassOop witness = NULL);

    // Print the current dependency to tty.
    void print_dependency(klassOop witness = NULL, bool verbose = false);
  };
  friend class Dependencies::DepStream;

  static void print_statistics() PRODUCT_RETURN;
};
|
||||
|
||||
// A class hierarchy change coming through the VM (under the Compile_lock).
|
||||
// The change is structured as a single new type with any number of supers
|
||||
// and implemented interface types. Other than the new type, any of the
|
||||
// super types can be context types for a relevant dependency, which the
|
||||
// new type could invalidate.
|
||||
class DepChange : public StackObj {
 private:
  // How a given klass is involved in this change set.
  enum ChangeType {
    NO_CHANGE = 0,              // an uninvolved klass
    Change_new_type,            // a newly loaded type
    Change_new_sub,             // a super with a new subtype
    Change_new_impl,            // an interface with a new implementation
    CHANGE_LIMIT,
    Start_Klass = CHANGE_LIMIT  // internal indicator for ContextStream
  };

  // each change set is rooted in exactly one new type (at present):
  KlassHandle _new_type;

  // performs the marking done on construction (see constructor comment)
  void initialize();

 public:
  // notes the new type, marks it and all its super-types
  DepChange(KlassHandle new_type)
    : _new_type(new_type)
  {
    initialize();
  }

  // cleans up the marks
  ~DepChange();

  klassOop new_type()                   { return _new_type(); }

  // involves_context(k) is true if k is new_type or any of the super types
  bool involves_context(klassOop k);

  // Usage:
  // for (DepChange::ContextStream str(changes); str.next(); ) {
  //   klassOop k = str.klass();
  //   switch (str.change_type()) {
  //     ...
  //   }
  // }
  class ContextStream : public StackObj {
   private:
    DepChange&  _changes;
    friend class DepChange;

    // iteration variables:
    ChangeType  _change_type;
    klassOop    _klass;
    objArrayOop _ti_base;    // i.e., transitive_interfaces
    int         _ti_index;
    int         _ti_limit;

    // start at the beginning:
    void start() {
      klassOop new_type = _changes.new_type();
      _change_type = (new_type == NULL ? NO_CHANGE: Start_Klass);
      _klass = new_type;
      _ti_base = NULL;
      _ti_index = 0;
      _ti_limit = 0;
    }

    ContextStream(DepChange& changes)
      : _changes(changes)
    { start(); }

   public:
    ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
      : _changes(changes)
      // the nsv argument makes it safe to hold oops like _klass
    { start(); }

    // Advance to the next involved context type; false when done.
    bool next();

    klassOop klass()           { return _klass; }
  };
  friend class DepChange::ContextStream;

  void print();
};
|
226
hotspot/src/share/vm/code/exceptionHandlerTable.cpp
Normal file
226
hotspot/src/share/vm/code/exceptionHandlerTable.cpp
Normal file
|
@ -0,0 +1,226 @@
|
|||
/*
|
||||
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_exceptionHandlerTable.cpp.incl"
|
||||
|
||||
// Append one entry, growing the (resource-allocated) table if it is full.
void ExceptionHandlerTable::add_entry(HandlerTableEntry entry) {
  _nesting.check();
  if (_length >= _size) {
    // not enough space => grow the table (amortized growth, double its size)
    guarantee(_size > 0, "no space allocated => cannot grow the table since it is part of nmethod");
    int new_size = _size * 2;
    _table = REALLOC_RESOURCE_ARRAY(HandlerTableEntry, _table, _size, new_size);
    _size = new_size;
  }
  assert(_length < _size, "sanity check");
  _table[_length++] = entry;
}
|
||||
|
||||
|
||||
// Linear scan over the subtable headers; returns the header entry of the
// subtable whose catch pc offset matches catch_pco, or NULL if none does.
HandlerTableEntry* ExceptionHandlerTable::subtable_for(int catch_pco) const {
  int i = 0;
  while (i < _length) {
    HandlerTableEntry* t = _table + i;
    if (t->pco() == catch_pco) {
      // found subtable matching the catch_pco
      return t;
    } else {
      // advance to next subtable
      i += t->len() + 1; // +1 for header
    }
  }
  return NULL;
}
|
||||
|
||||
|
||||
// Compile-time construction: allocates an empty, growable resource-area table.
ExceptionHandlerTable::ExceptionHandlerTable(int initial_size) {
  guarantee(initial_size > 0, "initial size must be > 0");
  _table  = NEW_RESOURCE_ARRAY(HandlerTableEntry, initial_size);
  _length = 0;
  _size   = initial_size;
}
|
||||
|
||||
|
||||
// Run-time construction: wraps the read-only table embedded in an nmethod.
ExceptionHandlerTable::ExceptionHandlerTable(const nmethod* nm) {
  _table  = (HandlerTableEntry*)nm->handler_table_begin();
  _length = nm->handler_table_size() / sizeof(HandlerTableEntry);
  _size   = 0; // no space allocated by ExceptionHandlerTable!
}
|
||||
|
||||
|
||||
// Add a subtable (header + one entry per handler) for the CatchNode at
// catch_pco.  An empty handler list adds nothing.  scope_depths_from_top_scope
// may be NULL, which means scope depth 0 (innermost scope) for every handler.
void ExceptionHandlerTable::add_subtable(
  int                 catch_pco,
  GrowableArray<intptr_t>* handler_bcis,
  GrowableArray<intptr_t>* scope_depths_from_top_scope,
  GrowableArray<intptr_t>* handler_pcos
) {
  assert(subtable_for(catch_pco) == NULL, "catch handlers for this catch_pco added twice");
  assert(handler_bcis->length() == handler_pcos->length(), "bci & pc table have different length");
  assert(scope_depths_from_top_scope == NULL || handler_bcis->length() == scope_depths_from_top_scope->length(), "bci & scope_depths table have different length");
  if (handler_bcis->length() > 0) {
    // add subtable header
    add_entry(HandlerTableEntry(handler_bcis->length(), catch_pco, 0));
    // add individual entries
    for (int i = 0; i < handler_bcis->length(); i++) {
      intptr_t scope_depth = 0;
      if (scope_depths_from_top_scope != NULL) {
        scope_depth = scope_depths_from_top_scope->at(i);
      }
      add_entry(HandlerTableEntry(handler_bcis->at(i), handler_pcos->at(i), scope_depth));
      assert(entry_for(catch_pco, handler_bcis->at(i), scope_depth)->pco() == handler_pcos->at(i), "entry not added correctly (1)");
      assert(entry_for(catch_pco, handler_bcis->at(i), scope_depth)->scope_depth() == scope_depth, "entry not added correctly (2)");
    }
  }
}
|
||||
|
||||
|
||||
// Copy the finished table into the space reserved for it in the nmethod.
void ExceptionHandlerTable::copy_to(nmethod* nm) {
  assert(size_in_bytes() == nm->handler_table_size(), "size of space allocated in nmethod incorrect");
  memmove(nm->handler_table_begin(), _table, size_in_bytes());
}
|
||||
|
||||
|
||||
// Look up the handler entry matching (catch_pco, handler_bci, scope_depth);
// returns NULL if no subtable or no entry matches.
HandlerTableEntry* ExceptionHandlerTable::entry_for(int catch_pco, int handler_bci, int scope_depth) const {
  HandlerTableEntry* t = subtable_for(catch_pco);
  if (t != NULL) {
    int l = t->len(); // number of entries following the subtable header
    while (l-- > 0) {
      t++;
      if (t->bci() == handler_bci && t->scope_depth() == scope_depth) return t;
    }
  }
  return NULL;
}
|
||||
|
||||
|
||||
// Print one subtable, given a pointer to its header entry.
void ExceptionHandlerTable::print_subtable(HandlerTableEntry* t) const {
  int l = t->len();
  tty->print_cr("catch_pco = %d (%d entries)", t->pco(), l);
  while (l-- > 0) {
    t++;
    tty->print_cr(" bci %d at scope depth %d -> pco %d", t->bci(), t->scope_depth(), t->pco());
  }
}
|
||||
|
||||
|
||||
// Print every subtable, walking the header chain (same walk as subtable_for).
void ExceptionHandlerTable::print() const {
  tty->print_cr("ExceptionHandlerTable (size = %d bytes)", size_in_bytes());
  int i = 0;
  while (i < _length) {
    HandlerTableEntry* t = _table + i;
    print_subtable(t);
    // advance to next subtable
    i += t->len() + 1; // +1 for header
  }
}
|
||||
|
||||
void ExceptionHandlerTable::print_subtable_for(int catch_pco) const {
|
||||
HandlerTableEntry* subtable = subtable_for(catch_pco);
|
||||
|
||||
if( subtable != NULL ) { print_subtable( subtable ); }
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Implicit null exception tables. Maps an exception PC offset to a
|
||||
// continuation PC offset. During construction it's a variable sized
|
||||
// array with a max size and current length. When stored inside an
|
||||
// nmethod a zero length table takes no space. This is detected by
|
||||
// nul_chk_table_size() == 0. Otherwise the table has a length word
|
||||
// followed by pairs of <excp-offset, const-offset>.
|
||||
// Pre-size the table for 'size' (exec-offset, cont-offset) pairs and reset it.
void ImplicitExceptionTable::set_size( uint size ) {
  _size = size;
  _data = NEW_RESOURCE_ARRAY(implicit_null_entry, (size*2));
  _len = 0;
}
|
||||
|
||||
// Append an (exception offset, continuation offset) pair, doubling the
// backing array when full.  On targets where implicit_null_entry is only
// 16 bits, offsets must fit in 16 bits (checked by the asserts below).
void ImplicitExceptionTable::append( uint exec_off, uint cont_off ) {
  assert( (sizeof(implicit_null_entry) >= 4) || (exec_off < 65535), "" );
  assert( (sizeof(implicit_null_entry) >= 4) || (cont_off < 65535), "" );
  uint l = len();
  if (l == _size) {
    // grow: each logical pair occupies two implicit_null_entry slots
    uint old_size_in_elements = _size*2;
    if (_size == 0) _size = 4;
    _size *= 2;
    uint new_size_in_elements = _size*2;
    _data = REALLOC_RESOURCE_ARRAY(uint, _data, old_size_in_elements, new_size_in_elements);
  }
  *(adr(l)  ) = exec_off;
  *(adr(l)+1) = cont_off;
  _len = l+1;
} // (stray ';' after the function body removed: an empty declaration at
  //  namespace scope is ill-formed before C++11 and flagged by -pedantic)
|
||||
|
||||
// Linear search: return the continuation offset recorded for exec_off,
// or 0 if no entry matches.
uint ImplicitExceptionTable::at( uint exec_off ) const {
  uint l = len();
  for( uint i=0; i<l; i++ )
    if( *adr(i) == exec_off )
      return *(adr(i)+1);
  return 0;                   // Failed to find any exception offset
}
|
||||
|
||||
// Print all <exception pc, continuation pc> pairs as absolute addresses.
void ImplicitExceptionTable::print(address base) const {
  tty->print("{");
  for( uint i=0; i<len(); i++ )
    tty->print("< "INTPTR_FORMAT", "INTPTR_FORMAT" > ",base + *adr(i), base + *(adr(i)+1));
  tty->print_cr("}");
}
|
||||
|
||||
// Run-time construction: wrap (zero-copy) the table stored in the nmethod.
ImplicitExceptionTable::ImplicitExceptionTable(const nmethod* nm) {
  if (nm->nul_chk_table_size() == 0) {
    _len = 0;
    _data = NULL;
  } else {
    // the first word is the length if non-zero, so read it out and
    // skip to the next word to get the table.
    _data = (implicit_null_entry*)nm->nul_chk_table_begin();
    _len = _data[0];
    _data++;
  }
  _size = len();
  assert(size_in_bytes() <= nm->nul_chk_table_size(), "size of space allocated in nmethod incorrect");
}
|
||||
|
||||
// Store the table into the nmethod: a length word followed by the pairs.
// A zero-length table takes no space at all.
void ImplicitExceptionTable::copy_to( nmethod* nm ) {
  assert(size_in_bytes() <= nm->nul_chk_table_size(), "size of space allocated in nmethod incorrect");
  if (len() != 0) {
    implicit_null_entry* nmdata = (implicit_null_entry*)nm->nul_chk_table_begin();
    // store the length in the first uint
    nmdata[0] = _len;
    nmdata++;
    // copy the table after the length
    memmove( nmdata, _data, 2 * len() * sizeof(implicit_null_entry));
  } else {
    // zero length table takes zero bytes
    assert(size_in_bytes() == 0, "bad size");
    assert(nm->nul_chk_table_size() == 0, "bad size");
  }
}
|
||||
|
||||
// Check that every recorded offset lies within the nmethod's code.
void ImplicitExceptionTable::verify(nmethod *nm) const {
  for (uint i = 0; i < len(); i++) {
     if ((*adr(i) > (unsigned int)nm->code_size()) ||
         (*(adr(i)+1) > (unsigned int)nm->code_size()))
       fatal1("Invalid offset in ImplicitExceptionTable at %lx", _data);
  }
}
|
156
hotspot/src/share/vm/code/exceptionHandlerTable.hpp
Normal file
156
hotspot/src/share/vm/code/exceptionHandlerTable.hpp
Normal file
|
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// A HandlerTableEntry describes an individual entry of a subtable
|
||||
// of ExceptionHandlerTable. An entry consists of a pair(bci, pco),
|
||||
// where bci is the exception handler bci, and pco is the pc offset
|
||||
// relative to the nmethod code start for the compiled exception
|
||||
// handler corresponding to the (interpreted) exception handler
|
||||
// starting at bci.
|
||||
//
|
||||
// The first HandlerTableEntry of each subtable holds the length
|
||||
// and catch_pco for the subtable (the length is the number of
|
||||
// subtable entries w/o header).
|
||||
|
||||
class HandlerTableEntry {
 private:
  int _bci;          // handler bci; in a subtable header, the subtable length
  int _pco;          // handler pc offset; in a subtable header, the catch pco
  int _scope_depth;  // scope depth of the handler (0 in subtable headers)

 public:
  HandlerTableEntry(int bci, int pco, int scope_depth) {
    assert( 0 <= pco, "pco must be positive");
    assert( 0 <= scope_depth, "scope_depth must be positive");
    _bci = bci;
    _pco = pco;
    _scope_depth = scope_depth;
  }

  int len() const { return _bci; } // for entry at subtable begin
  int bci() const { return _bci; }
  int pco() const { return _pco; }
  int scope_depth() const { return _scope_depth; }
};
|
||||
|
||||
|
||||
// An ExceptionHandlerTable is an abstraction over a list of subtables
|
||||
// of exception handlers for CatchNodes. Each subtable has a one-entry
|
||||
// header holding length and catch_pco of the subtable, followed
|
||||
// by 'length' entries for each exception handler that can be reached
|
||||
// from the corresponding CatchNode. The catch_pco is the pc offset of
|
||||
// the CatchNode in the corresponding nmethod. Empty subtables are dis-
|
||||
// carded.
|
||||
//
|
||||
// Structure of the table:
|
||||
//
|
||||
// table = { subtable }.
|
||||
// subtable = header entry { entry }.
|
||||
// header = a pair (number of subtable entries, catch pc offset, [unused])
|
||||
// entry = a pair (handler bci, handler pc offset, scope depth)
|
||||
//
|
||||
// An ExceptionHandlerTable can be created from scratch, in which case
|
||||
// it is possible to add subtables. It can also be created from an
|
||||
// nmethod (for lookup purposes) in which case the table cannot be
|
||||
// modified.
|
||||
|
||||
class nmethod;
|
||||
class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
 private:
  HandlerTableEntry* _table;    // the table
  int                _length;   // the current length of the table
  int                _size;     // the number of allocated entries
  ReallocMark        _nesting;  // assertion check for reallocations

  // add the entry & grow the table if needed
  void add_entry(HandlerTableEntry entry);
  // find the subtable header for catch_pco, or NULL if there is none
  HandlerTableEntry* subtable_for(int catch_pco) const;

 public:
  // (compile-time) construction within compiler
  ExceptionHandlerTable(int initial_size = 8);

  // (run-time) construction from nmethod
  ExceptionHandlerTable(const nmethod* nm);

  // (compile-time) add entries
  void add_subtable(
    int                 catch_pco,           // the pc offset for the CatchNode
    GrowableArray<intptr_t>* handler_bcis,   // the exception handler entry point bcis
    GrowableArray<intptr_t>* scope_depths_from_top_scope,
                                             // if representing exception handlers in multiple
                                             // inlined scopes, indicates which scope relative to
                                             // the youngest/innermost one in which we are performing
                                             // the lookup; zero (or null GrowableArray) indicates
                                             // innermost scope
    GrowableArray<intptr_t>* handler_pcos    // pc offsets for the compiled handlers
  );

  // nmethod support
  int  size_in_bytes() const { return round_to(_length * sizeof(HandlerTableEntry), oopSize); }
  void copy_to(nmethod* nm);

  // lookup
  HandlerTableEntry* entry_for(int catch_pco, int handler_bci, int scope_depth) const;

  // debugging
  void print_subtable(HandlerTableEntry* t) const;
  void print() const;
  void print_subtable_for(int catch_pco) const;
};
|
||||
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Implicit null exception tables. Maps an exception PC offset to a
|
||||
// continuation PC offset. During construction it's a variable sized
|
||||
// array with a max size and current length. When stored inside an
|
||||
// nmethod a zero length table takes no space. This is detected by
|
||||
// nul_chk_table_size() == 0. Otherwise the table has a length word
|
||||
// followed by pairs of <excp-offset, const-offset>.
|
||||
|
||||
// Use 32-bit representation for offsets
|
||||
typedef uint implicit_null_entry;
|
||||
|
||||
class ImplicitExceptionTable VALUE_OBJ_CLASS_SPEC {
|
||||
uint _size;
|
||||
uint _len;
|
||||
implicit_null_entry *_data;
|
||||
implicit_null_entry *adr( uint idx ) const { return &_data[2*idx]; }
|
||||
ReallocMark _nesting; // assertion check for reallocations
|
||||
public:
|
||||
ImplicitExceptionTable( ) : _data(0), _size(0), _len(0) { }
|
||||
// (run-time) construction from nmethod
|
||||
ImplicitExceptionTable( const nmethod *nm );
|
||||
|
||||
void set_size( uint size );
|
||||
void append( uint exec_off, uint cont_off );
|
||||
uint at( uint exec_off ) const;
|
||||
|
||||
uint len() const { return _len; }
|
||||
int size_in_bytes() const { return len() == 0 ? 0 : ((2 * len() + 1) * sizeof(implicit_null_entry)); }
|
||||
|
||||
void copy_to(nmethod* nm);
|
||||
void print(address base) const;
|
||||
void verify(nmethod *nm) const;
|
||||
};
|
186
hotspot/src/share/vm/code/icBuffer.cpp
Normal file
186
hotspot/src/share/vm/code/icBuffer.cpp
Normal file
|
@ -0,0 +1,186 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_icBuffer.cpp.incl"
|
||||
|
||||
|
||||
DEF_STUB_INTERFACE(ICStub);
|
||||
|
||||
StubQueue* InlineCacheBuffer::_buffer = NULL;
|
||||
ICStub* InlineCacheBuffer::_next_stub = NULL;
|
||||
|
||||
|
||||
// Called when the stub is retired: back-patch the owning inline cache with
// the (cached oop, destination) this stub has been holding in transition.
void ICStub::finalize() {
  if (!is_empty()) {
    ResourceMark rm;
    CompiledIC *ic = CompiledIC_at(ic_site());
    assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?");

    assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
    ic->set_cached_oop(cached_oop());
    ic->set_ic_destination(destination());
  }
}
|
||||
|
||||
|
||||
// Destination of the stub's jump instruction (machine-dependent decode).
address ICStub::destination() const {
  return InlineCacheBuffer::ic_buffer_entry_point(code_begin());
}
|
||||
|
||||
// Oop embedded in the stub's code (machine-dependent decode).
oop ICStub::cached_oop() const {
  return InlineCacheBuffer::ic_buffer_cached_oop(code_begin());
}
|
||||
|
||||
|
||||
// Associate this stub with an inline cache site and assemble its code.
void ICStub::set_stub(CompiledIC *ic, oop cached_value, address dest_addr) {
  // We cannot store a pointer to the 'ic' object, since it is resource allocated. Instead we
  // store the location of the inline cache. Then we have enough information to recreate the
  // CompiledIC object when we need to remove the stub.
  _ic_site = ic->instruction_address();

  // Assemble new stub
  InlineCacheBuffer::assemble_ic_buffer_code(code_begin(), cached_value, dest_addr);
  assert(destination() == dest_addr,   "can recover destination");
  assert(cached_oop() == cached_value, "can recover destination");
}
|
||||
|
||||
|
||||
// Detach the stub from its inline cache site (makes is_empty() true).
void ICStub::clear() {
  _ic_site = NULL;
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
// anybody calling to this stub will trap
|
||||
|
||||
// Nothing to verify currently.
void ICStub::verify() {
}
|
||||
|
||||
// Print the inline-cache site this stub is attached to.
void ICStub::print() {
  tty->print_cr("ICStub: site: " INTPTR_FORMAT, _ic_site);
}
|
||||
#endif
|
||||
|
||||
//-----------------------------------------------------------------------------------------------
|
||||
// Implementation of InlineCacheBuffer
|
||||
|
||||
// Pre-allocate the stub that the next transition will use (see get_next_stub).
void InlineCacheBuffer::init_next_stub() {
  ICStub* ic_stub = (ICStub*)buffer()->request_committed (ic_stub_code_size());
  assert (ic_stub != NULL, "no room for a single stub");
  set_next_stub(ic_stub);
}
|
||||
|
||||
// One-time setup of the global stub queue; safe to call more than once.
void InlineCacheBuffer::initialize() {
  if (_buffer != NULL) return; // already initialized
  _buffer = new StubQueue(new ICStubInterface, 10*K, InlineCacheBuffer_lock, "InlineCacheBuffer");
  assert (_buffer != NULL, "cannot allocate InlineCacheBuffer");
  init_next_stub();
}
|
||||
|
||||
|
||||
// Allocate a fresh ICStub, forcing a safepoint (which drains the buffer)
// whenever the buffer is out of space, and retrying until one is available.
ICStub* InlineCacheBuffer::new_ic_stub() {
  while (true) {
    ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
    if (ic_stub != NULL) {
      return ic_stub;
    }
    // we ran out of inline cache buffer space; must enter safepoint.
    // We do this by forcing a safepoint
    EXCEPTION_MARK;

    VM_ForceSafepoint vfs;
    VMThread::execute(&vfs);
    // We could potentially get an async. exception at this point.
    // In that case we will rethrow it to ourselves.
    if (HAS_PENDING_EXCEPTION) {
      oop exception = PENDING_EXCEPTION;
      CLEAR_PENDING_EXCEPTION;
      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
    }
  }
  ShouldNotReachHere();
  return NULL;
}
|
||||
|
||||
|
||||
// Drop all stubs (remove_all finalizes them, back-patching their inline
// caches) and re-prime the buffer.  "> 1" because the queue always holds
// the pre-allocated next stub (see is_empty()).
void InlineCacheBuffer::update_inline_caches() {
  if (buffer()->number_of_stubs() > 1) {
    if (TraceICBuffer) {
      tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
    }
    buffer()->remove_all();
    init_next_stub();
  }
}
|
||||
|
||||
|
||||
// True if instruction_address lies inside the stub buffer.
bool InlineCacheBuffer::contains(address instruction_address) {
  return buffer()->contains(instruction_address);
}
|
||||
|
||||
|
||||
// True when no transition stubs are outstanding.
bool InlineCacheBuffer::is_empty() {
  return buffer()->number_of_stubs() == 1;    // always has sentinel
}
|
||||
|
||||
|
||||
// VM-startup hook: initializes the global InlineCacheBuffer.
void InlineCacheBuffer_init() {
  InlineCacheBuffer::initialize();
}
|
||||
|
||||
|
||||
// Redirect the inline cache to a freshly assembled transition stub holding
// (cached_oop, entry); the IC itself is back-patched at the next safepoint
// (see ICStub::finalize / update_inline_caches).
void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, oop cached_oop, address entry) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
  assert (CompiledIC_lock->is_locked(), "");
  assert(cached_oop == NULL || cached_oop->is_perm(), "must belong to perm. space");
  if (TraceICBuffer) { tty->print_cr(" create transition stub for " INTPTR_FORMAT, ic->instruction_address()); }

  // If a transition stub is already associated with the inline cache, then we remove the association.
  if (ic->is_in_transition_state()) {
    ICStub* old_stub = ICStub_from_destination_address(ic->stub_address());
    old_stub->clear();
  }

  // allocate and initialize new "out-of-line" inline-cache
  ICStub* ic_stub = get_next_stub();
  ic_stub->set_stub(ic, cached_oop, entry);

  // Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
  ic->set_ic_destination(ic_stub->code_begin());

  set_next_stub(new_ic_stub()); // can cause safepoint synchronization
}
|
||||
|
||||
|
||||
// Destination recorded in the transition stub owned by this inline cache.
address InlineCacheBuffer::ic_destination_for(CompiledIC *ic) {
  ICStub* stub = ICStub_from_destination_address(ic->stub_address());
  return stub->destination();
}
|
||||
|
||||
|
||||
// Cached oop recorded in the transition stub owned by this inline cache.
oop InlineCacheBuffer::cached_oop_for(CompiledIC *ic) {
  ICStub* stub = ICStub_from_destination_address(ic->stub_address());
  return stub->cached_oop();
}
|
128
hotspot/src/share/vm/code/icBuffer.hpp
Normal file
128
hotspot/src/share/vm/code/icBuffer.hpp
Normal file
|
@ -0,0 +1,128 @@
|
|||
/*
|
||||
* Copyright 1997-2003 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
//
|
||||
// For CompiledIC's:
|
||||
//
|
||||
// In cases where we do not have MT-safe state transformation,
|
||||
// we go to a transition state, using ICStubs. At a safepoint,
|
||||
// the inline caches are transferred from the transitional code:
|
||||
//
|
||||
// instruction_address --> 01 set xxx_oop, Ginline_cache_klass
|
||||
// 23 jump_to Gtemp, yyyy
|
||||
// 4 nop
|
||||
|
||||
class ICStub: public Stub {
 private:
  int                 _size;       // total size of the stub incl. code
  address             _ic_site;    // points at call instruction of owning ic-buffer
  /* stub code follows here */
 protected:
  friend class ICStubInterface;
  // This will be called only by ICStubInterface
  void    initialize(int size) { _size = size; _ic_site = NULL; }
  void    finalize(); // called when a method is removed

  // General info
  int     size() const                         { return _size; }
  // header (rounded up to CodeEntryAlignment) plus the stub code
  static  int code_size_to_size(int code_size) { return round_to(sizeof(ICStub), CodeEntryAlignment) + code_size; }

 public:
  // Creation
  void set_stub(CompiledIC *ic, oop cached_value, address dest_addr);

  // Code info
  address code_begin() const                   { return (address)this + round_to(sizeof(ICStub), CodeEntryAlignment); }
  address code_end() const                     { return (address)this + size(); }

  // Call site info
  address ic_site() const                      { return _ic_site; }
  void    clear();
  bool    is_empty() const                     { return _ic_site == NULL; }

  // stub info
  address destination() const;  // destination of jump instruction
  oop     cached_oop() const;   // cached_oop for stub

  // Debugging
  void    verify()            PRODUCT_RETURN;
  void    print()             PRODUCT_RETURN;

  // Creation
  friend ICStub* ICStub_from_destination_address(address destination_address);
};
|
||||
|
||||
// ICStub Creation
|
||||
// Map a stub's code entry point back to its enclosing ICStub header
// (the inverse of ICStub::code_begin()).
inline ICStub* ICStub_from_destination_address(address destination_address) {
  ICStub* stub = (ICStub*) (destination_address - round_to(sizeof(ICStub), CodeEntryAlignment));
  #ifdef ASSERT
  stub->verify();
  #endif
  return stub;
}
|
||||
|
||||
class InlineCacheBuffer: public AllStatic {
 private:
  // friends
  friend class ICStub;

  static int ic_stub_code_size();

  static StubQueue* _buffer;     // global queue of transition stubs
  static ICStub*    _next_stub;  // pre-allocated stub for the next transition

  static StubQueue* buffer()                         { return _buffer;         }
  static void       set_next_stub(ICStub* next_stub) { _next_stub = next_stub; }
  static ICStub*    get_next_stub()                  { return _next_stub;      }

  static void       init_next_stub();

  static ICStub* new_ic_stub();


  // Machine-dependent implementation of ICBuffer
  static void    assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point);
  static address ic_buffer_entry_point  (address code_begin);
  static oop     ic_buffer_cached_oop   (address code_begin);

 public:

  // Initialization; must be called before first usage
  static void initialize();

  // Access
  static bool contains(address instruction_address);

  // removes the ICStubs after backpatching
  static void update_inline_caches();

  // for debugging
  static bool is_empty();


  // New interface
  static void    create_transition_stub(CompiledIC *ic, oop cached_oop, address entry);
  static address ic_destination_for(CompiledIC *ic);
  static oop     cached_oop_for(CompiledIC *ic);
};
|
==== new file: hotspot/src/share/vm/code/location.cpp (69 lines) ====
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_location.cpp.incl"
|
||||
|
||||
// Human-readable dump of a Location: where the value lives, then an
// optional suffix describing how it is encoded there.
void Location::print_on(outputStream* st) const {
  // The default-constructed / invalid encoding decodes to type 'invalid'
  // with an out-of-range offset; print a single word and bail out.
  if (type() == invalid && !legal_offset_in_bytes(offset() * BytesPerInt)) {
    // product of Location::invalid_loc() or Location::Location().
    if (where() == on_stack) {
      st->print("empty");
    } else if (where() == in_register) {
      st->print("invalid");
    }
    return;
  }
  // First, the place the value lives.
  switch (where()) {
    case on_stack:    st->print("stack[%d]", stack_offset());                     break;
    case in_register: st->print("reg %s [%d]", reg()->name(), register_number()); break;
    default:          st->print("Wrong location where %d", where());
  }
  // Then, a suffix for the value's representation (none for 'normal').
  Type t = type();
  if (t == normal) {
    // plain ints/floats/double halves get no suffix
  } else if (t == oop) {
    st->print(",oop");
  } else if (t == int_in_long) {
    st->print(",int");
  } else if (t == lng) {
    st->print(",long");
  } else if (t == float_in_dbl) {
    st->print(",float");
  } else if (t == dbl) {
    st->print(",double");
  } else if (t == addr) {
    st->print(",address");
  } else {
    st->print("Wrong location type %d", t);
  }
}
|
||||
|
||||
|
||||
// Deserialization: rebuild the bit-packed encoding from a debug-info
// stream. The 16-bit value was widened to an int when written (see
// write_on); narrow it back here.
Location::Location(DebugInfoReadStream* stream) {
  int packed = stream->read_int();
  _value = (uint16_t) packed;
}
|
||||
|
||||
|
||||
// Serialization: emit the bit-packed encoding to a debug-info stream.
// Mask to the low 16 bits so only the packed fields are written.
void Location::write_on(DebugInfoWriteStream* stream) {
  int packed = _value & 0x0000FFFF;
  stream->write_int(packed);
}
|
||||
|
||||
|
||||
// Valid argument to Location::new_stk_loc()?
|
||||
bool Location::legal_offset_in_bytes(int offset_in_bytes) {
|
||||
if ((offset_in_bytes % BytesPerInt) != 0) return false;
|
||||
return (offset_in_bytes / BytesPerInt) < (OFFSET_MASK >> OFFSET_SHIFT);
|
||||
}
|
==== new file: hotspot/src/share/vm/code/location.hpp (114 lines) ====
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// A Location describes a concrete machine variable location
|
||||
// (such as integer or floating point register or a stack-held
|
||||
// variable). Used when generating debug-information for nmethods.
|
||||
//
|
||||
// Encoding:
|
||||
//
|
||||
// bits:
|
||||
// Where: [15]
|
||||
// Type: [14..12]
|
||||
// Offset: [11..0]
|
||||
|
||||
// Bit-packed description of a machine value location. The entire state is
// a single uint16_t (_value) split into WHERE (1 bit), TYPE (3 bits) and
// OFFSET (12 bits) fields — see the masks/shifts below.
class Location VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 public:
  enum Where {
    on_stack,
    in_register
  };

  enum Type {
    normal,       // Ints, floats, double halves
    oop,          // Oop (please GC me!)
    int_in_long,  // Integer held in long register
    lng,          // Long held in one register
    float_in_dbl, // Float held in double register
    dbl,          // Double held in one register
    addr,         // JSR return address
    invalid       // Invalid location
  };


 private:
  // Field layout of _value: Where[15] | Type[14..12] | Offset[11..0].
  enum {
    OFFSET_MASK  = (jchar) 0x0FFF,
    OFFSET_SHIFT = 0,
    TYPE_MASK    = (jchar) 0x7000,
    TYPE_SHIFT   = 12,
    WHERE_MASK   = (jchar) 0x8000,
    WHERE_SHIFT  = 15
  };

  uint16_t _value;  // the complete packed state

  // Create a bit-packed Location
  Location(Where where_, Type type_, unsigned offset_) {
    set(where_, type_, offset_);
    // Round-trip check: the arguments must survive packing unchanged.
    assert( where () == where_ , "" );
    assert( type () == type_ , "" );
    assert( offset() == offset_, "" );
  }

  // Pack the three fields into _value. Note that offset_ is masked, so an
  // oversized offset is silently truncated (the asserting constructor
  // above catches this in debug builds).
  inline void set(Where where_, Type type_, unsigned offset_) {
    _value = (uint16_t) ((where_ << WHERE_SHIFT) |
    (type_ << TYPE_SHIFT) |
    ((offset_ << OFFSET_SHIFT) & OFFSET_MASK));
  }

 public:

  // Stack location Factory.  Offset is 4-byte aligned; remove low bits
  static Location new_stk_loc( Type t, int offset ) { return Location(on_stack,t,offset>>LogBytesPerInt); }
  // Register location Factory
  static Location new_reg_loc( Type t, VMReg reg ) { return Location(in_register, t, reg->value()); }
  // Default constructor: the canonical "invalid" encoding — type 'invalid'
  // with the all-ones offset (which legal_offset_in_bytes rejects).
  Location() { set(on_stack,invalid,(unsigned) -1); }

  // Bit field accessors
  Where where() const { return (Where) ((_value & WHERE_MASK) >> WHERE_SHIFT);}
  Type type() const { return (Type) ((_value & TYPE_MASK) >> TYPE_SHIFT); }
  unsigned offset() const { return (unsigned) ((_value & OFFSET_MASK) >> OFFSET_SHIFT); }

  // Accessors
  bool is_register() const { return where() == in_register; }
  bool is_stack() const { return where() == on_stack; }

  // Stack offset in bytes (offset is stored in 4-byte units).
  int stack_offset() const { assert(where() == on_stack, "wrong Where"); return offset()<<LogBytesPerInt; }
  // Raw register number (only meaningful for in_register locations).
  int register_number() const { assert(where() == in_register, "wrong Where"); return offset() ; }

  VMReg reg() const { assert(where() == in_register, "wrong Where"); return VMRegImpl::as_VMReg(offset()) ; }

  // Printing
  void print_on(outputStream* st) const;

  // Serialization of debugging information
  Location(DebugInfoReadStream* stream);
  void write_on(DebugInfoWriteStream* stream);

  // check: is offset_in_bytes a valid argument to new_stk_loc()?
  static bool legal_offset_in_bytes(int offset_in_bytes);
};
|
==== new file: hotspot/src/share/vm/code/nmethod.cpp (2216 lines) — diff suppressed, file too large ====
==== new file: hotspot/src/share/vm/code/nmethod.hpp (578 lines) ====
|
|||
/*
|
||||
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// This class is used internally by nmethods, to cache
|
||||
// exception/pc/handler information.
|
||||
|
||||
// Per-nmethod cache mapping (exception type, pc) -> handler address so that
// repeated throws of the same exception type avoid a full handler-table
// search. One ExceptionCache node per exception type, holding up to
// cache_size pc/handler pairs; nodes form a singly-linked list via _next.
class ExceptionCache : public CHeapObj {
  friend class VMStructs;
 private:
  static address _unwind_handler;  // shared sentinel handler address
  enum { cache_size = 16 };        // max pc/handler pairs per node
  klassOop _exception_type;        // the exception class this node caches
  address _pc[cache_size];
  address _handler[cache_size];
  int _count;                      // number of valid entries in _pc/_handler
  ExceptionCache* _next;

  // Readers bound the index by count(); writers by cache_size (a slot is
  // written before _count is bumped past it).
  address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int count() { return _count; }
  void increment_count() { _count++; }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  klassOop exception_type() { return _exception_type; }
  klassOop* exception_type_addr() { return &_exception_type; }
  ExceptionCache* next() { return _next; }
  void set_next(ExceptionCache *ec) { _next = ec; }

  // Return the cached handler for (exception, pc), or NULL on a miss.
  address match(Handle exception, address pc);
  // Does this node cache 'exception's type and still have a free slot?
  bool match_exception_with_space(Handle exception) ;
  // Return the handler cached for 'addr', or NULL.
  address test_address(address addr);
  // Record a pc/handler pair; fails (returns false) when the node is full.
  bool add_address_and_handler(address addr, address handler) ;

  static address unwind_handler() { return _unwind_handler; }
};
|
||||
|
||||
|
||||
// cache pc descs found in earlier inquiries
|
||||
// cache pc descs found in earlier inquiries
// Small direct cache in front of nmethod's PcDesc lookup: remembers the
// most recent hit plus the last cache_size distinct hits so repeated
// queries for the same pc skip the (binary-search) lookup.
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  PcDesc* _last_pc_desc; // most recent pc_desc found
  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  // _last_pc_desc is only initialized in debug builds; callers are
  // expected to reset_to() before the first lookup.
  PcDescCache() { debug_only(_last_pc_desc = NULL); }
  void reset_to(PcDesc* initial_pc_desc);
  // Return a cached PcDesc for pc_offset, or NULL on a miss.
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _last_pc_desc; }
};
|
||||
|
||||
|
||||
// nmethods (native methods) are the compiled code versions of Java methods.
|
||||
|
||||
// Packed state bits for an nmethod, kept in a single word-sized bit-field
// struct. Writes are expected to happen at safepoints only (see
// nmethod::check_safepoint()), since bit-field updates are not atomic.
struct nmFlags {
  friend class VMStructs;
  unsigned int version:8; // version number (0 = first version)
  unsigned int level:4; // optimization level
  unsigned int age:4; // age (in # of sweep steps)

  unsigned int state:2; // {alive, not_entrant, zombie, unloaded}

  unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
  unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
  unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
  unsigned int markedForReclamation:1; // Used by NMethodSweeper

  unsigned int has_unsafe_access:1; // May fault due to unsafe access.

  // Zero all fields.
  void clear();
};
|
||||
|
||||
|
||||
// A nmethod contains:
|
||||
// - header (the nmethod structure)
|
||||
// [Relocation]
|
||||
// - relocation information
|
||||
// - constant part (doubles, longs and floats used in nmethod)
|
||||
// [Code]
|
||||
// - code body
|
||||
// - exception handler
|
||||
// - stub code
|
||||
// [Debugging information]
|
||||
// - oop array
|
||||
// - data array
|
||||
// - pcs
|
||||
// [Exception handler table]
|
||||
// - handler entry point array
|
||||
// [Implicit Null Pointer exception table]
|
||||
// - implicit null table array
|
||||
|
||||
class Dependencies;
|
||||
class ExceptionHandlerTable;
|
||||
class ImplicitExceptionTable;
|
||||
class AbstractCompiler;
|
||||
class xmlStream;
|
||||
|
||||
// An nmethod is the compiled-code version of a Java method (see the layout
// comment above). It lives in the CodeCache; all part boundaries below are
// stored as byte offsets from header_begin().
class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
 private:
  // Shared fields for all nmethod's
  static int _zombie_instruction_size;

  methodOop _method;
  int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method

  nmethod* _link; // To support simple linked-list chaining of nmethods

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptee's will resume execution at this location described by this offset
  int _deoptimize_offset;
  int _stub_offset;
  int _consts_offset;
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id; // which compilation made this nmethod
  int _comp_level; // compilation level

  // offsets for entry points
  address _entry_point; // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point; // entry point for on stack replacement

  nmFlags flags; // various flags to keep track of nmethod state
  bool _markedForDeoptimization; // Used for stack deoptimization
  // Values for flags.state (the nmethod life cycle).
  enum { alive = 0,
  not_entrant = 1, // uncommon trap has happened but activations may still exist
  zombie = 2,
  unloaded = 3 };

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  NOT_PRODUCT(bool _has_debug_info; )

  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to current sweep invocation count if it is seen on the
  // stack. An not_entrant method can be removed when there is no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // current sweep traversal index.
  long _stack_traversal_mark;

  ExceptionCache *_exception_cache;
  PcDescCache _pc_desc_cache;

  // These are only used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  ByteSize _compiled_synchronized_native_basic_lock_owner_sp_offset;
  ByteSize _compiled_synchronized_native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(methodOop method,
  int nmethod_size,
  CodeOffsets* offsets,
  CodeBuffer *code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
  ByteSize basic_lock_sp_offset, /* synchronized natives only */
  OopMapSet* oop_maps);

  // Creation support
  nmethod(methodOop method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder *recorder,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size);
  void check_store();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Transition to not_entrant or zombie (see enum above).
  void make_not_entrant_or_zombie(int state);
  void inc_decompile_count();

  // used to check that writes to nmFlags are done consistently.
  static void check_safepoint() PRODUCT_RETURN;

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  inline void post_compiled_method_unload();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* recorder,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize receiver_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps);

  // accessors
  methodOop method() const { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

#ifndef PRODUCT
  bool has_debug_info() const { return _has_debug_info; }
  // NOTE(review): ignores 'f' and always clears the flag — looks like a
  // bug; confirm intent (expected '_has_debug_info = f;').
  void set_has_debug_info(bool f) { _has_debug_info = false; }
#endif // NOT PRODUCT

  // type info
  bool is_nmethod() const { return true; }
  bool is_java_method() const { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
  bool is_osr_only_method() const { return is_osr_method(); }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;

  // boundaries for different parts
  address code_begin () const { return _entry_point; }
  address code_end () const { return header_begin() + _stub_offset ; }
  address exception_begin () const { return header_begin() + _exception_offset ; }
  address deopt_handler_begin() const { return header_begin() + _deoptimize_offset ; }
  address stub_begin () const { return header_begin() + _stub_offset ; }
  address stub_end () const { return header_begin() + _consts_offset ; }
  address consts_begin () const { return header_begin() + _consts_offset ; }
  address consts_end () const { return header_begin() + _scopes_data_offset ; }
  address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
  address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
  PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
  PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset); }
  address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
  address dependencies_end () const { return header_begin() + _handler_table_offset ; }
  address handler_table_begin() const { return header_begin() + _handler_table_offset ; }
  address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_begin() const { return header_begin() + _nul_chk_table_offset ; }
  address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }

  // Sizes of the individual parts, in bytes (derived from the boundaries).
  int code_size () const { return code_end () - code_begin (); }
  int stub_size () const { return stub_end () - stub_begin (); }
  int consts_size () const { return consts_end () - consts_begin (); }
  int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
  int scopes_pcs_size () const { return (intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin (); }
  int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  int total_size () const;

  // Containment tests: half-open intervals [begin, end).
  bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); }
  bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
  bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
  bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
  bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const { return _entry_point; } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_in_use() const { return flags.state == alive; }
  bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
  bool is_not_entrant() const { return flags.state == not_entrant; }
  bool is_zombie() const { return flags.state == zombie; }
  bool is_unloaded() const { return flags.state == unloaded; }

  // Make the nmethod non entrant. The nmethod will continue to be alive.
  // It is used when an uncommon trap happens.
  void make_not_entrant() { make_not_entrant_or_zombie(not_entrant); }
  void make_zombie() { make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported() { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  bool is_marked_for_deoptimization() const { return _markedForDeoptimization; }
  void mark_for_deoptimization() { _markedForDeoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies() { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
  // One-shot: asserts if dependencies were already flushed.
  void set_has_flushed_dependencies() {
    check_safepoint();
    assert(!has_flushed_dependencies(), "should only happen once");
    flags.hasFlushedDependencies = 1;
  }

  bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
  void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; }
  void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }

  bool has_unsafe_access() const { return flags.has_unsafe_access; }
  void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }

  int level() const { return flags.level; }
  void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }

  int comp_level() const { return _comp_level; }

  int version() const { return flags.version; }
  void set_version(int v);

  // Sweeper support
  long stack_traversal_mark() { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  ExceptionCache* exception_cache() const { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void remove_from_exception_cache(ExceptionCache* ec);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* link() const { return _link; }
  void set_link(nmethod *n) { _link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= instructions_begin() && addr < verified_entry_point());
  }

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
  bool is_locked_by_vm() const { return _lock_count >0; }

  // See comment at definition of _last_seen_on_stack
  void mark_as_seen_on_stack();
  bool can_not_entrant_be_converted();

  // Evolution support. We make old (discarded) compiled methods point to new methodOops.
  void set_method(methodOop method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
  bool unloading_occurred);
  bool can_unload(BoolObjectClosure* is_alive, OopClosure* keep_alive,
  oop* root, bool unloading_occurred);

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
  OopClosure* f);
  void oops_do(OopClosure* f);

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  // Slot in the caller's frame where deopt stashed the original pc.
  address* orig_pc_addr(const frame* fr ) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  // Fast path: check the one-entry cache before the full lookup.
  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // deopt
  // return true is the pc is one would expect if the frame is being deopted.
  bool is_deopt_pc(address pc);
  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const PRODUCT_RETURN;
  void print_code() PRODUCT_RETURN;
  void print_relocations() PRODUCT_RETURN;
  void print_pcs() PRODUCT_RETURN;
  void print_scopes() PRODUCT_RETURN;
  void print_dependencies() PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st) PRODUCT_RETURN;
  void print_handler_table() PRODUCT_RETURN;
  void print_nul_chk_table() PRODUCT_RETURN;
  void print_nmethod(bool print_code) PRODUCT_RETURN;

  void print_on(outputStream* st, const char* title) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change(int state) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end) PRODUCT_RETURN;
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC* IC_at(char* p) const;
  // PrimitiveIC* primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(klassOop dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(methodOop dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize compiled_synchronized_native_basic_lock_owner_sp_offset() {
    return _compiled_synchronized_native_basic_lock_owner_sp_offset;
  }
  ByteSize compiled_synchronized_native_basic_lock_sp_offset() {
    return _compiled_synchronized_native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); }

};
|
||||
|
||||
// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
|
||||
class nmethodLocker : public StackObj {
|
||||
nmethod* _nm;
|
||||
|
||||
static void lock_nmethod(nmethod* nm); // note: nm can be NULL
|
||||
static void unlock_nmethod(nmethod* nm); // (ditto)
|
||||
|
||||
public:
|
||||
nmethodLocker(address pc); // derive nm from pc
|
||||
nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
|
||||
nmethodLocker() { _nm = NULL; }
|
||||
~nmethodLocker() { unlock_nmethod(_nm); }
|
||||
|
||||
nmethod* code() { return _nm; }
|
||||
void set_code(nmethod* new_nm) {
|
||||
unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
|
||||
_nm = new_nm;
|
||||
lock_nmethod(_nm);
|
||||
}
|
||||
};
|
156
hotspot/src/share/vm/code/oopRecorder.cpp
Normal file
156
hotspot/src/share/vm/code/oopRecorder.cpp
Normal file
|
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_oopRecorder.cpp.incl"
|
||||
|
||||
#ifdef ASSERT
|
||||
int OopRecorder::_find_index_calls = 0;
|
||||
int OopRecorder::_hit_indexes = 0;
|
||||
int OopRecorder::_missed_indexes = 0;
|
||||
#endif //ASSERT
|
||||
|
||||
|
||||
// Construct an empty recorder.  Storage comes from 'arena' when given,
// otherwise from the current ResourceArea; allocation itself is deferred
// to maybe_initialize().
OopRecorder::OopRecorder(Arena* arena)
  : _handles(NULL),
    _indexes(NULL),
    _arena(arena),
    _complete(false) {
}
|
||||
|
||||
// Zero every cache slot; a zero entry means "nothing cached here",
// which is why index 0 must never be a valid findable index.
OopRecorder::IndexCache::IndexCache() {
  assert(first_index > 0, "initial zero state of cache must be invalid index");
  for (int i = 0; i < _cache_size; i++) {
    _cache[i] = 0;
  }
}
|
||||
|
||||
int OopRecorder::oop_size() {
|
||||
_complete = true;
|
||||
if (_handles == NULL) return 0;
|
||||
return _handles->length() * sizeof(oop);
|
||||
}
|
||||
|
||||
void OopRecorder::copy_to(CodeBlob* code) {
|
||||
assert(_complete, "must be frozen");
|
||||
maybe_initialize(); // get non-null handles, even if we have no oops
|
||||
code->copy_oops(_handles);
|
||||
}
|
||||
|
||||
void OopRecorder::maybe_initialize() {
|
||||
if (_handles == NULL) {
|
||||
if (_arena != NULL) {
|
||||
_handles = new(_arena) GrowableArray<jobject>(_arena, 10, 0, 0);
|
||||
_no_finds = new(_arena) GrowableArray<int>( _arena, 10, 0, 0);
|
||||
} else {
|
||||
_handles = new GrowableArray<jobject>(10, 0, 0);
|
||||
_no_finds = new GrowableArray<int>( 10, 0, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Map a 1-origin index back to its handle.
jobject OopRecorder::handle_at(int index) {
  if (index == null_index) {
    // there is always a NULL virtually present as first object
    return NULL;
  }
  return _handles->at(index - first_index);
}
|
||||
|
||||
|
||||
// Local definition. Used only in this module.
|
||||
inline bool OopRecorder::is_real_jobject(jobject h) {
|
||||
return h != NULL && h != (jobject)Universe::non_oop_word();
|
||||
}
|
||||
|
||||
|
||||
// Append h to the table and return its 1-origin index.
// If make_findable, the index may later be returned by find_index(h);
// otherwise the index is remembered in _no_finds so find_index skips it.
int OopRecorder::add_handle(jobject h, bool make_findable) {
  assert(!_complete, "cannot allocate more elements after size query");
  maybe_initialize();
  // indexing uses 1 as an origin--0 means null
  int index = _handles->length() + first_index;
  _handles->append(h);

  // Support correct operation of find_index().
  assert(!(make_findable && !is_real_jobject(h)), "nulls are not findable");
  if (make_findable) {
    // This index may be returned from find_index().
    if (_indexes != NULL) {
      int* cloc = _indexes->cache_location(h);
      _indexes->set_cache_location_index(cloc, index);
    } else if (index == index_cache_threshold && _arena != NULL) {
      // The table just reached the threshold: build the cache now and
      // seed it with every pre-existing findable entry.
      _indexes = new(_arena) IndexCache();
      for (int i = 0; i < _handles->length(); i++) {
        // Load the cache with pre-existing elements.
        int index0 = i + first_index;
        if (_no_finds->contains(index0))  continue;
        int* cloc = _indexes->cache_location(_handles->at(i));
        _indexes->set_cache_location_index(cloc, index0);
      }
    }
  } else if (is_real_jobject(h)) {
    // Remember that this index is not to be returned from find_index().
    // This case is rare, because most or all uses of allocate_index pass
    // a jobject argument of NULL or Universe::non_oop_word.
    // Thus, the expected length of _no_finds is zero.
    _no_finds->append(index);
  }

  return index;
}
|
||||
|
||||
|
||||
// Return the findable index previously assigned to h, or -1 if none.
// Probes the IndexCache first; on a collision (or with no cache) falls
// back to a linear search, newest entry first, refreshing the cache slot
// on a hit.
int OopRecorder::maybe_find_index(jobject h) {
  debug_only(_find_index_calls++);
  assert(!_complete, "cannot allocate more elements after size query");
  maybe_initialize();
  if (h == NULL)  return null_index;
  assert(is_real_jobject(h), "must be valid jobject");
  int* cloc = (_indexes == NULL)? NULL: _indexes->cache_location(h);
  if (cloc != NULL) {
    int cindex = _indexes->cache_location_index(cloc);
    if (cindex == 0) {
      return -1;   // We know this handle is completely new.
    }
    if (cindex >= first_index && _handles->at(cindex - first_index) == h) {
      debug_only(_hit_indexes++);
      return cindex;
    }
    if (!_indexes->cache_location_collision(cloc)) {
      return -1;   // We know the current cache occupant is unique to that cloc.
    }
  }

  // Not found in cache, due to a cache collision.  (Or, no cache at all.)
  // Do a linear search, most recent to oldest.
  for (int i = _handles->length() - 1; i >= 0; i--) {
    if (_handles->at(i) == h) {
      int findex = i + first_index;
      if (_no_finds->contains(findex))  continue;   // oops; skip this one
      if (cloc != NULL) {
        // Remember the location for next time.
        _indexes->set_cache_location_index(cloc, findex);
      }
      debug_only(_missed_indexes++);
      return findex;
    }
  }
  return -1;
}
|
136
hotspot/src/share/vm/code/oopRecorder.hpp
Normal file
136
hotspot/src/share/vm/code/oopRecorder.hpp
Normal file
|
@ -0,0 +1,136 @@
|
|||
/*
|
||||
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// Recording and retrieval of oop relocations in compiled code.
|
||||
|
||||
class CodeBlob;
|
||||
|
||||
class OopRecorder : public ResourceObj {
 public:
  // A two-way mapping from positive indexes to oop handles.
  // The zero index is reserved for a constant (sharable) null.
  // Indexes may not be negative.

  // Use the given arena to manage storage, if not NULL.
  // By default, uses the current ResourceArea.
  OopRecorder(Arena* arena = NULL);

  // Generate a new index on which CodeBlob::oop_addr_at will work.
  // allocate_index and find_index never return the same index,
  // and allocate_index never returns the same index twice.
  // In fact, two successive calls to allocate_index return successive ints.
  int allocate_index(jobject h) {
    return add_handle(h, false);
  }

  // For a given jobject, this will return the same index repeatedly.
  // The index can later be given to oop_at to retrieve the oop.
  // However, the oop must not be changed via CodeBlob::oop_addr_at.
  int find_index(jobject h) {
    int index = maybe_find_index(h);
    if (index < 0) {  // previously unallocated
      index = add_handle(h, true);
    }
    return index;
  }

  // variant of find_index which does not allocate if not found (yields -1)
  int maybe_find_index(jobject h);

  // returns the size of the generated oop table, for sizing the CodeBlob.
  // must be called after all oops are allocated!  (Freezes the recorder.)
  int oop_size();

  // Retrieve the oop handle at a given index.
  jobject handle_at(int index);

  // Number of handles, counting the implicit leading NULL entry.
  int element_count() {
    // there is always a NULL virtually present as first object
    return _handles->length() + first_index;
  }

  // copy the generated oop table to CodeBlob
  void copy_to(CodeBlob* code);  // => code->copy_oops(_handles)

  // True if no handle was ever added and the size was never queried.
  bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
  bool is_complete() { return _complete; }
#endif

 private:
  // leaky hash table of handle => index, to help detect duplicate insertion
  class IndexCache: public ResourceObj {
    // This class is only used by the OopRecorder class.
    friend class OopRecorder;
    enum {
      _log_cache_size = 9,
      _cache_size = (1<<_log_cache_size),
      // Index entries are ints.  The LSBit is a collision indicator.
      _collision_bit_shift = 0,
      _collision_bit = 1,
      _index_shift = _collision_bit_shift+1
    };
    int _cache[_cache_size];
    // Hash a handle's address bits down to a cache slot number.
    static juint cache_index(jobject handle) {
      juint ci = (int) (intptr_t) handle;
      ci ^= ci >> (BitsPerByte*2);
      ci += ci >> (BitsPerByte*1);
      return ci & (_cache_size-1);
    }
    int* cache_location(jobject handle) {
      return &_cache[ cache_index(handle) ];
    }
    static bool cache_location_collision(int* cloc) {
      return ((*cloc) & _collision_bit) != 0;
    }
    static int cache_location_index(int* cloc) {
      return (*cloc) >> _index_shift;
    }
    // Store 'index' in the slot; sets the collision bit if the slot
    // already held a different non-zero entry.
    static void set_cache_location_index(int* cloc, int index) {
      int cval0 = (*cloc);
      int cval1 = (index << _index_shift);
      if (cval0 != 0 && cval1 != cval0)  cval1 += _collision_bit;
      (*cloc) = cval1;
    }
    IndexCache();
  };

  // Helper function; returns false for NULL or Universe::non_oop_word().
  inline bool is_real_jobject(jobject h);

  void maybe_initialize();
  int add_handle(jobject h, bool make_findable);

  enum { null_index = 0, first_index = 1, index_cache_threshold = 20 };

  GrowableArray<jobject>* _handles;   // ordered list (first is always NULL)
  GrowableArray<int>*     _no_finds;  // all unfindable indexes; usually empty
  IndexCache*             _indexes;   // map: jobject -> its probable index
  Arena*                  _arena;     // storage for the arrays, or NULL
  bool                    _complete;  // set by oop_size(); blocks further adds

#ifdef ASSERT
  static int _find_index_calls, _hit_indexes, _missed_indexes;
#endif
};
|
61
hotspot/src/share/vm/code/pcDesc.cpp
Normal file
61
hotspot/src/share/vm/code/pcDesc.cpp
Normal file
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_pcDesc.cpp.incl"
|
||||
|
||||
// Record the mapping from a pc offset to its debug-info decode offsets.
PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset)
  : _pc_offset(pc_offset),
    _scope_decode_offset(scope_decode_offset),
    _obj_decode_offset(obj_decode_offset) {
}
|
||||
|
||||
// Translate the stored offset back into an absolute pc within 'code'.
address PcDesc::real_pc(const nmethod* code) const {
  address base = code->instructions_begin();
  return base + pc_offset();
}
|
||||
|
||||
// Print this PcDesc and the scope chain it maps to.  No-op in PRODUCT.
void PcDesc::print(nmethod* code) {
#ifndef PRODUCT
  ResourceMark rm;
  // Use PTR_FORMAT for the pc: "0x%lx" truncates pointers on LLP64
  // platforms and is inconsistent with ScopeDesc::print_on().
  tty->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x):", real_pc(code), pc_offset());

  if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
    return;   // sentinel record: no scope information to walk
  }

  // Walk the scope chain from innermost to outermost frame.
  for (ScopeDesc* sd = code->scope_desc_at(real_pc(code));
       sd != NULL;
       sd = sd->sender()) {
    tty->print(" ");
    sd->method()->print_short_name(tty);
    tty->print(" @%d", sd->bci());
    tty->cr();
  }
#endif
}
|
||||
|
||||
// Verification hook; checking is not implemented yet, so this always
// reports success.
bool PcDesc::verify(nmethod* code) {
  //Unimplemented();
  return true;
}
|
61
hotspot/src/share/vm/code/pcDesc.hpp
Normal file
61
hotspot/src/share/vm/code/pcDesc.hpp
Normal file
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// PcDescs map a physical PC (given as offset from start of nmethod) to
|
||||
// the corresponding source scope and byte code index.
|
||||
|
||||
class nmethod;
|
||||
|
||||
class PcDesc VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  int _pc_offset;           // offset from start of nmethod
  int _scope_decode_offset; // offset for scope in nmethod
  int _obj_decode_offset;   // offset of the object-pool debug info

 public:
  int pc_offset() const { return _pc_offset; }
  int scope_decode_offset() const { return _scope_decode_offset; }
  int obj_decode_offset() const { return _obj_decode_offset; }

  void set_pc_offset(int x) { _pc_offset = x; }
  void set_scope_decode_offset(int x) { _scope_decode_offset = x; }
  void set_obj_decode_offset(int x) { _obj_decode_offset = x; }

  // Constructor (only used for static in nmethod.cpp)
  // Also used by ScopeDesc::sender()
  PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset);

  enum {
    // upper and lower exclusive limits of real offsets:
    lower_offset_limit = -1,
    upper_offset_limit = (unsigned int)-1 >> 1
  };

  // Returns the real pc
  address real_pc(const nmethod* code) const;

  void print(nmethod* code);
  bool verify(nmethod* code);
};
|
1188
hotspot/src/share/vm/code/relocInfo.cpp
Normal file
1188
hotspot/src/share/vm/code/relocInfo.cpp
Normal file
File diff suppressed because it is too large
Load diff
1328
hotspot/src/share/vm/code/relocInfo.hpp
Normal file
1328
hotspot/src/share/vm/code/relocInfo.hpp
Normal file
File diff suppressed because it is too large
Load diff
237
hotspot/src/share/vm/code/scopeDesc.cpp
Normal file
237
hotspot/src/share/vm/code/scopeDesc.cpp
Normal file
|
@ -0,0 +1,237 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
# include "incls/_precompiled.incl"
|
||||
# include "incls/_scopeDesc.cpp.incl"
|
||||
|
||||
|
||||
// Decode the scope at 'decode_offset' within 'code'.  The object pool is
// decoded first from 'obj_decode_offset'.  NOTE: assignment order matters
// here -- decode_object_values() and decode_body() both read _code, and
// _code is declared after _objects, so an initializer list would run them
// against an uninitialized field.
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) {
  _code          = code;
  _decode_offset = decode_offset;
  _objects       = decode_object_values(obj_decode_offset);
  decode_body();
}
|
||||
|
||||
// Same as the three-argument constructor but with no object pool
// (serialized_null makes decode_object_values() yield NULL).
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) {
  _code          = code;
  _decode_offset = decode_offset;
  _objects       = decode_object_values(DebugInformationRecorder::serialized_null);
  decode_body();
}
|
||||
|
||||
|
||||
// Build the sender (caller) scope of 'parent', sharing its nmethod and
// its already-decoded object pool.
ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
  _code          = parent->_code;
  _decode_offset = parent->_sender_decode_offset;
  _objects       = parent->_objects;
  decode_body();
}
|
||||
|
||||
|
||||
// Decode this scope's header record: sender offset, method, bci, and the
// decode offsets of locals/expressions/monitors.  A serialized_null
// decode offset marks a sentinel record, for which a reasonable top
// frame is synthesized instead of being read from the stream.
void ScopeDesc::decode_body() {
  if (decode_offset() == DebugInformationRecorder::serialized_null) {
    // This is a sentinel record, which is only relevant to
    // approximate queries.  Decode a reasonable frame.
    _sender_decode_offset = DebugInformationRecorder::serialized_null;
    _method = methodHandle(_code->method());
    _bci = InvocationEntryBci;
    _locals_decode_offset = DebugInformationRecorder::serialized_null;
    _expressions_decode_offset = DebugInformationRecorder::serialized_null;
    _monitors_decode_offset = DebugInformationRecorder::serialized_null;
  } else {
    // decode header
    DebugInfoReadStream* stream  = stream_at(decode_offset());

    // NOTE: the reads below must stay in this (serialization) order.
    _sender_decode_offset = stream->read_int();
    _method = methodHandle((methodOop) stream->read_oop());
    _bci    = stream->read_bci();
    // decode offsets for body and sender
    _locals_decode_offset      = stream->read_int();
    _expressions_decode_offset = stream->read_int();
    _monitors_decode_offset    = stream->read_int();
  }
}
|
||||
|
||||
|
||||
// Decode a length-prefixed list of ScopeValues starting at
// 'decode_offset'; returns NULL for the serialized_null sentinel.
GrowableArray<ScopeValue*>* ScopeDesc::decode_scope_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) {
    return NULL;
  }
  DebugInfoReadStream* stream = stream_at(decode_offset);
  const int count = stream->read_int();
  GrowableArray<ScopeValue*>* values = new GrowableArray<ScopeValue*>(count);
  for (int i = 0; i < count; i++) {
    values->push(ScopeValue::read_from(stream));
  }
  return values;
}
|
||||
|
||||
// Decode the object pool at 'decode_offset'; returns NULL for the
// serialized_null sentinel.
GrowableArray<ScopeValue*>* ScopeDesc::decode_object_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) return NULL;
  GrowableArray<ScopeValue*>* result = new GrowableArray<ScopeValue*>();
  // Note: unlike decode_scope_values(), the (still empty) result array is
  // handed to the stream up front -- presumably so values decoded from the
  // stream can refer back to earlier pool entries; see DebugInfoReadStream.
  DebugInfoReadStream* stream = new DebugInfoReadStream(_code, decode_offset, result);
  int length = stream->read_int();
  for (int index = 0; index < length; index++) {
    result->push(ScopeValue::read_from(stream));
  }
  assert(result->length() == length, "inconsistent debug information");
  return result;
}
|
||||
|
||||
|
||||
// Decode a length-prefixed list of MonitorValues starting at
// 'decode_offset'; returns NULL for the serialized_null sentinel.
GrowableArray<MonitorValue*>* ScopeDesc::decode_monitor_values(int decode_offset) {
  if (decode_offset == DebugInformationRecorder::serialized_null) {
    return NULL;
  }
  DebugInfoReadStream* stream = stream_at(decode_offset);
  const int count = stream->read_int();
  GrowableArray<MonitorValue*>* monitors = new GrowableArray<MonitorValue*>(count);
  for (int i = 0; i < count; i++) {
    monitors->push(new MonitorValue(stream));
  }
  return monitors;
}
|
||||
|
||||
// Create a read stream positioned at 'decode_offset', sharing this
// scope's object pool (presumably so encoded object references resolve;
// see DebugInfoReadStream -- confirm there).
DebugInfoReadStream* ScopeDesc::stream_at(int decode_offset) const {
  return new DebugInfoReadStream(_code, decode_offset, _objects);
}

// Decode and return the local variables (NULL if none were recorded).
GrowableArray<ScopeValue*>* ScopeDesc::locals() {
  return decode_scope_values(_locals_decode_offset);
}

// Decode and return the expression stack (NULL if none was recorded).
GrowableArray<ScopeValue*>* ScopeDesc::expressions() {
  return decode_scope_values(_expressions_decode_offset);
}

// Decode and return the monitors (NULL if none were recorded).
GrowableArray<MonitorValue*>* ScopeDesc::monitors() {
  return decode_monitor_values(_monitors_decode_offset);
}

// Return the (already decoded) object pool for this scope chain.
GrowableArray<ScopeValue*>* ScopeDesc::objects() {
  return _objects;
}

// A scope is topmost when it carries no sender record.
bool ScopeDesc::is_top() const {
  return _sender_decode_offset == DebugInformationRecorder::serialized_null;
}

// Decode and return the caller scope, or NULL at the top of the chain.
ScopeDesc* ScopeDesc::sender() const {
  if (is_top()) return NULL;
  return new ScopeDesc(this);
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// Print "  <method> @bci (line N)" for this scope to 'st'.
void ScopeDesc::print_value_on(outputStream* st) const {
  // Fixed: the leading indent previously went to tty instead of 'st',
  // splitting the line across two streams whenever st != tty.
  st->print(" ");
  method()()->print_short_name(st);
  int lineno = method()->line_number_from_bci(bci());
  if (lineno != -1) {
    st->print_cr("@%d (line %d)", bci(), lineno);
  } else {
    st->print_cr("@%d", bci());
  }
}
|
||||
|
||||
// Print without a pc header; see the two-argument overload.
void ScopeDesc::print_on(outputStream* st) const {
  print_on(st, NULL);
}
|
||||
|
||||
// Print the full scope -- header, wizard-mode decode offsets, locals,
// expression stack, monitors and (under COMPILER2) escape-analysis
// objects -- to 'st'.  'pd' supplies the pc for the header line and may
// be NULL.
// Fixed: the header, the section titles, and the COMPILER2 object dump
// previously wrote to tty while the element bodies wrote to 'st',
// scattering the output across two streams whenever st != tty.
void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const {
  // header
  if (pd != NULL) {
    st->print_cr("ScopeDesc(pc=" PTR_FORMAT " offset=%x):", pd->real_pc(_code), pd->pc_offset());
  }

  print_value_on(st);
  // decode offsets
  if (WizardMode) {
    st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin());
    st->print_cr(" offset: %d", _decode_offset);
    st->print_cr(" bci: %d", bci());
    st->print_cr(" locals: %d", _locals_decode_offset);
    st->print_cr(" stack: %d", _expressions_decode_offset);
    st->print_cr(" monitor: %d", _monitors_decode_offset);
    st->print_cr(" sender: %d", _sender_decode_offset);
  }
  // locals
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->locals();
    if (l != NULL) {
      st->print_cr(" Locals");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - l%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // expressions
  { GrowableArray<ScopeValue*>* l = ((ScopeDesc*) this)->expressions();
    if (l != NULL) {
      st->print_cr(" Expression stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }
  // monitors
  { GrowableArray<MonitorValue*>* l = ((ScopeDesc*) this)->monitors();
    if (l != NULL) {
      st->print_cr(" Monitor stack");
      for (int index = 0; index < l->length(); index++) {
        st->print(" - @%d: ", index);
        l->at(index)->print_on(st);
        st->cr();
      }
    }
  }

#ifdef COMPILER2
  if (DoEscapeAnalysis && is_top() && _objects != NULL) {
    st->print_cr("Objects");
    for (int i = 0; i < _objects->length(); i++) {
      ObjectValue* sv = (ObjectValue*) _objects->at(i);
      st->print(" - %d: ", sv->id());
      sv->print_fields_on(st);
      st->cr();
    }
  }
#endif // COMPILER2
}
|
||||
|
||||
#endif
|
||||
|
||||
// Consistency checks for a decoded scope.  Currently only the method
// type is checked; the expression-stack check below is disabled.
void ScopeDesc::verify() {
  ResourceMark rm;
  guarantee(method()->is_method(), "type check");

  // check if we have any illegal elements on the expression stack
  // (the guarantee is commented out, so this loop is a placeholder)
  { GrowableArray<ScopeValue*>* l = expressions();
    if (l != NULL) {
      for (int index = 0; index < l->length(); index++) {
       //guarantee(!l->at(index)->is_illegal(), "expression element cannot be illegal");
      }
    }
  }
}
|
123
hotspot/src/share/vm/code/scopeDesc.hpp
Normal file
123
hotspot/src/share/vm/code/scopeDesc.hpp
Normal file
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// SimpleScopeDesc is used when all you need to extract from
|
||||
// a given pc,nmethod pair is a methodOop and a bci. This is
|
||||
// quite a bit faster than allocating a full ScopeDesc, but
|
||||
// very limited in abilities.
|
||||
|
||||
class SimpleScopeDesc : public StackObj {
 private:
  methodOop _method;   // innermost method active at the given pc
  int _bci;            // bytecode index within that method

 public:
  // Decode just the innermost (method, bci) pair for 'pc' in 'code'.
  SimpleScopeDesc(nmethod* code, address pc) {
    PcDesc* pc_desc = code->pc_desc_at(pc);
    assert(pc_desc != NULL, "Must be able to find matching PcDesc");
    DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
    int ignore_sender = buffer.read_int();   // skip the sender offset
    _method = methodOop(buffer.read_oop());
    _bci = buffer.read_bci();
  }

  methodOop method() { return _method; }
  int bci() { return _bci; }
};
|
||||
|
||||
// ScopeDescs contain the information that makes source-level debugging of
|
||||
// nmethods possible; each scopeDesc describes a method activation
|
||||
|
||||
class ScopeDesc : public ResourceObj {
 public:
  // Constructor
  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset);

  // Calls above, giving default value of "serialized_null" to the
  // "obj_decode_offset" argument.  (We don't use a default argument to
  // avoid a .hpp-.hpp dependency.)
  ScopeDesc(const nmethod* code, int decode_offset);

  // JVM state
  methodHandle method() const { return _method; }
  int bci() const { return _bci; }

  // Decoded debug information; each may be NULL if nothing was recorded.
  GrowableArray<ScopeValue*>* locals();
  GrowableArray<ScopeValue*>* expressions();
  GrowableArray<MonitorValue*>* monitors();
  GrowableArray<ScopeValue*>* objects();

  // Stack walking, returns NULL if this is the outermost scope.
  ScopeDesc* sender() const;

  // Returns where the scope was decoded
  int decode_offset() const { return _decode_offset; }

  // Tells whether sender() returns NULL
  bool is_top() const;
  // Tells whether sd is equal to this
  bool is_equal(ScopeDesc* sd) const;

 private:
  // Alternative constructor: builds the sender scope of 'parent'.
  ScopeDesc(const ScopeDesc* parent);

  // JVM state
  methodHandle _method;
  int          _bci;

  // Decoding offsets
  int _decode_offset;
  int _sender_decode_offset;
  int _locals_decode_offset;
  int _expressions_decode_offset;
  int _monitors_decode_offset;

  // Object pool
  GrowableArray<ScopeValue*>* _objects;

  // Nmethod information
  const nmethod* _code;

  // Decoding operations
  void decode_body();
  GrowableArray<ScopeValue*>* decode_scope_values(int decode_offset);
  GrowableArray<MonitorValue*>* decode_monitor_values(int decode_offset);
  GrowableArray<ScopeValue*>* decode_object_values(int decode_offset);

  // Create a read stream at 'decode_offset', sharing _objects.
  DebugInfoReadStream* stream_at(int decode_offset) const;


 public:
  // Verification
  void verify();

#ifndef PRODUCT
 public:
  // Printing support
  void print_on(outputStream* st) const;
  void print_on(outputStream* st, PcDesc* pd) const;
  void print_value_on(outputStream* st) const;
#endif
};
|
254
hotspot/src/share/vm/code/stubs.cpp
Normal file
254
hotspot/src/share/vm/code/stubs.cpp
Normal file
|
@ -0,0 +1,254 @@
|
|||
/*
|
||||
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_stubs.cpp.incl"
|
||||
|
||||
|
||||
// Implementation of StubQueue
|
||||
//
|
||||
// Standard wrap-around queue implementation; the queue dimensions
|
||||
// are specified by the _queue_begin & _queue_end indices. The queue
|
||||
// can be in two states (transparent to the outside):
|
||||
//
|
||||
// a) contiguous state: all queue entries in one block (or empty)
|
||||
//
|
||||
// Queue: |...|XXXXXXX|...............|
|
||||
// ^0 ^begin ^end ^size = limit
|
||||
// |_______|
|
||||
// one block
|
||||
//
|
||||
// b) non-contiguous state: queue entries in two blocks
|
||||
//
|
||||
// Queue: |XXX|.......|XXXXXXX|.......|
|
||||
// ^0 ^end ^begin ^limit ^size
|
||||
// |___| |_______|
|
||||
// 1st block 2nd block
|
||||
//
|
||||
// In the non-contiguous state, the wrap-around point is
|
||||
// indicated via the _buffer_limit index since the last
|
||||
// queue entry may not fill up the queue completely in
|
||||
// which case we need to know where the 2nd block's end
|
||||
// is to do the proper wrap-around. When removing the
|
||||
// last entry of the 2nd block, _buffer_limit is reset
|
||||
// to _buffer_size.
|
||||
//
|
||||
// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
|
||||
// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
|
||||
|
||||
|
||||
// Allocates the stub buffer (a BufferBlob in the code cache) and starts
// out with an empty, fully-usable queue. Allocation failure is fatal.
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  // Round the requested size up to a double-word multiple before allocating.
  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, size);
  if( blob == NULL ) vm_exit_out_of_memory1(size, "CodeCache: no room for %s", name);
  _stub_interface  = stub_interface;
  // Initially the whole buffer is usable (_buffer_limit == _buffer_size)
  // and the queue is empty (begin == end == 0).
  _buffer_size     = blob->instructions_size();
  _buffer_limit    = blob->instructions_size();
  _stub_buffer     = blob->instructions_begin();
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  // Make this queue visible to queues_do() iteration.
  register_queue(this);
}
|
||||
|
||||
|
||||
StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed so nothing needs to be done here.
  //       If we want to implement the destructor, we need to release the BufferBlob
  //       allocated in the constructor (i.e., we need to keep it around or look it
  //       up via CodeCache::find_blob(...).
  Unimplemented();
}
|
||||
|
||||
|
||||
// Returns the queued stub whose code section covers pc, or NULL if pc
// does not fall into this queue's buffer or into any stub's code.
Stub* StubQueue::stub_containing(address pc) const {
  if (!contains(pc)) return NULL;     // fast reject: pc outside our buffer
  // Linear scan over the queued stubs in queue order.
  for (Stub* candidate = first(); candidate != NULL; candidate = next(candidate)) {
    if (stub_contains(candidate, pc)) {
      return candidate;
    }
  }
  return NULL;
}
|
||||
|
||||
|
||||
// One-shot allocation: requests exactly code_size bytes of code space and,
// if the request succeeds, immediately commits it (releasing the queue lock).
Stub* StubQueue::request_committed(int code_size) {
  Stub* stub = request(code_size);
  if (stub != NULL) {
    commit(code_size);
  }
  return stub;
}
|
||||
|
||||
|
||||
// Reserves space for a stub of (at most) requested_code_size code bytes.
// On success the queue lock is HELD on return and must be released by the
// matching commit(); on failure (NULL) the lock has been released here.
Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock();
  Stub* s = current_stub();
  // Convert code size to total stub size and align it.
  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        stub_initialize(s, requested_size);
        return s;   // NOTE: returns with the lock still held (see commit())
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  // Re-check: available_space() may have changed after the wrap above.
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    // Re-read the allocation point: _queue_end may have wrapped to 0 above.
    s = current_stub();
    stub_initialize(s, requested_size);
    return s;   // NOTE: returns with the lock still held (see commit())
  }
  // Not enough space left => no commit() will follow, so unlock here.
  if (_mutex != NULL) _mutex->unlock();
  return NULL;
}
|
||||
|
||||
|
||||
// Finalizes the stub reserved by the preceding request(): shrinks it to its
// committed size, makes it part of the queue, and releases the queue lock
// that request() left held.
void StubQueue::commit(int committed_code_size) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
  Stub* s = current_stub();
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  // Re-initialize with the final (possibly smaller) size.
  stub_initialize(s, committed_size);
  _queue_end += committed_size;
  _number_of_stubs++;
  if (_mutex != NULL) _mutex->unlock();
  debug_only(stub_verify(s);)
}
|
||||
|
||||
|
||||
// Removes the oldest stub from the queue, maintaining the wrap-around
// invariants documented at the top of this file.
void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  // Give the stub a chance to release its own resources before removal.
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin  = 0;
    _queue_end    = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    _buffer_limit = _buffer_size;
    _queue_begin = 0;
  }
  _number_of_stubs--;
}
|
||||
|
||||
|
||||
void StubQueue::remove_first(int n) {
|
||||
int i = MIN2(n, number_of_stubs());
|
||||
while (i-- > 0) remove_first();
|
||||
}
|
||||
|
||||
|
||||
void StubQueue::remove_all(){
|
||||
debug_only(verify();)
|
||||
remove_first(number_of_stubs());
|
||||
assert(number_of_stubs() == 0, "sanity check");
|
||||
}
|
||||
|
||||
|
||||
enum { StubQueueLimit = 10 }; // there are only a few in the world
// Registry of all live StubQueues, filled by register_queue();
// a NULL entry marks a free slot.
static StubQueue* registered_stub_queues[StubQueueLimit];
|
||||
|
||||
// Records sq in the first free slot of the global registry so that
// queues_do() can iterate over it. Overflowing StubQueueLimit is a bug.
void StubQueue::register_queue(StubQueue* sq) {
  for (int slot = 0; slot < StubQueueLimit; slot++) {
    if (registered_stub_queues[slot] != NULL) continue;
    registered_stub_queues[slot] = sq;
    return;
  }
  ShouldNotReachHere();
}
|
||||
|
||||
|
||||
void StubQueue::queues_do(void f(StubQueue* sq)) {
|
||||
for (int i = 0; i < StubQueueLimit; i++) {
|
||||
if (registered_stub_queues[i] != NULL) {
|
||||
f(registered_stub_queues[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Applies f to every stub currently in the queue, holding the queue lock
// for the duration of the iteration.
void StubQueue::stubs_do(void f(Stub* s)) {
  debug_only(verify();)
  MutexLockerEx lock(_mutex);
  for (Stub* s = first(); s != NULL; s = next(s)) f(s);
}
|
||||
|
||||
|
||||
// Checks all queue invariants (bounds, alignment, limit/size relationship,
// per-stub consistency) under the queue lock. No-op before initialization.
void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == NULL) return;
  MutexLockerEx lock(_mutex);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "buffer size must be positive");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
  guarantee(0 <= _queue_begin && _queue_begin < _buffer_limit, "_queue_begin out of bounds");
  guarantee(0 <= _queue_end && _queue_end <= _buffer_limit, "_queue_end out of bounds");
  // verify alignment
  guarantee(_buffer_size % CodeEntryAlignment == 0, "_buffer_size not aligned");
  guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
  guarantee(_queue_begin % CodeEntryAlignment == 0, "_queue_begin not aligned");
  guarantee(_queue_end % CodeEntryAlignment == 0, "_queue_end not aligned");
  // verify buffer limit/size relationship
  if (is_contiguous()) {
    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
  }
  // verify contents: each stub verifies itself, and the count must match
  int n = 0;
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_verify(s);
    n++;
  }
  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
  // begin == end is only legal for an empty queue
  guarantee(_queue_begin != _queue_end || n == 0, "buffer indices must be the same");
}
|
||||
|
||||
|
||||
void StubQueue::print() {
|
||||
MutexLockerEx lock(_mutex);
|
||||
for (Stub* s = first(); s != NULL; s = next(s)) {
|
||||
stub_print(s);
|
||||
}
|
||||
}
|
208
hotspot/src/share/vm/code/stubs.hpp
Normal file
208
hotspot/src/share/vm/code/stubs.hpp
Normal file
|
@ -0,0 +1,208 @@
|
|||
/*
|
||||
* Copyright 1997-2002 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// The classes in this file provide a simple framework for the
|
||||
// management of little pieces of machine code - or stubs -
|
||||
// created on the fly and frequently discarded. In this frame-
|
||||
// work stubs are stored in a queue.
|
||||
|
||||
|
||||
// Stub serves as abstract base class. A concrete stub
|
||||
// implementation is a subclass of Stub, implementing
|
||||
// all (non-virtual!) functions required sketched out
|
||||
// in the Stub class.
|
||||
//
|
||||
// A concrete stub layout may look like this (both data
|
||||
// and code sections could be empty as well):
|
||||
//
|
||||
// ________
|
||||
// stub -->| | <--+
|
||||
// | data | |
|
||||
// |________| |
|
||||
// code_begin -->| | |
|
||||
// | | |
|
||||
// | code | | size
|
||||
// | | |
|
||||
// |________| |
|
||||
// code_end -->| | |
|
||||
// | data | |
|
||||
// |________| |
|
||||
// <--+
|
||||
|
||||
|
||||
// Compile-time-polymorphic base class: concrete stubs shadow these
// non-virtual functions, and calls are routed through a matching
// StubInterface (see DEF_STUB_INTERFACE below). The bodies here are
// placeholders that must never be reached directly.
class Stub VALUE_OBJ_CLASS_SPEC {
 public:
  // Initialization/finalization
  void    initialize(int size)                   { ShouldNotCallThis(); }                // called to initialize/specify the stub's size
  void    finalize()                             { ShouldNotCallThis(); }                // called before the stub is deallocated

  // General info/converters
  int     size() const                           { ShouldNotCallThis(); return 0; }      // must return the size provided by initialize
  static  int code_size_to_size(int code_size)   { ShouldNotCallThis(); return 0; }      // computes the size given the code size

  // Code info
  address code_begin() const                     { ShouldNotCallThis(); return NULL; }   // points to the first byte of the code
  address code_end() const                       { ShouldNotCallThis(); return NULL; }   // points to the first byte after the code

  // Debugging
  void    verify()                               { ShouldNotCallThis(); }                // verifies the Stub
  void    print()                                { ShouldNotCallThis(); }                // prints some information about the stub
};
|
||||
|
||||
|
||||
// A stub interface defines the interface between a stub queue
|
||||
// and the stubs it queues. In order to avoid a vtable and
|
||||
// (and thus the extra word) in each stub, a concrete stub
|
||||
// interface object is created and associated with a stub
|
||||
// buffer which in turn uses the stub interface to interact
|
||||
// with its stubs.
|
||||
//
|
||||
// StubInterface serves as an abstract base class. A concrete
|
||||
// stub interface implementation is a subclass of StubInterface,
|
||||
// forwarding its virtual function calls to non-virtual calls
|
||||
// of the concrete stub (see also macro below). There's exactly
|
||||
// one stub interface instance required per stub queue.
|
||||
|
||||
// Virtual dispatcher for a StubQueue's stubs: one instance per queue,
// so the per-stub vtable pointer is avoided (see the comment above).
class StubInterface: public CHeapObj {
 public:
  // Initialization/finalization
  virtual void    initialize(Stub* self, int size)       = 0; // called after creation (called twice if allocated via (request, commit))
  virtual void    finalize(Stub* self)                   = 0; // called before deallocation

  // General info/converters
  virtual int     size(Stub* self) const                 = 0; // the total size of the stub in bytes (must be a multiple of CodeEntryAlignment)
  virtual int     code_size_to_size(int code_size) const = 0; // computes the total stub size in bytes given the code size in bytes

  // Code info
  virtual address code_begin(Stub* self) const           = 0; // points to the first code byte
  virtual address code_end(Stub* self) const             = 0; // points to the first byte after the code

  // Debugging
  virtual void    verify(Stub* self)                     = 0; // verifies the stub
  virtual void    print(Stub* self)                      = 0; // prints information about the stub
};
|
||||
|
||||
|
||||
// DEF_STUB_INTERFACE is used to create a concrete stub interface
|
||||
// class, forwarding stub interface calls to the corresponding
|
||||
// stub calls.
|
||||
|
||||
#define DEF_STUB_INTERFACE(stub)                           \
  class stub##Interface: public StubInterface {            \
   private:                                                \
    /* concrete stubs are not polymorphic: plain cast */   \
    static stub*    cast(Stub* self)                       { return (stub*)self; }                 \
                                                           \
   public:                                                 \
    /* Initialization/finalization */                      \
    virtual void    initialize(Stub* self, int size)       { cast(self)->initialize(size); }       \
    virtual void    finalize(Stub* self)                   { cast(self)->finalize(); }             \
                                                           \
    /* General info */                                     \
    virtual int     size(Stub* self) const                 { return cast(self)->size(); }          \
    virtual int     code_size_to_size(int code_size) const { return stub::code_size_to_size(code_size); } \
                                                           \
    /* Code info */                                        \
    virtual address code_begin(Stub* self) const           { return cast(self)->code_begin(); }    \
    virtual address code_end(Stub* self) const             { return cast(self)->code_end(); }      \
                                                           \
    /* Debugging */                                        \
    virtual void    verify(Stub* self)                     { cast(self)->verify(); }               \
    virtual void    print(Stub* self)                      { cast(self)->print(); }                \
  };
|
||||
|
||||
|
||||
// A StubQueue maintains a queue of stubs.
|
||||
// Note: All sizes (spaces) are given in bytes.
|
||||
|
||||
class StubQueue: public CHeapObj {
  friend class VMStructs;
 private:
  StubInterface* _stub_interface;                // the interface prototype
  address        _stub_buffer;                   // where all stubs are stored
  int            _buffer_size;                   // the buffer size in bytes
  int            _buffer_limit;                  // the (byte) index of the actual buffer limit (_buffer_limit <= _buffer_size)
  int            _queue_begin;                   // the (byte) index of the first queue entry (word-aligned)
  int            _queue_end;                     // the (byte) index of the first entry after the queue (word-aligned)
  int            _number_of_stubs;               // the number of buffered stubs
  Mutex* const   _mutex;                         // the lock used for a (request, commit) transaction

  void  check_index(int i) const                 { assert(0 <= i && i < _buffer_limit && i % CodeEntryAlignment == 0, "illegal index"); }
  bool  is_contiguous() const                    { return _queue_begin <= _queue_end; }
  int   index_of(Stub* s) const                  { int i = (address)s - _stub_buffer; check_index(i); return i; }
  Stub* stub_at(int i) const                     { check_index(i); return (Stub*)(_stub_buffer + i); }
  Stub* current_stub() const                     { return stub_at(_queue_end); }

  // Stub functionality accessed via interface
  void  stub_initialize(Stub* s, int size)       { assert(size % CodeEntryAlignment == 0, "size not aligned"); _stub_interface->initialize(s, size); }
  void  stub_finalize(Stub* s)                   { _stub_interface->finalize(s); }
  int   stub_size(Stub* s) const                 { return _stub_interface->size(s); }
  bool  stub_contains(Stub* s, address pc) const { return _stub_interface->code_begin(s) <= pc && pc < _stub_interface->code_end(s); }
  int   stub_code_size_to_size(int code_size) const { return _stub_interface->code_size_to_size(code_size); }
  void  stub_verify(Stub* s)                     { _stub_interface->verify(s); }
  void  stub_print(Stub* s)                      { _stub_interface->print(s); }

  static void register_queue(StubQueue*);

 public:
  StubQueue(StubInterface* stub_interface, int buffer_size, Mutex* lock,
            const char* name);
  ~StubQueue();

  // General queue info
  // One byte is kept free so a full queue can be distinguished from an
  // empty one (hence the -1 in total_space()/available_space()).
  bool  is_empty() const                         { return _queue_begin == _queue_end; }
  int   total_space() const                      { return _buffer_size - 1; }
  int   available_space() const                  { int d = _queue_begin - _queue_end - 1; return d < 0 ? d + _buffer_size : d; }
  int   used_space() const                       { return total_space() - available_space(); }
  int   number_of_stubs() const                  { return _number_of_stubs; }
  bool  contains(address pc) const               { return _stub_buffer <= pc && pc < _stub_buffer + _buffer_limit; }
  Stub* stub_containing(address pc) const;
  address code_start() const                     { return _stub_buffer; }
  address code_end() const                       { return _stub_buffer + _buffer_limit; }

  // Stub allocation (atomic transactions)
  Stub* request_committed(int code_size);        // request a stub that provides exactly code_size space for code
  Stub* request(int requested_code_size);        // request a stub with a (maximum) code space - locks the queue
  void  commit (int committed_code_size);        // commit the previously requested stub - unlocks the queue

  // Stub deallocation
  void  remove_first();                          // remove the first stub in the queue
  void  remove_first(int n);                     // remove the first n stubs in the queue
  void  remove_all();                            // remove all stubs in the queue

  // Iteration
  static void queues_do(void f(StubQueue* s));   // call f with each StubQueue
  void  stubs_do(void f(Stub* s));               // call f with all stubs
  Stub* first() const                            { return number_of_stubs() > 0 ? stub_at(_queue_begin) : NULL; }
  Stub* next(Stub* s) const                      { int i = index_of(s) + stub_size(s);
                                                   // wrap around at the buffer limit; NULL marks the queue end
                                                   if (i == _buffer_limit) i = 0;
                                                   return (i == _queue_end) ? NULL : stub_at(i);
                                                 }

  address stub_code_begin(Stub* s) const         { return _stub_interface->code_begin(s); }
  address stub_code_end(Stub* s) const           { return _stub_interface->code_end(s); }

  // Debugging/printing
  void  verify();                                // verifies the stub queue
  void  print();                                 // prints information about the stub queue
};
|
51
hotspot/src/share/vm/code/vmreg.cpp
Normal file
51
hotspot/src/share/vm/code/vmreg.cpp
Normal file
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_vmreg.cpp.incl"
|
||||
|
||||
// First VMReg value that could refer to a stack slot
// (the "+1 & ~1" rounds the register count up to an even value)
VMReg VMRegImpl::stack0 = (VMReg)(intptr_t)((ConcreteRegisterImpl::number_of_registers + 1) & ~1);

// VMRegs are 4 bytes wide on all platforms
const int VMRegImpl::stack_slot_size = 4;
const int VMRegImpl::slots_per_word = wordSize / stack_slot_size;

const int VMRegImpl::register_count = ConcreteRegisterImpl::number_of_registers;
// Register names (filled in by set_regName(), declared in vmreg.hpp)
const char *VMRegImpl::regName[ConcreteRegisterImpl::number_of_registers];
|
||||
|
||||
void VMRegImpl::print() {
|
||||
#ifndef PRODUCT
|
||||
if( is_reg() ) {
|
||||
assert( VMRegImpl::regName[value()], "" );
|
||||
tty->print("%s",VMRegImpl::regName[value()]);
|
||||
} else if (is_stack()) {
|
||||
int stk = value() - stack0->value();
|
||||
tty->print("[%d]", stk*4);
|
||||
} else {
|
||||
tty->print("BAD!");
|
||||
}
|
||||
#endif // PRODUCT
|
||||
}
|
182
hotspot/src/share/vm/code/vmreg.hpp
Normal file
182
hotspot/src/share/vm/code/vmreg.hpp
Normal file
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
//------------------------------VMReg------------------------------------------
|
||||
// The VM uses 'unwarped' stack slots; the compiler uses 'warped' stack slots.
|
||||
// Register numbers below VMRegImpl::stack0 are the same for both. Register
|
||||
// numbers above stack0 are either warped (in the compiler) or unwarped
|
||||
// (in the VM). Unwarped numbers represent stack indices, offsets from
|
||||
// the current stack pointer. Warped numbers are required during compilation
|
||||
// when we do not yet know how big the frame will be.
|
||||
|
||||
class VMRegImpl;
|
||||
typedef VMRegImpl* VMReg;
|
||||
|
||||
// Note: a VMReg never exists as a real object - the register/slot number is
// encoded in the pointer value itself (see value() and as_VMReg()). All the
// "member" functions below just interpret `this` as that number.
class VMRegImpl {
// friend class OopMap;
friend class VMStructs;
friend class OptoReg;
// friend class Location;
private:
  enum {
    BAD = -1    // sentinel for an invalid/unused register
  };

  static VMReg stack0;
  // Names for registers
  static const char *regName[];
  static const int register_count;

public:

  static VMReg as_VMReg(int val, bool bad_ok = false) { assert(val > BAD || bad_ok, "invalid"); return (VMReg) (intptr_t) val; }

  const char* name() {
    if (is_reg()) {
      return regName[value()];
    } else if (!is_valid()) {
      return "BAD";
    } else {
      // shouldn't really be called with stack
      return "STACKED REG";
    }
  }
  static VMReg Bad() { return (VMReg) (intptr_t) BAD; }
  bool is_valid() { return ((intptr_t) this) != BAD; }
  // Values at or above stack0 denote stack slots; below are machine registers.
  bool is_stack() { return (intptr_t) this >= (intptr_t) stack0; }
  bool is_reg()   { return is_valid() && !is_stack(); }

  // A concrete register is a value that returns true for is_reg() and is
  // also a register you could use in the assembler. On machines with
  // 64bit registers only one half of the VMReg (and OptoReg) is considered
  // concrete.
  bool is_concrete();

  // VMRegs are 4 bytes wide on all platforms
  static const int stack_slot_size;
  static const int slots_per_word;

  // This really ought to check that the register is "real" in the sense that
  // we don't try and get the VMReg number of a physical register that doesn't
  // have an expressible part. That would be pd specific code
  VMReg next() {
    assert((is_reg() && value() < stack0->value() - 1) || is_stack(), "must be");
    return (VMReg)(intptr_t)(value() + 1);
  }
  VMReg prev() {
    assert((is_stack() && value() > stack0->value()) || (is_reg() && value() != 0), "must be");
    return (VMReg)(intptr_t)(value() - 1);
  }

  // The encoded number itself (see the class note above).
  intptr_t value() const {return (intptr_t) this; }

  void print();

  // bias a stack slot.
  // Typically used to adjust a virtual frame slots by amounts that are offset by
  // amounts that are part of the native abi. The VMReg must be a stack slot
  // and the result must be also.
  VMReg bias(int offset) {
    assert(is_stack(), "must be");
    // VMReg res = VMRegImpl::as_VMReg(value() + offset);
    VMReg res = stack2reg(reg2stack() + offset);
    assert(res->is_stack(), "must be");
    return res;
  }

  // Convert register numbers to stack slots and vice versa
  static VMReg stack2reg( int idx ) {
    return (VMReg) (intptr_t) (stack0->value() + idx);
  }

  uintptr_t reg2stack() {
    assert( is_stack(), "Not a stack-based register" );
    return value() - stack0->value();
  }

  static void set_regName();

#include "incls/_vmreg_pd.hpp.incl"

};
|
||||
|
||||
//---------------------------VMRegPair-------------------------------------------
|
||||
// Pairs of 32-bit registers for arguments.
|
||||
// SharedRuntime::java_calling_convention will overwrite the structs with
|
||||
// the calling convention's registers. VMRegImpl::Bad is returned for any
|
||||
// unused 32-bit register. This happens for the unused high half of Int
|
||||
// arguments, or for 32-bit pointers or for longs in the 32-bit sparc build
|
||||
// (which are passed to natives in low 32-bits of e.g. O0/O1 and the high
|
||||
// 32-bits of O0/O1 are set to VMRegImpl::Bad). Longs in one register & doubles
|
||||
// always return a high and a low register, as do 64-bit pointers.
|
||||
//
|
||||
class VMRegPair {
private:
  VMReg _second;    // high half (VMRegImpl::Bad() when unused)
  VMReg _first;     // low half
public:
  void set_bad (                   ) { _second=VMRegImpl::Bad(); _first=VMRegImpl::Bad(); }
  void set1    (         VMReg v   ) { _second=VMRegImpl::Bad(); _first=v; }
  void set2    (         VMReg v   ) { _second=v->next();        _first=v; }
  void set_pair( VMReg second, VMReg first ) { _second= second;  _first= first; }
  void set_ptr ( VMReg ptr ) {
#ifdef _LP64
    // 64-bit pointers occupy two adjacent 32-bit halves.
    _second = ptr->next();
#else
    _second = VMRegImpl::Bad();
#endif
    _first = ptr;
  }
  // Return true if single register, even if the pair is really just adjacent stack slots
  bool is_single_reg() {
    return (_first->is_valid()) && (_first->value() + 1 == _second->value());
  }

  // Return true if single stack based "register" where the slot alignment matches input alignment
  bool is_adjacent_on_stack(int alignment) {
    return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
  }

  // Return true if single stack based "register" where the slot alignment matches input alignment
  // NOTE(review): body is identical to is_adjacent_on_stack() above -
  // presumably one of the two is redundant; confirm intended semantics
  // against callers before consolidating.
  bool is_adjacent_aligned_on_stack(int alignment) {
    return (_first->is_stack() && (_first->value() + 1 == _second->value()) && ((_first->value() & (alignment-1)) == 0));
  }

  // Return true if single register but adjacent stack slots do not count
  bool is_single_phys_reg() {
    return (_first->is_reg() && (_first->value() + 1 == _second->value()));
  }

  VMReg second() const { return _second; }
  VMReg first()  const { return _first; }
  VMRegPair(VMReg s, VMReg f) {  _second = s;  _first= f; }
  VMRegPair(VMReg f) { _second = VMRegImpl::Bad(); _first= f; }
  VMRegPair() { _second = VMRegImpl::Bad(); _first = VMRegImpl::Bad(); }
};
|
197
hotspot/src/share/vm/code/vtableStubs.cpp
Normal file
197
hotspot/src/share/vm/code/vtableStubs.cpp
Normal file
|
@ -0,0 +1,197 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "incls/_precompiled.incl"
|
||||
#include "incls/_vtableStubs.cpp.incl"
|
||||
|
||||
// -----------------------------------------------------------------------------------------
|
||||
// Implementation of VtableStub
|
||||
|
||||
// Bump-pointer allocation state: stubs are carved out of the current chunk
// (a BufferBlob) in VtableStub::operator new.
address VtableStub::_chunk = NULL;
// One-past-the-end of the current chunk.
address VtableStub::_chunk_end = NULL;
// Where compiled code expects the receiver; set once in VtableStubs::initialize().
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();

// NOTE: despite the name, this is incremented once per stub allocation
// (every call to operator new), not once per chunk. Statistics only.
static int num_vtable_chunks = 0;
|
||||
|
||||
|
||||
// Allocates storage for one VtableStub header plus code_size bytes of stub
// code. Memory comes from CodeCache BufferBlobs, carved out chunk-wise with
// a bump pointer (_chunk/_chunk_end) to amortize per-blob header overhead.
// Exits the VM if the CodeCache cannot supply a new chunk.
void* VtableStub::operator new(size_t size, int code_size) {
  assert(size == sizeof(VtableStub), "mismatched size");
  num_vtable_chunks++;
  // Footprint of one stub (header + code), rounded up to a word boundary.
  const int stub_bytes = round_to(code_size + sizeof(VtableStub), wordSize);
  // Number of stubs each chunk is sized to hold.
  const int stubs_per_chunk = 32;
  const bool need_new_chunk = (_chunk == NULL) || (_chunk + stub_bytes > _chunk_end);
  if (need_new_chunk) {
    // Extra pd_code_alignment() bytes guarantee room to align within the chunk.
    const int chunk_bytes = stubs_per_chunk * stub_bytes + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", chunk_bytes);
    if (blob == NULL) {
      vm_exit_out_of_memory1(chunk_bytes, "CodeCache: no room for %s", "vtable chunks");
    }
    _chunk     = blob->instructions_begin();
    _chunk_end = _chunk + chunk_bytes;
    VTune::register_stub("vtable stub", _chunk, _chunk_end);
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
    }
    align_chunk();
  }
  assert(_chunk + stub_bytes <= _chunk_end, "bad allocation");
  // Hand out the current bump-pointer position and advance past this stub.
  void* stub_memory = _chunk;
  _chunk += stub_bytes;
  align_chunk();
  return stub_memory;
}
|
||||
|
||||
|
||||
// Prints a one-line description of this stub to tty. The code range is
// printed as the half-open interval "[begin, end[".
// NOTE(review): receiver_location() is a VMReg printed via %d — relies on
// VMReg being printable as an int; confirm against VMReg's representation.
void VtableStub::print() {
  tty->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
             index(), receiver_location(), code_begin(), code_end());
}
|
||||
|
||||
|
||||
// -----------------------------------------------------------------------------------------
|
||||
// Implementation of VtableStubs
|
||||
//
|
||||
// For each hash value there's a linked list of vtable stubs (with that
|
||||
// hash value). Each list is anchored in a little hash _table, indexed
|
||||
// by that hash value.
|
||||
|
||||
// Hash table of stubs: each slot anchors a linked list (via VtableStub::_next)
// of stubs sharing the same hash(is_vtable_stub, vtable_index).
VtableStub* VtableStubs::_table[VtableStubs::N];
// Total stubs created so far; maintained under VtableStubs_lock, used for statistics.
int VtableStubs::_number_of_vtable_stubs = 0;
|
||||
|
||||
|
||||
void VtableStubs::initialize() {
|
||||
VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
|
||||
{
|
||||
MutexLocker ml(VtableStubs_lock);
|
||||
assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
|
||||
assert(is_power_of_2(N), "N must be a power of 2");
|
||||
for (int i = 0; i < N; i++) {
|
||||
_table[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Returns the entry point of a stub for (is_vtable_stub, vtable_index).
// When ShareVtableStubs is on, an existing stub with a matching key is
// reused; otherwise a fresh vtable- or itable-dispatch stub is generated
// and registered in the hash table.
address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, methodOop method) {
  assert(vtable_index >= 0, "must be positive");

  VtableStub* stub = NULL;
  if (ShareVtableStubs) {
    stub = lookup(is_vtable_stub, vtable_index);
  }
  if (stub == NULL) {
    // Cache miss (or sharing disabled): generate the platform-specific stub.
    stub = is_vtable_stub ? create_vtable_stub(vtable_index)
                          : create_itable_stub(vtable_index);
    enter(is_vtable_stub, vtable_index, stub);
#ifndef PRODUCT
    if (PrintAdapterHandlers) {
      tty->print_cr("Decoding VtableStub %s[%d]@%d",
                    is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
      Disassembler::decode(stub->code_begin(), stub->code_end());
    }
#endif
  }
  return stub->entry_point();
}
|
||||
|
||||
|
||||
// Hash key for the stub table; folds in the receiver location so vtable
// and itable stubs with the same index land in different buckets.
inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){
  // Assumption: receiver_location < 4 in most cases.
  int h = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  // Complement the hash for vtable stubs to separate them from itable stubs.
  if (is_vtable_stub) {
    h = ~h;
  }
  return h & mask;
}
|
||||
|
||||
|
||||
// Finds an existing stub for (is_vtable_stub, vtable_index), or NULL.
// Walks the hash bucket's linked list under VtableStubs_lock.
VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  MutexLocker ml(VtableStubs_lock);
  unsigned bucket = VtableStubs::hash(is_vtable_stub, vtable_index);
  for (VtableStub* cur = _table[bucket]; cur != NULL; cur = cur->next()) {
    if (cur->matches(is_vtable_stub, vtable_index)) {
      return cur;
    }
  }
  return NULL;
}
|
||||
|
||||
|
||||
// Registers a freshly created stub in the hash table, prepending it to
// its bucket's list, and bumps the stub counter. Caller-supplied stub
// must match the (is_vtable_stub, vtable_index) key.
void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  MutexLocker ml(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  const unsigned int bucket = VtableStubs::hash(is_vtable_stub, vtable_index);
  // Push onto the front of the bucket's chain.
  s->set_next(_table[bucket]);
  _table[bucket] = s;
  _number_of_vtable_stubs++;
}
|
||||
|
||||
|
||||
// Answers whether pc is the entry point of a registered stub. Backs up
// from pc to the would-be VtableStub header, then checks that this exact
// header is present in its hash bucket.
bool VtableStubs::is_entry_point(address pc) {
  MutexLocker ml(VtableStubs_lock);
  // Entry point sits entry_offset() bytes past the header.
  VtableStub* candidate = (VtableStub*)(pc - VtableStub::entry_offset());
  uint bucket = VtableStubs::hash(candidate->is_vtable_stub(), candidate->index());
  VtableStub* cur = _table[bucket];
  while (cur != NULL && cur != candidate) {
    cur = cur->next();
  }
  return cur == candidate;
}
|
||||
|
||||
|
||||
// Answers whether pc lies inside any registered stub's code.
// Delegates to a full-table linear scan; acceptable while this query
// stays off hot paths — revisit if it becomes frequent.
bool VtableStubs::contains(address pc) {
  VtableStub* stub = stub_containing(pc);
  return stub != NULL;
}
|
||||
|
||||
|
||||
// Returns the stub whose code range contains pc, or NULL. Scans every
// bucket's chain linearly.
// Note: No locking needed since any change to the data structure
// happens with an atomic store into it (we don't care about
// consistency with the _number_of_vtable_stubs counter).
VtableStub* VtableStubs::stub_containing(address pc) {
  for (int bucket = 0; bucket < N; bucket++) {
    VtableStub* cur = _table[bucket];
    while (cur != NULL) {
      if (cur->contains(pc)) {
        return cur;
      }
      cur = cur->next();
    }
  }
  return NULL;
}
|
||||
|
||||
// VM-startup hook: initializes the vtable stub subsystem (receiver
// location and empty hash table).
void vtableStubs_init() {
  VtableStubs::initialize();
}
|
||||
|
||||
|
||||
//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

// Debug-build diagnostic with C linkage — presumably invoked from generated
// stub/assembly code on a bad vtable dispatch (TODO confirm against the
// platform stub generators). Prints the receiver's class and aborts the VM
// with the offending index and the actual vtable length.
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;
  HandleMark hm;
  klassOop klass = receiver->klass();
  instanceKlass* ik = instanceKlass::cast(klass);
  klassVtable* vt = ik->vtable();
  klass->print();
  // fatal3 does not return: VM aborts here.
  fatal3("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", index %d (vtable length %d)", (address)receiver, index, vt->length());
}

#endif // Product
|
121
hotspot/src/share/vm/code/vtableStubs.hpp
Normal file
121
hotspot/src/share/vm/code/vtableStubs.hpp
Normal file
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
|
||||
* CA 95054 USA or visit www.sun.com if you need additional information or
|
||||
* have any questions.
|
||||
*
|
||||
*/
|
||||
|
||||
// A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables
// There's a one-to-one relationship between a VtableStub and such a pair.
//
// Layout note: the machine-code stub is placed immediately after the
// VtableStub object itself (see code_begin()/entry_offset()), so the
// object's size and field layout are part of the stub addressing scheme.

class VtableStub {
 private:
  friend class VtableStubs;

  static address _chunk;             // For allocation: bump pointer into current chunk
  static address _chunk_end;         // For allocation: end of current chunk
  static VMReg   _receiver_location; // Where to find receiver

  VtableStub*    _next;              // Pointer to next entry in hash table
  const short    _index;             // vtable index
  short          _ame_offset;        // Where an AbstractMethodError might occur
  short          _npe_offset;        // Where a NullPointerException might occur
  bool           _is_vtable_stub;    // True if vtable stub, false, is itable stub
  /* code follows here */            // The vtableStub code

  // Bump-pointer allocation from chunked BufferBlobs (see vtableStubs.cpp).
  void* operator new(size_t size, int code_size);

  // Exception offsets start at -1 (invalid) until set_exception_points() runs.
  VtableStub(bool is_vtable_stub, int index)
        : _next(NULL), _is_vtable_stub(is_vtable_stub),
          _index(index), _ame_offset(-1), _npe_offset(-1) {}
  // Hash-chain management (list heads live in VtableStubs::_table).
  VtableStub* next() const                       { return _next; }
  int index() const                              { return _index; }
  static VMReg receiver_location()               { return _receiver_location; }
  void set_next(VtableStub* n)                   { _next = n; }
  // Stub code is laid out directly after this object.
  address code_begin() const                     { return (address)(this + 1); }
  address code_end() const                       { return code_begin() + pd_code_size_limit(_is_vtable_stub); }
  address entry_point() const                    { return code_begin(); }
  // Distance from the object header to the code / entry point.
  static int entry_offset()                      { return sizeof(class VtableStub); }

  bool matches(bool is_vtable_stub, int index) const {
    return _index == index && _is_vtable_stub == is_vtable_stub;
  }
  bool contains(address pc) const                { return code_begin() <= pc && pc < code_end(); }

  // Records (as offsets from code_begin) where an NPE and an AME can be
  // raised inside the stub; asserts the two points are distinct and that
  // the stored offsets round-trip correctly.
  void set_exception_points(address npe_addr, address ame_addr) {
    _npe_offset = npe_addr - code_begin();
    _ame_offset = ame_addr - code_begin();
    assert(is_abstract_method_error(ame_addr),   "offset must be correct");
    assert(is_null_pointer_exception(npe_addr),  "offset must be correct");
    assert(!is_abstract_method_error(npe_addr),  "offset must be correct");
    assert(!is_null_pointer_exception(ame_addr), "offset must be correct");
  }

  // platform-dependent routines
  static int  pd_code_size_limit(bool is_vtable_stub);
  static int  pd_code_alignment();
  // CNC: Removed because vtable stubs are now made with an ideal graph
  // static bool pd_disregard_arg_size();

  // Advances _chunk so the NEXT stub's code (not its header) is aligned
  // to pd_code_alignment().
  static void align_chunk() {
    uintptr_t off = (uintptr_t)( _chunk + sizeof(VtableStub) ) % pd_code_alignment();
    if (off != 0)  _chunk += pd_code_alignment() - off;
  }

 public:
  // Query
  bool is_itable_stub()                          { return !_is_vtable_stub; }
  bool is_vtable_stub()                          { return  _is_vtable_stub; }
  // Classify a faulting pc inside this stub by the recorded offsets.
  bool is_abstract_method_error(address epc)     { return epc == code_begin()+_ame_offset; }
  bool is_null_pointer_exception(address epc)    { return epc == code_begin()+_npe_offset; }

  void print();
};
|
||||
|
||||
|
||||
// VtableStubs creates the code stubs for compiled calls through vtables.
// There is one stub per (vtable index, args_size) pair, and the stubs are
// never deallocated. They don't need to be GCed because they contain no oops.
//
// Stubs are kept in a fixed-size hash table of linked lists, keyed by
// hash(is_vtable_stub, vtable_index); see vtableStubs.cpp.

class VtableStubs : AllStatic {
 public:                                         // N must be public (some compilers need this for _table)
  enum {
    N    = 256,                                  // size of stub table; must be power of two
    mask = N - 1
  };

 private:
  static VtableStub* _table[N];                  // table of existing stubs
  static int         _number_of_vtable_stubs;    // number of stubs created so far (for statistics)

  // Platform-specific stub generators (defined per architecture).
  static VtableStub* create_vtable_stub(int vtable_index);
  static VtableStub* create_itable_stub(int vtable_index);
  // Hash-table operations; lookup/enter take VtableStubs_lock.
  static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
  static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
  static inline uint hash              (bool is_vtable_stub, int vtable_index);

 public:
  static address     create_stub(bool is_vtable_stub, int vtable_index, methodOop method); // return the entry point of a stub for this call
  static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
  static bool        contains(address pc);                           // is pc within any stub?
  static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
  static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
  static void        initialize();
};
|
Loading…
Add table
Add a link
Reference in a new issue