6964458: Reimplement class meta-data storage to use native memory

Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes

Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Jon Masamitsu 2012-09-01 13:25:18 -04:00 committed by Coleen Phillimore
parent 36eee7c8c8
commit 5c58d27aac
853 changed files with 26124 additions and 82956 deletions
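At its core, the change turns class metadata from Java-heap objects (methodOop, klassOop, and friends living in PermGen) into ordinary C++ objects allocated in native-memory metaspaces owned by class loaders. A minimal sketch of the new shape, with class names taken from the patch and bodies invented for illustration:

class MetaspaceObj { };                  // anything allocated in a class loader's metaspace
class Metadata : public MetaspaceObj {   // replaces the oopDesc-based meta-objects
 public:
  virtual bool is_klass()  const { return false; }
  virtual bool is_method() const { return false; }
};
class Method : public Metadata {         // was methodOop, a Java-heap object
 public:
  virtual bool is_method() const { return true; }
};
class Klass : public Metadata {          // was klassOop
 public:
  virtual bool is_klass() const { return true; }
};

This is why the diffs below systematically replace oop-typed metadata (methodOop, klassOop) with plain pointers (Method*, Klass*) and oop arrays with Array<T>*.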

src/share/vm/code/codeBlob.cpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,6 +74,7 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
size = align_code_offset(size);
size += round_to(cb->total_content_size(), oopSize);
size += round_to(cb->total_oop_size(), oopSize);
size += round_to(cb->total_metadata_size(), oopSize);
return size;
}
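The added total_metadata_size() term gives a code blob a metadata section alongside its oop section, each rounded up to a word boundary. A self-contained sketch of the align-up arithmetic that round_to is assumed to perform here:

#include <cstddef>

// round size up to a power-of-two boundary, keeping each section word-aligned
static size_t round_to(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
// e.g. with oopSize == 8: round_to(20, 8) == 24 and round_to(24, 8) == 24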

src/share/vm/code/codeBlob.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,8 +68,6 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
OopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeComments _comments;
friend class OopRecorder;
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);

src/share/vm/code/codeCache.cpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,9 @@
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
@ -33,7 +35,7 @@
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
@ -54,6 +56,7 @@ class CodeBlob_sizes {
int stub_size;
int relocation_size;
int scopes_oop_size;
int scopes_metadata_size;
int scopes_data_size;
int scopes_pcs_size;
@ -66,6 +69,7 @@ class CodeBlob_sizes {
stub_size = 0;
relocation_size = 0;
scopes_oop_size = 0;
scopes_metadata_size = 0;
scopes_data_size = 0;
scopes_pcs_size = 0;
}
@ -83,6 +87,7 @@ class CodeBlob_sizes {
code_size * 100 / total_size,
stub_size * 100 / total_size,
scopes_oop_size * 100 / total_size,
scopes_metadata_size * 100 / total_size,
scopes_data_size * 100 / total_size,
scopes_pcs_size * 100 / total_size);
}
@ -98,6 +103,7 @@ class CodeBlob_sizes {
stub_size += nm->stub_size();
scopes_oop_size += nm->oops_size();
scopes_metadata_size += nm->metadata_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
} else {
@ -284,6 +290,12 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) {
}
}
void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_NMETHODS(nm) {
f(nm);
}
}
int CodeCache::alignment_unit() {
return (int)_heap->alignment_unit();
@ -448,7 +460,7 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
#endif //PRODUCT
nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
@ -468,7 +480,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
xtty->method(methodOop(m));
xtty->method(m);
xtty->stamp();
xtty->end_elem();
}
@ -518,7 +530,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
xtty->method(methodOop(nm->method()));
xtty->method(nm->method());
xtty->stamp();
xtty->end_elem();
}
@ -548,6 +560,32 @@ void CodeCache::gc_epilogue() {
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
#ifdef ASSERT
// make sure that we aren't leaking icholders
int count = 0;
FOR_ALL_BLOBS(cb) {
if (cb->is_nmethod()) {
RelocIterator iter((nmethod*)cb);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
CompiledIC *ic = CompiledIC_at(iter.reloc());
if (TraceCompiledIC) {
tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
ic->print();
}
assert(ic->cached_icholder() != NULL, "must be non-NULL");
count++;
}
}
}
}
}
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
CompiledICHolder::live_count(), "must agree");
#endif
}
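The debug-only walk above checks a conservation law for CompiledICHolder objects: holders still referenced from live call sites, plus holders queued for release in the InlineCacheBuffer, plus holders created but never installed, must add up to every live holder. A self-contained restatement with invented counts:

#include <cassert>

int main() {
  int referenced_from_call_sites = 3;  // what the RelocIterator walk counts
  int pending_release            = 1;  // InlineCacheBuffer::pending_icholder_count()
  int live_not_claimed           = 2;  // CompiledICHolder::live_not_claimed_count()
  int live                       = 6;  // CompiledICHolder::live_count()
  assert(referenced_from_call_sites + pending_release + live_not_claimed == live);
  return 0;
}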
@ -649,8 +687,8 @@ int CodeCache::mark_for_deoptimization(DepChange& changes) {
{ No_Safepoint_Verifier nsv;
for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
klassOop d = str.klass();
number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
Klass* d = str.klass();
number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
}
}
@ -683,10 +721,10 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
int number_of_marked_CodeBlobs = 0;
// Deoptimize all methods of the evolving class itself
objArrayOop old_methods = dependee->methods();
Array<Method*>* old_methods = dependee->methods();
for (int i = 0; i < old_methods->length(); i++) {
ResourceMark rm;
methodOop old_method = (methodOop) old_methods->obj_at(i);
Method* old_method = old_methods->at(i);
nmethod *nm = old_method->code();
if (nm != NULL) {
nm->mark_for_deoptimization();
@ -702,7 +740,7 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
nm->mark_for_deoptimization();
number_of_marked_CodeBlobs++;
} else {
// flush caches in case they refer to a redefined methodOop
// flush caches in case they refer to a redefined Method*
nm->clear_inline_caches();
}
}
@ -721,7 +759,7 @@ void CodeCache::mark_all_nmethods_for_deoptimization() {
}
int CodeCache::mark_for_deoptimization(methodOop dependee) {
int CodeCache::mark_for_deoptimization(Method* dependee) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;

src/share/vm/code/codeCache.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,6 +80,7 @@ class CodeCache : AllStatic {
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
// Lookup
static CodeBlob* find_blob(void* start);
@ -170,7 +171,7 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static nmethod* find_and_remove_saved_code(methodOop m);
static nmethod* find_and_remove_saved_code(Method* m);
static void remove_saved_code(nmethod* nm);
static void speculatively_disconnect(nmethod* nm);
@ -181,7 +182,7 @@ class CodeCache : AllStatic {
#endif // HOTSWAP
static void mark_all_nmethods_for_deoptimization();
static int mark_for_deoptimization(methodOop dependee);
static int mark_for_deoptimization(Method* dependee);
static void make_marked_nmethods_zombies();
static void make_marked_nmethods_not_entrant();
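The new alive_nmethods_do takes a plain function pointer, like its neighbours. Since the real iterator only exists inside the VM, here is a compilable sketch of the calling convention with the VM type stubbed out:

struct nmethod;                              // opaque stand-in for the VM type

static int alive_count = 0;
static void count_alive(nmethod*) { ++alive_count; }

// mirrors the declaration above: void f(nmethod* nm) is a function-pointer parameter
static void alive_nmethods_do(void f(nmethod* nm)) {
  nmethod* only = 0;                         // stand-in for the code-heap walk
  f(only);
}

int main() { alive_nmethods_do(count_alive); return alive_count == 1 ? 0 : 1; }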

src/share/vm/code/compiledIC.cpp

@ -31,8 +31,9 @@
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/methodOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
@ -44,61 +45,79 @@
// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.
// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
if (is_icholder_entry(call->destination())) {
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
}
}
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
return is_icholder_entry(call->destination());
}
//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.
void CompiledIC::set_cached_oop(oop cache) {
void* CompiledIC::cached_value() const {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
assert (cache == NULL || cache != badOop, "invalid oop");
if (TraceCompiledIC) {
tty->print(" ");
print_compiled_ic();
tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
}
if (cache == NULL) cache = (oop)Universe::non_oop_word();
*_oop_addr = cache;
// fix up the relocations
RelocIterator iter = _oops;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* r = iter.oop_reloc();
if (r->oop_addr() == _oop_addr)
r->fix_oop_relocation();
}
}
return;
}
oop CompiledIC::cached_oop() const {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");
if (!is_in_transition_state()) {
oop data = *_oop_addr;
// If we let the oop value here be initialized to zero...
void* data = (void*)_value->data();
// If we let the metadata value here be initialized to zero...
assert(data != NULL || Universe::non_oop_word() == NULL,
"no raw nulls in CompiledIC oops, because of patching races");
return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
"no raw nulls in CompiledIC metadatas, because of patching races");
return (data == (void*)Universe::non_oop_word()) ? NULL : data;
} else {
return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
}
}
void CompiledIC::set_ic_destination(address entry_point) {
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
assert(entry_point != NULL, "must set legal entry point");
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");
assert(!is_icholder || is_icholder_entry(entry_point), "must be");
// Don't use ic_destination for this test since that forwards
// through ICBuffer instead of returning the actual current state of
// the CompiledIC.
if (is_icholder_entry(_ic_call->destination())) {
// When patching for the ICStub case the cached value isn't
// overwritten until the ICStub copied into the CompiledIC during
// the next safepoint. Make sure that the CompiledICHolder* is
// marked for release at this point since it won't be identifiable
// once the entry point is overwritten.
InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
}
if (TraceCompiledIC) {
tty->print(" ");
print_compiled_ic();
tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
tty->print(" changing destination to " INTPTR_FORMAT, entry_point);
if (!is_optimized()) {
tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", (address)cache);
}
if (is_icstub) {
tty->print(" (icstub)");
}
tty->cr();
}
{
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
@ -107,6 +126,25 @@ void CompiledIC::set_ic_destination(address entry_point) {
_ic_call->set_destination_mt_safe(entry_point);
}
if (is_optimized() || is_icstub) {
// Optimized call sites don't have a cache value and ICStub call
// sites only change the entry point. Changing the value in that
// case could lead to MT safety issues.
assert(cache == NULL, "must be null");
return;
}
if (cache == NULL) cache = (void*)Universe::non_oop_word();
_value->set_data((intptr_t)cache);
}
void CompiledIC::set_ic_destination(ICStub* stub) {
internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}
address CompiledIC::ic_destination() const {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
@ -124,6 +162,11 @@ bool CompiledIC::is_in_transition_state() const {
}
bool CompiledIC::is_icholder_call() const {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
return !_is_optimized && is_icholder_entry(ic_destination());
}
// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
@ -140,7 +183,6 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
methodHandle method = call_info->selected_method();
bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(method->is_oop(), "cannot be NULL and must be oop");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@ -149,7 +191,7 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
int index = klassItable::compute_itable_index(call_info->resolved_method()());
entry = VtableStubs::create_stub(false, index, method());
assert(entry != NULL, "entry not computed");
klassOop k = call_info->resolved_method()->method_holder();
Klass* k = call_info->resolved_method()->method_holder();
assert(Klass::cast(k)->is_interface(), "sanity check");
InlineCacheBuffer::create_transition_stub(this, k, entry);
} else {
@ -180,7 +222,7 @@ bool CompiledIC::is_megamorphic() const {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "an optimized call cannot be megamorphic");
// Cannot rely on cached_oop. It is either an interface or a method.
// Cannot rely on cached_value. It is either an interface or a method.
return VtableStubs::is_entry_point(ic_destination());
}
@ -192,24 +234,16 @@ bool CompiledIC::is_call_to_compiled() const {
// has been cleaned up
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
bool is_monomorphic = (cb != NULL && cb->is_nmethod());
// Check that the cached_oop is a klass for non-optimized monomorphic calls
// Check that the cached_value is a klass for non-optimized monomorphic calls
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
// for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
// for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
#ifdef ASSERT
#ifdef TIERED
CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
bool is_c1_method = true;
#else
bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
assert( is_c1_method ||
!is_monomorphic ||
is_optimized() ||
(cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
(cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
return is_monomorphic;
}
@ -226,7 +260,7 @@ bool CompiledIC::is_call_to_interpreted() const {
// is to the interpreter.
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
} else {
// Check if we are calling into our own codeblob (i.e., to a stub)
CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
@ -257,18 +291,21 @@ void CompiledIC::set_to_clean() {
entry = SharedRuntime::get_resolve_virtual_call_stub();
}
// A zombie transition will always be safe, since the oop has already been set to NULL, so
// A zombie transition will always be safe, since the metadata has already been set to NULL, so
// we only need to patch the destination
bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
if (safe_transition) {
if (!is_optimized()) set_cached_oop(NULL);
// Kill any leftover stub we might have too
if (is_in_transition_state()) {
ICStub* old_stub = ICStub_from_destination_address(stub_address());
old_stub->clear();
}
if (is_optimized()) {
set_ic_destination(entry);
} else {
set_ic_destination_and_value(entry, (void*)NULL);
}
} else {
// Unsafe transition - create stub.
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
@ -289,12 +326,12 @@ bool CompiledIC::is_clean() const {
address dest = ic_destination();
is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
dest == SharedRuntime::get_resolve_virtual_call_stub();
assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
return is_clean;
}
void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
// Updating a cache to the wrong entry can cause bugs that are very hard
// to track down - if cache entry gets invalid - we just clean it. In
@ -309,7 +346,7 @@ void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
// transitions are mt_safe
Thread *thread = Thread::current();
if (info._to_interpreter) {
if (info.to_interpreter()) {
// Call to interpreter
if (info.is_optimized() && is_optimized()) {
assert(is_clean(), "unsafe IC path");
@ -318,9 +355,9 @@ void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
// (either because of CHA or the static target is final)
// At code generation time, this call has been emitted as static call
// Call via stub
assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
methodHandle method (thread, (methodOop)info.cached_oop()());
methodHandle method (thread, (Method*)info.cached_metadata());
csc->set_to_interpreted(method, info.entry());
if (TraceICs) {
ResourceMark rm(thread);
@ -330,17 +367,15 @@ void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
}
} else {
// Call via method-klass-holder
assert(info.cached_oop().not_null(), "must be set");
InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
if (TraceICs) {
ResourceMark rm(thread);
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", instruction_address());
}
}
} else {
// Call to compiled code
bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
assert (cb->is_nmethod(), "must be compiled!");
@ -352,18 +387,21 @@ void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
(!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
if (!safe) {
InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
} else {
if (is_optimized()) {
set_ic_destination(info.entry());
if (!is_optimized()) set_cached_oop(info.cached_oop()());
} else {
set_ic_destination_and_value(info.entry(), info.cached_metadata());
}
}
if (TraceICs) {
ResourceMark rm(thread);
assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
instruction_address(),
((klassOop)info.cached_oop()())->print_value_string(),
((Klass*)info.cached_metadata())->print_value_string(),
(safe) ? "" : "via stub");
}
}
@ -386,8 +424,6 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
bool static_bound,
CompiledICInfo& info,
TRAPS) {
info._is_optimized = is_optimized;
nmethod* method_code = method->code();
address entry = NULL;
if (method_code != NULL) {
@ -400,18 +436,12 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
}
if (entry != NULL) {
// Call to compiled code
info._entry = entry;
if (static_bound || is_optimized) {
info._cached_oop = Handle(THREAD, (oop)NULL);
} else {
info._cached_oop = receiver_klass;
}
info._to_interpreter = false;
info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
} else {
// Note: the following problem exists with Compiler1:
// - at compile time we may or may not know if the destination is final
// - if we know that the destination is final, we will emit an optimized
// virtual call (no inline cache), and need a methodOop to make a call
// virtual call (no inline cache), and need a Method* to make a call
// to the interpreter
// - if we do not know if the destination is final, we emit a standard
// virtual call, and use CompiledICHolder to call interpreted code
@ -422,7 +452,6 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
// it look vanilla but is optimized. Code in is_call_to_interpreted
// is aware of this and weakens its asserts.
info._to_interpreter = true;
// static_bound should imply is_optimized -- otherwise we have a
// performance bug (statically-bindable method is called via
// dynamically-dispatched call); note: the reverse implication isn't
@ -443,38 +472,46 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
#endif // COMPILER2
if (is_optimized) {
// Use stub entry
info._entry = method()->get_c2i_entry();
info._cached_oop = method;
info.set_interpreter_entry(method()->get_c2i_entry(), method());
} else {
// Use mkh entry
oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
info._cached_oop = Handle(THREAD, holder);
info._entry = method()->get_c2i_unverified_entry();
// Use icholder entry
CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
}
}
assert(info.is_optimized() == is_optimized, "must agree");
}
inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
address first_oop = NULL;
// Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
nmethod* tmp_nm = nm;
return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
bool CompiledIC::is_icholder_entry(address entry) {
CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
return (cb != NULL && cb->is_adapter_blob());
}
CompiledIC::CompiledIC(NativeCall* ic_call)
: _ic_call(ic_call),
_oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
: _ic_call(call)
{
address ic_call = call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
// search for the ic_call at the given address
RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
CompiledIC::CompiledIC(Relocation* ic_reloc)
: _ic_call(nativeCall_at(ic_reloc->addr())),
_oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
assert(ic_reloc->type() == relocInfo::virtual_call_type ||
ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}
@ -639,8 +676,8 @@ void CompiledIC::print() {
void CompiledIC::print_compiled_ic() {
tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value());
}

src/share/vm/code/compiledIC.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,7 @@
#define SHARE_VM_CODE_COMPILEDIC_HPP
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolderKlass.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/klassOop.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
@ -57,36 +55,88 @@
// / \ /-<-\
// / [2] \ / \
// Interpreted ---------> Monomorphic | [3]
// (compiledICHolderOop) (klassOop) |
// (CompiledICHolder*) (Klass*) |
// \ / \ /
// [4] \ / [4] \->-/
// \->- Megamorphic -<-/
// (methodOop)
// (Method*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The klassOop must stay the same)
// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
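// A hypothetical summary, added here for illustration and not part of the
// patch: which value the inline cache's mov instruction holds in each state
// after this change.
enum ICStateSketch { ics_clean, ics_mono_compiled, ics_mono_interpreted, ics_megamorphic };
inline const char* cached_value_kind(ICStateSketch s) {
  switch (s) {
    case ics_clean:            return "NULL (call resolves through the runtime stub)";
    case ics_mono_compiled:    return "Klass* of the predicted receiver";
    case ics_mono_interpreted: return "CompiledICHolder*";
    case ics_megamorphic:      return "Method* (itable/vtable stub)";
  }
  return "";
}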
class CompiledIC;
class ICStub;
class CompiledICInfo {
friend class CompiledIC;
class CompiledICInfo : public StackObj {
private:
address _entry; // entry point for call
Handle _cached_oop; // Value of cached_oop (either in stub or inline cache)
void* _cached_value; // Value of cached_value (either in stub or inline cache)
bool _is_icholder; // Is the cached value a CompiledICHolder*
bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
bool _to_interpreter; // Call it to interpreter
bool _release_icholder;
public:
address entry() const { return _entry; }
Handle cached_oop() const { return _cached_oop; }
Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
CompiledICHolder* claim_cached_icholder() {
assert(_is_icholder, "");
assert(_cached_value != NULL, "must be non-NULL");
_release_icholder = false;
CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
icholder->claim();
return icholder;
}
bool is_optimized() const { return _is_optimized; }
bool to_interpreter() const { return _to_interpreter; }
void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
_entry = entry;
_cached_value = (void*)klass;
_to_interpreter = false;
_is_icholder = false;
_is_optimized = is_optimized;
_release_icholder = false;
}
void set_interpreter_entry(address entry, Method* method) {
_entry = entry;
_cached_value = (void*)method;
_to_interpreter = true;
_is_icholder = false;
_is_optimized = true;
_release_icholder = false;
}
void set_icholder_entry(address entry, CompiledICHolder* icholder) {
_entry = entry;
_cached_value = (void*)icholder;
_to_interpreter = true;
_is_icholder = true;
_is_optimized = false;
_release_icholder = true;
}
CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
_to_interpreter(false), _is_optimized(false), _release_icholder(false) {
}
~CompiledICInfo() {
// In rare cases the info is computed but not used, so release any
// CompiledICHolder* that was created
if (_release_icholder) {
assert(_is_icholder, "must be");
CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
icholder->claim();
delete icholder;
}
}
};
class CompiledIC: public ResourceObj {
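The CompiledICInfo destructor above closes a leak window: compute_monomorphic_entry can allocate a CompiledICHolder that some caller paths never install. The info object owns the holder until claim_cached_icholder transfers ownership. A self-contained sketch of that protocol, with hypothetical names:

struct HolderSketch { };

struct InfoSketch {
  HolderSketch* holder;
  bool release;                      // true while the info still owns the holder
  InfoSketch(HolderSketch* h) : holder(h), release(true) { }
  HolderSketch* claim() { release = false; return holder; }  // ownership moves out
  ~InfoSketch() { if (release) delete holder; }              // computed-but-unused => freed
};
// usage: InfoSketch info(new HolderSketch()); the holder is freed unless info.claim() is called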
@ -96,18 +146,32 @@ class CompiledIC: public ResourceObj {
private:
NativeCall* _ic_call; // the call instruction
oop* _oop_addr; // patchable oop cell for this IC
RelocIterator _oops; // iteration over any and all set-oop instructions
NativeMovConstReg* _value; // patchable value cell for this IC
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
CompiledIC(NativeCall* ic_call);
CompiledIC(Relocation* ic_reloc); // Must be of virtual_call_type/opt_virtual_call_type
CompiledIC(nmethod* nm, NativeCall* ic_call);
static bool is_icholder_entry(address entry);
// low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
// to change an inline-cache. These change the underlying inline-cache directly. They *never* make
// changes to a transition stub.
void set_ic_destination(address entry_point);
void set_cached_oop(oop cache);
void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
void set_ic_destination(ICStub* stub);
void set_ic_destination(address entry_point) {
assert(_is_optimized, "use set_ic_destination_and_value instead");
internal_set_ic_destination(entry_point, false, NULL, false);
}
// This only for use by ICStubs where the type of the value isn't known
void set_ic_destination_and_value(address entry_point, void* value) {
internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
}
void set_ic_destination_and_value(address entry_point, Metadata* value) {
internal_set_ic_destination(entry_point, false, value, false);
}
void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
internal_set_ic_destination(entry_point, false, value, true);
}
// Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
// associated with the inline cache.
@ -116,13 +180,28 @@ class CompiledIC: public ResourceObj {
public:
// conversion (machine PC to CompiledIC*)
friend CompiledIC* CompiledIC_before(address return_addr);
friend CompiledIC* CompiledIC_at(address call_site);
friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
// Return the cached_oop/destination associated with this inline cache. If the cache currently points
// This is used to release CompiledICHolder*s from nmethods that
// are about to be freed. The callsite might contain other stale
// values of other kinds so it must be careful.
static void cleanup_call_site(virtual_call_Relocation* call_site);
static bool is_icholder_call_site(virtual_call_Relocation* call_site);
// Return the cached_metadata/destination associated with this inline cache. If the cache currently points
// to a transition stub, it will read the values from the transition stub.
oop cached_oop() const;
void* cached_value() const;
CompiledICHolder* cached_icholder() const {
assert(is_icholder_call(), "must be");
return (CompiledICHolder*) cached_value();
}
Metadata* cached_metadata() const {
assert(!is_icholder_call(), "must be");
return (Metadata*) cached_value();
}
address ic_destination() const;
bool is_optimized() const { return _is_optimized; }
@ -133,6 +212,8 @@ class CompiledIC: public ResourceObj {
bool is_call_to_compiled() const;
bool is_call_to_interpreted() const;
bool is_icholder_call() const;
address end_of_call() { return _ic_call->return_address(); }
// MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
@ -144,7 +225,7 @@ class CompiledIC: public ResourceObj {
// They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(const CompiledICInfo& info);
void set_to_monomorphic(CompiledICInfo& info);
void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
@ -159,20 +240,22 @@ class CompiledIC: public ResourceObj {
void verify() PRODUCT_RETURN;
};
inline CompiledIC* CompiledIC_before(address return_addr) {
CompiledIC* c_ic = new CompiledIC(nativeCall_before(return_addr));
inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(address call_site) {
CompiledIC* c_ic = new CompiledIC(nativeCall_at(call_site));
inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(Relocation* call_site) {
CompiledIC* c_ic = new CompiledIC(call_site);
assert(call_site->type() == relocInfo::virtual_call_type ||
call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
c_ic->verify();
return c_ic;
}
@ -191,7 +274,7 @@ inline CompiledIC* CompiledIC_at(Relocation* call_site) {
//
// Clean: Calls directly to runtime method for fixup
// Compiled code: Calls directly to compiled code
// Interpreted code: Calls to stub that set methodOop reference
// Interpreted code: Calls to stub that set Method* reference
//
//
class CompiledStaticCall;

src/share/vm/code/debugInfo.cpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,6 +41,10 @@ void DebugInfoWriteStream::write_handle(jobject h) {
write_int(recorder()->oop_recorder()->find_index(h));
}
void DebugInfoWriteStream::write_metadata(Metadata* h) {
write_int(recorder()->oop_recorder()->find_index(h));
}
ScopeValue* DebugInfoReadStream::read_object_value() {
int id = read_int();
#ifdef ASSERT
@ -109,7 +113,7 @@ void LocationValue::print_on(outputStream* st) const {
void ObjectValue::read_object(DebugInfoReadStream* stream) {
_klass = read_from(stream);
assert(_klass->is_constant_oop(), "should be constant klass oop");
assert(_klass->is_constant_oop(), "should be constant java mirror oop");
int length = stream->read_int();
for (int i = 0; i < length; i++) {
ScopeValue* val = read_from(stream);
@ -198,6 +202,9 @@ void ConstantDoubleValue::print_on(outputStream* st) const {
// ConstantOopWriteValue
void ConstantOopWriteValue::write_on(DebugInfoWriteStream* stream) {
assert(JNIHandles::resolve(value()) == NULL ||
Universe::heap()->is_in_reserved(JNIHandles::resolve(value())),
"Should be in heap");
stream->write_int(CONSTANT_OOP_CODE);
stream->write_handle(value());
}
@ -211,6 +218,8 @@ void ConstantOopWriteValue::print_on(outputStream* st) const {
ConstantOopReadValue::ConstantOopReadValue(DebugInfoReadStream* stream) {
_value = Handle(stream->read_oop());
assert(_value() == NULL ||
Universe::heap()->is_in_reserved(_value()), "Should be in heap");
}
void ConstantOopReadValue::write_on(DebugInfoWriteStream* stream) {
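Both streams serialize small table indexes rather than raw pointers: the writers store find_index results and the readers fetch from the owning nmethod's oop table, or now its metadata table. A self-contained sketch of that interning scheme (OopRecorder's real API is richer than this):

#include <vector>

template <typename T>
struct RecorderSketch {                   // stands in for OopRecorder
  std::vector<T> entries;
  int find_index(T v) {                   // intern v and return its index
    for (int i = 0; i < (int)entries.size(); i++)
      if (entries[i] == v) return i;
    entries.push_back(v);
    return (int)entries.size() - 1;
  }
  T at(int i) const { return entries[i]; }  // plays the role of oop_at / metadata_at
};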

src/share/vm/code/debugInfo.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,6 +40,8 @@
// - LocationValue describes a value in a given location (in frame or register)
// - ConstantValue describes a constant
class ConstantOopReadValue;
class ScopeValue: public ResourceObj {
public:
// Testers
@ -51,6 +53,11 @@ class ScopeValue: public ResourceObj {
virtual bool is_constant_oop() const { return false; }
virtual bool equals(ScopeValue* other) const { return false; }
ConstantOopReadValue* as_ConstantOopReadValue() {
assert(is_constant_oop(), "must be");
return (ConstantOopReadValue*) this;
}
// Serialization of debugging information
virtual void write_on(DebugInfoWriteStream* stream) = 0;
static ScopeValue* read_from(DebugInfoReadStream* stream);
@ -94,7 +101,7 @@ class ObjectValue: public ScopeValue {
, _field_values()
, _value()
, _visited(false) {
assert(klass->is_constant_oop(), "should be constant klass oop");
assert(klass->is_constant_oop(), "should be constant java mirror oop");
}
ObjectValue(int id)
@ -260,7 +267,15 @@ class DebugInfoReadStream : public CompressedReadStream {
} ;
oop read_oop() {
return code()->oop_at(read_int());
oop o = code()->oop_at(read_int());
assert(o == NULL || o->is_oop(), "oop only");
return o;
}
Method* read_method() {
Method* o = (Method*)(code()->metadata_at(read_int()));
assert(o == NULL ||
o->is_metadata(), "meta data only");
return o;
}
ScopeValue* read_object_value();
ScopeValue* get_cached_object();
@ -279,6 +294,8 @@ class DebugInfoWriteStream : public CompressedWriteStream {
DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size);
void write_handle(jobject h);
void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
void write_metadata(Metadata* m);
};
#endif // SHARE_VM_CODE_DEBUGINFO_HPP

src/share/vm/code/debugInfoRec.cpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -305,7 +305,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
stream()->write_int(sender_stream_offset);
// serialize scope
jobject method_enc = (method == NULL)? NULL: method->constant_encoding();
Metadata* method_enc = (method == NULL)? NULL: method->constant_encoding();
stream()->write_int(oop_recorder()->find_index(method_enc));
stream()->write_bci(bci);
assert(method == NULL ||
@ -378,26 +378,36 @@ void DebugInformationRecorder::end_scopes(int pc_offset, bool is_safepoint) {
}
}
#ifdef ASSERT
bool DebugInformationRecorder::recorders_frozen() {
return _oop_recorder->is_complete();
}
void DebugInformationRecorder::mark_recorders_frozen() {
_oop_recorder->freeze();
}
#endif // ASSERT
DebugToken* DebugInformationRecorder::create_scope_values(GrowableArray<ScopeValue*>* values) {
assert(!_oop_recorder->is_complete(), "not frozen yet");
assert(!recorders_frozen(), "not frozen yet");
return (DebugToken*) (intptr_t) serialize_scope_values(values);
}
DebugToken* DebugInformationRecorder::create_monitor_values(GrowableArray<MonitorValue*>* monitors) {
assert(!_oop_recorder->is_complete(), "not frozen yet");
assert(!recorders_frozen(), "not frozen yet");
return (DebugToken*) (intptr_t) serialize_monitor_values(monitors);
}
int DebugInformationRecorder::data_size() {
debug_only(_oop_recorder->oop_size()); // mark it "frozen" for asserts
debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts
return _stream->position();
}
int DebugInformationRecorder::pcs_size() {
debug_only(_oop_recorder->oop_size()); // mark it "frozen" for asserts
debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts
if (last_pc()->pc_offset() != PcDesc::upper_offset_limit)
add_new_pc_offset(PcDesc::upper_offset_limit);
return _pcs_length * sizeof(PcDesc);
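data_size() and pcs_size() now freeze the recorder through one helper, so a create_scope_values call arriving after sizing trips an assert instead of silently invalidating the computed sizes. A minimal sketch of that freeze discipline:

#include <cassert>

struct FreezeSketch {
  bool frozen;
  int count;
  FreezeSketch() : frozen(false), count(0) { }
  void add_value() { assert(!frozen && "recorder already sized"); count++; }
  int data_size()  { frozen = true; return count * 8; }  // any sizing query freezes
};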

src/share/vm/code/debugInfoRec.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,8 +56,8 @@
// NB: nodes from create_scope_values and create_locations
// can be reused for simple sharing.
// - mark the end of the scopes (end_safepoint or end_non_safepoint)
// 2) Use oop_size, data_size, pcs_size to create the nmethod and
// finally migrate the debugging information into the nmethod
// 2) Use oop_size, metadata_size, data_size, pcs_size to create the nmethod
// and finally migrate the debugging information into the nmethod
// by calling copy_to.
class DebugToken; // Opaque datatype for stored:
@ -123,6 +123,7 @@ class DebugInformationRecorder: public ResourceObj {
int data_size();
int pcs_size();
int oop_size() { return oop_recorder()->oop_size(); }
int metadata_size() { return oop_recorder()->metadata_size(); }
// copy the generated debugging information to nmethod
void copy_to(nmethod* nm);
@ -193,6 +194,11 @@ class DebugInformationRecorder: public ResourceObj {
int serialize_scope_values(GrowableArray<ScopeValue*>* values);
int find_sharable_decode_offset(int stream_offset);
#ifndef PRODUCT
bool recorders_frozen();
void mark_recorders_frozen();
#endif // PRODUCT
public:
enum { serialized_null = 0 };
};

src/share/vm/code/dependencies.cpp (diff suppressed because it is too large)

src/share/vm/code/dependencies.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -203,7 +203,7 @@ class Dependencies: public ResourceObj {
private:
// State for writing a new set of dependencies:
GrowableArray<int>* _dep_seen; // (seen[h->ident] & (1<<dept))
GrowableArray<ciObject*>* _deps[TYPE_LIMIT];
GrowableArray<ciBaseObject*>* _deps[TYPE_LIMIT];
static const char* _dep_name[TYPE_LIMIT];
static int _dep_args[TYPE_LIMIT];
@ -212,7 +212,7 @@ class Dependencies: public ResourceObj {
return (int)dept >= 0 && dept < TYPE_LIMIT && ((1<<dept) & mask) != 0;
}
bool note_dep_seen(int dept, ciObject* x) {
bool note_dep_seen(int dept, ciBaseObject* x) {
assert(dept < BitsPerInt, "oob");
int x_id = x->ident();
assert(_dep_seen != NULL, "deps must be writable");
@ -222,7 +222,7 @@ class Dependencies: public ResourceObj {
return (seen & (1<<dept)) != 0;
}
bool maybe_merge_ctxk(GrowableArray<ciObject*>* deps,
bool maybe_merge_ctxk(GrowableArray<ciBaseObject*>* deps,
int ctxk_i, ciKlass* ctxk);
void sort_all_deps();
@ -260,9 +260,9 @@ class Dependencies: public ResourceObj {
assert(!is_concrete_klass(ctxk->as_instance_klass()), "must be abstract");
}
void assert_common_1(DepType dept, ciObject* x);
void assert_common_2(DepType dept, ciObject* x0, ciObject* x1);
void assert_common_3(DepType dept, ciKlass* ctxk, ciObject* x1, ciObject* x2);
void assert_common_1(DepType dept, ciBaseObject* x);
void assert_common_2(DepType dept, ciBaseObject* x0, ciBaseObject* x1);
void assert_common_3(DepType dept, ciKlass* ctxk, ciBaseObject* x1, ciBaseObject* x2);
public:
// Adding assertions to a new dependency set at compile time:
@ -286,8 +286,8 @@ class Dependencies: public ResourceObj {
// methods to remain non-concrete until their first invocation.
// In that case, there would be a middle ground between concrete
// and abstract (as defined by the Java language and VM).
static bool is_concrete_klass(klassOop k); // k is instantiable
static bool is_concrete_method(methodOop m); // m is invocable
static bool is_concrete_klass(Klass* k); // k is instantiable
static bool is_concrete_method(Method* m); // m is invocable
static Klass* find_finalizable_subclass(Klass* k);
// These versions of the concreteness queries work through the CI.
@ -314,24 +314,24 @@ class Dependencies: public ResourceObj {
// dependency on it must fail.
// Checking old assertions at run-time (in the VM only):
static klassOop check_evol_method(methodOop m);
static klassOop check_leaf_type(klassOop ctxk);
static klassOop check_abstract_with_unique_concrete_subtype(klassOop ctxk, klassOop conck,
static Klass* check_evol_method(Method* m);
static Klass* check_leaf_type(Klass* ctxk);
static Klass* check_abstract_with_unique_concrete_subtype(Klass* ctxk, Klass* conck,
KlassDepChange* changes = NULL);
static klassOop check_abstract_with_no_concrete_subtype(klassOop ctxk,
static Klass* check_abstract_with_no_concrete_subtype(Klass* ctxk,
KlassDepChange* changes = NULL);
static klassOop check_concrete_with_no_concrete_subtype(klassOop ctxk,
static Klass* check_concrete_with_no_concrete_subtype(Klass* ctxk,
KlassDepChange* changes = NULL);
static klassOop check_unique_concrete_method(klassOop ctxk, methodOop uniqm,
static Klass* check_unique_concrete_method(Klass* ctxk, Method* uniqm,
KlassDepChange* changes = NULL);
static klassOop check_abstract_with_exclusive_concrete_subtypes(klassOop ctxk, klassOop k1, klassOop k2,
static Klass* check_abstract_with_exclusive_concrete_subtypes(Klass* ctxk, Klass* k1, Klass* k2,
KlassDepChange* changes = NULL);
static klassOop check_exclusive_concrete_methods(klassOop ctxk, methodOop m1, methodOop m2,
static Klass* check_exclusive_concrete_methods(Klass* ctxk, Method* m1, Method* m2,
KlassDepChange* changes = NULL);
static klassOop check_has_no_finalizable_subclasses(klassOop ctxk, KlassDepChange* changes = NULL);
static klassOop check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned klassOop is NULL if the dependency assertion is still
// valid. A non-NULL klassOop is a 'witness' to the assertion
static Klass* check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes = NULL);
static Klass* check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes = NULL);
// A returned Klass* is NULL if the dependency assertion is still
// valid. A non-NULL Klass* is a 'witness' to the assertion
// failure, a point in the class hierarchy where the assertion has
// been proven false. For example, if check_leaf_type returns
// non-NULL, the value is a subtype of the supposed leaf type. This
@ -345,10 +345,10 @@ class Dependencies: public ResourceObj {
// It is used by DepStream::spot_check_dependency_at.
// Detecting possible new assertions:
static klassOop find_unique_concrete_subtype(klassOop ctxk);
static methodOop find_unique_concrete_method(klassOop ctxk, methodOop m);
static int find_exclusive_concrete_subtypes(klassOop ctxk, int klen, klassOop k[]);
static int find_exclusive_concrete_methods(klassOop ctxk, int mlen, methodOop m[]);
static Klass* find_unique_concrete_subtype(Klass* ctxk);
static Method* find_unique_concrete_method(Klass* ctxk, Method* m);
static int find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]);
static int find_exclusive_concrete_methods(Klass* ctxk, int mlen, Method* m[]);
// Create the encoding which will be stored in an nmethod.
void encode_content_bytes();
@ -368,15 +368,15 @@ class Dependencies: public ResourceObj {
void copy_to(nmethod* nm);
void log_all_dependencies();
void log_dependency(DepType dept, int nargs, ciObject* args[]) {
void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) {
write_dependency_to(log(), dept, nargs, args);
}
void log_dependency(DepType dept,
ciObject* x0,
ciObject* x1 = NULL,
ciObject* x2 = NULL) {
ciBaseObject* x0,
ciBaseObject* x1 = NULL,
ciBaseObject* x2 = NULL) {
if (log() == NULL) return;
ciObject* args[max_arg_count];
ciBaseObject* args[max_arg_count];
args[0] = x0;
args[1] = x1;
args[2] = x2;
@ -384,27 +384,47 @@ class Dependencies: public ResourceObj {
log_dependency(dept, dep_args(dept), args);
}
class DepArgument : public ResourceObj {
private:
bool _is_oop;
bool _valid;
void* _value;
public:
DepArgument() : _is_oop(false), _value(NULL), _valid(false) {}
DepArgument(oop v): _is_oop(true), _value(v), _valid(true) {}
DepArgument(Metadata* v): _is_oop(false), _value(v), _valid(true) {}
bool is_null() const { return _value == NULL; }
bool is_oop() const { return _is_oop; }
bool is_metadata() const { return !_is_oop; }
bool is_klass() const { return is_metadata() && metadata_value()->is_klass(); }
bool is_method() const { return is_metadata() && metadata_value()->is_method(); }
oop oop_value() const { assert(_is_oop && _valid, "must be"); return (oop) _value; }
Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; }
};
static void write_dependency_to(CompileLog* log,
DepType dept,
int nargs, ciObject* args[],
klassOop witness = NULL);
int nargs, ciBaseObject* args[],
Klass* witness = NULL);
static void write_dependency_to(CompileLog* log,
DepType dept,
int nargs, oop args[],
klassOop witness = NULL);
int nargs, DepArgument args[],
Klass* witness = NULL);
static void write_dependency_to(xmlStream* xtty,
DepType dept,
int nargs, oop args[],
klassOop witness = NULL);
int nargs, DepArgument args[],
Klass* witness = NULL);
static void print_dependency(DepType dept,
int nargs, oop args[],
klassOop witness = NULL);
int nargs, DepArgument args[],
Klass* witness = NULL);
private:
// helper for encoding common context types as zero:
static ciKlass* ctxk_encoded_as_null(DepType dept, ciObject* x);
static ciKlass* ctxk_encoded_as_null(DepType dept, ciBaseObject* x);
static klassOop ctxk_encoded_as_null(DepType dept, oop x);
static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x);
public:
// Use this to iterate over an nmethod's dependency set.
@ -433,13 +453,13 @@ class Dependencies: public ResourceObj {
void initial_asserts(size_t byte_limit) NOT_DEBUG({});
inline Metadata* recorded_metadata_at(int i);
inline oop recorded_oop_at(int i);
// => _code? _code->oop_at(i): *_deps->_oop_recorder->handle_at(i)
klassOop check_klass_dependency(KlassDepChange* changes);
klassOop check_call_site_dependency(CallSiteDepChange* changes);
Klass* check_klass_dependency(KlassDepChange* changes);
Klass* check_call_site_dependency(CallSiteDepChange* changes);
void trace_and_log_witness(klassOop witness);
void trace_and_log_witness(Klass* witness);
public:
DepStream(Dependencies* deps)
@ -463,38 +483,39 @@ class Dependencies: public ResourceObj {
int argument_count() { return dep_args(type()); }
int argument_index(int i) { assert(0 <= i && i < argument_count(), "oob");
return _xi[i]; }
oop argument(int i); // => recorded_oop_at(argument_index(i))
klassOop context_type();
Metadata* argument(int i); // => recorded_metadata_at(argument_index(i))
oop argument_oop(int i); // => recorded_oop_at(argument_index(i))
Klass* context_type();
bool is_klass_type() { return Dependencies::is_klass_type(type()); }
methodOop method_argument(int i) {
oop x = argument(i);
Method* method_argument(int i) {
Metadata* x = argument(i);
assert(x->is_method(), "type");
return (methodOop) x;
return (Method*) x;
}
klassOop type_argument(int i) {
oop x = argument(i);
Klass* type_argument(int i) {
Metadata* x = argument(i);
assert(x->is_klass(), "type");
return (klassOop) x;
return (Klass*) x;
}
// The point of the whole exercise: Is this dep still OK?
klassOop check_dependency() {
klassOop result = check_klass_dependency(NULL);
Klass* check_dependency() {
Klass* result = check_klass_dependency(NULL);
if (result != NULL) return result;
return check_call_site_dependency(NULL);
}
// A lighter version: Checks only around recent changes in a class
// hierarchy. (See Universe::flush_dependents_on.)
klassOop spot_check_dependency_at(DepChange& changes);
Klass* spot_check_dependency_at(DepChange& changes);
// Log the current dependency to xtty or compilation log.
void log_dependency(klassOop witness = NULL);
void log_dependency(Klass* witness = NULL);
// Print the current dependency to tty.
void print_dependency(klassOop witness = NULL, bool verbose = false);
void print_dependency(Klass* witness = NULL, bool verbose = false);
};
friend class Dependencies::DepStream;
@ -533,7 +554,7 @@ class DepChange : public StackObj {
// Usage:
// for (DepChange::ContextStream str(changes); str.next(); ) {
// klassOop k = str.klass();
// Klass* k = str.klass();
// switch (str.change_type()) {
// ...
// }
@ -545,8 +566,8 @@ class DepChange : public StackObj {
// iteration variables:
ChangeType _change_type;
klassOop _klass;
objArrayOop _ti_base; // i.e., transitive_interfaces
Klass* _klass;
Array<Klass*>* _ti_base; // i.e., transitive_interfaces
int _ti_index;
int _ti_limit;
@ -566,7 +587,7 @@ class DepChange : public StackObj {
bool next();
ChangeType change_type() { return _change_type; }
klassOop klass() { return _klass; }
Klass* klass() { return _klass; }
};
friend class DepChange::ContextStream;
};
@ -598,10 +619,10 @@ class KlassDepChange : public DepChange {
// What kind of DepChange is this?
virtual bool is_klass_change() const { return true; }
klassOop new_type() { return _new_type(); }
Klass* new_type() { return _new_type(); }
// involves_context(k) is true if k is new_type or any of the super types
bool involves_context(klassOop k);
bool involves_context(Klass* k);
};
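Throughout this API a NULL Klass* answer means the dependency still holds, while a non-NULL answer is a witness that falsifies it, so callers deoptimize exactly when a witness comes back. A self-contained sketch of that convention, with stubbed types:

#include <cstdio>

struct KlassSketch { const char* name; };

// NULL => assertion still valid; non-NULL => witness to the failure
static KlassSketch* check_leaf_type_sketch(KlassSketch* subtype_loaded) {
  return subtype_loaded;
}

int main() {
  KlassSketch sub = { "NewlyLoadedSubtype" };
  if (KlassSketch* witness = check_leaf_type_sketch(&sub))
    std::printf("deoptimize dependents, witness: %s\n", witness->name);
  return 0;
}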

src/share/vm/code/exceptionHandlerTable.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define SHARE_VM_CODE_EXCEPTIONHANDLERTABLE_HPP
#include "memory/allocation.hpp"
#include "oops/methodOop.hpp"
#include "oops/method.hpp"
// A HandlerTableEntry describes an individual entry of a subtable
// of ExceptionHandlerTable. An entry consists of a pair(bci, pco),

src/share/vm/code/icBuffer.cpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/mutexLocker.hpp"
@ -59,16 +59,17 @@ DEF_STUB_INTERFACE(ICStub);
StubQueue* InlineCacheBuffer::_buffer = NULL;
ICStub* InlineCacheBuffer::_next_stub = NULL;
CompiledICHolder* InlineCacheBuffer::_pending_released = NULL;
int InlineCacheBuffer::_pending_count = 0;
void ICStub::finalize() {
if (!is_empty()) {
ResourceMark rm;
CompiledIC *ic = CompiledIC_at(ic_site());
CompiledIC *ic = CompiledIC_at(CodeCache::find_nmethod(ic_site()), ic_site());
assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?");
assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
ic->set_cached_oop(cached_oop());
ic->set_ic_destination(destination());
ic->set_ic_destination_and_value(destination(), cached_value());
}
}
@ -77,25 +78,28 @@ address ICStub::destination() const {
return InlineCacheBuffer::ic_buffer_entry_point(code_begin());
}
oop ICStub::cached_oop() const {
return InlineCacheBuffer::ic_buffer_cached_oop(code_begin());
void* ICStub::cached_value() const {
return InlineCacheBuffer::ic_buffer_cached_value(code_begin());
}
void ICStub::set_stub(CompiledIC *ic, oop cached_value, address dest_addr) {
void ICStub::set_stub(CompiledIC *ic, void* cached_val, address dest_addr) {
// We cannot store a pointer to the 'ic' object, since it is resource allocated. Instead we
// store the location of the inline cache. Then we have enough information to recreate the CompiledIC
// object when we need to remove the stub.
_ic_site = ic->instruction_address();
// Assemble new stub
InlineCacheBuffer::assemble_ic_buffer_code(code_begin(), cached_value, dest_addr);
InlineCacheBuffer::assemble_ic_buffer_code(code_begin(), cached_val, dest_addr);
assert(destination() == dest_addr, "can recover destination");
assert(cached_oop() == cached_value, "can recover destination");
assert(cached_value() == cached_val, "can recover cached value");
}
void ICStub::clear() {
if (CompiledIC::is_icholder_entry(destination())) {
InlineCacheBuffer::queue_for_release((CompiledICHolder*)cached_value());
}
_ic_site = NULL;
}
@ -161,6 +165,7 @@ void InlineCacheBuffer::update_inline_caches() {
buffer()->remove_all();
init_next_stub();
}
release_pending_icholders();
}
@ -179,11 +184,13 @@ void InlineCacheBuffer_init() {
}
void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, oop cached_oop, address entry) {
void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
assert (CompiledIC_lock->is_locked(), "");
assert(cached_oop == NULL || cached_oop->is_perm(), "must belong to perm. space");
if (TraceICBuffer) { tty->print_cr(" create transition stub for " INTPTR_FORMAT, ic->instruction_address()); }
if (TraceICBuffer) {
tty->print_cr(" create transition stub for " INTPTR_FORMAT " destination " INTPTR_FORMAT " cached value " INTPTR_FORMAT,
ic->instruction_address(), entry, cached_value);
}
// If a transition stub is already associated with the inline cache, then we remove the association.
if (ic->is_in_transition_state()) {
@ -193,10 +200,10 @@ void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, oop cached_oop, a
// allocate and initialize new "out-of-line" inline-cache
ICStub* ic_stub = get_next_stub();
ic_stub->set_stub(ic, cached_oop, entry);
ic_stub->set_stub(ic, cached_value, entry);
// Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
ic->set_ic_destination(ic_stub->code_begin());
ic->set_ic_destination(ic_stub);
set_next_stub(new_ic_stub()); // can cause safepoint synchronization
}
@ -208,7 +215,35 @@ address InlineCacheBuffer::ic_destination_for(CompiledIC *ic) {
}
oop InlineCacheBuffer::cached_oop_for(CompiledIC *ic) {
void* InlineCacheBuffer::cached_value_for(CompiledIC *ic) {
ICStub* stub = ICStub_from_destination_address(ic->stub_address());
return stub->cached_oop();
return stub->cached_value();
}
// Free CompiledICHolder*s that are no longer in use
void InlineCacheBuffer::release_pending_icholders() {
assert(SafepointSynchronize::is_at_safepoint(), "should only be called during a safepoint");
CompiledICHolder* holder = _pending_released;
_pending_released = NULL;
while (holder != NULL) {
CompiledICHolder* next = holder->next();
delete holder;
holder = next;
_pending_count--;
}
assert(_pending_count == 0, "wrong count");
}
// Enqueue this icholder for release during the next safepoint. It's
// not safe to free it until then, since it might still be visible to
// another thread.
void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
MutexLockerEx mex(InlineCacheBuffer_lock);
icholder->set_next(_pending_released);
_pending_released = icholder;
_pending_count++;
if (TraceICBuffer) {
tty->print_cr("enqueueing icholder " INTPTR_FORMAT " to be freed", icholder);
}
}
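The two functions above form a deferred-free list: queue_for_release() pushes a holder under InlineCacheBuffer_lock while other threads may still be executing through the stale inline cache, and release_pending_icholders() drains the list only at a safepoint, when no such thread can remain. A minimal standalone sketch of the same pattern, in plain C++ (Holder and the function names here are illustrative, not HotSpot API):

#include <cassert>
#include <mutex>

struct Holder { Holder* next; };

static std::mutex pending_lock;
static Holder*    pending_head  = nullptr;
static int        pending_count = 0;

// Any thread: logically retire the holder, but defer the actual free.
void queue_for_release(Holder* h) {
  std::lock_guard<std::mutex> guard(pending_lock);
  h->next      = pending_head;
  pending_head = h;
  pending_count++;
}

// Quiescent point only (the safepoint): no reader can still see a holder,
// so it is finally safe to delete them all.
void release_pending() {
  Holder* h    = pending_head;
  pending_head = nullptr;
  while (h != nullptr) {
    Holder* next = h->next;
    delete h;
    h = next;
    pending_count--;
  }
  assert(pending_count == 0 && "wrong count");
}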

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ class ICStub: public Stub {
public:
// Creation
void set_stub(CompiledIC *ic, oop cached_value, address dest_addr);
void set_stub(CompiledIC *ic, void* cached_value, address dest_addr);
// Code info
address code_begin() const { return (address)this + round_to(sizeof(ICStub), CodeEntryAlignment); }
@ -70,7 +70,7 @@ class ICStub: public Stub {
// stub info
address destination() const; // destination of jump instruction
oop cached_oop() const; // cached_oop for stub
void* cached_value() const; // cached_value for stub
// Debugging
void verify() PRODUCT_RETURN;
@ -99,6 +99,9 @@ class InlineCacheBuffer: public AllStatic {
static StubQueue* _buffer;
static ICStub* _next_stub;
static CompiledICHolder* _pending_released;
static int _pending_count;
static StubQueue* buffer() { return _buffer; }
static void set_next_stub(ICStub* next_stub) { _next_stub = next_stub; }
static ICStub* get_next_stub() { return _next_stub; }
@ -109,9 +112,9 @@ class InlineCacheBuffer: public AllStatic {
// Machine-dependent implementation of ICBuffer
static void assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point);
static void assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point);
static address ic_buffer_entry_point (address code_begin);
static oop ic_buffer_cached_oop (address code_begin);
static void* ic_buffer_cached_value (address code_begin);
public:
@ -127,11 +130,14 @@ class InlineCacheBuffer: public AllStatic {
// for debugging
static bool is_empty();
static void release_pending_icholders();
static void queue_for_release(CompiledICHolder* icholder);
static int pending_icholder_count() { return _pending_count; }
// New interface
static void create_transition_stub(CompiledIC *ic, oop cached_oop, address entry);
static void create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
static address ic_destination_for(CompiledIC *ic);
static oop cached_oop_for(CompiledIC *ic);
static void* cached_value_for(CompiledIC *ic);
};
#endif // SHARE_VM_CODE_ICBUFFER_HPP

View file

@ -34,7 +34,7 @@
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
@ -59,7 +59,7 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
#define DTRACE_METHOD_UNLOAD_PROBE(method) \
{ \
methodOop m = (method); \
Method* m = (method); \
if (m != NULL) { \
Symbol* klass_name = m->klass_name(); \
Symbol* name = m->name(); \
@ -73,7 +73,7 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
#else /* USDT2 */
#define DTRACE_METHOD_UNLOAD_PROBE(method) \
{ \
methodOop m = (method); \
Method* m = (method); \
if (m != NULL) { \
Symbol* klass_name = m->klass_name(); \
Symbol* name = m->name(); \
@ -495,6 +495,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps) {
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
{
@ -529,6 +530,7 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
int trap_offset,
int frame_complete,
int frame_size) {
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
{
@ -573,6 +575,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
{ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -601,11 +604,11 @@ nmethod* nmethod::new_nmethod(methodHandle method,
// the number of methods compiled. For applications with a lot
// classes the slow way is too slow.
for (Dependencies::DepStream deps(nm); deps.next(); ) {
klassOop klass = deps.context_type();
Klass* klass = deps.context_type();
if (klass == NULL) continue; // ignore things like evol_method
// record this nmethod as dependent on this klass
instanceKlass::cast(klass)->add_dependent_nmethod(nm);
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
}
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
@ -627,7 +630,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
// For native wrappers
nmethod::nmethod(
methodOop method,
Method* method,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@ -658,7 +661,8 @@ nmethod::nmethod(
_consts_offset = data_offset();
_stub_offset = data_offset();
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@ -672,7 +676,7 @@ nmethod::nmethod(
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
code_buffer->copy_oops_to(this);
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
CodeCache::add_scavenge_root_nmethod(this);
}
@ -710,7 +714,7 @@ nmethod::nmethod(
// For dtrace wrappers
#ifdef HAVE_DTRACE_H
nmethod::nmethod(
methodOop method,
Method* method,
int nmethod_size,
CodeOffsets* offsets,
CodeBuffer* code_buffer,
@ -738,7 +742,8 @@ nmethod::nmethod(
_consts_offset = data_offset();
_stub_offset = data_offset();
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = _scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
@ -752,7 +757,7 @@ nmethod::nmethod(
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
code_buffer->copy_oops_to(this);
code_buffer->copy_values_to(this);
debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
}
@ -792,7 +797,7 @@ void* nmethod::operator new(size_t size, int nmethod_size) {
nmethod::nmethod(
methodOop method,
Method* method,
int nmethod_size,
int compile_id,
int entry_bci,
@ -847,7 +852,9 @@ nmethod::nmethod(
}
_oops_offset = data_offset();
_scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize);
_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);
_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);
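All three constructors now place the metadata table between the oop table and the scopes data, so the nmethod data section is laid out as: oops, metadata, scopes data, scopes pcs, dependencies, handler table. A hedged, self-contained sketch of the offset arithmetic (round_to here is a stand-in, and the sizes are hypothetical CodeBuffer totals, not values from this diff):

#include <cstdio>

// Illustrative model of the offset computation in the nmethod constructors.
static size_t round_to(size_t x, size_t unit) {
  return (x + unit - 1) / unit * unit;
}

int main() {
  const size_t oopSize  = sizeof(void*);  // assumed: 64-bit, uncompressed oops
  const size_t wordSize = sizeof(void*);

  size_t total_oop_size      = 40;        // hypothetical CodeBuffer totals
  size_t total_metadata_size = 24;

  size_t oops_offset        = 128;        // hypothetical data_offset()
  size_t metadata_offset    = oops_offset     + round_to(total_oop_size, oopSize);
  size_t scopes_data_offset = metadata_offset + round_to(total_metadata_size, wordSize);

  printf("oops@%zu metadata@%zu scopes_data@%zu\n",
         oops_offset, metadata_offset, scopes_data_offset);
  return 0;
}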
@ -861,7 +868,7 @@ nmethod::nmethod(
_pc_desc_cache.reset_to(scopes_pcs_begin());
// Copy contents of ScopeDescRecorder to nmethod
code_buffer->copy_oops_to(this);
code_buffer->copy_values_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@ -1003,10 +1010,10 @@ inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
}
void nmethod::copy_oops(GrowableArray<jobject>* array) {
//assert(oops_size() == 0, "do this handshake just once, please");
// Must have the same name as the Metadata* overload because it is called from a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
int length = array->length();
assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
@ -1020,6 +1027,14 @@ void nmethod::copy_oops(GrowableArray<jobject>* array) {
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
void nmethod::copy_values(GrowableArray<Metadata*>* array) {
int length = array->length();
assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
Metadata** dest = metadata_begin();
for (int index = 0 ; index < length; index++) {
dest[index] = array->at(index);
}
}
bool nmethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
@ -1054,6 +1069,9 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
}
// Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
} else if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation* reloc = iter.metadata_reloc();
reloc->fix_metadata_relocation();
}
// There must not be any interfering patches or breakpoints.
@ -1172,11 +1190,11 @@ bool nmethod::can_not_entrant_be_converted() {
void nmethod::inc_decompile_count() {
if (!is_compiled_by_c2()) return;
// Could be gated by ProfileTraps, but do not bother...
methodOop m = method();
Method* m = method();
if (m == NULL) return;
methodDataOop mdo = m->method_data();
MethodData* mdo = m->method_data();
if (mdo == NULL) return;
// There is a benign race here. See comments in methodDataOop.hpp.
// There is a benign race here. See comments in methodData.hpp.
mdo->inc_decompile_count();
}
@ -1195,7 +1213,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// Break cycle between nmethod & method
if (TraceClassUnloading && WizardMode) {
tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
" unloadable], methodOop(" INTPTR_FORMAT
" unloadable], Method*(" INTPTR_FORMAT
"), cause(" INTPTR_FORMAT ")",
this, (address)_method, (address)cause);
if (!Universe::heap()->is_gc_active())
@ -1205,12 +1223,12 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
if (is_osr_method()) {
invalidate_osr_method();
}
// If _method is already NULL the methodOop is about to be unloaded,
// If _method is already NULL the Method* is about to be unloaded,
// so we don't have to break the cycle. Note that it is possible to
// have the methodOop live here, in case we unload the nmethod because
// it is pointing to some oop (other than the methodOop) being unloaded.
// have the Method* live here, in case we unload the nmethod because
// it is pointing to some oop (other than the Method*) being unloaded.
if (_method != NULL) {
// OSR methods point to the methodOop, but the methodOop does not
// OSR methods point to the Method*, but the Method* does not
// point back!
if (_method->code() == this) {
_method->clear_code(); // Break a cycle
@ -1230,7 +1248,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// Log the unloading.
log_state_change();
// The methodOop is gone at this point
// The Method* is gone at this point
assert(_method == NULL, "Tautology");
set_osr_link(NULL);
@ -1242,7 +1260,7 @@ void nmethod::invalidate_osr_method() {
assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
// Remove from list of active nmethods
if (method() != NULL)
instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
InstanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
// Set entry as invalid
_entry_bci = InvalidOSREntryBci;
}
@ -1320,7 +1338,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// in Method* as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
@ -1440,13 +1458,13 @@ void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
if (!has_flushed_dependencies()) {
set_has_flushed_dependencies();
for (Dependencies::DepStream deps(this); deps.next(); ) {
klassOop klass = deps.context_type();
Klass* klass = deps.context_type();
if (klass == NULL) continue; // ignore things like evol_method
// During GC the is_alive closure is non-NULL, and is used to
// determine liveness of dependees that need to be updated.
if (is_alive == NULL || is_alive->do_object_b(klass)) {
instanceKlass::cast(klass)->remove_dependent_nmethod(this);
if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
}
}
}
@ -1462,16 +1480,7 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
if (obj == NULL || is_alive->do_object_b(obj)) {
return false;
}
if (obj->is_compiledICHolder()) {
compiledICHolderOop cichk_oop = compiledICHolderOop(obj);
if (is_alive->do_object_b(
cichk_oop->holder_method()->method_holder()) &&
is_alive->do_object_b(cichk_oop->holder_klass())) {
// The oop should be kept alive
keep_alive->do_oop(root);
return false;
}
}
// If ScavengeRootsInCode is true, an nmethod might be unloaded
// simply because one of its constant oops has gone dead.
// No actual classes need to be unloaded in order for this to occur.
@ -1486,7 +1495,7 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event() {
methodOop moop = method();
Method* moop = method();
#ifndef USDT2
HS_DTRACE_PROBE8(hotspot, compiled__method__load,
moop->klass_name()->bytes(),
@ -1541,10 +1550,10 @@ void nmethod::post_compiled_method_unload() {
// If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. Sometime later this nmethod will be made a zombie
// by the sweeper but the methodOop will not be valid at that point.
// by the sweeper but the Method* will not be valid at that point.
// If the _jmethod_id is null then no load event was ever requested
// so don't bother posting the unload. The main reason for this is
// that the jmethodID is a weak reference to the methodOop so if
// that the jmethodID is a weak reference to the Method* so if
// it's being unloaded there's no way to look it up since the weak
// ref will have been cleared.
if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
@ -1602,19 +1611,12 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
unloading_occurred = true;
}
// Follow methodOop
if (can_unload(is_alive, keep_alive, (oop*)&_method, unloading_occurred)) {
return;
}
// Exception cache
ExceptionCache* ec = exception_cache();
while (ec != NULL) {
oop* ex_addr = (oop*)ec->exception_type_addr();
oop ex = *ex_addr;
Klass* ex_klass = ec->exception_type();
ExceptionCache* next_ec = ec->next();
if (ex != NULL && !is_alive->do_object_b(ex)) {
assert(!ex->is_compiledICHolder(), "Possible error here");
if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
remove_from_exception_cache(ec);
}
ec = next_ec;
@ -1629,27 +1631,37 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(iter.reloc());
oop ic_oop = ic->cached_oop();
if (ic_oop != NULL && !is_alive->do_object_b(ic_oop)) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
if (ic_oop->is_compiledICHolder()) {
compiledICHolderOop cichk_oop = compiledICHolderOop(ic_oop);
if (is_alive->do_object_b(
cichk_oop->holder_method()->method_holder()) &&
is_alive->do_object_b(cichk_oop->holder_klass())) {
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
continue;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
continue;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
continue;
}
} else {
ShouldNotReachHere();
}
}
}
ic->set_to_clean();
assert(ic->cached_oop() == NULL,
"cached oop in IC should be cleared");
}
}
}
}
// Compiled code
{
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
@ -1666,6 +1678,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
}
}
}
}
// Scopes
@ -1676,23 +1689,121 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive,
}
}
#ifndef PRODUCT
// This nmethod was not unloaded; check below that all CompiledICs
// refer to marked oops.
{
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
}
#ifdef ASSERT
class CheckClass : AllStatic {
static BoolObjectClosure* _is_alive;
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
Klass* klass = NULL;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(_is_alive), "must be alive");
}
public:
static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
_is_alive = is_alive;
nm->metadata_do(check_class);
}
};
// This is called during a safepoint, so it can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT
// Processing of oop references should have been sufficient to keep
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(iter.reloc());
oop ic_oop = ic->cached_oop();
assert(ic_oop == NULL || is_alive->do_object_b(ic_oop),
"Found unmarked ic_oop in reachable nmethod");
}
// static_stub_Relocations may have dangling references to
// Method*s so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
address static_call_addr = NULL;
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(iter.reloc());
if (!cic->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
} else if (iter.type() == relocInfo::static_call_type) {
CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
if (!csc->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
}
if (static_call_addr != NULL) {
RelocIterator sciter(this, low_boundary);
while (sciter.next()) {
if (sciter.type() == relocInfo::static_stub_type &&
sciter.static_stub_reloc()->static_call() == static_call_addr) {
sciter.static_stub_reloc()->clear_inline_cache();
}
}
}
}
#endif // !PRODUCT
// Check that the metadata embedded in the nmethod is alive
CheckClass::do_check_class(is_alive, this);
#endif
}
// Iterate over metadata calling this function. Used by RedefineClasses
void nmethod::metadata_do(void f(Metadata*)) {
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
{
// Visit all immediate references that are embedded in the instruction stream.
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type ) {
metadata_Relocation* r = iter.metadata_reloc();
// In this loop, we must only follow metadata directly embedded in
// the code. Other metadata (oop_index > 0) is seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
"metadata must be found in exactly one place");
if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
Metadata* md = r->metadata_value();
f(md);
}
}
}
}
// Visit the metadata section
for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
Metadata* md = *p;
f(md);
}
// Visit the Method* itself, which is not embedded in the places above.
if (_method != NULL) f(_method);
}
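metadata_do() therefore visits each Metadata* reachable from the nmethod exactly once: immediates in the instruction stream, entries in the metadata section, and finally _method itself. A hedged usage sketch (the counting visitor is illustrative; the real client added in this commit is nmethod::mark_on_stack, declared in nmethod.hpp below):

static int g_metadata_seen = 0;

// Visitor matching the void f(Metadata*) signature expected by metadata_do.
static void count_metadata(Metadata* md) {
  g_metadata_seen++;
}

// Assumes an nmethod* nm in hand, e.g. from a code-cache walk.
void count_nmethod_metadata(nmethod* nm) {
  g_metadata_seen = 0;
  nm->metadata_do(count_metadata);
}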
// This method is called twice during GC -- once while
// tracing the "active" nmethods on thread stacks during
// the (strong) marking phase, and then again when walking
@ -1719,17 +1830,6 @@ void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
// (See comment above.)
}
// Compiled code
f->do_oop((oop*) &_method);
if (!do_strong_roots_only) {
// weak roots processing phase -- update ExceptionCache oops
ExceptionCache* ec = exception_cache();
while(ec != NULL) {
f->do_oop((oop*)ec->exception_type_addr());
ec = ec->next();
}
} // Else strong roots phase -- skip oops in ExceptionCache
RelocIterator iter(this, low_boundary);
while (iter.next()) {
@ -2063,21 +2163,21 @@ bool nmethod::check_dependency_on(DepChange& changes) {
return found_check;
}
bool nmethod::is_evol_dependent_on(klassOop dependee) {
instanceKlass *dependee_ik = instanceKlass::cast(dependee);
objArrayOop dependee_methods = dependee_ik->methods();
bool nmethod::is_evol_dependent_on(Klass* dependee) {
InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
Array<Method*>* dependee_methods = dependee_ik->methods();
for (Dependencies::DepStream deps(this); deps.next(); ) {
if (deps.type() == Dependencies::evol_method) {
methodOop method = deps.method_argument(0);
Method* method = deps.method_argument(0);
for (int j = 0; j < dependee_methods->length(); j++) {
if ((methodOop) dependee_methods->obj_at(j) == method) {
if (dependee_methods->at(j) == method) {
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x01000000,
("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
_method->method_holder()->klass_part()->external_name(),
_method->method_holder()->external_name(),
_method->name()->as_C_string(),
_method->signature()->as_C_string(), compile_id(),
method->method_holder()->klass_part()->external_name(),
method->method_holder()->external_name(),
method->name()->as_C_string(),
method->signature()->as_C_string()));
if (TraceDependencies || LogCompilation)
@ -2091,11 +2191,11 @@ bool nmethod::is_evol_dependent_on(klassOop dependee) {
}
// Called from mark_for_deoptimization, when dependee is invalidated.
bool nmethod::is_dependent_on_method(methodOop dependee) {
bool nmethod::is_dependent_on_method(Method* dependee) {
for (Dependencies::DepStream deps(this); deps.next(); ) {
if (deps.type() != Dependencies::evol_method)
continue;
methodOop method = deps.method_argument(0);
Method* method = deps.method_argument(0);
if (method == dependee) return true;
}
return false;
@ -2234,7 +2334,7 @@ void nmethod::verify() {
// Make sure all the entry points are correctly aligned for patching.
NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
assert(method()->is_oop(), "must be valid");
// assert(method()->is_oop(), "must be valid");
ResourceMark rm;
@ -2274,11 +2374,11 @@ void nmethod::verify_interrupt_point(address call_site) {
if (CompiledIC_lock->owner() == cur ||
((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
SafepointSynchronize::is_at_safepoint())) {
ic = CompiledIC_at(call_site);
ic = CompiledIC_at(this, call_site);
CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
} else {
MutexLocker ml_verify (CompiledIC_lock);
ic = CompiledIC_at(call_site);
ic = CompiledIC_at(this, call_site);
}
PcDesc* pd = pc_desc_at(ic->end_of_call());
assert(pd != NULL, "PcDesc must exist");
@ -2413,6 +2513,10 @@ void nmethod::print() const {
oops_begin(),
oops_end(),
oops_size());
if (metadata_size () > 0) tty->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
metadata_begin(),
metadata_end(),
metadata_size());
if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
scopes_data_begin(),
scopes_data_end(),
@ -2462,10 +2566,10 @@ void nmethod::print_dependencies() {
tty->print_cr("Dependencies:");
for (Dependencies::DepStream deps(this); deps.next(); ) {
deps.print_dependency();
klassOop ctxk = deps.context_type();
Klass* ctxk = deps.context_type();
if (ctxk != NULL) {
Klass* k = Klass::cast(ctxk);
if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
if (k->oop_is_instance() && ((InstanceKlass*)k)->is_dependent_nmethod(this)) {
tty->print_cr(" [nmethod<=klass]%s", k->external_name());
}
}
@ -2528,6 +2632,16 @@ const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
st.print(")");
return st.as_string();
}
case relocInfo::metadata_type: {
stringStream st;
metadata_Relocation* r = iter.metadata_reloc();
Metadata* obj = r->metadata_value();
st.print("metadata(");
if (obj == NULL) st.print("NULL");
else obj->print_value_on(&st);
st.print(")");
return st.as_string();
}
case relocInfo::virtual_call_type: return "virtual_call";
case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
case relocInfo::static_call_type: return "static_call";
@ -2690,7 +2804,7 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
if (sd->bci() == SynchronizationEntryBCI) {
st->print(";*synchronization entry");
} else {
if (sd->method().is_null()) {
if (sd->method() == NULL) {
st->print("method is NULL");
} else if (sd->method()->is_native()) {
st->print("method is native");
@ -2731,7 +2845,7 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
for (;sd != NULL; sd = sd->sender()) {
st->move_to(column);
st->print("; -");
if (sd->method().is_null()) {
if (sd->method() == NULL) {
st->print("method is NULL");
} else {
sd->method()->print_short_name(st);

View file

@ -27,6 +27,7 @@
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
@ -35,7 +36,7 @@ class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
klassOop _exception_type;
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
int _count;
@ -52,8 +53,7 @@ class ExceptionCache : public CHeapObj<mtCode> {
ExceptionCache(Handle exception, address pc, address handler);
klassOop exception_type() { return _exception_type; }
klassOop* exception_type_addr() { return &_exception_type; }
Klass* exception_type() { return _exception_type; }
ExceptionCache* next() { return _next; }
void set_next(ExceptionCache *ec) { _next = ec; }
@ -112,12 +112,12 @@ class nmethod : public CodeBlob {
friend class CodeCache; // scavengable oops
private:
// Shared fields for all nmethod's
methodOop _method;
Method* _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
@ -148,6 +148,7 @@ class nmethod : public CodeBlob {
int _consts_offset;
int _stub_offset;
int _oops_offset; // offset to where embedded oop table begins (inside data)
int _metadata_offset; // embedded meta data table
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
@ -226,7 +227,7 @@ class nmethod : public CodeBlob {
friend class nmethodLocker;
// For native wrappers
nmethod(methodOop method,
nmethod(Method* method,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
@ -238,7 +239,7 @@ class nmethod : public CodeBlob {
#ifdef HAVE_DTRACE_H
// For native wrappers
nmethod(methodOop method,
nmethod(Method* method,
int nmethod_size,
CodeOffsets* offsets,
CodeBuffer *code_buffer,
@ -246,7 +247,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H
// Creation support
nmethod(methodOop method,
nmethod(Method* method,
int nmethod_size,
int compile_id,
int entry_bci,
@ -325,7 +326,7 @@ class nmethod : public CodeBlob {
#endif // def HAVE_DTRACE_H
// accessors
methodOop method() const { return _method; }
Method* method() const { return _method; }
AbstractCompiler* compiler() const { return _compiler; }
// type info
@ -350,7 +351,10 @@ class nmethod : public CodeBlob {
address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _scopes_data_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
@ -368,6 +372,7 @@ class nmethod : public CodeBlob {
int insts_size () const { return insts_end () - insts_begin (); }
int stub_size () const { return stub_end () - stub_begin (); }
int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
@ -381,6 +386,7 @@ class nmethod : public CodeBlob {
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
@ -448,7 +454,17 @@ class nmethod : public CodeBlob {
return &oops_begin()[index - 1];
}
void copy_oops(GrowableArray<jobject>* oops);
// Support for meta data in scopes and relocs:
// Note: index 0 is reserved for null.
Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); }
Metadata** metadata_addr_at(int index) const { // for GC
// relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
return &metadata_begin()[index - 1];
}
void copy_values(GrowableArray<jobject>* oops);
void copy_values(GrowableArray<Metadata*>* metadata);
// Relocation support
private:
@ -516,6 +532,9 @@ public:
return (addr >= code_begin() && addr < verified_entry_point());
}
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
// unlink and deallocate this nmethod
// Only NMethodSweeper class is expected to use this. NMethodSweeper is not
// expected to use any other private methods/data in this class.
@ -533,8 +552,8 @@ public:
void mark_as_seen_on_stack();
bool can_not_entrant_be_converted();
// Evolution support. We make old (discarded) compiled methods point to new methodOops.
void set_method(methodOop method) { _method = method; }
// Evolution support. We make old (discarded) compiled methods point to new Method*s.
void set_method(Method* method) { _method = method; }
// GC support
void do_unloading(BoolObjectClosure* is_alive, OopClosure* keep_alive,
@ -663,12 +682,12 @@ public:
// Evolution support. Tells if this compiled method is dependent on any of
// methods m() of class dependee, such that if m() in dependee is replaced,
// this compiled method will have to be deoptimized.
bool is_evol_dependent_on(klassOop dependee);
bool is_evol_dependent_on(Klass* dependee);
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
bool is_dependent_on_method(methodOop dependee);
bool is_dependent_on_method(Method* dependee);
// is it ok to patch at address?
bool is_patchable_at(address instr_address);
@ -686,6 +705,12 @@ public:
static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
static int entry_bci_offset() { return offset_of(nmethod, _entry_bci); }
// RedefineClasses support. Mark metadata in nmethods as on_stack so that
// redefine classes doesn't purge it.
static void mark_on_stack(nmethod* nm) {
nm->metadata_do(Metadata::mark_on_stack);
}
void metadata_do(void f(Metadata*));
};
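The mark_on_stack() hook above is how RedefineClasses pins metadata referenced from compiled code. A hedged sketch of the calling pattern (first_nmethod/next_nmethod are assumed stand-ins for the VM's code-cache iteration, which is not part of this diff):

// Walk all nmethods and mark their metadata as on-stack so that class
// redefinition will not purge it.
void mark_all_nmethod_metadata() {
  for (nmethod* nm = first_nmethod(); nm != NULL; nm = next_nmethod(nm)) {
    nmethod::mark_on_stack(nm);   // nm->metadata_do(Metadata::mark_on_stack)
  }
}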
// Locks an nmethod so its code will not get removed and it will not

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,68 +23,65 @@
*/
#include "precompiled.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciMetadata.hpp"
#include "code/oopRecorder.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#ifdef ASSERT
int OopRecorder::_find_index_calls = 0;
int OopRecorder::_hit_indexes = 0;
int OopRecorder::_missed_indexes = 0;
template <class T> int ValueRecorder<T>::_find_index_calls = 0;
template <class T> int ValueRecorder<T>::_hit_indexes = 0;
template <class T> int ValueRecorder<T>::_missed_indexes = 0;
#endif //ASSERT
OopRecorder::OopRecorder(Arena* arena) {
template <class T> ValueRecorder<T>::ValueRecorder(Arena* arena) {
_handles = NULL;
_indexes = NULL;
_arena = arena;
_complete = false;
}
OopRecorder::IndexCache::IndexCache() {
template <class T> template <class X> ValueRecorder<T>::IndexCache<X>::IndexCache() {
assert(first_index > 0, "initial zero state of cache must be invalid index");
Copy::zero_to_bytes(&_cache[0], sizeof(_cache));
}
int OopRecorder::oop_size() {
template <class T> int ValueRecorder<T>::size() {
_complete = true;
if (_handles == NULL) return 0;
return _handles->length() * sizeof(oop);
return _handles->length() * sizeof(T);
}
void OopRecorder::copy_to(nmethod* nm) {
template <class T> void ValueRecorder<T>::copy_values_to(nmethod* nm) {
assert(_complete, "must be frozen");
maybe_initialize(); // get non-null handles, even if we have no oops
nm->copy_oops(_handles);
nm->copy_values(_handles);
}
void OopRecorder::maybe_initialize() {
template <class T> void ValueRecorder<T>::maybe_initialize() {
if (_handles == NULL) {
if (_arena != NULL) {
_handles = new(_arena) GrowableArray<jobject>(_arena, 10, 0, 0);
_handles = new(_arena) GrowableArray<T>(_arena, 10, 0, 0);
_no_finds = new(_arena) GrowableArray<int>( _arena, 10, 0, 0);
} else {
_handles = new GrowableArray<jobject>(10, 0, 0);
_handles = new GrowableArray<T>(10, 0, 0);
_no_finds = new GrowableArray<int>( 10, 0, 0);
}
}
}
jobject OopRecorder::handle_at(int index) {
template <class T> T ValueRecorder<T>::at(int index) {
// there is always a NULL virtually present as first object
if (index == null_index) return NULL;
return _handles->at(index - first_index);
}
// Local definition. Used only in this module.
inline bool OopRecorder::is_real_jobject(jobject h) {
return h != NULL && h != (jobject)Universe::non_oop_word();
}
int OopRecorder::add_handle(jobject h, bool make_findable) {
template <class T> int ValueRecorder<T>::add_handle(T h, bool make_findable) {
assert(!_complete, "cannot allocate more elements after size query");
maybe_initialize();
// indexing uses 1 as an origin--0 means null
@ -92,14 +89,14 @@ int OopRecorder::add_handle(jobject h, bool make_findable) {
_handles->append(h);
// Support correct operation of find_index().
assert(!(make_findable && !is_real_jobject(h)), "nulls are not findable");
assert(!(make_findable && !is_real(h)), "nulls are not findable");
if (make_findable) {
// This index may be returned from find_index().
if (_indexes != NULL) {
int* cloc = _indexes->cache_location(h);
_indexes->set_cache_location_index(cloc, index);
} else if (index == index_cache_threshold && _arena != NULL) {
_indexes = new(_arena) IndexCache();
_indexes = new(_arena) IndexCache<T>();
for (int i = 0; i < _handles->length(); i++) {
// Load the cache with pre-existing elements.
int index0 = i + first_index;
@ -108,10 +105,10 @@ int OopRecorder::add_handle(jobject h, bool make_findable) {
_indexes->set_cache_location_index(cloc, index0);
}
}
} else if (is_real_jobject(h)) {
} else if (is_real(h)) {
// Remember that this index is not to be returned from find_index().
// This case is rare, because most or all uses of allocate_index pass
// a jobject argument of NULL or Universe::non_oop_word.
// an argument of NULL or Universe::non_oop_word.
// Thus, the expected length of _no_finds is zero.
_no_finds->append(index);
}
@ -120,12 +117,12 @@ int OopRecorder::add_handle(jobject h, bool make_findable) {
}
int OopRecorder::maybe_find_index(jobject h) {
template <class T> int ValueRecorder<T>::maybe_find_index(T h) {
debug_only(_find_index_calls++);
assert(!_complete, "cannot allocate more elements after size query");
maybe_initialize();
if (h == NULL) return null_index;
assert(is_real_jobject(h), "must be valid jobject");
assert(is_real(h), "must be valid");
int* cloc = (_indexes == NULL)? NULL: _indexes->cache_location(h);
if (cloc != NULL) {
int cindex = _indexes->cache_location_index(cloc);
@ -156,3 +153,7 @@ int OopRecorder::maybe_find_index(jobject h) {
}
return -1;
}
// Explicitly instantiate these types
template class ValueRecorder<Metadata*>;
template class ValueRecorder<jobject>;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,14 +25,15 @@
#ifndef SHARE_VM_CODE_OOPRECORDER_HPP
#define SHARE_VM_CODE_OOPRECORDER_HPP
#include "memory/universe.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"
// Recording and retrieval of oop relocations in compiled code.
// Recording and retrieval of either oop relocations or metadata in compiled code.
class CodeBlob;
class OopRecorder : public ResourceObj {
template <class T> class ValueRecorder : public StackObj {
public:
// A two-way mapping from positive indexes to oop handles.
// The zero index is reserved for a constant (sharable) null.
@ -40,20 +41,21 @@ class OopRecorder : public ResourceObj {
// Use the given arena to manage storage, if not NULL.
// By default, uses the current ResourceArea.
OopRecorder(Arena* arena = NULL);
ValueRecorder(Arena* arena = NULL);
// Generate a new index on which CodeBlob::oop_addr_at will work.
// Generate a new index on which nmethod::oop_addr_at will work.
// allocate_index and find_index never return the same index,
// and allocate_index never returns the same index twice.
// In fact, two successive calls to allocate_index return successive ints.
int allocate_index(jobject h) {
int allocate_index(T h) {
return add_handle(h, false);
}
// For a given jobject, this will return the same index repeatedly.
// The index can later be given to oop_at to retrieve the oop.
// However, the oop must not be changed via CodeBlob::oop_addr_at.
int find_index(jobject h) {
// For a given jobject or Metadata*, this will return the same index
// repeatedly. The index can later be given to nmethod::oop_at or
// metadata_at to retrieve the oop.
// However, the oop must not be changed via nmethod::oop_addr_at.
int find_index(T h) {
int index = maybe_find_index(h);
if (index < 0) { // previously unallocated
index = add_handle(h, true);
@ -61,23 +63,26 @@ class OopRecorder : public ResourceObj {
return index;
}
// variant of find_index which does not allocate if not found (yields -1)
int maybe_find_index(jobject h);
// returns the size of the generated oop/metadata table, for sizing the
// CodeBlob. Must be called after all oops are allocated!
int size();
// returns the size of the generated oop table, for sizing the CodeBlob.
// must be called after all oops are allocated!
int oop_size();
// Retrieve the value at a given index.
T at(int index);
// Retrieve the oop handle at a given index.
jobject handle_at(int index);
int element_count() {
int count() {
if (_handles == NULL) return 0;
// there is always a NULL virtually present as first object
return _handles->length() + first_index;
}
// copy the generated oop table to nmethod
void copy_to(nmethod* nm); // => nm->copy_oops(_handles)
// Helper function; returns false for NULL or Universe::non_oop_word().
bool is_real(T h) {
return h != NULL && h != (T)Universe::non_oop_word();
}
// copy the generated table to nmethod
void copy_values_to(nmethod* nm);
bool is_unused() { return _handles == NULL && !_complete; }
#ifdef ASSERT
@ -85,10 +90,13 @@ class OopRecorder : public ResourceObj {
#endif
private:
// variant of find_index which does not allocate if not found (yields -1)
int maybe_find_index(T h);
// leaky hash table of handle => index, to help detect duplicate insertion
class IndexCache: public ResourceObj {
// This class is only used by the OopRecorder class.
friend class OopRecorder;
template <class X> class IndexCache : public ResourceObj {
// This class is only used by the ValueRecorder class.
friend class ValueRecorder;
enum {
_log_cache_size = 9,
_cache_size = (1<<_log_cache_size),
@ -98,13 +106,13 @@ class OopRecorder : public ResourceObj {
_index_shift = _collision_bit_shift+1
};
int _cache[_cache_size];
static juint cache_index(jobject handle) {
static juint cache_index(X handle) {
juint ci = (int) (intptr_t) handle;
ci ^= ci >> (BitsPerByte*2);
ci += ci >> (BitsPerByte*1);
return ci & (_cache_size-1);
}
int* cache_location(jobject handle) {
int* cache_location(X handle) {
return &_cache[ cache_index(handle) ];
}
static bool cache_location_collision(int* cloc) {
@ -122,17 +130,14 @@ class OopRecorder : public ResourceObj {
IndexCache();
};
// Helper function; returns false for NULL or Universe::non_oop_word().
inline bool is_real_jobject(jobject h);
void maybe_initialize();
int add_handle(jobject h, bool make_findable);
int add_handle(T h, bool make_findable);
enum { null_index = 0, first_index = 1, index_cache_threshold = 20 };
GrowableArray<jobject>* _handles; // ordered list (first is always NULL)
GrowableArray<T>* _handles; // ordered list (first is always NULL)
GrowableArray<int>* _no_finds; // all unfindable indexes; usually empty
IndexCache* _indexes; // map: jobject -> its probable index
IndexCache<T>* _indexes; // map: handle -> its probable index
Arena* _arena;
bool _complete;
@ -141,4 +146,76 @@ class OopRecorder : public ResourceObj {
#endif
};
class OopRecorder : public ResourceObj {
private:
ValueRecorder<jobject> _oops;
ValueRecorder<Metadata*> _metadata;
public:
OopRecorder(Arena* arena = NULL): _oops(arena), _metadata(arena) {}
int allocate_oop_index(jobject h) {
return _oops.allocate_index(h);
}
int find_index(jobject h) {
return _oops.find_index(h);
}
jobject oop_at(int index) {
return _oops.at(index);
}
int oop_size() {
return _oops.size();
}
int oop_count() {
return _oops.count();
}
bool is_real(jobject h) {
return _oops.is_real(h);
}
int allocate_metadata_index(Metadata* oop) {
return _metadata.allocate_index(oop);
}
int find_index(Metadata* h) {
return _metadata.find_index(h);
}
Metadata* metadata_at(int index) {
return _metadata.at(index);
}
int metadata_size() {
return _metadata.size();
}
int metadata_count() {
return _metadata.count();
}
bool is_real(Metadata* h) {
return _metadata.is_real(h);
}
bool is_unused() {
return _oops.is_unused() && _metadata.is_unused();
}
void freeze() {
_oops.size();
_metadata.size();
}
void copy_values_to(nmethod* nm) {
if (!_oops.is_unused()) {
_oops.copy_values_to(nm);
}
if (!_metadata.is_unused()) {
_metadata.copy_values_to(nm);
}
}
#ifdef ASSERT
bool is_complete() {
assert(_oops.is_complete() == _metadata.is_complete(), "must agree");
return _oops.is_complete();
}
#endif
};
#endif // SHARE_VM_CODE_OOPRECORDER_HPP
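A hedged sketch of the split recorder in use (the method names are the ones declared above; the arena, handle, metadata value, and nmethod are assumed to come from the surrounding compilation):

// Record an oop handle and a Metadata* during code emission, then size
// and copy the tables into the finished nmethod.
OopRecorder recorder(arena);

int oop_idx = recorder.find_index(some_jobject);    // dedupes per handle
int md_idx  = recorder.find_index(some_metadata);   // Metadata* overload

recorder.freeze();                         // calls size() on both recorders
int oop_bytes = recorder.oop_size();       // for sizing the CodeBlob
int md_bytes  = recorder.metadata_size();

// later, once the nmethod has been allocated:
recorder.copy_values_to(nm);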

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -439,6 +439,11 @@ RelocationHolder RelocationHolder::plus(int offset) const {
oop_Relocation* r = (oop_Relocation*)reloc();
return oop_Relocation::spec(r->oop_index(), r->offset() + offset);
}
case relocInfo::metadata_type:
{
metadata_Relocation* r = (metadata_Relocation*)reloc();
return metadata_Relocation::spec(r->metadata_index(), r->offset() + offset);
}
default:
ShouldNotReachHere();
}
@ -578,33 +583,33 @@ void oop_Relocation::unpack_data() {
unpack_2_ints(_oop_index, _offset);
}
void metadata_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
p = pack_2_ints_to(p, _metadata_index, _offset);
dest->set_locs_end((relocInfo*) p);
}
void metadata_Relocation::unpack_data() {
unpack_2_ints(_metadata_index, _offset);
}
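pack_data_to()/unpack_data() simply round-trip the (index, offset) pair through the relocation stream. A hedged sketch of how a back end would typically mint such a relocation (find_index is the OopRecorder method; md and the emit context are assumed):

// md is a Metadata* known to the recorder; index 0 would instead mean
// "immediate", i.e. the value lives in the code stream itself.
int md_index = oop_recorder->find_index(md);
RelocationHolder rspec = metadata_Relocation::spec(md_index);
// rspec is then attached to the instruction that materializes md.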
void virtual_call_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
address point = dest->locs_point();
// Try to make a pointer NULL first.
if (_oop_limit >= point &&
_oop_limit <= point + NativeCall::instruction_size) {
_oop_limit = NULL;
}
// If the _oop_limit is NULL, it "defaults" to the end of the call.
// See ic_call_Relocation::oop_limit() below.
normalize_address(_first_oop, dest);
normalize_address(_oop_limit, dest);
jint x0 = scaled_offset_null_special(_first_oop, point);
jint x1 = scaled_offset_null_special(_oop_limit, point);
p = pack_2_ints_to(p, x0, x1);
normalize_address(_cached_value, dest);
jint x0 = scaled_offset_null_special(_cached_value, point);
p = pack_1_int_to(p, x0);
dest->set_locs_end((relocInfo*) p);
}
void virtual_call_Relocation::unpack_data() {
jint x0, x1; unpack_2_ints(x0, x1);
jint x0 = unpack_1_int();
address point = addr();
_first_oop = x0==0? NULL: address_from_scaled_offset(x0, point);
_oop_limit = x1==0? NULL: address_from_scaled_offset(x1, point);
_cached_value = x0==0? NULL: address_from_scaled_offset(x0, point);
}
@ -799,100 +804,48 @@ void oop_Relocation::verify_oop_relocation() {
}
}
RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop,
oop* &oop_addr, bool *is_optimized) {
assert(ic_call != NULL, "ic_call address must be set");
assert(ic_call != NULL || first_oop != NULL, "must supply a non-null input");
if (nm == NULL) {
CodeBlob* code;
if (ic_call != NULL) {
code = CodeCache::find_blob(ic_call);
} else if (first_oop != NULL) {
code = CodeCache::find_blob(first_oop);
}
nm = code->as_nmethod_or_null();
assert(nm != NULL, "address to parse must be in nmethod");
}
assert(ic_call == NULL || nm->contains(ic_call), "must be in nmethod");
assert(first_oop == NULL || nm->contains(first_oop), "must be in nmethod");
address oop_limit = NULL;
if (ic_call != NULL) {
// search for the ic_call at the given address
RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
first_oop = r->first_oop();
oop_limit = r->oop_limit();
*is_optimized = false;
// metadata versions of the oop accessors above
Metadata** metadata_Relocation::metadata_addr() {
int n = _metadata_index;
if (n == 0) {
// metadata is stored in the code stream
return (Metadata**) pd_address_in_code();
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
*is_optimized = true;
oop_addr = NULL;
first_oop = NULL;
return iter;
// metadata is stored in the table at nmethod::metadata_begin()
return code()->metadata_addr_at(n);
}
}
// search for the first_oop, to get its oop_addr
RelocIterator all_oops(nm, first_oop);
RelocIterator iter = all_oops;
iter.set_limit(first_oop+1);
bool found_oop = false;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
assert(iter.addr() == first_oop, "must find first_oop");
oop_addr = iter.oop_reloc()->oop_addr();
found_oop = true;
break;
}
}
assert(found_oop, "must find first_oop");
bool did_reset = false;
while (ic_call == NULL) {
// search forward for the ic_call matching the given first_oop
while (iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
if (r->first_oop() == first_oop) {
ic_call = r->addr();
oop_limit = r->oop_limit();
break;
}
}
}
guarantee(!did_reset, "cannot find ic_call");
iter = RelocIterator(nm); // search the whole nmethod
did_reset = true;
Metadata* metadata_Relocation::metadata_value() {
Metadata* v = *metadata_addr();
// clean inline caches store a special pseudo-null
if (v == (Metadata*)Universe::non_oop_word()) v = NULL;
return v;
}
assert(oop_limit != NULL && first_oop != NULL && ic_call != NULL, "");
all_oops.set_limit(oop_limit);
return all_oops;
void metadata_Relocation::fix_metadata_relocation() {
if (!metadata_is_immediate()) {
// get the metadata from the pool, and re-insert it into the instruction:
pd_fix_value(value());
}
}
address virtual_call_Relocation::first_oop() {
assert(_first_oop != NULL && _first_oop < addr(), "must precede ic_call");
return _first_oop;
void metadata_Relocation::verify_metadata_relocation() {
if (!metadata_is_immediate()) {
// get the metadata from the pool, and re-insert it into the instruction:
verify_value(value());
}
}
address virtual_call_Relocation::oop_limit() {
if (_oop_limit == NULL)
return addr() + NativeCall::instruction_size;
else
return _oop_limit;
address virtual_call_Relocation::cached_value() {
assert(_cached_value != NULL && _cached_value < addr(), "must precede ic_call");
return _cached_value;
}
void virtual_call_Relocation::clear_inline_cache() {
// No stubs for ICs
// Clean IC
@ -1139,6 +1092,25 @@ void RelocIterator::print_current() {
}
break;
}
case relocInfo::metadata_type:
{
metadata_Relocation* r = metadata_reloc();
Metadata** metadata_addr = NULL;
Metadata* raw_metadata = NULL;
Metadata* metadata_value = NULL;
if (code() != NULL || r->metadata_is_immediate()) {
metadata_addr = r->metadata_addr();
raw_metadata = *metadata_addr;
metadata_value = r->metadata_value();
}
tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
metadata_addr, (address)raw_metadata, r->offset());
if (metadata_value != NULL) {
tty->print("metadata_value=" INTPTR_FORMAT ": ", (address)metadata_value);
metadata_value->print_value_on(tty);
}
break;
}
case relocInfo::external_word_type:
case relocInfo::internal_word_type:
case relocInfo::section_word_type:
@ -1157,8 +1129,8 @@ void RelocIterator::print_current() {
case relocInfo::virtual_call_type:
{
virtual_call_Relocation* r = (virtual_call_Relocation*) reloc();
tty->print(" | [destination=" INTPTR_FORMAT " first_oop=" INTPTR_FORMAT " oop_limit=" INTPTR_FORMAT "]",
r->destination(), r->first_oop(), r->oop_limit());
tty->print(" | [destination=" INTPTR_FORMAT " cached_value=" INTPTR_FORMAT "]",
r->destination(), r->cached_value());
break;
}
case relocInfo::static_stub_type:

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#include "memory/allocation.hpp"
#include "utilities/top.hpp"
class NativeMovConstReg;
// Types in this file:
// relocInfo
// One element of an array of halfwords encoding compressed relocations.
@ -35,8 +37,11 @@
// Relocation
// A flyweight object representing a single relocation.
// It is fully unpacked from the compressed relocation array.
// metadata_Relocation, ... (subclasses of Relocation)
// The location of some type-specific operations (metadata_addr, ...).
// Also, the source of relocation specs (metadata_Relocation::spec, ...).
// oop_Relocation, ... (subclasses of Relocation)
// The location of some type-specific operations (oop_addr, ...).
// oops in the code stream (strings, class loaders)
// Also, the source of relocation specs (oop_Relocation::spec, ...).
// RelocationHolder
// A ValueObj type which acts as a union holding a Relocation object.
@ -118,7 +123,7 @@
// (This means that any relocInfo can be disabled by setting
// its type to none. See relocInfo::remove.)
//
// relocInfo::oop_type -- a reference to an oop
// relocInfo::oop_type, relocInfo::metadata_type -- a reference to an oop or metadata
// Value: an oop, or else the address (handle) of an oop
// Instruction types: memory (load), set (load address)
// Data: [] an oop stored in 4 bytes of instruction
@ -267,7 +272,7 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
poll_type = 10, // polling instruction for safepoints
poll_return_type = 11, // polling instruction for safepoints at return
breakpoint_type = 12, // an initialization barrier or safepoint
yet_unused_type = 13, // Still unused
metadata_type = 13, // metadata that used to be oops
yet_unused_type_2 = 14, // Still unused
data_prefix_tag = 15, // tag for a prefix (carries data arguments)
type_mask = 15 // A mask which selects only the above values
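
metadata_type takes over value 13 under the same 4-bit tag budget (type_mask = 15). A rough sketch of that style of halfword packing, assuming a simple 4-bit-type/12-bit-payload split purely for illustration; the real relocInfo encoder also reserves format bits:

#include <cassert>
#include <cstdint>

// Toy halfword packing: a 4-bit type tag over a 12-bit payload. This only
// shows the idea, not the actual relocInfo bit layout.
constexpr unsigned kTypeBits    = 4;
constexpr unsigned kPayloadBits = 16 - kTypeBits;
constexpr unsigned kTypeMask    = 15;  // matches relocInfo::type_mask

std::uint16_t pack(unsigned type, unsigned payload) {
  assert(type <= kTypeMask && payload < (1u << kPayloadBits));
  return static_cast<std::uint16_t>((type << kPayloadBits) | payload);
}

unsigned unpack_type(std::uint16_t h)    { return h >> kPayloadBits; }
unsigned unpack_payload(std::uint16_t h) { return h & ((1u << kPayloadBits) - 1); }

int main() {
  const unsigned metadata_type = 13;  // the slot this change claims
  std::uint16_t h = pack(metadata_type, 0x123);
  assert(unpack_type(h) == metadata_type);
  assert(unpack_payload(h) == 0x123);
  return 0;
}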
@ -297,6 +302,7 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
#define APPLY_TO_RELOCATIONS(visitor) \
visitor(oop) \
visitor(metadata) \
visitor(virtual_call) \
visitor(opt_virtual_call) \
visitor(static_call) \
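
The one-line addition of visitor(metadata) is what propagates the new relocation kind through the rest of this header: APPLY_TO_RELOCATIONS is an X-macro, re-expanded with different definitions of visitor. A self-contained sketch of the pattern (the list and generated names are invented for illustration, not the exact ones this header produces):

#include <cstdio>

// X-macro: one master list of relocation kinds, re-expanded for every
// per-kind artifact so the pieces can never drift out of sync.
#define APPLY_TO_KINDS(visitor) \
  visitor(oop)                  \
  visitor(metadata)             \
  visitor(virtual_call)

// Expansion 1: an enumerator per kind.
#define AS_ENUM(name) name##_kind,
enum Kind { APPLY_TO_KINDS(AS_ENUM) kind_count };
#undef AS_ENUM

// Expansion 2: a printable name per kind.
#define AS_STRING(name) #name,
static const char* const kind_names[] = { APPLY_TO_KINDS(AS_STRING) };
#undef AS_STRING

int main() {
  for (int k = 0; k < kind_count; k++) {
    std::printf("%d -> %s\n", k, kind_names[k]);
  }
  return 0;
}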
@ -972,35 +978,94 @@ class oop_Relocation : public DataRelocation {
// Note: oop_value transparently converts Universe::non_oop_word to NULL.
};
// copy of oop_Relocation for now but may delete stuff in both/either
class metadata_Relocation : public DataRelocation {
relocInfo::relocType type() { return relocInfo::metadata_type; }
public:
// encode in one of these formats: [] [n] [n l] [Nn l] [Nn Ll]
// a metadata in the CodeBlob's metadata pool
static RelocationHolder spec(int metadata_index, int offset = 0) {
assert(metadata_index > 0, "must be a pool-resident metadata");
RelocationHolder rh = newHolder();
new(rh) metadata_Relocation(metadata_index, offset);
return rh;
}
// a metadata in the instruction stream
static RelocationHolder spec_for_immediate() {
const int metadata_index = 0;
const int offset = 0; // if you want an offset, use the metadata pool
RelocationHolder rh = newHolder();
new(rh) metadata_Relocation(metadata_index, offset);
return rh;
}
private:
jint _metadata_index; // if > 0, index into nmethod::metadata_at
jint _offset; // byte offset to apply to the metadata itself
metadata_Relocation(int metadata_index, int offset) {
_metadata_index = metadata_index; _offset = offset;
}
friend class RelocIterator;
metadata_Relocation() { }
// Fixes a Metadata pointer in the code. Most platforms embed the
// Metadata pointer in the code at compile time, so this is empty
// for them.
void pd_fix_value(address x);
public:
int metadata_index() { return _metadata_index; }
int offset() { return _offset; }
// data is packed in "2_ints" format: [i o] or [Ii Oo]
void pack_data_to(CodeSection* dest);
void unpack_data();
void fix_metadata_relocation(); // reasserts metadata value
void verify_metadata_relocation();
address value() { return (address) *metadata_addr(); }
bool metadata_is_immediate() { return metadata_index() == 0; }
Metadata** metadata_addr(); // addr or &pool[jint_data]
Metadata* metadata_value(); // *metadata_addr
// Note: metadata_value transparently converts Universe::non_oop_word to NULL.
};
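
Note the allocation discipline in spec() and spec_for_immediate() above: the Relocation flyweight is placement-new'ed into a RelocationHolder rather than heap-allocated, so no allocation happens while relocations are being emitted. A reduced model of that holder idiom (types invented for illustration):

#include <cassert>
#include <new>

// Reduced model of the RelocationHolder idiom: spec() placement-news a
// small flyweight into caller-provided storage.
struct ToyReloc {
  int index;
  explicit ToyReloc(int i) : index(i) {}
};

struct ToyHolder {
  alignas(ToyReloc) unsigned char body[sizeof(ToyReloc)];
  ToyReloc* reloc() { return reinterpret_cast<ToyReloc*>(body); }
};

ToyHolder spec(int index) {
  assert(index > 0);             // index 0 is reserved for immediates
  ToyHolder h;
  new (h.body) ToyReloc(index);  // construct in place, no heap involved
  return h;                      // holder is trivially copyable
}

int main() {
  ToyHolder h = spec(7);
  assert(h.reloc()->index == 7);
  return 0;
}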
class virtual_call_Relocation : public CallRelocation {
relocInfo::relocType type() { return relocInfo::virtual_call_type; }
public:
// "first_oop" points to the first associated set-oop.
// "cached_value" points to the first associated set-oop.
// The oop_limit helps find the last associated set-oop.
// (See comments at the top of this file.)
static RelocationHolder spec(address first_oop, address oop_limit = NULL) {
static RelocationHolder spec(address cached_value) {
RelocationHolder rh = newHolder();
new(rh) virtual_call_Relocation(first_oop, oop_limit);
new(rh) virtual_call_Relocation(cached_value);
return rh;
}
virtual_call_Relocation(address first_oop, address oop_limit) {
_first_oop = first_oop; _oop_limit = oop_limit;
assert(first_oop != NULL, "first oop address must be specified");
virtual_call_Relocation(address cached_value) {
_cached_value = cached_value;
assert(cached_value != NULL, "first oop address must be specified");
}
private:
address _first_oop; // location of first set-oop instruction
address _oop_limit; // search limit for set-oop instructions
address _cached_value; // location of set-value instruction
friend class RelocIterator;
virtual_call_Relocation() { }
public:
address first_oop();
address oop_limit();
address cached_value();
// data is packed as scaled offsets in "2_ints" format: [f l] or [Ff Ll]
// oop_limit is set to 0 if the limit falls somewhere within the call.
@ -1010,15 +1075,6 @@ class virtual_call_Relocation : public CallRelocation {
void unpack_data();
void clear_inline_cache();
// Figure out where an ic_call is hiding, given a set-oop or call.
// Either ic_call or first_oop must be non-null; the other is deduced.
// Code if non-NULL must be the nmethod, else it is deduced.
// The address of the patchable oop is also deduced.
// The returned iterator will enumerate over the oops and the ic_call,
// as well as any other relocations that happen to be in that span of code.
// Recognize relevant set_oops with: oop_reloc()->oop_addr() == oop_addr.
static RelocIterator parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized);
};
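
The deleted parse_ic machinery above is the payoff of this rename: the old design recorded a (first_oop, oop_limit) window and scanned it to find the IC's set-oop instruction, while the new one stores the set-value address directly in _cached_value. In miniature, and with invented names, the contrast looks like this:

#include <cassert>
#include <cstddef>
#include <vector>

enum class Tag { set_value, other };
struct Slot { Tag tag; };

// Old shape: the relocation recorded a window [first, limit) and every
// lookup scanned it for the set-value instruction.
Slot* find_set_value_old(std::vector<Slot>& code,
                         std::size_t first, std::size_t limit) {
  for (std::size_t i = first; i < limit; i++) {
    if (code[i].tag == Tag::set_value) return &code[i];
  }
  return nullptr;
}

// New shape: the relocation stores the set-value slot directly, so the
// lookup is a field read and no oop_limit bookkeeping is needed.
struct ToyVirtualCallReloc { Slot* cached_value; };

int main() {
  std::vector<Slot> code = { {Tag::other}, {Tag::set_value}, {Tag::other} };
  Slot* scanned = find_set_value_old(code, 0, code.size());
  ToyVirtualCallReloc r{ &code[1] };
  assert(scanned == r.cached_value);  // same answer, constant time
  return 0;
}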

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ void ScopeDesc::decode_body() {
// This is a sentinel record, which is only relevant to
// approximate queries. Decode a reasonable frame.
_sender_decode_offset = DebugInformationRecorder::serialized_null;
_method = methodHandle(_code->method());
_method = _code->method();
_bci = InvocationEntryBci;
_locals_decode_offset = DebugInformationRecorder::serialized_null;
_expressions_decode_offset = DebugInformationRecorder::serialized_null;
@ -75,7 +75,7 @@ void ScopeDesc::decode_body() {
DebugInfoReadStream* stream = stream_at(decode_offset());
_sender_decode_offset = stream->read_int();
_method = methodHandle((methodOop) stream->read_oop());
_method = stream->read_method();
_bci = stream->read_bci();
// decode offsets for body and sender
@ -157,7 +157,7 @@ ScopeDesc* ScopeDesc::sender() const {
void ScopeDesc::print_value_on(outputStream* st) const {
tty->print(" ");
method()()->print_short_name(st);
method()->print_short_name(st);
int lineno = method()->line_number_from_bci(bci());
if (lineno != -1) {
st->print_cr("@%d (line %d)", bci(), lineno);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,17 +27,17 @@
#include "code/debugInfo.hpp"
#include "code/pcDesc.hpp"
#include "oops/methodOop.hpp"
#include "oops/method.hpp"
#include "utilities/growableArray.hpp"
// SimpleScopeDesc is used when all you need to extract from
// a given pc,nmethod pair is a methodOop and a bci. This is
// a given pc,nmethod pair is a Method* and a bci. This is
// quite a bit faster than allocating a full ScopeDesc, but
// very limited in abilities.
class SimpleScopeDesc : public StackObj {
private:
methodOop _method;
Method* _method;
int _bci;
public:
@ -46,11 +46,11 @@ class SimpleScopeDesc : public StackObj {
assert(pc_desc != NULL, "Must be able to find matching PcDesc");
DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
int ignore_sender = buffer.read_int();
_method = methodOop(buffer.read_oop());
_method = buffer.read_method();
_bci = buffer.read_bci();
}
methodOop method() { return _method; }
Method* method() { return _method; }
int bci() { return _bci; }
};
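
SimpleScopeDesc's reason to exist is unchanged by the rename: callers that only need (method, bci) skip full scope decoding. A toy version of that fast path (the field layout is invented for illustration; the real stream also encodes locals, expressions, and the sender chain):

#include <cassert>
#include <cstdint>

struct Method {};  // stand-in for HotSpot's Method*

// Toy record holding just the leading fields the fast path reads.
struct ScopeRecord {
  std::int32_t sender_decode_offset;  // read and ignored, like ignore_sender
  Method* method;
  std::int32_t bci;
};

// Fast path: extract (method, bci) without building a full scope tree.
struct ToySimpleScope {
  Method* method;
  int bci;
  explicit ToySimpleScope(const ScopeRecord& r)
      : method(r.method), bci(r.bci) {}
};

int main() {
  Method m;
  ScopeRecord rec = { /*sender*/ -1, &m, /*bci*/ 42 };
  ToySimpleScope s(rec);
  assert(s.method == &m && s.bci == 42);
  return 0;
}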
@ -68,7 +68,7 @@ class ScopeDesc : public ResourceObj {
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool return_oop);
// JVM state
methodHandle method() const { return _method; }
Method* method() const { return _method; }
int bci() const { return _bci; }
bool should_reexecute() const { return _reexecute; }
bool return_oop() const { return _return_oop; }
@ -94,7 +94,7 @@ class ScopeDesc : public ResourceObj {
ScopeDesc(const ScopeDesc* parent);
// JVM state
methodHandle _method;
Method* _method;
int _bci;
bool _reexecute;
bool _return_oop;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,7 @@ void VtableStubs::initialize() {
}
address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, methodOop method) {
address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
assert(vtable_index >= 0, "must be positive");
VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@ -200,10 +200,10 @@ void vtableStubs_init() {
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
ResourceMark rm;
HandleMark hm;
klassOop klass = receiver->klass();
instanceKlass* ik = instanceKlass::cast(klass);
Klass* klass = receiver->klass();
InstanceKlass* ik = InstanceKlass::cast(klass);
klassVtable* vt = ik->vtable();
klass->print();
ik->print();
fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
"index %d (vtable length %d)",
(address)receiver, index, vt->length()));

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -123,7 +123,7 @@ class VtableStubs : AllStatic {
static inline uint hash (bool is_vtable_stub, int vtable_index);
public:
static address create_stub(bool is_vtable_stub, int vtable_index, methodOop method); // return the entry point of a stub for this call
static address create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
static bool is_entry_point(address pc); // is pc a vtable stub entry point?
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or NULL
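
For context on create_stub's signature change above: when ShareVtableStubs is enabled, stubs are cached by hash(is_vtable_stub, vtable_index) and reused across call sites. A sketch of that lookup-or-create shape (container and types invented for illustration):

#include <map>
#include <utility>

struct Stub { int vtable_index; };  // stand-in for generated stub code

// Toy cache keyed the way VtableStubs hashes entries: itable and vtable
// stubs for the same index are distinct.
static std::map<std::pair<bool, int>, Stub*> stub_cache;

Stub* create_stub(bool is_vtable_stub, int vtable_index, bool share) {
  const std::pair<bool, int> key(is_vtable_stub, vtable_index);
  if (share) {
    auto it = stub_cache.find(key);
    if (it != stub_cache.end()) return it->second;  // reuse the shared stub
  }
  Stub* s = new Stub{vtable_index};  // here: generate and register the stub
  stub_cache[key] = s;
  return s;
}

int main() {
  Stub* a = create_stub(true, 3, /*share=*/true);
  Stub* b = create_stub(true, 3, /*share=*/true);
  return (a == b) ? 0 : 1;  // shared lookups return the same stub
}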