8203837: Split nmethod unloading from inline cache cleaning

Refactor inline cache cleaning so that it happens after GC do_unloading.

Reviewed-by: thartmann, eosterlund
Coleen Phillimore 2018-05-02 11:28:49 -04:00
parent f2a30dcb3e
commit 3e3414dbf3
15 changed files with 201 additions and 244 deletions


@@ -946,21 +946,8 @@ void nmethod::fix_oop_relocations(address begin, address end, bool initialize_im
 void nmethod::verify_clean_inline_caches() {
   assert_locked_or_safepoint(CompiledIC_lock);
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
   ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
     switch(iter.type()) {
     case relocInfo::virtual_call_type:
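
Here, and again in metadata_do() and oops_do() below, the hand-rolled low_boundary computation is replaced by a single call to oops_reloc_begin(). That helper is defined outside this file, so it does not appear in this diff; judging purely from the lines removed above, it presumably factors out something like:

  // Sketch only: reconstructed from the lines removed above, not shown in this diff.
  // Skip the first bytes of a not-entrant method, where a jump has been patched
  // over the code and any oop embedded there must not be visited.
  address CompiledMethod::oops_reloc_begin() const {
    address low_boundary = verified_entry_point();
    if (!is_in_use()) {
      low_boundary += NativeJump::instruction_size;
    }
    return low_boundary;
  }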
@@ -1041,13 +1028,17 @@ void nmethod::make_unloaded(oop cause) {
   flush_dependencies(/*delete_immediately*/false);
 
   // Break cycle between nmethod & method
-  LogTarget(Trace, class, unload) lt;
+  LogTarget(Trace, class, unload, nmethod) lt;
   if (lt.is_enabled()) {
     LogStream ls(lt);
-    ls.print_cr("making nmethod " INTPTR_FORMAT
-                " unloadable, Method*(" INTPTR_FORMAT
-                "), cause(" INTPTR_FORMAT ")",
-                p2i(this), p2i(_method), p2i(cause));
+    ls.print("making nmethod " INTPTR_FORMAT
+             " unloadable, Method*(" INTPTR_FORMAT
+             "), cause(" INTPTR_FORMAT ") ",
+             p2i(this), p2i(_method), p2i(cause));
+    if (cause != NULL) {
+      cause->print_value_on(&ls);
+    }
+    ls.cr();
   }
   // Unlink the osr method, so we do not look this up again
   if (is_osr_method()) {
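
With the added nmethod tag, this trace output now falls under the class+unload+nmethod tag set. Assuming the standard unified-logging selector syntax, it would be enabled with, for example:

  java -Xlog:class+unload+nmethod=trace -version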
@@ -1378,17 +1369,15 @@ void nmethod::flush_dependencies(bool delete_immediately) {
 // If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root) {
   assert(root != NULL, "just checking");
   oop obj = *root;
   if (obj == NULL || is_alive->do_object_b(obj)) {
     return false;
   }
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
+  // An nmethod might be unloaded simply because one of its constant oops has gone dead.
   // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
   make_unloaded(obj);
   return true;
 }
@@ -1466,7 +1455,7 @@ void nmethod::post_compiled_method_unload() {
   set_unload_reported();
 }
 
-bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive) {
   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
 
   oop_Relocation* r = iter_at_oop->oop_reloc();
@@ -1477,7 +1466,7 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
          "oop must be found in exactly one place");
   if (r->oop_is_immediate() && r->oop_value() != NULL) {
     // Unload this nmethod if the oop is dead.
-    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+    if (can_unload(is_alive, r->oop_addr())) {
       return true;
     }
   }
@@ -1485,18 +1474,18 @@ bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *i
   return false;
 }
 
-bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive) {
   // Scopes
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
+    if (can_unload(is_alive, p)) {
       return true;
     }
   }
   return false;
 }
 
-bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
   // Compiled code
 
   // Prevent extra code cache walk for platforms that don't have immediate oops.
@ -1504,18 +1493,18 @@ bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_aliv
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
if (unload_if_dead_at(&iter, is_alive)) {
return true;
}
}
}
}
return do_unloading_scopes(is_alive, unloading_occurred);
return do_unloading_scopes(is_alive);
}
#if INCLUDE_JVMCI
bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
bool nmethod::do_unloading_jvmci() {
if (_jvmci_installed_code != NULL) {
if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
if (_jvmci_installed_code_triggers_unloading) {
@@ -1533,15 +1522,9 @@ bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
 // Iterate over metadata calling this function.   Used by RedefineClasses
 void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
   {
     // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
     while (iter.next()) {
       if (iter.type() == relocInfo::metadata_type ) {
         metadata_Relocation* r = iter.metadata_reloc();
@@ -1588,20 +1571,9 @@ void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
 
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type ) {
@@ -1650,7 +1622,11 @@ bool nmethod::test_set_oops_do_mark() {
       break;
     }
     // Mark was clear when we first saw this guy.
-    if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
+    }
     return false;
   }
 }
@@ -1659,7 +1635,7 @@ bool nmethod::test_set_oops_do_mark() {
 }
 
 void nmethod::oops_do_marking_prologue() {
-  if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
+  log_trace(gc, nmethod)("oops_do_marking_prologue");
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
   // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
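
This hunk and the surrounding ones replace the ad-hoc TraceScavenge flag with unified logging under the gc+nmethod tags. Assuming the standard -Xlog selector syntax, the equivalent tracing would now be requested with, for example:

  java -Xlog:gc+nmethod=trace -version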
@@ -1675,20 +1651,26 @@ void nmethod::oops_do_marking_epilogue() {
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
     DEBUG_ONLY(cur->verify_oop_relocations());
-    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
+    }
     cur = next;
   }
   nmethod* required = _oops_do_mark_nmethods;
   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
-  if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
+  log_trace(gc, nmethod)("oops_do_marking_epilogue");
 }
 
 class DetectScavengeRoot: public OopClosure {
   bool     _detected_scavenge_root;
+  nmethod* _print_nm;
 public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
+  DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
   bool detected_scavenge_root() { return _detected_scavenge_root; }
   virtual void do_oop(oop* p) {
     if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
@@ -1699,21 +1681,25 @@ public:
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 
 #ifndef PRODUCT
-  nmethod* _print_nm;
   void maybe_print(oop* p) {
-    if (_print_nm == NULL)  return;
-    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
-                  p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
-                  p2i(*p), p2i(p));
-    (*p)->print();
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      if (!_detected_scavenge_root) {
+        CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
+      }
+      ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
+               p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
+               p2i(*p), p2i(p));
+      (*p)->print_value_on(&ls);
+      ls.cr();
+    }
   }
 #endif //PRODUCT
 };
 
 bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);
+  DetectScavengeRoot detect_scavenge_root(this);
   oops_do(&detect_scavenge_root);
   return detect_scavenge_root.detected_scavenge_root();
 }