Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-21 03:24:38 +02:00
8214257: IC cache not clean after cleaning assertion failure
Reviewed-by: kvn, thartmann
commit 055ed2b789
parent e303e31317
2 changed files with 5 additions and 10 deletions
@@ -126,7 +126,6 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
 
   {
     CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
-    MutexLockerEx pl(CompiledICLocker::is_safe(cb->as_compiled_method()) ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
     assert(cb != NULL && cb->is_compiled(), "must be compiled");
     _call->set_destination_mt_safe(entry_point);
   }
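The removed line above relies on HotSpot's conditional-locker idiom: constructing MutexLockerEx with a NULL mutex acquires nothing, so the Patching_lock was only taken when the caller was not already covered by the IC lock. The sketch below is a minimal stand-alone illustration of that idiom in plain C++; ConditionalLocker, patching_mutex, and patch_call_site are hypothetical names invented here, not HotSpot APIs.

#include <mutex>

// Hypothetical stand-in for the "lock only if needed" guard: a null mutex
// pointer means the scope is entered without taking any lock at all.
class ConditionalLocker {
  std::mutex* _mu;
 public:
  explicit ConditionalLocker(std::mutex* mu) : _mu(mu) {
    if (_mu != nullptr) _mu->lock();    // acquire only when a mutex was supplied
  }
  ~ConditionalLocker() {
    if (_mu != nullptr) _mu->unlock();  // release on scope exit
  }
};

std::mutex patching_mutex;  // stand-in for Patching_lock

void patch_call_site(bool already_safe) {
  // Same shape as the removed line:
  //   MutexLockerEx pl(already_safe ? NULL : Patching_lock, ...);
  ConditionalLocker pl(already_safe ? nullptr : &patching_mutex);
  // ... patch the call destination while (possibly) holding the lock ...
}

int main() {
  patch_call_site(false);  // takes patching_mutex
  patch_call_site(true);   // skips the lock entirely
  return 0;
}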
@@ -374,7 +373,7 @@ bool CompiledIC::set_to_clean(bool in_use) {
 
   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
   // we only need to patch the destination
-  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
+  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 
   if (safe_transition) {
     // Kill any leftover stub we might have too
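The hunk above swaps the last disjunct of safe_transition from CompiledICLocker::is_safe(_method) to SafepointSynchronize::is_at_safepoint(). As a rough, hedged sketch of the surrounding control flow (not HotSpot code; every function name below is an illustrative placeholder), a clean transition either patches the call site in place when any one condition holds, or otherwise routes through a transitional stub so executing threads never see a half-updated inline cache.

#include <cstdio>

// Illustrative placeholders only; none of these are HotSpot functions.
static bool call_is_safe_for_patching() { return false; }
static bool ic_is_optimized()           { return false; }
static bool at_safepoint()              { return false; }

static void patch_destination_in_place() { std::puts("patch call site directly"); }
static void use_transition_stub()        { std::puts("go through a transitional stub"); }

// Mirrors the shape of the predicate in the hunk: any single condition
// makes it safe to patch the destination without an intermediate stub.
void set_to_clean(bool in_use) {
  bool safe_transition = call_is_safe_for_patching() || !in_use ||
                         ic_is_optimized() || at_safepoint();
  if (safe_transition) {
    patch_destination_in_place();
  } else {
    use_transition_stub();
  }
}

int main() {
  set_to_clean(/*in_use=*/true);   // no condition holds here -> stub path
  set_to_clean(/*in_use=*/false);  // !in_use holds -> direct patch
  return 0;
}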
@@ -427,8 +426,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
   if (info.to_interpreter() || info.to_aot()) {
     // Call to interpreter
     if (info.is_optimized() && is_optimized()) {
       assert(is_clean(), "unsafe IC path");
-      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
       // the call analysis (callee structure) specifies that the call is optimized
       // (either because of CHA or the static target is final)
       // At code generation time, this call has been emitted as static call
@@ -602,7 +600,6 @@ bool CompiledStaticCall::set_to_clean(bool in_use) {
   // in_use is unused but needed to match template function in CompiledMethod
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
   // Reset call site
-  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
   set_destination_mt_safe(resolve_call_stub());
 
   // Do not reset stub here: It is too expensive to call find_stub.
@@ -648,7 +645,6 @@ void CompiledStaticCall::set_to_compiled(address entry) {
 
 void CompiledStaticCall::set(const StaticCallInfo& info) {
   assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
   // Updating a cache to the wrong entry can cause bugs that are very hard
   // to track down - if cache entry gets invalid - we just clean it. In
   // this way it is always the same code path that is responsible for
@@ -1059,7 +1059,7 @@ methodHandle SharedRuntime::extract_attached_method(vframeStream& vfst) {
 
   address pc = vfst.frame_pc();
   { // Get call instruction under lock because another thread may be busy patching it.
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+    CompiledICLocker ic_locker(caller);
     return caller->attached_method_before_pc(pc);
   }
   return NULL;
@@ -1765,7 +1765,7 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
     {
       // Get call instruction under lock because another thread may be
       // busy patching it.
-      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+      CompiledICLocker ml(caller_nm);
       // Location of call instruction
       call_addr = caller_nm->call_instruction_address(pc);
     }
@@ -1940,9 +1940,8 @@ IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
   if (moop->code() == NULL) return;
 
   if (nm->is_in_use()) {
-
     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
-    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
+    CompiledICLocker ic_locker(nm);
     if (NativeCall::is_call_before(return_pc)) {
       ResourceMark mark;
       NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
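Across the sharedRuntime.cpp hunks, the explicit MutexLockerEx on Patching_lock is replaced by a CompiledICLocker constructed from the compiled method being patched, i.e. the locking policy moves behind an RAII guard keyed to the method rather than being repeated at each call site. Below is a minimal, self-contained sketch of that shape; IcPatchGuard and FakeCompiledMethod are invented for illustration and do not reflect HotSpot's actual implementation.

#include <mutex>

// Invented stand-ins: the real CompiledICLocker encapsulates whatever
// synchronization HotSpot actually needs; a per-method mutex is enough
// here to show the RAII shape.
struct FakeCompiledMethod {
  std::mutex ic_mutex;
};

class IcPatchGuard {
  FakeCompiledMethod* _cm;
 public:
  explicit IcPatchGuard(FakeCompiledMethod* cm) : _cm(cm) { _cm->ic_mutex.lock(); }
  ~IcPatchGuard() { _cm->ic_mutex.unlock(); }
};

void reresolve_call_site(FakeCompiledMethod* caller_nm) {
  // Same shape as the new line: CompiledICLocker ml(caller_nm);
  IcPatchGuard ml(caller_nm);
  // ... inspect or patch the call instruction while the guard is held ...
}

int main() {
  FakeCompiledMethod nm;
  reresolve_call_site(&nm);
  return 0;
}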