8262256: C2 intrinsics should not modify IR when bailing out

Reviewed-by: roland, kvn
This commit is contained in:
Tobias Hartmann 2021-03-03 11:30:52 +00:00
parent 0265ab63e4
commit 54dfd79cda

View file

@ -108,7 +108,9 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
#endif #endif
ciMethod* callee = kit.callee(); ciMethod* callee = kit.callee();
const int bci = kit.bci(); const int bci = kit.bci();
#ifdef ASSERT
Node* ctrl = kit.control();
#endif
// Try to inline the intrinsic. // Try to inline the intrinsic.
if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) && if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) &&
kit.try_to_inline(_last_predicate)) { kit.try_to_inline(_last_predicate)) {
@ -132,6 +134,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
} }
// The intrinsic bailed out // The intrinsic bailed out
assert(ctrl == kit.control(), "Control flow was added although the intrinsic bailed out");
if (jvms->has_method()) { if (jvms->has_method()) {
// Not a root compile. // Not a root compile.
const char* msg; const char* msg;
@ -2198,15 +2201,12 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
Node* receiver = argument(0); // type: oop Node* receiver = argument(0); // type: oop
// Build address expression. // Build address expression.
Node* adr;
Node* heap_base_oop = top(); Node* heap_base_oop = top();
Node* offset = top();
Node* val;
// The base is either a Java object or a value produced by Unsafe.staticFieldBase // The base is either a Java object or a value produced by Unsafe.staticFieldBase
Node* base = argument(1); // type: oop Node* base = argument(1); // type: oop
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
offset = argument(2); // type: long Node* offset = argument(2); // type: long
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted // to be plain byte offsets, which are also the same as those accepted
// by oopDesc::field_addr. // by oopDesc::field_addr.
@ -2214,12 +2214,19 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
"fieldOffset must be byte-scaled"); "fieldOffset must be byte-scaled");
// 32-bit machines ignore the high half! // 32-bit machines ignore the high half!
offset = ConvL2X(offset); offset = ConvL2X(offset);
adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
Node* adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) { if (_gvn.type(base)->isa_ptr() == TypePtr::NULL_PTR) {
if (type != T_OBJECT) { if (type != T_OBJECT) {
decorators |= IN_NATIVE; // off-heap primitive access decorators |= IN_NATIVE; // off-heap primitive access
} else { } else {
set_map(old_map);
set_sp(old_sp);
return false; // off-heap oop accesses are not supported return false; // off-heap oop accesses are not supported
} }
} else { } else {
@ -2233,10 +2240,12 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
decorators |= IN_HEAP; decorators |= IN_HEAP;
} }
val = is_store ? argument(4) : NULL; Node* val = is_store ? argument(4) : NULL;
const TypePtr* adr_type = _gvn.type(adr)->isa_ptr(); const TypePtr* adr_type = _gvn.type(adr)->isa_ptr();
if (adr_type == TypePtr::NULL_PTR) { if (adr_type == TypePtr::NULL_PTR) {
set_map(old_map);
set_sp(old_sp);
return false; // off-heap access with zero address return false; // off-heap access with zero address
} }
@ -2246,6 +2255,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
if (alias_type->adr_type() == TypeInstPtr::KLASS || if (alias_type->adr_type() == TypeInstPtr::KLASS ||
alias_type->adr_type() == TypeAryPtr::RANGE) { alias_type->adr_type() == TypeAryPtr::RANGE) {
set_map(old_map);
set_sp(old_sp);
return false; // not supported return false; // not supported
} }
@ -2264,6 +2275,8 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
} }
if ((bt == T_OBJECT) != (type == T_OBJECT)) { if ((bt == T_OBJECT) != (type == T_OBJECT)) {
// Don't intrinsify mismatched object accesses // Don't intrinsify mismatched object accesses
set_map(old_map);
set_sp(old_sp);
return false; return false;
} }
mismatched = (bt != type); mismatched = (bt != type);
@ -2271,6 +2284,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c
mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
} }
old_map->destruct(&_gvn);
assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched"); assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
if (mismatched) { if (mismatched) {
@ -2505,6 +2519,9 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt
assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
// 32-bit machines ignore the high half of long offsets // 32-bit machines ignore the high half of long offsets
offset = ConvL2X(offset); offset = ConvL2X(offset);
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false); Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false);
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
@ -2513,9 +2530,13 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt
if (bt != T_ILLEGAL && if (bt != T_ILLEGAL &&
(is_reference_type(bt) != (type == T_OBJECT))) { (is_reference_type(bt) != (type == T_OBJECT))) {
// Don't intrinsify mismatched object accesses. // Don't intrinsify mismatched object accesses.
set_map(old_map);
set_sp(old_sp);
return false; return false;
} }
old_map->destruct(&_gvn);
// For CAS, unlike inline_unsafe_access, there seems no point in // For CAS, unlike inline_unsafe_access, there seems no point in
// trying to refine types. Just use the coarse types here. // trying to refine types. Just use the coarse types here.
assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");