8033380: Experimental VM flag to enforce access atomicity

-XX:+AlwaysAtomicAccesses to unconditionally enforce access atomicity.

Reviewed-by: roland, kvn, iveresov
Author: Aleksey Shipilev
Date:   2014-03-03 15:54:45 +04:00
parent 0b6a5f744a
commit c4bd0f58d3
4 changed files with 37 additions and 12 deletions
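
For context: the Java Memory Model (JLS 17.7) allows a VM to split reads and writes of non-volatile long and double fields into two 32-bit halves, so a racy reader, typically on a 32-bit VM, may observe a "torn" value. The new flag asks the VM to generate atomic accesses even for such plain fields. The sketch below is an illustration only and is not part of this change; the class and field names are made up, and because the flag is experimental it has to be unlocked with -XX:+UnlockExperimentalVMOptions. A stress harness such as jcstress is the reliable way to detect tearing; a plain loop like this may never observe it even where it is permitted.

// Illustration only: a racy reader of a plain (non-volatile) long field.
// Without AlwaysAtomicAccesses, JLS 17.7 allows a 32-bit VM to perform the
// store and the load as two 32-bit halves, so the reader may see a torn
// value such as 0x00000000ffffffff.
//
// Suggested invocation:
//   java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses LongTearing
public class LongTearing {
    static long value;   // deliberately not volatile

    public static void main(String[] args) {
        Thread writer = new Thread(() -> {
            boolean flip = false;
            while (true) {
                value = flip ? -1L : 0L;   // all-ones or all-zeros bit pattern
                flip = !flip;
            }
        });
        writer.setDaemon(true);
        writer.start();

        for (long i = 0; i < 1_000_000_000L; i++) {
            long v = value;                // plain read; may tear without the flag
            if (v != 0L && v != -1L) {
                System.out.printf("torn read observed: 0x%016x%n", v);
                return;
            }
        }
        System.out.println("no torn read observed");
    }
}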

src/share/vm/c1/c1_LIRGenerator.cpp

@@ -1734,7 +1734,8 @@ void LIRGenerator::do_StoreField(StoreField* x) {
                 (info ? new CodeEmitInfo(info) : NULL));
   }

-  if (is_volatile && !needs_patching) {
+  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+  if (needs_atomic_access && !needs_patching) {
     volatile_field_store(value.result(), address, info);
   } else {
     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@@ -1807,7 +1808,8 @@ void LIRGenerator::do_LoadField(LoadField* x) {
     address = generate_address(object.result(), x->offset(), field_type);
   }

-  if (is_volatile && !needs_patching) {
+  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+  if (needs_atomic_access && !needs_patching) {
     volatile_field_load(address, reg, info);
   } else {
     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;

src/share/vm/c1/c1_Runtime1.cpp

@@ -809,11 +809,10 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
     int bci = vfst.bci();
     Bytecodes::Code code = caller_method()->java_code_at(bci);

-#ifndef PRODUCT
     // this is used by assertions in the access_field_patching_id
     BasicType patch_field_type = T_ILLEGAL;
-#endif // PRODUCT
     bool deoptimize_for_volatile = false;
+    bool deoptimize_for_atomic = false;
     int patch_field_offset = -1;
     KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
     KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
@@ -839,11 +838,24 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
       // is the path for patching field offsets. load_klass is only
       // used for patching references to oops which don't need special
       // handling in the volatile case.
       deoptimize_for_volatile = result.access_flags().is_volatile();
-#ifndef PRODUCT
+
+      // If we are patching a field which should be atomic, then
+      // the generated code is not correct either, force deoptimizing.
+      // We need to only cover T_LONG and T_DOUBLE fields, as we can
+      // break access atomicity only for them.
+
+      // Strictly speaking, the deoptimization on 64-bit platforms
+      // is unnecessary, and T_LONG stores on 32-bit platforms need
+      // to be handled by special patching code when AlwaysAtomicAccesses
+      // becomes a product feature. At this point, we are still going
+      // for the deoptimization for consistency with volatile
+      // accesses.
       patch_field_type = result.field_type();
-#endif
+      deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
     } else if (load_klass_or_mirror_patch_id) {
       Klass* k = NULL;
       switch (code) {
@@ -918,13 +930,19 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
       ShouldNotReachHere();
     }

-    if (deoptimize_for_volatile) {
-      // At compile time we assumed the field wasn't volatile but after
-      // loading it turns out it was volatile so we have to throw the
+    if (deoptimize_for_volatile || deoptimize_for_atomic) {
+      // At compile time we assumed the field wasn't volatile/atomic but after
+      // loading it turns out it was volatile/atomic so we have to throw the
       // compiled code out and let it be regenerated.
       if (TracePatching) {
-        tty->print_cr("Deoptimizing for patching volatile field reference");
+        if (deoptimize_for_volatile) {
+          tty->print_cr("Deoptimizing for patching volatile field reference");
+        }
+        if (deoptimize_for_atomic) {
+          tty->print_cr("Deoptimizing for patching atomic field reference");
+        }
       }
+
       // It's possible the nmethod was invalidated in the last
       // safepoint, but if it's still alive then make it not_entrant.
       nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
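
The deoptimization added above only comes into play when the compiled code still contains a field-patching stub, that is, when the field's holder class had not been loaded at the time the method was compiled. The shape below is a hypothetical illustration of code that reaches this path and is not taken from the commit; whether the JIT actually compiles hotMethod() before Holder is loaded depends on compilation thresholds and VM configuration.

// Hypothetical illustration of the patching path. Holder is still unloaded
// while hotMethod() is being JIT-compiled, so the access to its long field is
// emitted with a patching stub. When the branch is finally taken, the VM ends
// up in Runtime1::patch_code(); with -XX:+AlwaysAtomicAccesses set, the T_LONG
// field now forces deoptimization instead of patching, mirroring the existing
// handling of volatile fields.
class Holder {
    static long counter;        // T_LONG field, resolved lazily
}

public class PatchingShape {
    static volatile boolean useHolder = false;

    static void hotMethod() {
        if (useHolder) {
            Holder.counter++;   // unresolved at compile time -> patching stub
        }
    }

    public static void main(String[] args) {
        for (int i = 0; i < 1_000_000; i++) {
            hotMethod();        // warm up while Holder remains unloaded
        }
        useHolder = true;
        hotMethod();            // first real access: patch or deoptimize here
    }
}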

src/share/vm/opto/parse3.cpp

@@ -233,7 +233,8 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   // Build the load.
   //
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, needs_atomic_access);

   // Adjust Java stack
   if (type2size[bt] == 1)
@@ -314,7 +315,8 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
     }
     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
   } else {
-    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
+    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
   }

   // If reference is volatile, prevent following volatiles ops from

src/share/vm/runtime/globals.hpp

@@ -3864,6 +3864,9 @@ class CommandLineFlags {
           "Allocation less than this value will be allocated "           \
           "using malloc. Larger allocations will use mmap.")              \
                                                                           \
+  experimental(bool, AlwaysAtomicAccesses, false,                         \
+          "Accesses to all variables should always be atomic")            \
+                                                                          \
   product(bool, EnableTracing, false,                                     \
           "Enable event-based tracing")                                   \
                                                                           \
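
Because the flag is declared experimental rather than product, it stays locked unless -XX:+UnlockExperimentalVMOptions is also given. The snippet below is a small, hypothetical sanity check, not part of the commit, that reads the flag back at runtime through the standard com.sun.management.HotSpotDiagnosticMXBean API.

import com.sun.management.HotSpotDiagnosticMXBean;
import com.sun.management.VMOption;
import java.lang.management.ManagementFactory;

// Run with: java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses CheckFlag
public class CheckFlag {
    public static void main(String[] args) {
        HotSpotDiagnosticMXBean bean =
                ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
        VMOption opt = bean.getVMOption("AlwaysAtomicAccesses");
        // With the options above this should print something like:
        //   AlwaysAtomicAccesses = true (origin: VM_CREATION)
        System.out.println(opt.getName() + " = " + opt.getValue()
                + " (origin: " + opt.getOrigin() + ")");
    }
}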