mirror of
https://github.com/openjdk/jdk.git
synced 2025-08-28 15:24:43 +02:00
8256425: Obsolete Biased Locking in JDK 18
Reviewed-by: kvn, dholmes, dcubed, rrich
This commit is contained in:
parent
595446bff4
commit
2fd7943ec1
165 changed files with 293 additions and 5261 deletions
|
@ -183,7 +183,6 @@ ifeq ($(call check-jvm-feature, opt-size), true)
|
|||
assembler.cpp \
|
||||
barrierSet.cpp \
|
||||
basicLock.cpp \
|
||||
biasedLocking.cpp \
|
||||
bytecode.cpp \
|
||||
bytecodeInterpreter.cpp \
|
||||
c1_Compilation.cpp \
|
||||
|
|
|
@ -3788,10 +3788,6 @@ encode %{
|
|||
__ br(Assembler::NE, cont);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking && !UseOptoBiasInlining) {
|
||||
__ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont);
|
||||
}
|
||||
|
||||
// Check for existing monitor
|
||||
__ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
|
||||
|
||||
|
@ -3862,10 +3858,6 @@ encode %{
|
|||
|
||||
assert_different_registers(oop, box, tmp, disp_hdr);
|
||||
|
||||
if (UseBiasedLocking && !UseOptoBiasInlining) {
|
||||
__ biased_locking_exit(oop, tmp, cont);
|
||||
}
|
||||
|
||||
// Find the lock address and load the displaced header from the stack.
|
||||
__ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
|
@ -8887,11 +8879,6 @@ instruct storePConditional(memory8 heap_top_ptr, iRegP oldval, iRegP newval, rFl
|
|||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
|
||||
// storeLConditional is used by PhaseMacroExpand::expand_lock_node
|
||||
// when attempting to rebias a lock towards the current thread. We
|
||||
// must use the acquire form of cmpxchg in order to guarantee acquire
|
||||
// semantics in this case.
|
||||
instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
|
||||
%{
|
||||
match(Set cr (StoreLConditional mem (Binary oldval newval)));
|
||||
|
|
|
@ -3275,8 +3275,6 @@ inline const Assembler::Condition operator~(const Assembler::Condition cond) {
|
|||
return Assembler::Condition(int(cond) ^ 1);
|
||||
}
|
||||
|
||||
class BiasedLockingCounters;
|
||||
|
||||
extern "C" void das(uint64_t start, int len);
|
||||
|
||||
#endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP
|
||||
|
|
|
@ -2577,13 +2577,9 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
|||
if (!UseFastLocking) {
|
||||
__ b(*op->stub()->entry());
|
||||
} else if (op->code() == lir_lock) {
|
||||
Register scratch = noreg;
|
||||
if (UseBiasedLocking) {
|
||||
scratch = op->scratch_opr()->as_register();
|
||||
}
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// add debug info for NullPointerException only if one is possible
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
if (op->info() != NULL) {
|
||||
add_debug_info_for_null_check(null_check_offset, op->info());
|
||||
}
|
||||
|
|
|
@ -331,11 +331,6 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
|
|||
|
||||
// "lock" stores the address of the monitor stack slot, so this is not an oop
|
||||
LIR_Opr lock = new_register(T_INT);
|
||||
// Need a scratch register for biased locking
|
||||
LIR_Opr scratch = LIR_OprFact::illegalOpr;
|
||||
if (UseBiasedLocking) {
|
||||
scratch = new_register(T_INT);
|
||||
}
|
||||
|
||||
CodeEmitInfo* info_for_exception = NULL;
|
||||
if (x->needs_null_check()) {
|
||||
|
@ -344,7 +339,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
|
|||
// this CodeEmitInfo must not have the xhandlers because here the
|
||||
// object is already locked (xhandlers expect object to be unlocked)
|
||||
CodeEmitInfo* info = state_for(x, x->state(), true);
|
||||
monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
|
||||
monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
|
||||
x->monitor_no(), info_for_exception, info);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
@ -61,7 +60,7 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
|
|||
}
|
||||
}
|
||||
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
const int aligned_mask = BytesPerWord -1;
|
||||
const int hdr_offset = oopDesc::mark_offset_in_bytes();
|
||||
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
|
||||
|
@ -82,11 +81,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
|||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
assert(scratch != noreg, "should have scratch register at this point");
|
||||
biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
|
||||
}
|
||||
|
||||
// Load object header
|
||||
ldr(hdr, Address(obj, hdr_offset));
|
||||
// and mark it as unlocked
|
||||
|
@ -122,10 +116,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
|||
cbnz(hdr, slow_case);
|
||||
// done
|
||||
bind(done);
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
lea(rscratch2, ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
|
||||
addmw(Address(rscratch2, 0), 1, rscratch1);
|
||||
}
|
||||
return null_check_offset;
|
||||
}
|
||||
|
||||
|
@ -136,21 +126,13 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
|
|||
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
|
||||
Label done;
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
biased_locking_exit(obj, hdr, done);
|
||||
}
|
||||
|
||||
// load displaced header
|
||||
ldr(hdr, Address(disp_hdr, 0));
|
||||
// if the loaded hdr is NULL we had recursive locking
|
||||
// if we had recursive locking, we are done
|
||||
cbz(hdr, done);
|
||||
if (!UseBiasedLocking) {
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
}
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
verify_oop(obj);
|
||||
// test if object header is pointing to the displaced header, and if so, restore
|
||||
// the displaced header in the object - if the object header is not pointing to
|
||||
|
@ -179,13 +161,8 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
|
|||
|
||||
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
|
||||
assert_different_registers(obj, klass, len);
|
||||
if (UseBiasedLocking && !len->is_valid()) {
|
||||
assert_different_registers(obj, klass, len, t1, t2);
|
||||
ldr(t1, Address(klass, Klass::prototype_header_offset()));
|
||||
} else {
|
||||
// This assumes that all prototype bits fit in an int32_t
|
||||
mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
|
||||
}
|
||||
// This assumes that all prototype bits fit in an int32_t
|
||||
mov(t1, (int32_t)(intptr_t)markWord::prototype().value());
|
||||
str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
if (UseCompressedClassPointers) { // Take care not to kill klass
|
||||
|
|
|
@ -58,9 +58,8 @@ using MacroAssembler::null_check;
|
|||
// hdr : must be r0, contents destroyed
|
||||
// obj : must point to the object to lock, contents preserved
|
||||
// disp_hdr: must point to the displaced header location, contents preserved
|
||||
// scratch : scratch register, contents destroyed
|
||||
// returns code offset at which to add null check debug information
|
||||
int lock_object (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
|
||||
int lock_object (Register swap, Register obj, Register disp_hdr, Label& slow_case);
|
||||
|
||||
// unlocking
|
||||
// hdr : contents destroyed
|
||||
|
|
|
@ -39,7 +39,6 @@
|
|||
#include "prims/jvmtiExport.hpp"
|
||||
#include "prims/jvmtiThreadState.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/safepointMechanism.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
|
@ -754,10 +753,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
|
|||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
|
||||
}
|
||||
|
||||
// Load (object->mark() | 1) into swap_reg
|
||||
ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
orr(swap_reg, rscratch1, 1);
|
||||
|
@ -769,17 +764,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
|
|||
"displached header must be first word in BasicObjectLock");
|
||||
|
||||
Label fail;
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
Label fast;
|
||||
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
|
||||
bind(fast);
|
||||
atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
|
||||
rscratch2, rscratch1, tmp);
|
||||
b(done);
|
||||
bind(fail);
|
||||
} else {
|
||||
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
|
||||
}
|
||||
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
|
||||
|
||||
// Fast check for recursive lock.
|
||||
//
|
||||
|
@ -816,12 +801,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
|
|||
|
||||
// Save the test result, for recursive case, the result is zero
|
||||
str(swap_reg, Address(lock_reg, mark_offset));
|
||||
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
br(Assembler::NE, slow_case);
|
||||
atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
|
||||
rscratch2, rscratch1, tmp);
|
||||
}
|
||||
br(Assembler::EQ, done);
|
||||
|
||||
bind(slow_case);
|
||||
|
@ -872,10 +851,6 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
|
|||
// Free entry
|
||||
str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_exit(obj_reg, header_reg, done);
|
||||
}
|
||||
|
||||
// Load the old header from BasicLock structure
|
||||
ldr(header_reg, Address(swap_reg,
|
||||
BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
|
|
@ -44,7 +44,6 @@
|
|||
#include "oops/accessDecorators.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/jniHandles.inline.hpp"
|
||||
|
@ -442,178 +441,6 @@ void MacroAssembler::reserved_stack_check() {
|
|||
bind(no_reserved_zone_enabling);
|
||||
}
|
||||
|
||||
void MacroAssembler::biased_locking_enter(Register lock_reg,
|
||||
Register obj_reg,
|
||||
Register swap_reg,
|
||||
Register tmp_reg,
|
||||
bool swap_reg_contains_mark,
|
||||
Label& done,
|
||||
Label* slow_case,
|
||||
BiasedLockingCounters* counters) {
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
assert_different_registers(lock_reg, obj_reg, swap_reg);
|
||||
|
||||
if (PrintBiasedLockingStatistics && counters == NULL)
|
||||
counters = BiasedLocking::counters();
|
||||
|
||||
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1, rscratch2, noreg);
|
||||
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
|
||||
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
|
||||
Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
|
||||
Address saved_mark_addr(lock_reg, 0);
|
||||
|
||||
// Biased locking
|
||||
// See whether the lock is currently biased toward our thread and
|
||||
// whether the epoch is still valid
|
||||
// Note that the runtime guarantees sufficient alignment of JavaThread
|
||||
// pointers to allow age to be placed into low bits
|
||||
// First check to see whether biasing is even enabled for this object
|
||||
Label cas_label;
|
||||
if (!swap_reg_contains_mark) {
|
||||
ldr(swap_reg, mark_addr);
|
||||
}
|
||||
andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
|
||||
cmp(tmp_reg, (u1)markWord::biased_lock_pattern);
|
||||
br(Assembler::NE, cas_label);
|
||||
// The bias pattern is present in the object's header. Need to check
|
||||
// whether the bias owner and the epoch are both still current.
|
||||
load_prototype_header(tmp_reg, obj_reg);
|
||||
orr(tmp_reg, tmp_reg, rthread);
|
||||
eor(tmp_reg, swap_reg, tmp_reg);
|
||||
andr(tmp_reg, tmp_reg, ~((int) markWord::age_mask_in_place));
|
||||
if (counters != NULL) {
|
||||
Label around;
|
||||
cbnz(tmp_reg, around);
|
||||
atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1, rscratch2);
|
||||
b(done);
|
||||
bind(around);
|
||||
} else {
|
||||
cbz(tmp_reg, done);
|
||||
}
|
||||
|
||||
Label try_revoke_bias;
|
||||
Label try_rebias;
|
||||
|
||||
// At this point we know that the header has the bias pattern and
|
||||
// that we are not the bias owner in the current epoch. We need to
|
||||
// figure out more details about the state of the header in order to
|
||||
// know what operations can be legally performed on the object's
|
||||
// header.
|
||||
|
||||
// If the low three bits in the xor result aren't clear, that means
|
||||
// the prototype header is no longer biased and we have to revoke
|
||||
// the bias on this object.
|
||||
andr(rscratch1, tmp_reg, markWord::biased_lock_mask_in_place);
|
||||
cbnz(rscratch1, try_revoke_bias);
|
||||
|
||||
// Biasing is still enabled for this data type. See whether the
|
||||
// epoch of the current bias is still valid, meaning that the epoch
|
||||
// bits of the mark word are equal to the epoch bits of the
|
||||
// prototype header. (Note that the prototype header's epoch bits
|
||||
// only change at a safepoint.) If not, attempt to rebias the object
|
||||
// toward the current thread. Note that we must be absolutely sure
|
||||
// that the current epoch is invalid in order to do this because
|
||||
// otherwise the manipulations it performs on the mark word are
|
||||
// illegal.
|
||||
andr(rscratch1, tmp_reg, markWord::epoch_mask_in_place);
|
||||
cbnz(rscratch1, try_rebias);
|
||||
|
||||
// The epoch of the current bias is still valid but we know nothing
|
||||
// about the owner; it might be set or it might be clear. Try to
|
||||
// acquire the bias of the object using an atomic operation. If this
|
||||
// fails we will go in to the runtime to revoke the object's bias.
|
||||
// Note that we first construct the presumed unbiased header so we
|
||||
// don't accidentally blow away another thread's valid bias.
|
||||
{
|
||||
Label here;
|
||||
mov(rscratch1, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
|
||||
andr(swap_reg, swap_reg, rscratch1);
|
||||
orr(tmp_reg, swap_reg, rthread);
|
||||
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
bind(here);
|
||||
if (counters != NULL) {
|
||||
atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
|
||||
tmp_reg, rscratch1, rscratch2);
|
||||
}
|
||||
}
|
||||
b(done);
|
||||
|
||||
bind(try_rebias);
|
||||
// At this point we know the epoch has expired, meaning that the
|
||||
// current "bias owner", if any, is actually invalid. Under these
|
||||
// circumstances _only_, we are allowed to use the current header's
|
||||
// value as the comparison value when doing the cas to acquire the
|
||||
// bias in the current epoch. In other words, we allow transfer of
|
||||
// the bias from one thread to another directly in this situation.
|
||||
//
|
||||
// FIXME: due to a lack of registers we currently blow away the age
|
||||
// bits in this situation. Should attempt to preserve them.
|
||||
{
|
||||
Label here;
|
||||
load_prototype_header(tmp_reg, obj_reg);
|
||||
orr(tmp_reg, rthread, tmp_reg);
|
||||
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
|
||||
// If the biasing toward our thread failed, then another thread
|
||||
// succeeded in biasing it toward itself and we need to revoke that
|
||||
// bias. The revocation will occur in the runtime in the slow case.
|
||||
bind(here);
|
||||
if (counters != NULL) {
|
||||
atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
|
||||
tmp_reg, rscratch1, rscratch2);
|
||||
}
|
||||
}
|
||||
b(done);
|
||||
|
||||
bind(try_revoke_bias);
|
||||
// The prototype mark in the klass doesn't have the bias bit set any
|
||||
// more, indicating that objects of this data type are not supposed
|
||||
// to be biased any more. We are going to try to reset the mark of
|
||||
// this object to the prototype value and fall through to the
|
||||
// CAS-based locking scheme. Note that if our CAS fails, it means
|
||||
// that another thread raced us for the privilege of revoking the
|
||||
// bias of this particular object, so it's okay to continue in the
|
||||
// normal locking code.
|
||||
//
|
||||
// FIXME: due to a lack of registers we currently blow away the age
|
||||
// bits in this situation. Should attempt to preserve them.
|
||||
{
|
||||
Label here, nope;
|
||||
load_prototype_header(tmp_reg, obj_reg);
|
||||
cmpxchg_obj_header(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
|
||||
bind(here);
|
||||
|
||||
// Fall through to the normal CAS-based lock, because no matter what
|
||||
// the result of the above CAS, some thread must have succeeded in
|
||||
// removing the bias bit from the object's header.
|
||||
if (counters != NULL) {
|
||||
atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
|
||||
rscratch1, rscratch2);
|
||||
}
|
||||
bind(nope);
|
||||
}
|
||||
|
||||
bind(cas_label);
|
||||
}
|
||||
|
||||
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
|
||||
// Check for biased locking unlock case, which is a no-op
|
||||
// Note: we do not have to check the thread ID for two reasons.
|
||||
// First, the interpreter checks for IllegalMonitorStateException at
|
||||
// a higher level. Second, if the bias was revoked while we held the
|
||||
// lock, the object could not be rebiased toward another thread, so
|
||||
// the bias bit would be clear.
|
||||
ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
andr(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
|
||||
cmp(temp_reg, (u1)markWord::biased_lock_pattern);
|
||||
br(Assembler::EQ, done);
|
||||
}
|
||||
|
||||
static void pass_arg0(MacroAssembler* masm, Register arg) {
|
||||
if (c_rarg0 != arg ) {
|
||||
masm->mov(c_rarg0, arg);
|
||||
|
@ -3835,11 +3662,6 @@ void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp)
|
|||
cmp(trial_klass, tmp);
|
||||
}
|
||||
|
||||
void MacroAssembler::load_prototype_header(Register dst, Register src) {
|
||||
load_klass(dst, src);
|
||||
ldr(dst, Address(dst, Klass::prototype_header_offset()));
|
||||
}
|
||||
|
||||
void MacroAssembler::store_klass(Register dst, Register src) {
|
||||
// FIXME: Should this be a store release? concurrent gcs assumes
|
||||
// klass length is valid if klass field is not null.
|
||||
|
|
|
@ -105,20 +105,6 @@ class MacroAssembler: public Assembler {
|
|||
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
|
||||
|
||||
// Biased locking support
|
||||
// lock_reg and obj_reg must be loaded up with the appropriate values.
|
||||
// swap_reg is killed.
|
||||
// tmp_reg must be supplied and must not be rscratch1 or rscratch2
|
||||
// Optional slow case is for implementations (interpreter and C1) which branch to
|
||||
// slow case directly. Leaves condition codes set for C2's Fast_Lock node.
|
||||
void biased_locking_enter(Register lock_reg, Register obj_reg,
|
||||
Register swap_reg, Register tmp_reg,
|
||||
bool swap_reg_contains_mark,
|
||||
Label& done, Label* slow_case = NULL,
|
||||
BiasedLockingCounters* counters = NULL);
|
||||
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
|
||||
|
||||
|
||||
// Helper functions for statistics gathering.
|
||||
// Unconditional atomic increment.
|
||||
void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
|
||||
|
@ -847,8 +833,6 @@ public:
|
|||
// stored using routines that take a jobject.
|
||||
void store_heap_oop_null(Address dst);
|
||||
|
||||
void load_prototype_header(Register dst, Register src);
|
||||
|
||||
void store_klass_gap(Register dst, Register src);
|
||||
|
||||
// This dummy is to prevent a call to store_heap_oop from
|
||||
|
|
|
@ -1773,10 +1773,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
// Load the oop from the handle
|
||||
__ ldr(obj_reg, Address(oop_handle_reg, 0));
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
|
||||
}
|
||||
|
||||
// Load (object->mark() | 1) into swap_reg %r0
|
||||
__ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
__ orr(swap_reg, rscratch1, 1);
|
||||
|
@ -1924,11 +1920,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
__ ldr(obj_reg, Address(oop_handle_reg, 0));
|
||||
|
||||
Label done;
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
__ biased_locking_exit(obj_reg, old_hdr, done);
|
||||
}
|
||||
|
||||
// Simple recursive lock?
|
||||
|
||||
__ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
|
||||
|
|
|
@ -3553,11 +3553,7 @@ void TemplateTable::_new() {
|
|||
|
||||
// initialize object header only.
|
||||
__ bind(initialize_header);
|
||||
if (UseBiasedLocking) {
|
||||
__ ldr(rscratch1, Address(r4, Klass::prototype_header_offset()));
|
||||
} else {
|
||||
__ mov(rscratch1, (intptr_t)markWord::prototype().value());
|
||||
}
|
||||
__ mov(rscratch1, (intptr_t)markWord::prototype().value());
|
||||
__ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
|
||||
__ store_klass_gap(r0, zr); // zero klass gap for compressed oops
|
||||
__ store_klass(r0, r4); // store klass last
|
||||
|
|
|
@ -5460,7 +5460,6 @@ instruct storeXConditional( memoryex mem, iRegX oldval, iRegX newval, iRegX tmp,
|
|||
__ cmp($tmp$$Register, 1, eq);
|
||||
__ b(loop, eq);
|
||||
__ teq($tmp$$Register, 0);
|
||||
// used by biased locking only. Requires a membar.
|
||||
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::LoadLoad), noreg);
|
||||
%}
|
||||
ins_pipe( long_memory_op );
|
||||
|
@ -8960,7 +8959,6 @@ instruct partialSubtypeCheck( R0RegP index, R1RegP sub, R2RegP super, flagsRegP
|
|||
instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
|
||||
%{
|
||||
match(Set pcc (FastLock object box));
|
||||
predicate(!(UseBiasedLocking && !UseOptoBiasInlining));
|
||||
|
||||
effect(TEMP scratch, TEMP scratch2);
|
||||
ins_cost(DEFAULT_COST*3);
|
||||
|
@ -8972,22 +8970,6 @@ instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRe
|
|||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
instruct cmpFastLock_noBiasInline(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2,
|
||||
iRegP scratch, iRegP scratch3) %{
|
||||
match(Set pcc (FastLock object box));
|
||||
predicate(UseBiasedLocking && !UseOptoBiasInlining);
|
||||
|
||||
effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
|
||||
ins_cost(DEFAULT_COST*5);
|
||||
|
||||
format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $scratch3" %}
|
||||
ins_encode %{
|
||||
__ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
|
||||
%}
|
||||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
|
||||
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
|
||||
match(Set pcc (FastUnlock object box));
|
||||
effect(TEMP scratch, TEMP scratch2);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -34,7 +34,6 @@
|
|||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -34,7 +34,6 @@
|
|||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/jvm_misc.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
|
|
@ -247,7 +247,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
|||
if (method()->is_synchronized()) {
|
||||
monitor_address(0, FrameMap::R0_opr);
|
||||
stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
|
||||
__ unlock_object(R2, R1, R0, Rtemp, *stub->entry());
|
||||
__ unlock_object(R2, R1, R0, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
|
@ -2429,19 +2429,17 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
|||
Register obj = op->obj_opr()->as_pointer_register();
|
||||
Register hdr = op->hdr_opr()->as_pointer_register();
|
||||
Register lock = op->lock_opr()->as_pointer_register();
|
||||
Register tmp = op->scratch_opr()->is_illegal() ? noreg :
|
||||
op->scratch_opr()->as_pointer_register();
|
||||
|
||||
if (!UseFastLocking) {
|
||||
__ b(*op->stub()->entry());
|
||||
} else if (op->code() == lir_lock) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
if (op->info() != NULL) {
|
||||
add_debug_info_for_null_check(null_check_offset, op->info());
|
||||
}
|
||||
} else if (op->code() == lir_unlock) {
|
||||
__ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry());
|
||||
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
|
|
@ -412,21 +412,13 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
|
|||
LIR_Opr lock = new_pointer_register();
|
||||
LIR_Opr hdr = new_pointer_register();
|
||||
|
||||
// Need a scratch register for biased locking on arm
|
||||
LIR_Opr scratch = LIR_OprFact::illegalOpr;
|
||||
if(UseBiasedLocking) {
|
||||
scratch = new_pointer_register();
|
||||
} else {
|
||||
scratch = atomicLockOpr();
|
||||
}
|
||||
|
||||
CodeEmitInfo* info_for_exception = NULL;
|
||||
if (x->needs_null_check()) {
|
||||
info_for_exception = state_for(x);
|
||||
}
|
||||
|
||||
CodeEmitInfo* info = state_for(x, x->state(), true);
|
||||
monitor_enter(obj.result(), lock, hdr, scratch,
|
||||
monitor_enter(obj.result(), lock, hdr, LIR_OprFact::illegalOpr,
|
||||
x->monitor_no(), info_for_exception, info);
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
@ -90,11 +89,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tm
|
|||
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
|
||||
assert_different_registers(obj, klass, len, tmp);
|
||||
|
||||
if(UseBiasedLocking && !len->is_valid()) {
|
||||
ldr(tmp, Address(klass, Klass::prototype_header_offset()));
|
||||
} else {
|
||||
mov(tmp, (intptr_t)markWord::prototype().value());
|
||||
}
|
||||
mov(tmp, (intptr_t)markWord::prototype().value());
|
||||
|
||||
str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
|
@ -187,14 +182,12 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len,
|
|||
initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
|
||||
}
|
||||
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj,
|
||||
Register disp_hdr, Register tmp1,
|
||||
Label& slow_case) {
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
Label done, fast_lock, fast_lock_done;
|
||||
int null_check_offset = 0;
|
||||
|
||||
const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
|
||||
assert_different_registers(hdr, obj, disp_hdr, tmp1, tmp2);
|
||||
assert_different_registers(hdr, obj, disp_hdr, tmp2);
|
||||
|
||||
assert(BasicObjectLock::lock_offset_in_bytes() == 0, "ajust this code");
|
||||
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
|
||||
|
@ -211,10 +204,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
|
|||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
|
||||
}
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
|
@ -249,23 +238,12 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj,
|
|||
cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);
|
||||
|
||||
bind(fast_lock_done);
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
bind(done);
|
||||
|
||||
return null_check_offset;
|
||||
}
|
||||
|
||||
void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
|
||||
Register disp_hdr, Register tmp,
|
||||
Label& slow_case) {
|
||||
// Note: this method is not using its 'tmp' argument
|
||||
|
||||
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
assert_different_registers(hdr, obj, disp_hdr, Rtemp);
|
||||
Register tmp2 = Rtemp;
|
||||
|
||||
|
@ -274,11 +252,6 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
|
|||
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
|
||||
|
||||
Label done;
|
||||
if (UseBiasedLocking) {
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, obj_offset));
|
||||
biased_locking_exit(obj, hdr, done);
|
||||
}
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
|
@ -287,10 +260,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
|
|||
// If hdr is NULL, we've got recursive locking and there's nothing more to do
|
||||
cbz(hdr, done);
|
||||
|
||||
if(!UseBiasedLocking) {
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, obj_offset));
|
||||
}
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, obj_offset));
|
||||
|
||||
// Restore the object header
|
||||
cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);
|
||||
|
|
|
@ -59,9 +59,9 @@
|
|||
max_array_allocation_length = 0x01000000
|
||||
};
|
||||
|
||||
int lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case);
|
||||
int lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
|
||||
|
||||
void unlock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case);
|
||||
void unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case);
|
||||
|
||||
// This platform only uses signal-based null checks. The Label is not needed.
|
||||
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -78,7 +78,7 @@ void C2_MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
|
|||
// mov(result_reg, 1); //equal
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2, Register scratch3) {
|
||||
void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2) {
|
||||
assert(VM_Version::supports_ldrex(), "unsupported, yet?");
|
||||
|
||||
Register Rmark = Rscratch2;
|
||||
|
@ -97,14 +97,6 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
|
|||
b(done, ne);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking && !UseOptoBiasInlining) {
|
||||
assert(scratch3 != noreg, "need extra temporary for -XX:-UseOptoBiasInlining");
|
||||
biased_locking_enter(Roop, Rmark, Rscratch, false, scratch3, done, done);
|
||||
// Fall through if lock not biased otherwise branch to done
|
||||
}
|
||||
|
||||
// Invariant: Rmark loaded below does not contain biased lock pattern
|
||||
|
||||
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
|
||||
tst(Rmark, markWord::unlocked_value);
|
||||
b(fast_lock, ne);
|
||||
|
@ -148,10 +140,6 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
|
|||
|
||||
Label done;
|
||||
|
||||
if (UseBiasedLocking && !UseOptoBiasInlining) {
|
||||
biased_locking_exit(Roop, Rscratch, done);
|
||||
}
|
||||
|
||||
ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
|
||||
// If hdr is NULL, we've got recursive locking and there's nothing more to do
|
||||
cmp(Rmark, 0);
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
Register limit, Register result,
|
||||
Register chr1, Register chr2, Label& Ldone);
|
||||
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
|
||||
#endif // CPU_ARM_C2_MACROASSEMBLER_ARM_HPP
|
||||
|
|
|
@ -40,7 +40,6 @@
|
|||
#include "prims/jvmtiExport.hpp"
|
||||
#include "prims/jvmtiThreadState.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/safepointMechanism.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
|
@ -890,11 +889,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
|||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
|
||||
}
|
||||
|
||||
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as ether CAS or slow case path is taken in that case.
|
||||
// Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
|
||||
|
@ -912,12 +906,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
|||
|
||||
cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
|
||||
}
|
||||
#endif //!PRODUCT
|
||||
|
||||
b(done);
|
||||
|
||||
// If we got here that means the object is locked by ether calling thread or another thread.
|
||||
|
@ -962,13 +950,6 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
|||
// If still 'eq' then recursive locking OK: store 0 into lock record
|
||||
str(R0, Address(Rlock, mark_offset), eq);
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
cond_atomic_inc32(eq, BiasedLocking::fast_path_entry_count_addr());
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
b(done, eq);
|
||||
|
||||
bind(slow_case);
|
||||
|
@ -1010,10 +991,6 @@ void InterpreterMacroAssembler::unlock_object(Register Rlock) {
|
|||
// Free entry
|
||||
str(Rzero, Address(Rlock, obj_offset));
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_exit(Robj, Rmark, done);
|
||||
}
|
||||
|
||||
// Load the old header from BasicLock structure
|
||||
ldr(Rmark, Address(Rlock, mark_offset));
|
||||
|
||||
|
|
|
@ -40,7 +40,6 @@
|
|||
#include "oops/accessDecorators.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/jniHandles.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
|
@ -1288,221 +1287,6 @@ void MacroAssembler::cond_atomic_inc32(AsmCondition cond, int* counter_addr) {
|
|||
|
||||
#endif // !PRODUCT
|
||||
|
||||
|
||||
// Building block for CAS cases of biased locking: makes CAS and records statistics.
|
||||
// The slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
|
||||
void MacroAssembler::biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
|
||||
Register tmp, Label& slow_case, int* counter_addr) {
|
||||
|
||||
cas_for_lock_acquire(old_mark_reg, new_mark_reg, obj_reg, tmp, slow_case);
|
||||
#ifdef ASSERT
|
||||
breakpoint(ne); // Fallthrough only on success
|
||||
#endif
|
||||
#ifndef PRODUCT
|
||||
if (counter_addr != NULL) {
|
||||
cond_atomic_inc32(al, counter_addr);
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
}
|
||||
|
||||
void MacroAssembler::biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
|
||||
bool swap_reg_contains_mark,
|
||||
Register tmp2,
|
||||
Label& done, Label& slow_case,
|
||||
BiasedLockingCounters* counters) {
|
||||
// obj_reg must be preserved (at least) if the bias locking fails
|
||||
// tmp_reg is a temporary register
|
||||
// swap_reg was used as a temporary but contained a value
|
||||
// that was used afterwards in some call pathes. Callers
|
||||
// have been fixed so that swap_reg no longer needs to be
|
||||
// saved.
|
||||
// Rtemp in no longer scratched
|
||||
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
assert_different_registers(obj_reg, swap_reg, tmp_reg, tmp2);
|
||||
guarantee(swap_reg!=tmp_reg, "invariant");
|
||||
assert(tmp_reg != noreg, "must supply tmp_reg");
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintBiasedLockingStatistics && (counters == NULL)) {
|
||||
counters = BiasedLocking::counters();
|
||||
}
|
||||
#endif
|
||||
|
||||
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
|
||||
Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// Biased locking
|
||||
// See whether the lock is currently biased toward our thread and
|
||||
// whether the epoch is still valid
|
||||
// Note that the runtime guarantees sufficient alignment of JavaThread
|
||||
// pointers to allow age to be placed into low bits
|
||||
// First check to see whether biasing is even enabled for this object
|
||||
Label cas_label;
|
||||
|
||||
if (!swap_reg_contains_mark) {
|
||||
ldr(swap_reg, mark_addr);
|
||||
}
|
||||
|
||||
// On MP platform loads could return 'stale' values in some cases.
|
||||
// That is acceptable since either CAS or slow case path is taken in the worst case.
|
||||
|
||||
andr(tmp_reg, swap_reg, markWord::biased_lock_mask_in_place);
|
||||
cmp(tmp_reg, markWord::biased_lock_pattern);
|
||||
|
||||
b(cas_label, ne);
|
||||
|
||||
// The bias pattern is present in the object's header. Need to check
|
||||
// whether the bias owner and the epoch are both still current.
|
||||
load_klass(tmp_reg, obj_reg);
|
||||
ldr(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
|
||||
orr(tmp_reg, tmp_reg, Rthread);
|
||||
eor(tmp_reg, tmp_reg, swap_reg);
|
||||
|
||||
bics(tmp_reg, tmp_reg, ((int) markWord::age_mask_in_place));
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (counters != NULL) {
|
||||
cond_atomic_inc32(eq, counters->biased_lock_entry_count_addr());
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
b(done, eq);
|
||||
|
||||
Label try_revoke_bias;
|
||||
Label try_rebias;
|
||||
|
||||
// At this point we know that the header has the bias pattern and
|
||||
// that we are not the bias owner in the current epoch. We need to
|
||||
// figure out more details about the state of the header in order to
|
||||
// know what operations can be legally performed on the object's
|
||||
// header.
|
||||
|
||||
// If the low three bits in the xor result aren't clear, that means
|
||||
// the prototype header is no longer biased and we have to revoke
|
||||
// the bias on this object.
|
||||
tst(tmp_reg, markWord::biased_lock_mask_in_place);
|
||||
b(try_revoke_bias, ne);
|
||||
|
||||
// Biasing is still enabled for this data type. See whether the
|
||||
// epoch of the current bias is still valid, meaning that the epoch
|
||||
// bits of the mark word are equal to the epoch bits of the
|
||||
// prototype header. (Note that the prototype header's epoch bits
|
||||
// only change at a safepoint.) If not, attempt to rebias the object
|
||||
// toward the current thread. Note that we must be absolutely sure
|
||||
// that the current epoch is invalid in order to do this because
|
||||
// otherwise the manipulations it performs on the mark word are
|
||||
// illegal.
|
||||
tst(tmp_reg, markWord::epoch_mask_in_place);
|
||||
b(try_rebias, ne);
|
||||
|
||||
// tmp_reg has the age, epoch and pattern bits cleared
|
||||
// The remaining (owner) bits are (Thread ^ current_owner)
|
||||
|
||||
// The epoch of the current bias is still valid but we know nothing
|
||||
// about the owner; it might be set or it might be clear. Try to
|
||||
// acquire the bias of the object using an atomic operation. If this
|
||||
// fails we will go in to the runtime to revoke the object's bias.
|
||||
// Note that we first construct the presumed unbiased header so we
|
||||
// don't accidentally blow away another thread's valid bias.
|
||||
|
||||
// Note that we know the owner is not ourself. Hence, success can
|
||||
// only happen when the owner bits is 0
|
||||
|
||||
// until the assembler can be made smarter, we need to make some assumptions about the values
|
||||
// so we can optimize this:
|
||||
assert((markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place) == 0x1ff, "biased bitmasks changed");
|
||||
|
||||
mov(swap_reg, AsmOperand(swap_reg, lsl, 23));
|
||||
mov(swap_reg, AsmOperand(swap_reg, lsr, 23)); // markWord with thread bits cleared (for CAS)
|
||||
|
||||
orr(tmp_reg, swap_reg, Rthread); // new mark
|
||||
|
||||
biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, slow_case,
|
||||
(counters != NULL) ? counters->anonymously_biased_lock_entry_count_addr() : NULL);
|
||||
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
|
||||
b(done);
|
||||
|
||||
bind(try_rebias);
|
||||
|
||||
// At this point we know the epoch has expired, meaning that the
|
||||
// current "bias owner", if any, is actually invalid. Under these
|
||||
// circumstances _only_, we are allowed to use the current header's
|
||||
// value as the comparison value when doing the cas to acquire the
|
||||
// bias in the current epoch. In other words, we allow transfer of
|
||||
// the bias from one thread to another directly in this situation.
|
||||
|
||||
// tmp_reg low (not owner) bits are (age: 0 | pattern&epoch: prototype^swap_reg)
|
||||
|
||||
eor(tmp_reg, tmp_reg, swap_reg); // OK except for owner bits (age preserved !)
|
||||
|
||||
// owner bits 'random'. Set them to Rthread.
|
||||
mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23));
|
||||
mov(tmp_reg, AsmOperand(tmp_reg, lsr, 23));
|
||||
|
||||
orr(tmp_reg, tmp_reg, Rthread); // new mark
|
||||
|
||||
biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, slow_case,
|
||||
(counters != NULL) ? counters->rebiased_lock_entry_count_addr() : NULL);
|
||||
|
||||
// If the biasing toward our thread failed, then another thread
|
||||
// succeeded in biasing it toward itself and we need to revoke that
|
||||
// bias. The revocation will occur in the runtime in the slow case.
|
||||
|
||||
b(done);
|
||||
|
||||
bind(try_revoke_bias);
|
||||
|
||||
// The prototype mark in the klass doesn't have the bias bit set any
|
||||
// more, indicating that objects of this data type are not supposed
|
||||
// to be biased any more. We are going to try to reset the mark of
|
||||
// this object to the prototype value and fall through to the
|
||||
// CAS-based locking scheme. Note that if our CAS fails, it means
|
||||
// that another thread raced us for the privilege of revoking the
|
||||
// bias of this particular object, so it's okay to continue in the
|
||||
// normal locking code.
|
||||
|
||||
// tmp_reg low (not owner) bits are (age: 0 | pattern&epoch: prototype^swap_reg)
|
||||
|
||||
eor(tmp_reg, tmp_reg, swap_reg); // OK except for owner bits (age preserved !)
|
||||
|
||||
// owner bits 'random'. Clear them
|
||||
mov(tmp_reg, AsmOperand(tmp_reg, lsl, 23));
|
||||
mov(tmp_reg, AsmOperand(tmp_reg, lsr, 23));
|
||||
|
||||
biased_locking_enter_with_cas(obj_reg, swap_reg, tmp_reg, tmp2, cas_label,
|
||||
(counters != NULL) ? counters->revoked_lock_entry_count_addr() : NULL);
|
||||
|
||||
// Fall through to the normal CAS-based lock, because no matter what
|
||||
// the result of the above CAS, some thread must have succeeded in
|
||||
// removing the bias bit from the object's header.
|
||||
|
||||
bind(cas_label);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done) {
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
|
||||
// Check for biased locking unlock case, which is a no-op
|
||||
// Note: we do not have to check the thread ID for two reasons.
|
||||
// First, the interpreter checks for IllegalMonitorStateException at
|
||||
// a higher level. Second, if the bias was revoked while we held the
|
||||
// lock, the object could not be rebiased toward another thread, so
|
||||
// the bias bit would be clear.
|
||||
ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
andr(tmp_reg, tmp_reg, markWord::biased_lock_mask_in_place);
|
||||
cmp(tmp_reg, markWord::biased_lock_pattern);
|
||||
b(done, eq);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::resolve_jobject(Register value,
|
||||
Register tmp1,
|
||||
Register tmp2) {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -28,8 +28,6 @@
|
|||
#include "code/relocInfo.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
class BiasedLockingCounters;
|
||||
|
||||
// Introduced AddressLiteral and its subclasses to ease portability from
|
||||
// x86 and avoid relocation issues
|
||||
class AddressLiteral {
|
||||
|
@ -359,29 +357,6 @@ public:
|
|||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
// Biased locking support
|
||||
// lock_reg and obj_reg must be loaded up with the appropriate values.
|
||||
// swap_reg must be supplied.
|
||||
// tmp_reg must be supplied.
|
||||
// Done label is branched to with condition code EQ set if the lock is
|
||||
// biased and we acquired it. Slow case label is branched to with
|
||||
// condition code NE set if the lock is biased but we failed to acquire
|
||||
// it. Otherwise fall through.
|
||||
// Notes:
|
||||
// - swap_reg and tmp_reg are scratched
|
||||
// - Rtemp was (implicitly) scratched and can now be specified as the tmp2
|
||||
void biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
|
||||
bool swap_reg_contains_mark,
|
||||
Register tmp2,
|
||||
Label& done, Label& slow_case,
|
||||
BiasedLockingCounters* counters = NULL);
|
||||
void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
|
||||
|
||||
// Building block for CAS cases of biased locking: makes CAS and records statistics.
|
||||
// Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
|
||||
void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
|
||||
Register tmp, Label& slow_case, int* counter_addr);
|
||||
|
||||
void resolve_jobject(Register value, Register tmp1, Register tmp2);
|
||||
|
||||
void nop() {
|
||||
|
|
|
@ -862,11 +862,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
assert(markWord::unlocked_value == 1, "adjust this code");
|
||||
__ tbz(Rtemp, exact_log2(markWord::unlocked_value), slow_case);
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
assert(is_power_of_2(markWord::biased_lock_bit_in_place), "adjust this code");
|
||||
__ tbnz(Rtemp, exact_log2(markWord::biased_lock_bit_in_place), slow_case);
|
||||
}
|
||||
|
||||
__ bics(Rtemp, Rtemp, ~markWord::hash_mask_in_place);
|
||||
__ mov(R0, AsmOperand(Rtemp, lsr, markWord::hash_shift), ne);
|
||||
__ bx(LR, ne);
|
||||
|
@ -1151,17 +1146,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
const Register disp_hdr = altFP_7_11;
|
||||
const Register tmp = R8;
|
||||
|
||||
Label slow_lock, slow_lock_biased, lock_done, fast_lock;
|
||||
Label slow_lock, lock_done, fast_lock;
|
||||
if (method->is_synchronized()) {
|
||||
// The first argument is a handle to sync object (a class or an instance)
|
||||
__ ldr(sync_obj, Address(R1));
|
||||
// Remember the handle for the unlocking code
|
||||
__ mov(sync_handle, R1);
|
||||
|
||||
if(UseBiasedLocking) {
|
||||
__ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
|
||||
}
|
||||
|
||||
const Register mark = tmp;
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as either CAS or slow case path is taken in that case
|
||||
|
@ -1243,12 +1234,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
if (method->is_synchronized()) {
|
||||
__ ldr(sync_obj, Address(sync_handle));
|
||||
|
||||
if(UseBiasedLocking) {
|
||||
__ biased_locking_exit(sync_obj, Rtemp, unlock_done);
|
||||
// disp_hdr may not have been saved on entry with biased locking
|
||||
__ sub(disp_hdr, FP, lock_slot_fp_offset);
|
||||
}
|
||||
|
||||
// See C1_MacroAssembler::unlock_object() for more comments
|
||||
__ ldr(R2, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
|
||||
__ cbz(R2, unlock_done);
|
||||
|
@ -1304,11 +1289,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
|||
|
||||
if (method->is_synchronized()) {
|
||||
// Locking slow case
|
||||
if(UseBiasedLocking) {
|
||||
__ bind(slow_lock_biased);
|
||||
__ sub(disp_hdr, FP, lock_slot_fp_offset);
|
||||
}
|
||||
|
||||
__ bind(slow_lock);
|
||||
|
||||
push_param_registers(masm, fp_regs_in_arguments);
|
||||
|
|
|
@ -3967,11 +3967,7 @@ void TemplateTable::_new() {
|
|||
|
||||
// initialize object header only.
|
||||
__ bind(initialize_header);
|
||||
if (UseBiasedLocking) {
|
||||
__ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
|
||||
} else {
|
||||
__ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
|
||||
}
|
||||
__ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
|
||||
// mark
|
||||
__ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -94,8 +94,6 @@ class VM_Version: public Abstract_VM_Version {
|
|||
static bool supports_compare_and_exchange() { return true; }
|
||||
static bool supports_kuser_cmpxchg32() { return _kuser_helper_version >= KUSER_VERSION_CMPXCHG32; }
|
||||
static bool supports_kuser_cmpxchg64() { return _kuser_helper_version >= KUSER_VERSION_CMPXCHG64; }
|
||||
// Override Abstract_VM_Version implementation
|
||||
static bool use_biased_locking();
|
||||
|
||||
static bool has_vfp() { return (_features & vfp_m) != 0; }
|
||||
static bool has_vfp3_32() { return (_features & vfp3_32_m) != 0; }
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
|
@ -348,16 +348,3 @@ void VM_Version::initialize() {
|
|||
|
||||
_is_initialized = true;
|
||||
}
|
||||
|
||||
bool VM_Version::use_biased_locking() {
|
||||
get_os_cpu_info();
|
||||
// The cost of CAS on uniprocessor ARM v6 and later is low compared to the
|
||||
// overhead related to slightly longer Biased Locking execution path.
|
||||
// Testing shows no improvement when running with Biased Locking enabled
|
||||
// on an ARMv6 and higher uniprocessor systems. The situation is different on
|
||||
// ARMv5 and MP systems.
|
||||
//
|
||||
// Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
|
||||
//
|
||||
return (!os::is_MP() && (arm_arch() > 5)) ? false : true;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
|
@ -30,7 +30,6 @@
|
|||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
|
|
@ -33,7 +33,6 @@
|
|||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
@@ -115,10 +114,6 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
bne(CCR0, slow_int);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(CCR0, Roop, Rmark, Rscratch, R0, done, &slow_int);
|
||||
}
|
||||
|
||||
// ... and mark it unlocked.
|
||||
ori(Rmark, Rmark, markWord::unlocked_value);
@@ -164,21 +159,14 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
|
||||
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// Load the object out of the BasicObjectLock.
|
||||
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
|
||||
verify_oop(Roop, FILE_AND_LINE);
|
||||
biased_locking_exit(CCR0, Roop, R0, done);
|
||||
}
|
||||
// Test first if it is a fast recursive unlock.
|
||||
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
cmpdi(CCR0, Rmark, 0);
|
||||
beq(CCR0, done);
|
||||
if (!UseBiasedLocking) {
|
||||
// Load object.
|
||||
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
|
||||
verify_oop(Roop, FILE_AND_LINE);
|
||||
}
|
||||
|
||||
// Load object.
|
||||
ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
|
||||
verify_oop(Roop, FILE_AND_LINE);
|
||||
|
||||
// Check if it is still a light weight lock, this is true if we see
|
||||
// the stack address of the basicLock in the markWord of the object.
@@ -222,11 +210,7 @@ void C1_MacroAssembler::try_allocate(
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
|
||||
assert_different_registers(obj, klass, len, t1, t2);
|
||||
if (UseBiasedLocking && !len->is_valid()) {
|
||||
ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
|
||||
} else {
|
||||
load_const_optimized(t1, (intx)markWord::prototype().value());
|
||||
}
|
||||
load_const_optimized(t1, (intx)markWord::prototype().value());
|
||||
std(t1, oopDesc::mark_offset_in_bytes(), obj);
|
||||
store_klass(obj, klass);
|
||||
if (len->is_valid()) {
@@ -943,10 +943,6 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
bne(CCR0, slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
|
||||
}
|
||||
|
||||
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
|
||||
ori(displaced_header, displaced_header, markWord::unlocked_value);
@@ -1048,13 +1044,6 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
assert_different_registers(object, displaced_header, object_mark_addr, current_header);
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// The object address from the monitor is in object.
|
||||
ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
biased_locking_exit(CCR0, object, displaced_header, free_slot);
|
||||
}
|
||||
|
||||
// Test first if we are in the fast recursive case.
|
||||
ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
|
||||
BasicLock::displaced_header_offset_in_bytes(), monitor);
@@ -1070,7 +1059,7 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
// If we still have a lightweight lock, unlock the object and be done.
|
||||
|
||||
// The object address from the monitor is in object.
|
||||
if (!UseBiasedLocking) { ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); }
|
||||
ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
|
||||
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// We have the displaced header in displaced_header. If the lock is still
@@ -35,7 +35,6 @@
#include "oops/klass.inline.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
@@ -2073,218 +2072,6 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
}
|
||||
}
|
||||
|
||||
// Supports temp2_reg = R0.
|
||||
void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
|
||||
Register mark_reg, Register temp_reg,
|
||||
Register temp2_reg, Label& done, Label* slow_case) {
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
|
||||
#ifdef ASSERT
|
||||
assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
|
||||
#endif
|
||||
|
||||
Label cas_label;
|
||||
|
||||
// Branch to done if fast path fails and no slow_case provided.
|
||||
Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
|
||||
|
||||
// Biased locking
|
||||
// See whether the lock is currently biased toward our thread and
|
||||
// whether the epoch is still valid
|
||||
// Note that the runtime guarantees sufficient alignment of JavaThread
|
||||
// pointers to allow age to be placed into low bits
|
||||
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
|
||||
"biased locking makes assumptions about bit layout");
|
||||
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
load_const(temp2_reg, (address) BiasedLocking::total_entry_count_addr(), temp_reg);
|
||||
lwzx(temp_reg, temp2_reg);
|
||||
addi(temp_reg, temp_reg, 1);
|
||||
stwx(temp_reg, temp2_reg);
|
||||
}
|
||||
|
||||
andi(temp_reg, mark_reg, markWord::biased_lock_mask_in_place);
|
||||
cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
|
||||
bne(cr_reg, cas_label);
|
||||
|
||||
load_klass(temp_reg, obj_reg);
|
||||
|
||||
load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
|
||||
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
|
||||
orr(temp_reg, R16_thread, temp_reg);
|
||||
xorr(temp_reg, mark_reg, temp_reg);
|
||||
andr(temp_reg, temp_reg, temp2_reg);
|
||||
cmpdi(cr_reg, temp_reg, 0);
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
Label l;
|
||||
bne(cr_reg, l);
|
||||
load_const(temp2_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
|
||||
lwzx(mark_reg, temp2_reg);
|
||||
addi(mark_reg, mark_reg, 1);
|
||||
stwx(mark_reg, temp2_reg);
|
||||
// restore mark_reg
|
||||
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
|
||||
bind(l);
|
||||
}
|
||||
beq(cr_reg, done);
|
||||
|
||||
Label try_revoke_bias;
|
||||
Label try_rebias;
|
||||
|
||||
// At this point we know that the header has the bias pattern and
|
||||
// that we are not the bias owner in the current epoch. We need to
|
||||
// figure out more details about the state of the header in order to
|
||||
// know what operations can be legally performed on the object's
|
||||
// header.
|
||||
|
||||
// If the low three bits in the xor result aren't clear, that means
|
||||
// the prototype header is no longer biased and we have to revoke
|
||||
// the bias on this object.
|
||||
andi(temp2_reg, temp_reg, markWord::biased_lock_mask_in_place);
|
||||
cmpwi(cr_reg, temp2_reg, 0);
|
||||
bne(cr_reg, try_revoke_bias);
|
||||
|
||||
// Biasing is still enabled for this data type. See whether the
|
||||
// epoch of the current bias is still valid, meaning that the epoch
|
||||
// bits of the mark word are equal to the epoch bits of the
|
||||
// prototype header. (Note that the prototype header's epoch bits
|
||||
// only change at a safepoint.) If not, attempt to rebias the object
|
||||
// toward the current thread. Note that we must be absolutely sure
|
||||
// that the current epoch is invalid in order to do this because
|
||||
// otherwise the manipulations it performs on the mark word are
|
||||
// illegal.
|
||||
|
||||
int shift_amount = 64 - markWord::epoch_shift;
|
||||
// rotate epoch bits to right (little) end and set other bits to 0
|
||||
// [ big part | epoch | little part ] -> [ 0..0 | epoch ]
|
||||
rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markWord::epoch_bits);
|
||||
// branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
|
||||
bne(CCR0, try_rebias);
|
||||
|
||||
// The epoch of the current bias is still valid but we know nothing
|
||||
// about the owner; it might be set or it might be clear. Try to
|
||||
// acquire the bias of the object using an atomic operation. If this
|
||||
// fails we will go in to the runtime to revoke the object's bias.
|
||||
// Note that we first construct the presumed unbiased header so we
|
||||
// don't accidentally blow away another thread's valid bias.
|
||||
andi(mark_reg, mark_reg, (markWord::biased_lock_mask_in_place |
|
||||
markWord::age_mask_in_place |
|
||||
markWord::epoch_mask_in_place));
|
||||
orr(temp_reg, R16_thread, mark_reg);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
|
||||
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
|
||||
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
|
||||
/*where=*/obj_reg,
|
||||
MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock(),
|
||||
noreg, slow_case_int); // bail out if failed
|
||||
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
load_const(temp2_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp_reg);
|
||||
lwzx(temp_reg, temp2_reg);
|
||||
addi(temp_reg, temp_reg, 1);
|
||||
stwx(temp_reg, temp2_reg);
|
||||
}
|
||||
b(done);
|
||||
|
||||
bind(try_rebias);
|
||||
// At this point we know the epoch has expired, meaning that the
|
||||
// current "bias owner", if any, is actually invalid. Under these
|
||||
// circumstances _only_, we are allowed to use the current header's
|
||||
// value as the comparison value when doing the cas to acquire the
|
||||
// bias in the current epoch. In other words, we allow transfer of
|
||||
// the bias from one thread to another directly in this situation.
|
||||
load_klass(temp_reg, obj_reg);
|
||||
andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
|
||||
orr(temp2_reg, R16_thread, temp2_reg);
|
||||
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
|
||||
orr(temp_reg, temp2_reg, temp_reg);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
|
||||
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
|
||||
/*where=*/obj_reg,
|
||||
MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock(),
|
||||
noreg, slow_case_int); // bail out if failed
|
||||
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
load_const(temp2_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg);
|
||||
lwzx(temp_reg, temp2_reg);
|
||||
addi(temp_reg, temp_reg, 1);
|
||||
stwx(temp_reg, temp2_reg);
|
||||
}
|
||||
b(done);
|
||||
|
||||
bind(try_revoke_bias);
|
||||
// The prototype mark in the klass doesn't have the bias bit set any
|
||||
// more, indicating that objects of this data type are not supposed
|
||||
// to be biased any more. We are going to try to reset the mark of
|
||||
// this object to the prototype value and fall through to the
|
||||
// CAS-based locking scheme. Note that if our CAS fails, it means
|
||||
// that another thread raced us for the privilege of revoking the
|
||||
// bias of this particular object, so it's okay to continue in the
|
||||
// normal locking code.
|
||||
load_klass(temp_reg, obj_reg);
|
||||
ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
|
||||
andi(temp2_reg, mark_reg, markWord::age_mask_in_place);
|
||||
orr(temp_reg, temp_reg, temp2_reg);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
// CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
|
||||
cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
|
||||
/*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
|
||||
/*where=*/obj_reg,
|
||||
MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock());
|
||||
|
||||
// reload markWord in mark_reg before continuing with lightweight locking
|
||||
ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
|
||||
|
||||
// Fall through to the normal CAS-based lock, because no matter what
|
||||
// the result of the above CAS, some thread must have succeeded in
|
||||
// removing the bias bit from the object's header.
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
Label l;
|
||||
bne(cr_reg, l);
|
||||
load_const(temp2_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg);
|
||||
lwzx(temp_reg, temp2_reg);
|
||||
addi(temp_reg, temp_reg, 1);
|
||||
stwx(temp_reg, temp2_reg);
|
||||
bind(l);
|
||||
}
|
||||
|
||||
bind(cas_label);
|
||||
}
|
||||
|
||||
void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
|
||||
// Check for biased locking unlock case, which is a no-op
|
||||
// Note: we do not have to check the thread ID for two reasons.
|
||||
// First, the interpreter checks for IllegalMonitorStateException at
|
||||
// a higher level. Second, if the bias was revoked while we held the
|
||||
// lock, the object could not be rebiased toward another thread, so
|
||||
// the bias bit would be clear.
|
||||
|
||||
ld(temp_reg, 0, mark_addr);
|
||||
andi(temp_reg, temp_reg, markWord::biased_lock_mask_in_place);
|
||||
|
||||
cmpwi(cr_reg, temp_reg, markWord::biased_lock_pattern);
|
||||
beq(cr_reg, done);
|
||||
}
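// A minimal sketch (hypothetical helper, not HotSpot code) of the test the removed
// biased_locking_exit relied on, assuming the pre-JDK-18 markWord layout in which
// the three low bits held [biased_lock | lock] and binary 101 meant "biased/biasable".
#include <cstdint>
static bool has_bias_pattern_sketch(uintptr_t mark) {
  const uintptr_t biased_lock_mask_in_place = 0x7;  // low three bits of the mark word
  const uintptr_t biased_lock_pattern       = 0x5;  // binary 101
  return (mark & biased_lock_mask_in_place) == biased_lock_pattern;
}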
|
||||
|
||||
// allocation (for C1)
|
||||
void MacroAssembler::eden_allocate(
|
||||
Register obj, // result: pointer to object after successful allocation
@@ -2695,14 +2482,13 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
Metadata* method_data, bool profile_rtm,
|
||||
Label& DONE_LABEL, Label& IsInflated) {
|
||||
assert(UseRTMForStackLocks, "why call this otherwise?");
|
||||
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
|
||||
Label L_rtm_retry, L_decrement_retry, L_on_abort;
|
||||
|
||||
if (RTMRetryCount > 0) {
|
||||
load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
|
||||
bind(L_rtm_retry);
|
||||
}
|
||||
andi_(R0, mark_word, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
|
||||
andi_(R0, mark_word, markWord::monitor_value); // inflated vs stack-locked|neutral
|
||||
bne(CCR0, IsInflated);
|
||||
|
||||
if (PrintPreciseRTMLockingStatistics || profile_rtm) {
@@ -2720,10 +2506,10 @@ void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
}
|
||||
tbegin_();
|
||||
beq(CCR0, L_on_abort);
|
||||
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
|
||||
andi(R0, mark_word, markWord::biased_lock_mask_in_place); // look at 3 lock bits
|
||||
cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
|
||||
beq(flag, DONE_LABEL); // all done if unlocked
|
||||
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked.
|
||||
andi(R0, mark_word, markWord::lock_mask_in_place); // look at 2 lock bits
|
||||
cmpwi(flag, R0, markWord::unlocked_value); // bits = 01 unlocked
|
||||
beq(flag, DONE_LABEL); // all done if unlocked
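// A minimal sketch (hypothetical helper, not HotSpot code) of the narrowed test above:
// with the bias bit gone only two lock bits remain, so "unlocked" is (mark & 0x3) == 0x1
// rather than the former three-bit check (mark & 0x7) == 0x1.
#include <cstdint>
static bool is_unlocked_sketch(uintptr_t mark) {
  const uintptr_t lock_mask_in_place = 0x3;  // two low lock bits
  const uintptr_t unlocked_value     = 0x1;  // binary 01
  return (mark & lock_mask_in_place) == unlocked_value;
}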
|
||||
|
||||
if (UseRTMXendForLockBusy) {
|
||||
tend_();
@@ -2837,7 +2623,6 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
// "The box" is the space on the stack where we copy the object mark.
|
||||
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register temp, Register displaced_header, Register current_header,
|
||||
bool try_bias,
|
||||
RTMLockingCounters* rtm_counters,
|
||||
RTMLockingCounters* stack_rtm_counters,
|
||||
Metadata* method_data,
@@ -2858,10 +2643,6 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
bne(flag, cont);
|
||||
}
|
||||
|
||||
if (try_bias) {
|
||||
biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
|
||||
}
|
||||
|
||||
#if INCLUDE_RTM_OPT
|
||||
if (UseRTMForStackLocks && use_rtm) {
|
||||
rtm_stack_locking(flag, oop, displaced_header, temp, /*temp*/ current_header,
@@ -2964,26 +2745,21 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register temp, Register displaced_header, Register current_header,
|
||||
bool try_bias, bool use_rtm) {
|
||||
bool use_rtm) {
|
||||
assert_different_registers(oop, box, temp, displaced_header, current_header);
|
||||
assert(flag != CCR0, "bad condition register");
|
||||
Label cont;
|
||||
Label object_has_monitor;
|
||||
|
||||
if (try_bias) {
|
||||
biased_locking_exit(flag, oop, current_header, cont);
|
||||
}
|
||||
|
||||
#if INCLUDE_RTM_OPT
|
||||
if (UseRTMForStackLocks && use_rtm) {
|
||||
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
|
||||
Label L_regular_unlock;
|
||||
ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
|
||||
andi(R0, current_header, markWord::biased_lock_mask_in_place); // look at 3 lock bits
|
||||
cmpwi(flag, R0, markWord::unlocked_value); // bits = 001 unlocked
|
||||
bne(flag, L_regular_unlock); // else RegularLock
|
||||
tend_(); // otherwise end...
|
||||
b(cont); // ... and we're done
|
||||
ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
|
||||
andi(R0, current_header, markWord::lock_mask_in_place); // look at 2 lock bits
|
||||
cmpwi(flag, R0, markWord::unlocked_value); // bits = 01 unlocked
|
||||
bne(flag, L_regular_unlock); // else RegularLock
|
||||
tend_(); // otherwise end...
|
||||
b(cont); // ... and we're done
|
||||
bind(L_regular_unlock);
|
||||
}
|
||||
#endif
@@ -589,24 +589,6 @@ class MacroAssembler: public Assembler {
// Method handle support (JSR 292).
|
||||
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
|
||||
|
||||
// Biased locking support
|
||||
// Upon entry, obj_reg must contain the target object, and mark_reg
|
||||
// must contain the target object's header.
|
||||
// Destroys mark_reg if an attempt is made to bias an anonymously
|
||||
// biased lock. In this case a failure will go either to the slow
|
||||
// case or fall through with the notEqual condition code set with
|
||||
// the expectation that the slow case in the runtime will be called.
|
||||
// In the fall-through case where the CAS-based lock is done,
|
||||
// mark_reg is not destroyed.
|
||||
void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
|
||||
Register temp2_reg, Label& done, Label* slow_case = NULL);
|
||||
// Upon entry, the base register of mark_addr must contain the oop.
|
||||
// Destroys temp_reg.
|
||||
// If allow_delay_slot_filling is set to true, the next instruction
|
||||
// emitted after this one will go in an annulled delay slot if the
|
||||
// biased locking exit case failed.
|
||||
void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
|
||||
|
||||
// allocation (for C1)
|
||||
void eden_allocate(
|
||||
Register obj, // result: pointer to object after successful allocation
@@ -655,7 +637,6 @@ class MacroAssembler: public Assembler {
void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
bool try_bias = UseBiasedLocking,
|
||||
RTMLockingCounters* rtm_counters = NULL,
|
||||
RTMLockingCounters* stack_rtm_counters = NULL,
|
||||
Metadata* method_data = NULL,
@@ -663,7 +644,7 @@ class MacroAssembler: public Assembler {
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
bool try_bias = UseBiasedLocking, bool use_rtm = false);
|
||||
bool use_rtm = false);
|
||||
|
||||
// Check if safepoint requested and if so branch
|
||||
void safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod);
@@ -12123,8 +12123,7 @@ instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iR
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
|
||||
ins_encode %{
|
||||
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0,
|
||||
UseBiasedLocking && !UseOptoBiasInlining);
|
||||
$tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
|
||||
// If locking was successfull, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
|
||||
// _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -12142,10 +12141,9 @@ instruct cmpFastLock_tm(flagsReg crx, iRegPdst oop, rarg2RegP box, iRegPdst tmp1
ins_encode %{
|
||||
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
|
||||
/*Biased Locking*/ false,
|
||||
_rtm_counters, _stack_rtm_counters,
|
||||
((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
|
||||
/*TM*/ true, ra_->C->profile_rtm());
|
||||
/*RTM*/ true, ra_->C->profile_rtm());
|
||||
// If locking was successfull, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
|
||||
// _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -12162,7 +12160,6 @@ instruct cmpFastUnlock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1,
ins_encode %{
|
||||
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
|
||||
UseBiasedLocking && !UseOptoBiasInlining,
|
||||
false);
|
||||
// If unlocking was successfull, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
@@ -12180,7 +12177,7 @@ instruct cmpFastUnlock_tm(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp
ins_encode %{
|
||||
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
|
||||
/*Biased Locking*/ false, /*TM*/ true);
|
||||
/*RTM*/ true);
|
||||
// If unlocking was successfull, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
|
||||
// _complete_monitor_unlocking_Java for the case where crx is 'NE'.
@@ -2154,14 +2154,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Get the lock box slot's address.
|
||||
__ addi(r_box, R1_SP, lock_offset);
|
||||
|
||||
# ifdef ASSERT
|
||||
if (UseBiasedLocking) {
|
||||
// Making the box point to itself will make it clear it went unused
|
||||
// but also be obviously invalid.
|
||||
__ std(r_box, 0, r_box);
|
||||
}
|
||||
# endif // ASSERT
|
||||
|
||||
// Try fastpath for locking.
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
__ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
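// A hedged sketch of the displaced-header fast path that compiler_fast_lock_object still
// implements after this change; the helper name and the GCC __atomic builtin are
// assumptions for illustration, not HotSpot code. The unlocked mark is saved into the
// on-stack box, then the object header is CAS'ed from that mark to the box address.
#include <cstdint>
static bool fast_lock_sketch(uintptr_t* mark_addr, uintptr_t* box) {
  uintptr_t unlocked_mark = *mark_addr | 0x1;   // force the "unlocked" low bits (01)
  *box = unlocked_mark;                         // displaced header lives in the box
  return __atomic_compare_exchange_n(mark_addr, &unlocked_mark, (uintptr_t)box,
                                      /*weak=*/false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}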
@@ -3793,11 +3793,7 @@ void TemplateTable::_new() {
// --------------------------------------------------------------------------
|
||||
// Init2: Initialize the header: mark, klass
|
||||
// Init mark.
|
||||
if (UseBiasedLocking) {
|
||||
__ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
|
||||
} else {
|
||||
__ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
|
||||
}
|
||||
__ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
|
||||
__ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
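// Illustrative constants only (bit positions assumed, not taken from markWord.hpp):
// the prototype mark stored above encodes "unlocked, no hash, age 0", so every new
// object now starts from the same klass-independent value.
#include <cstdint>
constexpr uintptr_t kUnlockedValue = 0x1;             // low lock bits 01
constexpr uintptr_t kPrototypeMark = kUnlockedValue;  // hash and age fields left zero
static_assert(kPrototypeMark == 0x1, "new objects start from a single static mark");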
|
||||
|
||||
// Init klass.
@@ -1,5 +1,5 @@
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2020 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
@@ -378,11 +378,7 @@ void VM_Version::initialize() {
// Adjust RTM (Restricted Transactional Memory) flags.
|
||||
if (UseRTMLocking) {
|
||||
// If CPU or OS do not support TM:
|
||||
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
|
||||
// setting during arguments processing. See use_biased_locking().
|
||||
// VM_Version_init() is executed after UseBiasedLocking is used
|
||||
// in Thread::allocate().
|
||||
// If CPU or OS do not support RTM:
|
||||
if (PowerArchitecturePPC64 < 8) {
|
||||
vm_exit_during_initialization("RTM instructions are not available on this CPU.");
|
||||
}
@@ -399,8 +395,6 @@ void VM_Version::initialize() {
}
|
||||
#else
|
||||
// Only C2 does RTM locking optimization.
|
||||
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
|
||||
// setting during arguments processing. See use_biased_locking().
|
||||
vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
|
||||
#endif
|
||||
} else { // !UseRTMLocking
@@ -544,27 +538,6 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
#endif
|
||||
}
|
||||
|
||||
bool VM_Version::use_biased_locking() {
|
||||
#if INCLUDE_RTM_OPT
|
||||
// RTM locking is most useful when there is high lock contention and
|
||||
// low data contention. With high lock contention the lock is usually
|
||||
// inflated and biased locking is not suitable for that case.
|
||||
// RTM locking code requires that biased locking is off.
|
||||
// Note: we can't switch off UseBiasedLocking in get_processor_features()
|
||||
// because it is used by Thread::allocate() which is called before
|
||||
// VM_Version::initialize().
|
||||
if (UseRTMLocking && UseBiasedLocking) {
|
||||
if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
|
||||
FLAG_SET_DEFAULT(UseBiasedLocking, false);
|
||||
} else {
|
||||
warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
|
||||
UseBiasedLocking = false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return UseBiasedLocking;
|
||||
}
|
||||
|
||||
void VM_Version::print_features() {
|
||||
tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
@@ -93,9 +93,6 @@ public:
// Override Abstract_VM_Version implementation
|
||||
static void print_platform_virtualization_info(outputStream*);
|
||||
|
||||
// Override Abstract_VM_Version implementation
|
||||
static bool use_biased_locking();
|
||||
|
||||
// PPC64 supports fast class initialization checks for static methods.
|
||||
static bool supports_fast_class_init_checks() { return true; }
|
||||
constexpr static bool supports_stack_watermark_barrier() { return true; }
@@ -1,5 +1,5 @@
/*
|
||||
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
@@ -31,7 +31,6 @@
#include "gc/shared/cardTableBarrierSet.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
@@ -33,7 +33,6 @@
#include "oops/arrayOop.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
@@ -97,10 +96,6 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
z_btrue(slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
|
||||
}
|
||||
|
||||
// and mark it as unlocked.
|
||||
z_oill(hdr, markWord::unlocked_value);
|
||||
// Save unlocked object header into the displaced header location on the stack.
@@ -110,13 +105,6 @@ void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hd
// object header instead.
|
||||
z_csg(hdr, disp_hdr, hdr_offset, obj);
|
||||
// If the object header was the same, we're done.
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
Unimplemented();
|
||||
#if 0
|
||||
cond_inc32(Assembler::equal,
|
||||
ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
|
||||
#endif
|
||||
}
|
||||
branch_optimized(Assembler::bcondEqual, done);
|
||||
// If the object header was not the same, it is now in the hdr register.
|
||||
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
@@ -150,20 +138,12 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
assert_different_registers(hdr, obj, disp_hdr);
|
||||
NearLabel done;
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// Load object.
|
||||
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
biased_locking_exit(obj, hdr, done);
|
||||
}
|
||||
|
||||
// Load displaced header.
|
||||
z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
|
||||
// If the loaded hdr is NULL we had recursive locking, and we are done.
|
||||
z_bre(done);
|
||||
if (!UseBiasedLocking) {
|
||||
// Load object.
|
||||
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
}
|
||||
// Load object.
|
||||
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
verify_oop(obj, FILE_AND_LINE);
|
||||
// Test if object header is pointing to the displaced header, and if so, restore
|
||||
// the displaced header in the object. If the object header is not pointing to
@@ -193,13 +173,8 @@ void C1_MacroAssembler::try_allocate(
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) {
|
||||
assert_different_registers(obj, klass, len, t1, Rzero);
|
||||
if (UseBiasedLocking && !len->is_valid()) {
|
||||
assert_different_registers(obj, klass, len, t1);
|
||||
z_lg(t1, Address(klass, Klass::prototype_header_offset()));
|
||||
} else {
|
||||
// This assumes that all prototype bits fit in an int32_t.
|
||||
load_const_optimized(t1, (intx)markWord::prototype().value());
|
||||
}
|
||||
// This assumes that all prototype bits fit in an int32_t.
|
||||
load_const_optimized(t1, (intx)markWord::prototype().value());
|
||||
z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
if (len->is_valid()) {
@@ -38,7 +38,6 @@
#include "prims/jvmtiExport.hpp"
|
||||
#include "prims/jvmtiThreadState.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/safepointMechanism.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
@@ -1005,10 +1004,6 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
z_btrue(slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
|
||||
}
|
||||
|
||||
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
|
||||
z_oill(displaced_header, markWord::unlocked_value);
@@ -1116,12 +1111,6 @@ void InterpreterMacroAssembler::unlock_object(Register monitor, Register object)
clear_mem(obj_entry, sizeof(oop));
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// The object address from the monitor is in object.
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
biased_locking_exit(object, displaced_header, done);
|
||||
}
|
||||
|
||||
// Test first if we are in the fast recursive case.
|
||||
MacroAssembler::load_and_test_long(displaced_header,
|
||||
Address(monitor, BasicObjectLock::lock_offset_in_bytes() +
@@ -39,7 +39,6 @@
#include "oops/klass.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "registerSaver_s390.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
@@ -3128,194 +3127,7 @@ void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1
bind(l);
|
||||
}
|
||||
|
||||
// Semantics are dependent on the slow_case label:
|
||||
// If the slow_case label is not NULL, failure to biased-lock the object
|
||||
// transfers control to the location of the slow_case label. If the
|
||||
// object could be biased-locked, control is transferred to the done label.
|
||||
// The condition code is unpredictable.
|
||||
//
|
||||
// If the slow_case label is NULL, failure to biased-lock the object results
|
||||
// in a transfer of control to the done label with a condition code of not_equal.
|
||||
// If the biased-lock could be successfully obtained, control is transfered to
|
||||
// the done label with a condition code of equal.
|
||||
// It is mandatory to react on the condition code at the done label.
|
||||
//
|
||||
void MacroAssembler::biased_locking_enter(Register obj_reg,
|
||||
Register mark_reg,
|
||||
Register temp_reg,
|
||||
Register temp2_reg, // May be Z_RO!
|
||||
Label &done,
|
||||
Label *slow_case) {
|
||||
assert(UseBiasedLocking, "why call this otherwise?");
|
||||
assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
|
||||
|
||||
Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
|
||||
|
||||
BLOCK_COMMENT("biased_locking_enter {");
|
||||
|
||||
// Biased locking
|
||||
// See whether the lock is currently biased toward our thread and
|
||||
// whether the epoch is still valid.
|
||||
// Note that the runtime guarantees sufficient alignment of JavaThread
|
||||
// pointers to allow age to be placed into low bits.
|
||||
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
|
||||
"biased locking makes assumptions about bit layout");
|
||||
z_lr(temp_reg, mark_reg);
|
||||
z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
|
||||
z_chi(temp_reg, markWord::biased_lock_pattern);
|
||||
z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.
|
||||
|
||||
load_prototype_header(temp_reg, obj_reg);
|
||||
load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
|
||||
|
||||
z_ogr(temp_reg, Z_thread);
|
||||
z_xgr(temp_reg, mark_reg);
|
||||
z_ngr(temp_reg, temp2_reg);
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
|
||||
// Restore mark_reg.
|
||||
z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
|
||||
}
|
||||
branch_optimized(Assembler::bcondEqual, done); // Biased lock obtained, return success.
|
||||
|
||||
Label try_revoke_bias;
|
||||
Label try_rebias;
|
||||
Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// At this point we know that the header has the bias pattern and
|
||||
// that we are not the bias owner in the current epoch. We need to
|
||||
// figure out more details about the state of the header in order to
|
||||
// know what operations can be legally performed on the object's
|
||||
// header.
|
||||
|
||||
// If the low three bits in the xor result aren't clear, that means
|
||||
// the prototype header is no longer biased and we have to revoke
|
||||
// the bias on this object.
|
||||
z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
|
||||
z_brnaz(try_revoke_bias);
|
||||
|
||||
// Biasing is still enabled for this data type. See whether the
|
||||
// epoch of the current bias is still valid, meaning that the epoch
|
||||
// bits of the mark word are equal to the epoch bits of the
|
||||
// prototype header. (Note that the prototype header's epoch bits
|
||||
// only change at a safepoint.) If not, attempt to rebias the object
|
||||
// toward the current thread. Note that we must be absolutely sure
|
||||
// that the current epoch is invalid in order to do this because
|
||||
// otherwise the manipulations it performs on the mark word are
|
||||
// illegal.
|
||||
z_tmll(temp_reg, markWord::epoch_mask_in_place);
|
||||
z_brnaz(try_rebias);
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
// The epoch of the current bias is still valid but we know nothing
|
||||
// about the owner; it might be set or it might be clear. Try to
|
||||
// acquire the bias of the object using an atomic operation. If this
|
||||
// fails we will go in to the runtime to revoke the object's bias.
|
||||
// Note that we first construct the presumed unbiased header so we
|
||||
// don't accidentally blow away another thread's valid bias.
|
||||
z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
|
||||
markWord::epoch_mask_in_place);
|
||||
z_lgr(temp_reg, Z_thread);
|
||||
z_llgfr(mark_reg, mark_reg);
|
||||
z_ogr(temp_reg, mark_reg);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
z_csg(mark_reg, temp_reg, 0, obj_reg);
|
||||
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
|
||||
temp_reg, temp2_reg);
|
||||
}
|
||||
if (slow_case != NULL) {
|
||||
branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
|
||||
}
|
||||
branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
bind(try_rebias);
|
||||
// At this point we know the epoch has expired, meaning that the
|
||||
// current "bias owner", if any, is actually invalid. Under these
|
||||
// circumstances _only_, we are allowed to use the current header's
|
||||
// value as the comparison value when doing the cas to acquire the
|
||||
// bias in the current epoch. In other words, we allow transfer of
|
||||
// the bias from one thread to another directly in this situation.
|
||||
|
||||
z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
|
||||
load_prototype_header(temp_reg, obj_reg);
|
||||
z_llgfr(mark_reg, mark_reg);
|
||||
|
||||
z_ogr(temp_reg, Z_thread);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
z_csg(mark_reg, temp_reg, 0, obj_reg);
|
||||
|
||||
// If the biasing toward our thread failed, this means that
|
||||
// another thread succeeded in biasing it toward itself and we
|
||||
// need to revoke that bias. The revocation will occur in the
|
||||
// interpreter runtime in the slow case.
|
||||
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
|
||||
}
|
||||
if (slow_case != NULL) {
|
||||
branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
|
||||
}
|
||||
z_bru(done); // Biased lock status given in condition code.
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
bind(try_revoke_bias);
|
||||
// The prototype mark in the klass doesn't have the bias bit set any
|
||||
// more, indicating that objects of this data type are not supposed
|
||||
// to be biased any more. We are going to try to reset the mark of
|
||||
// this object to the prototype value and fall through to the
|
||||
// CAS-based locking scheme. Note that if our CAS fails, it means
|
||||
// that another thread raced us for the privilege of revoking the
|
||||
// bias of this particular object, so it's okay to continue in the
|
||||
// normal locking code.
|
||||
load_prototype_header(temp_reg, obj_reg);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
z_csg(mark_reg, temp_reg, 0, obj_reg);
|
||||
|
||||
// Fall through to the normal CAS-based lock, because no matter what
|
||||
// the result of the above CAS, some thread must have succeeded in
|
||||
// removing the bias bit from the object's header.
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
// z_cgr(mark_reg, temp2_reg);
|
||||
increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
|
||||
}
|
||||
|
||||
bind(cas_label);
|
||||
BLOCK_COMMENT("} biased_locking_enter");
|
||||
}
|
||||
|
||||
void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
|
||||
// Check for biased locking unlock case, which is a no-op
|
||||
// Note: we do not have to check the thread ID for two reasons.
|
||||
// First, the interpreter checks for IllegalMonitorStateException at
|
||||
// a higher level. Second, if the bias was revoked while we held the
|
||||
// lock, the object could not be rebiased toward another thread, so
|
||||
// the bias bit would be clear.
|
||||
BLOCK_COMMENT("biased_locking_exit {");
|
||||
|
||||
z_lg(temp_reg, 0, mark_addr);
|
||||
z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
|
||||
|
||||
z_chi(temp_reg, markWord::biased_lock_pattern);
|
||||
z_bre(done);
|
||||
BLOCK_COMMENT("} biased_locking_exit");
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
|
||||
void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2) {
|
||||
Register displacedHeader = temp1;
|
||||
Register currentHeader = temp1;
|
||||
Register temp = temp2;
@@ -3334,10 +3146,6 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
z_brne(done);
|
||||
}
|
||||
|
||||
if (try_bias) {
|
||||
biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
// The object has an existing monitor iff (mark & monitor_value) != 0.
|
||||
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
@@ -3402,7 +3210,7 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis
// _complete_monitor_locking_Java.
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
|
||||
void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2) {
|
||||
Register displacedHeader = temp1;
|
||||
Register currentHeader = temp2;
|
||||
Register temp = temp1;
@@ -3412,10 +3220,6 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg
BLOCK_COMMENT("compiler_fast_unlock_object {");
|
||||
|
||||
if (try_bias) {
|
||||
biased_locking_exit(oop, currentHeader, done);
|
||||
}
|
||||
|
||||
// Find the lock address and load the displaced header from the stack.
|
||||
// if the displaced header is zero, we have a recursive unlock.
|
||||
load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
@@ -3833,12 +3637,6 @@ void MacroAssembler::load_klass(Register klass, Register src_oop) {
}
|
||||
}
|
||||
|
||||
void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
|
||||
assert_different_registers(Rheader, Rsrc_oop);
|
||||
load_klass(Rheader, Rsrc_oop);
|
||||
z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
|
||||
}
|
||||
|
||||
void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
|
||||
if (UseCompressedClassPointers) {
|
||||
assert_different_registers(dst_oop, klass, Z_R0);
@@ -1,5 +1,5 @@
/*
|
||||
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
@@ -718,26 +718,9 @@ class MacroAssembler: public Assembler {
// Increment a counter at counter_address when the eq condition code is set.
|
||||
// Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
|
||||
void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
|
||||
// Biased locking support
|
||||
// Upon entry, obj_reg must contain the target object, and mark_reg
|
||||
// must contain the target object's header.
|
||||
// Destroys mark_reg if an attempt is made to bias an anonymously
|
||||
// biased lock. In this case a failure will go either to the slow
|
||||
// case or fall through with the notEqual condition code set with
|
||||
// the expectation that the slow case in the runtime will be called.
|
||||
// In the fall-through case where the CAS-based lock is done,
|
||||
// mark_reg is not destroyed.
|
||||
void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
|
||||
Register temp2_reg, Label& done, Label* slow_case = NULL);
|
||||
// Upon entry, the base register of mark_addr must contain the oop.
|
||||
// Destroys temp_reg.
|
||||
// If allow_delay_slot_filling is set to true, the next instruction
|
||||
// emitted after this one will go in an annulled delay slot if the
|
||||
// biased locking exit case failed.
|
||||
void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);
|
||||
|
||||
void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
|
||||
void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
|
||||
void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2);
|
||||
void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2);
|
||||
|
||||
void resolve_jobject(Register value, Register tmp1, Register tmp2);
@@ -782,7 +765,6 @@ class MacroAssembler: public Assembler {
void decode_klass_not_null(Register dst);
|
||||
void load_klass(Register klass, Address mem);
|
||||
void load_klass(Register klass, Register src_oop);
|
||||
void load_prototype_header(Register Rheader, Register Rsrc_oop);
|
||||
void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
|
||||
void store_klass_gap(Register s, Register dst_oop);
@@ -9816,8 +9816,7 @@ instruct cmpFastLock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, iRe
ins_cost(100);
|
||||
// TODO: s390 port size(VARIABLE_SIZE); // Uses load_const_optimized.
|
||||
format %{ "FASTLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
|
||||
ins_encode %{ __ compiler_fast_lock_object($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
UseBiasedLocking && !UseOptoBiasInlining); %}
|
||||
ins_encode %{ __ compiler_fast_lock_object($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register); %}
|
||||
ins_pipe(pipe_class_dummy);
|
||||
%}
@@ -9825,10 +9824,9 @@ instruct cmpFastUnlock(flagsReg pcc, iRegP_N2P oop, iRegP_N2P box, iRegP tmp1, i
match(Set pcc (FastUnlock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2);
|
||||
ins_cost(100);
|
||||
// TODO: s390 port size(FIXED_SIZE); // emitted code depends on UseBiasedLocking being on/off.
|
||||
// TODO: s390 port size(FIXED_SIZE);
|
||||
format %{ "FASTUNLOCK $oop, $box; KILL Z_ARG4, Z_ARG5" %}
|
||||
ins_encode %{ __ compiler_fast_unlock_object($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
UseBiasedLocking && !UseOptoBiasInlining); %}
|
||||
ins_encode %{ __ compiler_fast_unlock_object($oop$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register); %}
|
||||
ins_pipe(pipe_class_dummy);
|
||||
%}
@@ -1873,13 +1873,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Get the lock box slot's address.
|
||||
__ add2reg(r_box, lock_offset, Z_SP);
|
||||
|
||||
#ifdef ASSERT
|
||||
if (UseBiasedLocking)
|
||||
// Making the box point to itself will make it clear it went unused
|
||||
// but also be obviously invalid.
|
||||
__ z_stg(r_box, 0, r_box);
|
||||
#endif // ASSERT
|
||||
|
||||
// Try fastpath for locking.
|
||||
// Fast_lock kills r_temp_1, r_temp_2. (Don't use R1 as temp, won't work!)
|
||||
__ compiler_fast_lock_object(r_oop, r_box, r_tmp1, r_tmp2);
@@ -3813,14 +3813,8 @@ void TemplateTable::_new() {
// Initialize object header only.
|
||||
__ bind(initialize_header);
|
||||
if (UseBiasedLocking) {
|
||||
Register prototype = RobjectFields;
|
||||
__ z_lg(prototype, Address(iklass, Klass::prototype_header_offset()));
|
||||
__ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
|
||||
} else {
|
||||
__ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
|
||||
(long)markWord::prototype().value());
|
||||
}
|
||||
__ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
|
||||
(long)markWord::prototype().value());
|
||||
|
||||
__ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
|
||||
__ store_klass(iklass, RallocatedObject); // Store klass last.
@@ -30,7 +30,6 @@
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/objectMonitor.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
@@ -28,8 +28,6 @@
#include "asm/register.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
class BiasedLockingCounters;
|
||||
|
||||
// Contains all the definitions needed for x86 assembly code generation.
|
||||
|
||||
// Calling convention
@@ -3512,13 +3512,9 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
if (!UseFastLocking) {
|
||||
__ jmp(*op->stub()->entry());
|
||||
} else if (op->code() == lir_lock) {
|
||||
Register scratch = noreg;
|
||||
if (UseBiasedLocking) {
|
||||
scratch = op->scratch_opr()->as_register();
|
||||
}
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// add debug info for NullPointerException only if one is possible
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
if (op->info() != NULL) {
|
||||
add_debug_info_for_null_check(null_check_offset, op->info());
|
||||
}
@@ -288,11 +288,6 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// "lock" stores the address of the monitor stack slot, so this is not an oop
|
||||
LIR_Opr lock = new_register(T_INT);
|
||||
// Need a scratch register for biased locking on x86
|
||||
LIR_Opr scratch = LIR_OprFact::illegalOpr;
|
||||
if (UseBiasedLocking) {
|
||||
scratch = new_register(T_INT);
|
||||
}
|
||||
|
||||
CodeEmitInfo* info_for_exception = NULL;
|
||||
if (x->needs_null_check()) {
@@ -301,7 +296,7 @@ void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
// this CodeEmitInfo must not have the xhandlers because here the
|
||||
// object is already locked (xhandlers expect object to be unlocked)
|
||||
CodeEmitInfo* info = state_for(x, x->state(), true);
|
||||
monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
|
||||
monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
|
||||
x->monitor_no(), info_for_exception, info);
|
||||
}
@@ -33,12 +33,11 @@
#include "oops/arrayOop.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "runtime/basicLock.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
|
||||
const int aligned_mask = BytesPerWord -1;
|
||||
const int hdr_offset = oopDesc::mark_offset_in_bytes();
@@ -61,11 +60,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
jcc(Assembler::notZero, slow_case);
|
||||
}
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
assert(scratch != noreg, "should have scratch register at this point");
|
||||
biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);
|
||||
}
|
||||
|
||||
// Load object header
|
||||
movptr(hdr, Address(obj, hdr_offset));
|
||||
// and mark it as unlocked
@@ -78,10 +72,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
MacroAssembler::lock(); // must be immediately before cmpxchg!
|
||||
cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
|
||||
// if the object header was the same, we're done
|
||||
if (PrintBiasedLockingStatistics) {
|
||||
cond_inc32(Assembler::equal,
|
||||
ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
|
||||
}
|
||||
jcc(Assembler::equal, done);
|
||||
// if the object header was not the same, it is now in the hdr register
|
||||
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
@@ -116,22 +106,15 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
|
||||
Label done;
|
||||
|
||||
if (UseBiasedLocking) {
|
||||
// load object
|
||||
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
biased_locking_exit(obj, hdr, done);
|
||||
}
|
||||
|
||||
// load displaced header
|
||||
movptr(hdr, Address(disp_hdr, 0));
|
||||
// if the loaded hdr is NULL we had recursive locking
|
||||
testptr(hdr, hdr);
|
||||
// if we had recursive locking, we are done
|
||||
jcc(Assembler::zero, done);
|
||||
if (!UseBiasedLocking) {
|
||||
// load object
|
||||
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
}
|
||||
// load object
|
||||
movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
|
||||
|
||||
verify_oop(obj);
|
||||
// test if object header is pointing to the displaced header, and if so, restore
|
||||
// the displaced header in the object - if the object header is not pointing to
|
||||
|
@ -159,14 +142,8 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
|
|||
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
|
||||
assert_different_registers(obj, klass, len);
|
||||
Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
|
||||
if (UseBiasedLocking && !len->is_valid()) {
|
||||
assert_different_registers(obj, klass, len, t1, t2);
|
||||
movptr(t1, Address(klass, Klass::prototype_header_offset()));
|
||||
movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
|
||||
} else {
|
||||
// This assumes that all prototype bits fit in an int32_t
|
||||
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
|
||||
}
|
||||
// This assumes that all prototype bits fit in an int32_t
|
||||
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markWord::prototype().value());
|
||||
#ifdef _LP64
|
||||
if (UseCompressedClassPointers) { // Take care not to kill klass
|
||||
movptr(t1, klass);
@@ -49,9 +49,8 @@
// hdr : must be rax, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
// scratch : scratch register, contents destroyed
// returns code offset at which to add null check debug information
int lock_object (Register swap, Register obj, Register disp_hdr, Register scratch, Label& slow_case);
int lock_object (Register swap, Register obj, Register disp_hdr, Label& slow_case);

// unlocking
// hdr : contents destroyed
@@ -30,7 +30,6 @@
#include "opto/intrinsicnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/stubRoutines.hpp"

@@ -234,7 +233,6 @@ void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Regi
Metadata* method_data, bool profile_rtm,
Label& DONE_LABEL, Label& IsInflated) {
assert(UseRTMForStackLocks, "why call this otherwise?");
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
assert(tmpReg == rax, "");
assert(scrReg == rdx, "");
Label L_rtm_retry, L_decrement_retry, L_on_abort;

@@ -244,7 +242,7 @@ void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Regi
bind(L_rtm_retry);
}
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
jcc(Assembler::notZero, IsInflated);

if (PrintPreciseRTMLockingStatistics || profile_rtm) {

@@ -259,8 +257,8 @@ void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Regi
}
xbegin(L_on_abort);
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
andptr(tmpReg, markWord::lock_mask_in_place); // look at 2 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 01 unlocked
jcc(Assembler::equal, DONE_LABEL); // all done if unlocked

Register abort_status_Reg = tmpReg; // status of abort is stored in RAX

@@ -447,7 +445,6 @@ void C2_MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, R
// scr: tmp -- KILLED
void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
Register scrReg, Register cx1Reg, Register cx2Reg,
BiasedLockingCounters* counters,
RTMLockingCounters* rtm_counters,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,

@@ -462,10 +459,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
assert_different_registers(objReg, boxReg, tmpReg, scrReg);
}

if (counters != NULL) {
atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
}

// Possible cases that we'll encounter in fast_lock
// ------------------------------------------------
// * Inflated

@@ -473,9 +466,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// -- Locked
// = by self
// = by other
// * biased
// -- by Self
// -- by other
// * neutral
// * stack-locked
// -- by self

@@ -493,16 +483,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
jcc(Assembler::notZero, DONE_LABEL);
}

// it's stack-locked, biased or neutral
// TODO: optimize away redundant LDs of obj->mark and improve the markword triage
// order to reduce the number of conditional branches in the most common cases.
// Beware -- there's a subtle invariant that fetch of the markword
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we risk exclusion (safety) failure.
if (UseBiasedLocking && !UseOptoBiasInlining) {
biased_locking_enter(boxReg, objReg, tmpReg, scrReg, cx1Reg, false, DONE_LABEL, NULL, counters);
}

#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,

@@ -512,7 +492,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
#endif // INCLUDE_RTM_OPT

movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH]
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
jccb(Assembler::notZero, IsInflated);

// Attempt stack-locking ...

@@ -520,10 +500,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
lock();
cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg
if (counters != NULL) {
cond_inc32(Assembler::equal,
ExternalAddress((address)counters->fast_path_entry_count_addr()));
}
jcc(Assembler::equal, DONE_LABEL); // Success

// Recursive locking.

@@ -533,10 +509,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Next instruction set ZFlag == 1 (Success) if difference is less then one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
if (counters != NULL) {
cond_inc32(Assembler::equal,
ExternalAddress((address)counters->fast_path_entry_count_addr()));
}
jmp(DONE_LABEL);

bind(IsInflated);

@@ -659,19 +631,12 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t

Label DONE_LABEL, Stacked, CheckSucc;

// Critically, the biased locking test must have precedence over
// and appear before the (box->dhw == 0) recursive stack-lock test.
if (UseBiasedLocking && !UseOptoBiasInlining) {
biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}

#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
Label L_regular_unlock;
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
andptr(tmpReg, markWord::lock_mask_in_place); // look at 2 lock bits
cmpptr(tmpReg, markWord::unlocked_value); // bits = 01 unlocked
jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
xend(); // otherwise end...
jmp(DONE_LABEL); // ... and we're done

@@ -738,7 +703,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
jmpb (DONE_LABEL);

bind (Stacked);
// It's not inflated and it's not recursively stack-locked and it's not biased.
// It's not inflated and it's not recursively stack-locked.
// It must be stack-locked.
// Try to reset the header to displaced header.
// The "box" value on the stack is stable, so we can reload
@@ -38,7 +38,6 @@ public:
// See full desription in macroAssembler_x86.cpp.
void fast_lock(Register obj, Register box, Register tmp,
Register scr, Register cx1, Register cx2,
BiasedLockingCounters* counters,
RTMLockingCounters* rtm_counters,
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,
@@ -35,7 +35,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"

@@ -1205,8 +1204,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
Label done;

const Register swap_reg = rax; // Must use rax for cmpxchg instruction
const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
// problematic case where tmp_reg = no_reg.
const Register tmp_reg = rbx;
const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
const Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);

@@ -1227,10 +1225,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
jcc(Assembler::notZero, slow_case);
}

if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
}

// Load immediate 1 into swap_reg %rax
movl(swap_reg, (int32_t)1);

@@ -1245,10 +1239,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {

lock();
cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
}
jcc(Assembler::zero, done);

const int zero_bits = LP64_ONLY(7) NOT_LP64(3);

@@ -1285,11 +1275,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {

// Save the test result, for recursive case, the result is zero
movptr(Address(lock_reg, mark_offset), swap_reg);

if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
}
jcc(Assembler::zero, done);

bind(slow_case);

@@ -1341,10 +1326,6 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Free entry
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);

if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done);
}

// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,
BasicLock::displaced_header_offset_in_bytes()));
@@ -40,7 +40,6 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.hpp"

@@ -1280,200 +1279,6 @@ void MacroAssembler::reserved_stack_check() {
bind(no_reserved_zone_enabling);
}

void MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
Register tmp_reg,
Register tmp_reg2,
bool swap_reg_contains_mark,
Label& done,
Label* slow_case,
BiasedLockingCounters* counters) {
assert(UseBiasedLocking, "why call this otherwise?");
assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
assert(tmp_reg != noreg, "tmp_reg must be supplied");
assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
NOT_LP64( Address saved_mark_addr(lock_reg, 0); )

if (PrintBiasedLockingStatistics && counters == NULL) {
counters = BiasedLocking::counters();
}
// Biased locking
// See whether the lock is currently biased toward our thread and
// whether the epoch is still valid
// Note that the runtime guarantees sufficient alignment of JavaThread
// pointers to allow age to be placed into low bits
// First check to see whether biasing is even enabled for this object
Label cas_label;
if (!swap_reg_contains_mark) {
movptr(swap_reg, mark_addr);
}
movptr(tmp_reg, swap_reg);
andptr(tmp_reg, markWord::biased_lock_mask_in_place);
cmpptr(tmp_reg, markWord::biased_lock_pattern);
jcc(Assembler::notEqual, cas_label);
// The bias pattern is present in the object's header. Need to check
// whether the bias owner and the epoch are both still current.
#ifndef _LP64
// Note that because there is no current thread register on x86_32 we
// need to store off the mark word we read out of the object to
// avoid reloading it and needing to recheck invariants below. This
// store is unfortunate but it makes the overall code shorter and
// simpler.
movptr(saved_mark_addr, swap_reg);
#endif
load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
#ifdef _LP64
orptr(tmp_reg, r15_thread);
xorptr(tmp_reg, swap_reg);
Register header_reg = tmp_reg;
#else
xorptr(tmp_reg, swap_reg);
get_thread(swap_reg);
xorptr(swap_reg, tmp_reg);
Register header_reg = swap_reg;
#endif
andptr(header_reg, ~((int) markWord::age_mask_in_place));
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->biased_lock_entry_count_addr()));
}
jcc(Assembler::equal, done);

Label try_revoke_bias;
Label try_rebias;

// At this point we know that the header has the bias pattern and
// that we are not the bias owner in the current epoch. We need to
// figure out more details about the state of the header in order to
// know what operations can be legally performed on the object's
// header.

// If the low three bits in the xor result aren't clear, that means
// the prototype header is no longer biased and we have to revoke
// the bias on this object.
testptr(header_reg, markWord::biased_lock_mask_in_place);
jcc(Assembler::notZero, try_revoke_bias);

// Biasing is still enabled for this data type. See whether the
// epoch of the current bias is still valid, meaning that the epoch
// bits of the mark word are equal to the epoch bits of the
// prototype header. (Note that the prototype header's epoch bits
// only change at a safepoint.) If not, attempt to rebias the object
// toward the current thread. Note that we must be absolutely sure
// that the current epoch is invalid in order to do this because
// otherwise the manipulations it performs on the mark word are
// illegal.
testptr(header_reg, markWord::epoch_mask_in_place);
jccb(Assembler::notZero, try_rebias);

// The epoch of the current bias is still valid but we know nothing
// about the owner; it might be set or it might be clear. Try to
// acquire the bias of the object using an atomic operation. If this
// fails we will go in to the runtime to revoke the object's bias.
// Note that we first construct the presumed unbiased header so we
// don't accidentally blow away another thread's valid bias.
NOT_LP64( movptr(swap_reg, saved_mark_addr); )
andptr(swap_reg,
markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
#ifdef _LP64
movptr(tmp_reg, swap_reg);
orptr(tmp_reg, r15_thread);
#else
get_thread(tmp_reg);
orptr(tmp_reg, swap_reg);
#endif
lock();
cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
// interpreter runtime in the slow case.
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
}
if (slow_case != NULL) {
jcc(Assembler::notZero, *slow_case);
}
jmp(done);

bind(try_rebias);
// At this point we know the epoch has expired, meaning that the
// current "bias owner", if any, is actually invalid. Under these
// circumstances _only_, we are allowed to use the current header's
// value as the comparison value when doing the cas to acquire the
// bias in the current epoch. In other words, we allow transfer of
// the bias from one thread to another directly in this situation.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
#ifdef _LP64
orptr(tmp_reg, r15_thread);
#else
get_thread(swap_reg);
orptr(tmp_reg, swap_reg);
movptr(swap_reg, saved_mark_addr);
#endif
lock();
cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
// If the biasing toward our thread failed, then another thread
// succeeded in biasing it toward itself and we need to revoke that
// bias. The revocation will occur in the runtime in the slow case.
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
}
if (slow_case != NULL) {
jcc(Assembler::notZero, *slow_case);
}
jmp(done);

bind(try_revoke_bias);
// The prototype mark in the klass doesn't have the bias bit set any
// more, indicating that objects of this data type are not supposed
// to be biased any more. We are going to try to reset the mark of
// this object to the prototype value and fall through to the
// CAS-based locking scheme. Note that if our CAS fails, it means
// that another thread raced us for the privilege of revoking the
// bias of this particular object, so it's okay to continue in the
// normal locking code.
//
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
NOT_LP64( movptr(swap_reg, saved_mark_addr); )
load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
lock();
cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
if (counters != NULL) {
cond_inc32(Assembler::zero,
ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
}

bind(cas_label);
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
assert(UseBiasedLocking, "why call this otherwise?");

// Check for biased locking unlock case, which is a no-op
// Note: we do not have to check the thread ID for two reasons.
// First, the interpreter checks for IllegalMonitorStateException at
// a higher level. Second, if the bias was revoked while we held the
// lock, the object could not be rebiased toward another thread, so
// the bias bit would be clear.
movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
andptr(temp_reg, markWord::biased_lock_mask_in_place);
cmpptr(temp_reg, markWord::biased_lock_pattern);
jcc(Assembler::equal, done);
}

void MacroAssembler::c2bool(Register x) {
// implements x == 0 ? 0 : 1
// note: must only look at least-significant byte of x

@@ -4732,11 +4537,6 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}

void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
load_klass(dst, src, tmp);
movptr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
assert_different_registers(src, tmp);
assert_different_registers(dst, tmp);
@@ -356,8 +356,6 @@ class MacroAssembler: public Assembler {
// stored using routines that take a jobject.
void store_heap_oop_null(Address dst);

void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
void store_klass_gap(Register dst, Register src);

@@ -673,21 +671,6 @@ class MacroAssembler: public Assembler {

void verify_tlab();

// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg must be rax, and is killed.
// tmp_reg is optional. If it is supplied (i.e., != noreg) it will
// be killed; if not supplied, push/pop will be used internally to
// allocate a temporary (inefficient, avoid if possible).
// Optional slow case is for implementations (interpreter and C1) which branch to
// slow case directly. Leaves condition codes set for C2's Fast_Lock node.
void biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
Register tmp_reg2, bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);

Condition negate_condition(Condition cond);

// Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -62,12 +62,6 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas
__ testptr(result, markWord::unlocked_value);
__ jcc(Assembler::zero, slowCase);

if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
__ testptr(result, markWord::biased_lock_bit_in_place);
__ jcc(Assembler::notZero, slowCase);
}

// get hash
#ifdef _LP64
// Read the header and build a mask to get its hash field.
@@ -1823,11 +1823,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));

if (UseBiasedLocking) {
// Note that oop_handle_reg is trashed during this call
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
}

// Load immediate 1 into swap_reg %rax,
__ movptr(swap_reg, 1);

@@ -1860,11 +1855,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ jcc(Assembler::notEqual, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);

if (UseBiasedLocking) {
// Re-fetch oop_handle_reg as we trashed it above
__ movptr(oop_handle_reg, Address(rsp, wordSize));
}
}

@@ -1993,10 +1983,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Get locked oop from the handle we passed to jni
__ movptr(obj_reg, Address(oop_handle_reg, 0));

if (UseBiasedLocking) {
__ biased_locking_exit(obj_reg, rbx, done);
}

// Simple recursive lock?

__ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
@@ -2070,10 +2070,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));

if (UseBiasedLocking) {
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
}

// Load immediate 1 into swap_reg %rax
__ movl(swap_reg, 1);

@@ -2224,11 +2220,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movptr(obj_reg, Address(oop_handle_reg, 0));

Label done;

if (UseBiasedLocking) {
__ biased_locking_exit(obj_reg, old_hdr, done);
}

// Simple recursive lock?

__ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
@@ -4023,15 +4023,9 @@ void TemplateTable::_new() {

// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
__ pop(rcx); // get saved klass back in the register.
__ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
(intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
}
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
(intptr_t)markWord::prototype().value()); // header
__ pop(rcx); // get saved klass back in the register.
#ifdef _LP64
__ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
__ store_klass_gap(rax, rsi); // zero klass gap for compressed oops
@@ -1012,10 +1012,6 @@ void VM_Version::get_processor_features() {
}

if (!supports_rtm() && UseRTMLocking) {
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().
// VM_Version_init() is executed after UseBiasedLocking is used
// in Thread::allocate().
vm_exit_during_initialization("RTM instructions are not available on this CPU");
}

@@ -1023,8 +1019,6 @@ void VM_Version::get_processor_features() {
if (UseRTMLocking) {
if (!CompilerConfig::is_c2_enabled()) {
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().
vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
}
if (is_intel_family_core()) {

@@ -1062,8 +1056,6 @@ void VM_Version::get_processor_features() {
#else
if (UseRTMLocking) {
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().
vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
}
#endif

@@ -1736,27 +1728,6 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
}
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
// RTM locking is most useful when there is high lock contention and
// low data contention. With high lock contention the lock is usually
// inflated and biased locking is not suitable for that case.
// RTM locking code requires that biased locking is off.
// Note: we can't switch off UseBiasedLocking in get_processor_features()
// because it is used by Thread::allocate() which is called before
// VM_Version::initialize().
if (UseRTMLocking && UseBiasedLocking) {
if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
FLAG_SET_DEFAULT(UseBiasedLocking, false);
} else {
warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
UseBiasedLocking = false;
}
}
#endif
return UseBiasedLocking;
}

bool VM_Version::compute_has_intel_jcc_erratum() {
if (!is_intel_family_core()) {
// Only Intel CPUs are affected.
@@ -747,9 +747,6 @@ public:
// Override Abstract_VM_Version implementation
static void print_platform_virtualization_info(outputStream*);

// Override Abstract_VM_Version implementation
static bool use_biased_locking();

// Asserts
static void assert_is_initialized() {
assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
@@ -13677,7 +13677,7 @@ instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eD
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, $cx1$$Register, $cx2$$Register,
_counters, _rtm_counters, _stack_rtm_counters,
_rtm_counters, _stack_rtm_counters,
((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
true, ra_->C->profile_rtm());
%}

@@ -13692,7 +13692,7 @@ instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP
format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
$scr$$Register, noreg, noreg, NULL, NULL, NULL, false, false);
%}
ins_pipe(pipe_slow);
%}
@@ -12927,7 +12927,7 @@ instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp,
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, $cx1$$Register, $cx2$$Register,
_counters, _rtm_counters, _stack_rtm_counters,
_rtm_counters, _stack_rtm_counters,
((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
true, ra_->C->profile_rtm());
%}

@@ -12942,7 +12942,7 @@ instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRe
format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, $cx1$$Register, noreg, _counters, NULL, NULL, NULL, false, false);
$scr$$Register, $cx1$$Register, noreg, NULL, NULL, NULL, false, false);
%}
ins_pipe(pipe_slow);
%}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -30,7 +30,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
@@ -229,7 +229,6 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "opto/regalloc.hpp");
AD.addInclude(AD._CPP_file, "opto/regmask.hpp");
AD.addInclude(AD._CPP_file, "opto/runtime.hpp");
AD.addInclude(AD._CPP_file, "runtime/biasedLocking.hpp");
AD.addInclude(AD._CPP_file, "runtime/safepointMechanism.hpp");
AD.addInclude(AD._CPP_file, "runtime/sharedRuntime.hpp");
AD.addInclude(AD._CPP_file, "runtime/stubRoutines.hpp");
@@ -1526,7 +1526,6 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
}

if (node->is_ideal_fastlock() && new_inst->is_ideal_fastlock()) {
fprintf(fp, " ((MachFastLockNode*)n%d)->_counters = _counters;\n", cnt);
fprintf(fp, " ((MachFastLockNode*)n%d)->_rtm_counters = _rtm_counters;\n", cnt);
fprintf(fp, " ((MachFastLockNode*)n%d)->_stack_rtm_counters = _stack_rtm_counters;\n", cnt);
}

@@ -3941,7 +3940,6 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
fprintf(fp_cpp, "%s node->_probs = _leaf->as_Jump()->_probs;\n", indent);
}
if( inst->is_ideal_fastlock() ) {
fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
fprintf(fp_cpp, "%s node->_rtm_counters = _leaf->as_FastLock()->rtm_counters();\n", indent);
fprintf(fp_cpp, "%s node->_stack_rtm_counters = _leaf->as_FastLock()->stack_rtm_counters();\n", indent);
}
@@ -59,7 +59,6 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
@@ -137,8 +137,6 @@ void HeapShared::fixup_mapped_heap_regions() {
}

unsigned HeapShared::oop_hash(oop const& p) {
assert(!p->mark().has_bias_pattern(),
"this object should never have been locked"); // so identity_hash won't safepoint
unsigned hash = (unsigned)p->identity_hash();
return hash;
}

@@ -416,11 +414,7 @@ void HeapShared::copy_roots() {
memset(mem, 0, size * BytesPerWord);
{
// This is copied from MemAllocator::finish
if (UseBiasedLocking) {
oopDesc::set_mark(mem, k->prototype_header());
} else {
oopDesc::set_mark(mem, markWord::prototype());
}
oopDesc::set_mark(mem, markWord::prototype());
oopDesc::release_set_klass(mem, k);
}
{
@@ -2084,7 +2084,6 @@ void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
ik->set_has_value_based_class_annotation();
if (DiagnoseSyncOnValueBasedClasses) {
ik->set_is_value_based();
ik->set_prototype_header(markWord::prototype());
}
}
}
@@ -68,7 +68,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -269,17 +269,13 @@ class nmethod : public CompiledMethod {
volatile uint8_t _is_unloading_state;

// These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock so that we can
// properly revoke the bias of the owner if necessary. They are
// locate the owner and stack slot for the BasicLock. They are
// needed because there is no debug information for compiled native
// wrappers and the oop maps are insufficient to allow
// frame::retrieve_receiver() to work. Currently they are expected
// to be byte offsets from the Java stack pointer for maximum code
// sharing between platforms. Note that currently biased locking
// will never cause Class instances to be biased but this code
// handles the static synchronized case as well.
// JVMTI's GetLocalInstance() also uses these offsets to find the receiver
// for non-static native wrapper frames.
// sharing between platforms. JVMTI's GetLocalInstance() uses these
// offsets to find the receiver for non-static native wrapper frames.
ByteSize _native_receiver_sp_offset;
ByteSize _native_basic_lock_sp_offset;

@@ -736,7 +732,7 @@ public:
// is it ok to patch at address?
bool is_patchable_at(address instr_address);

// UseBiasedLocking support
// JVMTI's GetLocalInstance() support
ByteSize native_receiver_sp_offset() {
return _native_receiver_sp_offset;
}
@@ -44,7 +44,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

@@ -186,10 +185,6 @@ void G1FullCollector::prepare_collection() {
reference_processor()->enable_discovery();
reference_processor()->setup_policy(scope()->should_clear_soft_refs());

// We should save the marks of the currently locked biased monitors.
// The marking doesn't preserve the marks of biased objects.
BiasedLocking::preserve_marks();

// Clear and activate derived pointer collection.
clear_and_activate_derived_pointers();
}

@@ -216,8 +211,6 @@ void G1FullCollector::complete_collection() {
// update the derived pointer table.
update_derived_pointers();

BiasedLocking::restore_marks();

_heap->concurrent_mark()->swap_mark_bitmaps();
// Prepare the bitmap for the next (potentially concurrent) marking.
_heap->concurrent_mark()->clear_next_bitmap(_heap->workers());
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -108,19 +108,15 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) {
if (object->forwardee() != NULL) {
// Object should not move but mark-word is used so it looks like the
// object is forwarded. Need to clear the mark and it's no problem
// since it will be restored by preserved marks. There is an exception
// with BiasedLocking, in this case forwardee() will return NULL
// even if the mark-word is used. This is no problem since
// forwardee() will return NULL in the compaction phase as well.
// since it will be restored by preserved marks.
object->init_mark();
} else {
// Make sure object has the correct mark-word set or that it will be
// fixed when restoring the preserved marks.
assert(object->mark() == markWord::prototype_for_klass(object->klass()) || // Correct mark
object->mark_must_be_preserved() || // Will be restored by PreservedMarksSet
(UseBiasedLocking && object->has_bias_pattern()), // Will be restored by BiasedLocking
assert(object->mark() == markWord::prototype() || // Correct mark
object->mark_must_be_preserved(), // Will be restored by PreservedMarksSet
"should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
p2i(object), object->mark().value(), markWord::prototype_for_klass(object->klass()).value());
p2i(object), object->mark().value(), markWord::prototype().value());
}
assert(object->forwardee() == NULL, "should be forwarded to NULL");
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -80,11 +80,10 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
oop forwardee = obj->forwardee();
if (forwardee == NULL) {
// Not forwarded, return current reference.
assert(obj->mark() == markWord::prototype_for_klass(obj->klass()) || // Correct mark
obj->mark_must_be_preserved() || // Will be restored by PreservedMarksSet
(UseBiasedLocking && obj->has_bias_pattern()), // Will be restored by BiasedLocking
assert(obj->mark() == markWord::prototype() || // Correct mark
obj->mark_must_be_preserved(), // Will be restored by PreservedMarksSet
"Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
p2i(obj), obj->mark().value(), markWord::prototype_for_klass(obj->klass()).value());
p2i(obj), obj->mark().value(), markWord::prototype().value());
return;
}
@@ -62,7 +62,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -29,7 +29,7 @@

#include "classfile/classLoaderData.inline.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.inline.hpp"
#include "oops/markWord.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"

@@ -83,9 +83,7 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
oop new_obj = cast_to_oop(obj->mark().decode_pointer());

assert(new_obj != NULL || // is forwarding ptr?
obj->mark() == markWord::prototype() || // not gc marked?
(UseBiasedLocking && obj->mark().has_bias_pattern()),
// not gc marked?
obj->mark() == markWord::prototype(), // not gc marked?
"should be forwarded");

if (new_obj != NULL) {
@@ -62,7 +62,6 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"

@@ -439,8 +438,7 @@ bool GenCollectedHeap::must_clear_all_soft_refs() {
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
bool is_tlab, bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking) {
bool is_tlab, bool run_verification, bool clear_soft_refs) {
FormatBuffer<> title("Collect gen: %s", gen->short_name());
GCTraceTime(Trace, gc, phases) t1(title);
TraceCollectorStats tcs(gen->counters());

@@ -461,14 +459,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
}
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

if (restore_marks_for_biased_locking) {
// We perform this mark word preservation work lazily
// because it's only at this point that we know whether we
// absolutely have to do it; we want to avoid doing it for
// scavenge-only collections where it's unnecessary
BiasedLocking::preserve_marks();
}

// Do collection work
{
// Note on ref discovery: For what appear to be historical reasons,

@@ -572,8 +562,7 @@ void GenCollectedHeap::do_collection(bool full,
size,
is_tlab,
run_verification && VerifyGCLevel <= 0,
do_clear_all_soft_refs,
false);
do_clear_all_soft_refs);

if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {

@@ -632,8 +621,7 @@ void GenCollectedHeap::do_collection(bool full,
size,
is_tlab,
run_verification && VerifyGCLevel <= 1,
do_clear_all_soft_refs,
true);
do_clear_all_soft_refs);

// Adjust generation sizes.
_old_gen->compute_new_size();

@@ -655,8 +643,6 @@ void GenCollectedHeap::do_collection(bool full,
// the initial value for "complete" flag.
gc_epilogue(true);

BiasedLocking::restore_marks();

print_heap_after_gc();
}
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -90,8 +90,7 @@ private:

// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking);
bool run_verification, bool clear_soft_refs);

// Reserve aligned space for the heap as needed by the contained generations.
ReservedHeapSpace allocate(size_t alignment);
@@ -382,12 +382,8 @@ void MemAllocator::mem_clear(HeapWord* mem) const {

oop MemAllocator::finish(HeapWord* mem) const {
assert(mem != NULL, "NULL object pointer");
if (UseBiasedLocking) {
oopDesc::set_mark(mem, _klass->prototype_header());
} else {
// May be bootstrapping
oopDesc::set_mark(mem, markWord::prototype());
}
// May be bootstrapping
oopDesc::set_mark(mem, markWord::prototype());
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
// concurrent collectors.
@@ -28,7 +28,7 @@
#include "gc/shenandoah/shenandoahForwarding.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "oops/markWord.inline.hpp"
#include "oops/markWord.hpp"
#include "runtime/thread.hpp"

inline oop ShenandoahForwarding::get_forwardee_raw(oop obj) {
@@ -53,7 +53,6 @@
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"

@@ -182,7 +181,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
heap->sync_pinned_region_status();

// The rest of prologue:
BiasedLocking::preserve_marks();
_preserved_marks->init(heap->workers()->active_workers());

assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");

@@ -230,7 +228,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
{
// Epilogue
_preserved_marks->restore(heap->workers());
BiasedLocking::restore_marks();
_preserved_marks->reclaim();
}
@@ -55,7 +55,6 @@
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"

@@ -727,9 +726,6 @@ JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, B
#ifdef ASSERT
current->last_frame().interpreter_frame_verify_monitor(elem);
#endif
if (PrintBiasedLockingStatistics) {
Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
}
Handle h_obj(current, elem->obj());
assert(Universe::heap()->is_in_or_null(h_obj()),
"must be NULL or an object");

@@ -1034,27 +1030,6 @@ JRT_ENTRY(nmethod*,
osr_nm = NULL;
}
}

if (osr_nm != NULL) {
// We may need to do on-stack replacement which requires that no
// monitors in the activation are biased because their
// BasicObjectLocks will need to migrate during OSR. Force
// unbiasing of all monitors in the activation now (even though
// the OSR nmethod might be invalidated) because we don't have a
// safepoint opportunity later once the migration begins.
if (UseBiasedLocking) {
ResourceMark rm;
GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
for( BasicObjectLock *kptr = last_frame.monitor_end();
kptr < last_frame.monitor_begin();
kptr = last_frame.next_monitor(kptr) ) {
if( kptr->obj() != NULL ) {
objects_to_revoke->append(Handle(current, kptr->obj()));
}
}
BiasedLocking::revoke(objects_to_revoke, current);
}
}
return osr_nm;
JRT_END
@@ -576,12 +576,9 @@ void BytecodeInterpreter::run(interpreterState istate) {
}

// The initial monitor is ours for the taking.
// Monitor not filled in frame manager any longer as this caused race condition with biased locking.
BasicObjectLock* mon = &istate->monitor_base()[-1];
mon->set_obj(rcvr);

assert(!UseBiasedLocking, "Not implemented");

// Traditional lightweight locking.
markWord displaced = rcvr->mark().set_unlocked();
mon->lock()->set_displaced_header(displaced);

@@ -676,8 +673,6 @@ void BytecodeInterpreter::run(interpreterState istate) {
assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
entry->set_obj(lockee);

assert(!UseBiasedLocking, "Not implemented");

// traditional lightweight locking
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);

@@ -1519,8 +1514,6 @@ run:
if (entry != NULL) {
entry->set_obj(lockee);

assert(!UseBiasedLocking, "Not implemented");

// traditional lightweight locking
markWord displaced = lockee->mark().set_unlocked();
entry->lock()->set_displaced_header(displaced);

@@ -1553,8 +1546,6 @@ run:
markWord header = lock->displaced_header();
most_recent->set_obj(NULL);

assert(!UseBiasedLocking, "Not implemented");

// If it isn't recursive we either must swap old header or call the runtime
bool call_vm = UseHeavyMonitors;
if (header.to_pointer() != NULL || call_vm) {

@@ -1877,7 +1868,6 @@ run:
oop obj = cast_to_oop(result);

// Initialize header
assert(!UseBiasedLocking, "Not implemented");
obj->set_mark(markWord::prototype());
obj->set_klass_gap(0);
obj->set_klass(ik);

@@ -2683,8 +2673,6 @@ run:
markWord header = lock->displaced_header();
end->set_obj(NULL);

assert(!UseBiasedLocking, "Not implemented");

// If it isn't recursive we either must swap old header or call the runtime
if (header.to_pointer() != NULL) {
markWord old_header = markWord::encode(lock);

@@ -2751,8 +2739,6 @@ run:
markWord header = lock->displaced_header();
base->set_obj(NULL);

assert(!UseBiasedLocking, "Not implemented");

// If it isn't recursive we either must swap old header or call the runtime
if (header.to_pointer() != NULL) {
markWord old_header = markWord::encode(lock);
@@ -70,25 +70,6 @@
<Field type="Class" name="valueBasedClass" label="Value Based Class" />
</Event>

<Event name="BiasedLockRevocation" category="Java Virtual Machine, Runtime" label="Biased Lock Revocation" description="Revoked bias of object" thread="true"
stackTrace="true">
<Field type="Class" name="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked" />
<Field type="ulong" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
<Field type="Thread" name="previousOwner" label="Previous Owner" description="Thread owning the bias before revocation" />
</Event>

<Event name="BiasedLockSelfRevocation" category="Java Virtual Machine, Runtime" label="Biased Lock Self Revocation" description="Revoked bias of object biased towards own thread"
thread="true" stackTrace="true">
<Field type="Class" name="lockClass" label="Lock Class" description="Class of object whose biased lock was revoked" />
</Event>

<Event name="BiasedLockClassRevocation" category="Java Virtual Machine, Runtime" label="Biased Lock Class Revocation" description="Revoked biases for all instances of a class"
thread="true" stackTrace="true">
<Field type="Class" name="revokedClass" label="Revoked Class" description="Class whose biased locks were revoked" />
<Field type="boolean" name="disableBiasing" label="Disable Further Biasing" description="Whether further biasing for instances of this class will be allowed" />
<Field type="ulong" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
</Event>

<Event name="ReservedStackActivation" category="Java Virtual Machine, Runtime" label="Reserved Stack Activation"
description="Activation of Reserved Stack Area caused by stack overflow with ReservedStackAccess annotated method in call stack" thread="true" stackTrace="true"
startTime="false">
@@ -229,7 +229,6 @@ JVMCIObjectArray CompilerToVM::initialize_intrinsics(JVMCI_TRAPS) {
do_intx_flag(TypeProfileWidth) \
do_bool_flag(UseAESIntrinsics) \
X86_ONLY(do_intx_flag(UseAVX)) \
do_bool_flag(UseBiasedLocking) \
do_bool_flag(UseCRC32Intrinsics) \
do_bool_flag(UseAdler32Intrinsics) \
do_bool_flag(UseCompressedClassPointers) \

@ -45,7 +45,6 @@
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"

@ -203,7 +203,6 @@
volatile_nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _layout_helper, jint) \
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _prototype_header, markWord) \
volatile_nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \

@ -648,14 +647,11 @@
declare_constant(markWord::hash_shift) \
declare_constant(markWord::monitor_value) \
\
declare_constant(markWord::biased_lock_mask_in_place) \
declare_constant(markWord::age_mask_in_place) \
declare_constant(markWord::epoch_mask_in_place) \
declare_constant(markWord::hash_mask) \
declare_constant(markWord::hash_mask_in_place) \
\
declare_constant(markWord::unlocked_value) \
declare_constant(markWord::biased_lock_pattern) \
\
declare_constant(markWord::no_hash_in_place) \
declare_constant(markWord::no_lock_in_place) \

@ -39,7 +39,6 @@
LOG_TAG(arguments) \
LOG_TAG(attach) \
LOG_TAG(barrier) \
LOG_TAG(biasedlocking) \
LOG_TAG(blocks) \
LOG_TAG(bot) \
LOG_TAG(breakpoint) \

@ -75,7 +75,6 @@
#include "prims/methodComparator.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"

@ -508,12 +507,6 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, unsigned kind, Klass
assert(NULL == _methods, "underlying memory not zeroed?");
assert(is_instance_klass(), "is layout incorrect?");
assert(size_helper() == parser.layout_size(), "incorrect size_helper?");

// Set biased locking bit for all instances of this class; it will be
// cleared if revocation occurs too often for this type
if (UseBiasedLocking && BiasedLocking::enabled()) {
set_prototype_header(markWord::biased_locking_prototype());
}
}

void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,

@ -2530,15 +2523,9 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
array_klasses()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
}

// Initialize current biased locking state.
if (UseBiasedLocking && BiasedLocking::enabled()) {
set_prototype_header(markWord::biased_locking_prototype());
}

// Initialize @ValueBased class annotation
if (DiagnoseSyncOnValueBasedClasses && has_value_based_class_annotation()) {
set_is_value_based();
set_prototype_header(markWord::prototype());
}
}

@ -202,7 +202,6 @@ void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word
// The constructor is also used from CppVtableCloner,
// which doesn't zero out the memory before calling the constructor.
Klass::Klass(KlassID id) : _id(id),
_prototype_header(markWord::prototype()),
_shared_class_path_index(-1) {
CDS_ONLY(_shared_class_flags = 0;)
CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1;)

@ -712,10 +711,6 @@ const char* Klass::external_kind() const {
return "class";
}

int Klass::atomic_incr_biased_lock_revocation_count() {
return (int) Atomic::add(&_biased_lock_revocation_count, 1);
}

// Unless overridden, jvmti_class_status has no flags set.
jint Klass::jvmti_class_status() const {
return 0;

@ -744,8 +739,6 @@ void Klass::oop_print_on(oop obj, outputStream* st) {
// print header
obj->mark().print_on(st);
st->cr();
st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value());
st->cr();
}

// print class

@ -159,12 +159,6 @@ class Klass : public Metadata {

JFR_ONLY(DEFINE_TRACE_ID_FIELD;)

// Biased locking implementation and statistics
// (the 64-bit chunk goes first, to avoid some fragmentation)
jlong _last_biased_lock_bulk_revocation_time;
markWord _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;

private:
// This is an index into FileMapHeader::_shared_path_table[], to
// associate this class with the JAR file where it's loaded from during

@ -645,30 +639,6 @@ protected:
bool is_cloneable() const;
void set_is_cloneable();

// Biased locking support
// Note: the prototype header is always set up to be at least the
// prototype markWord. If biased locking is enabled it may further be
// biasable and have an epoch.
markWord prototype_header() const { return _prototype_header; }

// NOTE: once instances of this klass are floating around in the
// system, this header must only be updated at a safepoint.
// NOTE 2: currently we only ever set the prototype header to the
// biasable prototype for instanceKlasses. There is no technical
// reason why it could not be done for arrayKlasses aside from
// wanting to reduce the initial scope of this optimization. There
// are potential problems in setting the bias pattern for
// JVM-internal oops.
inline void set_prototype_header(markWord header);
static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); }

int biased_lock_revocation_count() const { return (int) _biased_lock_revocation_count; }
// Atomically increments biased_lock_revocation_count and returns updated value
int atomic_incr_biased_lock_revocation_count();
void set_biased_lock_revocation_count(int val) { _biased_lock_revocation_count = (jint) val; }
jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
void set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }

JFR_ONLY(DEFINE_TRACE_ID_METHODS;)

virtual void metaspace_pointers_do(MetaspaceClosure* iter);

@ -51,11 +51,6 @@ inline bool Klass::is_loader_alive() const {
return class_loader_data()->is_alive();
}

inline void Klass::set_prototype_header(markWord header) {
assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
_prototype_header = header;
}

inline oop Klass::java_mirror() const {
return _java_mirror.resolve();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -80,18 +80,13 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const {
st->print(" locked(" INTPTR_FORMAT ")", value());
} else {
st->print(" mark(");
// Biased bit is 3rd rightmost bit
if (is_neutral()) { // last bits = 001
if (is_neutral()) { // last bits = 01
st->print("is_neutral");
if (has_no_hash()) {
st->print(" no_hash");
} else {
st->print(" hash=" INTPTR_FORMAT, hash());
}
} else if (has_bias_pattern()) { // last bits = 101
st->print("is_biased");
JavaThread* jt = biased_locker();
st->print(" biased_locker=" INTPTR_FORMAT " epoch=%d", p2i(jt), bias_epoch());
} else {
st->print("??");
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -36,51 +36,21 @@
//
// 32 bits:
// --------
// hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
// hash:25 ------------>| age:4 unused_gap:1 lock:2 (normal object)
//
// 64 bits:
// --------
// unused:25 hash:31 -->| unused_gap:1 age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:54 epoch:2 unused_gap:1 age:4 biased_lock:1 lock:2 (biased object)
// unused:25 hash:31 -->| unused_gap:1 age:4 unused_gap:1 lock:2 (normal object)
//
// - hash contains the identity hash value: largest value is
// 31 bits, see os::random(). Also, 64-bit vm's require
// a hash value no bigger than 32 bits because they will not
// properly generate a mask larger than that: see library_call.cpp
//
// - the biased lock pattern is used to bias a lock toward a given
// thread. When this pattern is set in the low three bits, the lock
// is either biased toward a given thread or "anonymously" biased,
// indicating that it is possible for it to be biased. When the
// lock is biased toward a given thread, locking and unlocking can
// be performed by that thread without using atomic operations.
// When a lock's bias is revoked, it reverts back to the normal
// locking scheme described below.
//
// Note that we are overloading the meaning of the "unlocked" state
// of the header. Because we steal a bit from the age we can
// guarantee that the bias pattern will never be seen for a truly
// unlocked object.
//
// Note also that the biased state contains the age bits normally
// contained in the object header. Large increases in scavenge
// times were seen when these bits were absent and an arbitrary age
// assigned to all biased objects, because they tended to consume a
// significant fraction of the eden semispaces and were not
// promoted promptly, causing an increase in the amount of copying
// performed. The runtime system aligns all JavaThread* pointers to
// a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
// to make room for the age bits & the epoch bits (used in support of
// biased locking).
//
// [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
// [0 | epoch | age | 1 | 01] lock is anonymously biased
//
// - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
// [ptr | 00] locked ptr points to real header on stack
// [header | 0 | 01] unlocked regular object header
// [header | 01] unlocked regular object header
// [ptr | 10] monitor inflated lock (header is wapped out)
// [ptr | 11] marked used to mark an object
// [0 ............ 0| 00] inflating inflation in progress
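
Restated as a standalone sketch (not JDK code), the header states that remain once the bias pattern is gone are driven entirely by the two low lock bits; the constants below mirror the hunk above:

// Decoding the lock state from the two low mark-word bits (illustration only).
#include <cstdint>
#include <cstdio>

static const uintptr_t lock_mask      = 0x3;  // right_n_bits(lock_bits = 2)
static const uintptr_t locked_value   = 0;    // [ptr              | 00]
static const uintptr_t unlocked_value = 1;    // [header           | 01]
static const uintptr_t monitor_value  = 2;    // [ptr              | 10]

static const char* lock_state(uintptr_t mark) {
  if (mark == 0) return "inflating";          // [0 ............ 0 | 00]
  switch (mark & lock_mask) {
    case locked_value:   return "stack-locked";
    case unlocked_value: return "unlocked";
    case monitor_value:  return "monitor (inflated)";
    default:             return "marked (GC)"; // [ptr              | 11]
  }
}

int main() {
  printf("%s\n", lock_state(0x1));   // a neutral header: unlocked, no hash
  printf("%s\n", lock_state(0x0));   // markWord::INFLATING()
  return 0;
}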

@ -128,42 +98,26 @@ class markWord {
// Constants
static const int age_bits = 4;
static const int lock_bits = 2;
static const int biased_lock_bits = 1;
static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits;
static const int first_unused_gap_bits = 1;
static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - first_unused_gap_bits;
static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits;
static const int unused_gap_bits = LP64_ONLY(1) NOT_LP64(0);
static const int epoch_bits = 2;
static const int second_unused_gap_bits = LP64_ONLY(1) NOT_LP64(0);

// The biased locking code currently requires that the age bits be
// contiguous to the lock bits.
static const int lock_shift = 0;
static const int biased_lock_shift = lock_bits;
static const int age_shift = lock_bits + biased_lock_bits;
static const int unused_gap_shift = age_shift + age_bits;
static const int hash_shift = unused_gap_shift + unused_gap_bits;
static const int epoch_shift = hash_shift;
static const int age_shift = lock_bits + first_unused_gap_bits;
static const int hash_shift = age_shift + age_bits + second_unused_gap_bits;

static const uintptr_t lock_mask = right_n_bits(lock_bits);
static const uintptr_t lock_mask_in_place = lock_mask << lock_shift;
static const uintptr_t biased_lock_mask = right_n_bits(lock_bits + biased_lock_bits);
static const uintptr_t biased_lock_mask_in_place= biased_lock_mask << lock_shift;
static const uintptr_t biased_lock_bit_in_place = 1 << biased_lock_shift;
static const uintptr_t age_mask = right_n_bits(age_bits);
static const uintptr_t age_mask_in_place = age_mask << age_shift;
static const uintptr_t epoch_mask = right_n_bits(epoch_bits);
static const uintptr_t epoch_mask_in_place = epoch_mask << epoch_shift;

static const uintptr_t hash_mask = right_n_bits(hash_bits);
static const uintptr_t hash_mask_in_place = hash_mask << hash_shift;

// Alignment of JavaThread pointers encoded in object header required by biased locking
static const size_t biased_lock_alignment = 2 << (epoch_shift + epoch_bits);

static const uintptr_t locked_value = 0;
static const uintptr_t unlocked_value = 1;
static const uintptr_t monitor_value = 2;
static const uintptr_t marked_value = 3;
static const uintptr_t biased_lock_pattern = 5;

static const uintptr_t no_hash = 0 ; // no hash value assigned
static const uintptr_t no_hash_in_place = (address_word)no_hash << hash_shift;
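
The new field widths and shifts can be checked with a little standalone arithmetic (an illustration, not JDK code; the figures assume a 64-bit VM, where LP64_ONLY(1) evaluates to 1):

// Compile-time check of the post-removal mark-word layout on 64-bit.
constexpr int BitsPerWord            = 64;
constexpr int age_bits               = 4;
constexpr int lock_bits              = 2;
constexpr int first_unused_gap_bits  = 1;
constexpr int max_hash_bits          = BitsPerWord - age_bits - lock_bits - first_unused_gap_bits;
constexpr int hash_bits              = max_hash_bits > 31 ? 31 : max_hash_bits;
constexpr int second_unused_gap_bits = 1;   // LP64_ONLY(1)

constexpr int lock_shift = 0;
constexpr int age_shift  = lock_bits + first_unused_gap_bits;              // 3
constexpr int hash_shift = age_shift + age_bits + second_unused_gap_bits;  // 8

static_assert(lock_shift == 0 && max_hash_bits == 57, "57 bits remain, but the hash still uses only 31 of them");
static_assert(hash_bits == 31, "identity hash stays at 31 bits");
static_assert(age_shift == 3 && hash_shift == 8, "layout: lock:2 gap:1 age:4 gap:1 hash:31 unused:25");

int main() { return 0; }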

@ -171,60 +125,22 @@ class markWord {

static const uint max_age = age_mask;

static const int max_bias_epoch = epoch_mask;

// Creates a markWord with all bits set to zero.
static markWord zero() { return markWord(uintptr_t(0)); }

// Biased Locking accessors.
// These must be checked by all code which calls into the
// ObjectSynchronizer and other code. The biasing is not understood
// by the lower-level CAS-based locking code, although the runtime
// fixes up biased locks to be compatible with it when a bias is
// revoked.
bool has_bias_pattern() const {
return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
}
JavaThread* biased_locker() const {
assert(has_bias_pattern(), "should not call this otherwise");
return (JavaThread*) mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place));
}
// Indicates that the mark has the bias bit set but that it has not
// yet been biased toward a particular thread
bool is_biased_anonymously() const {
return (has_bias_pattern() && (biased_locker() == NULL));
}
// Indicates epoch in which this bias was acquired. If the epoch
// changes due to too many bias revocations occurring, the biases
// from the previous epochs are all considered invalid.
int bias_epoch() const {
assert(has_bias_pattern(), "should not call this otherwise");
return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
}
markWord set_bias_epoch(int epoch) {
assert(has_bias_pattern(), "should not call this otherwise");
assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
return markWord(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
}
markWord incr_bias_epoch() {
return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
}
// Prototype mark for initialization
static markWord biased_locking_prototype() {
return markWord( biased_lock_pattern );
}

// lock accessors (note that these assume lock_shift == 0)
bool is_locked() const {
return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
}
bool is_unlocked() const {
return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
return (mask_bits(value(), lock_mask_in_place) == unlocked_value);
}
bool is_marked() const {
return (mask_bits(value(), lock_mask_in_place) == marked_value);
}
bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
bool is_neutral() const {
return (mask_bits(value(), lock_mask_in_place) == unlocked_value);
}

// Special temporary state of the markWord while being inflated.
// Code that looks at mark outside a lock need to take this into account.
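
The only behavioral change in these accessors is the mask width: is_unlocked() and is_neutral() previously tested the three-bit biased_lock mask so that the 101 bias pattern would not read as unlocked; with that pattern gone they can mask just the two lock bits. A standalone before/after illustration (not JDK code):

// Old vs. new unlocked test; the two only disagree on the now-impossible bias pattern.
#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask           = 0x3;  // new: lock:2
constexpr uintptr_t biased_lock_mask    = 0x7;  // old: biased_lock:1 + lock:2
constexpr uintptr_t unlocked_value      = 0x1;
constexpr uintptr_t biased_lock_pattern = 0x5;  // old 101 pattern, no longer produced

bool old_is_unlocked(uintptr_t m) { return (m & biased_lock_mask) == unlocked_value; }
bool new_is_unlocked(uintptr_t m) { return (m & lock_mask)        == unlocked_value; }

int main() {
  assert(old_is_unlocked(0x1) && new_is_unlocked(0x1));   // neutral header: both agree
  // The checks only differed for the bias pattern, which can no longer occur:
  assert(!old_is_unlocked(biased_lock_pattern) && new_is_unlocked(biased_lock_pattern));
  return 0;
}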

@ -239,26 +155,15 @@ class markWord {
static markWord INFLATING() { return zero(); } // inflate-in-progress

// Should this header be preserved during GC?
inline bool must_be_preserved(const oopDesc* obj) const;
bool must_be_preserved(const oopDesc* obj) const {
return (!is_unlocked() || !has_no_hash());
}

// Should this header (including its age bits) be preserved in the
// case of a promotion failure during scavenge?
// Note that we special case this situation. We want to avoid
// calling BiasedLocking::preserve_marks()/restore_marks() (which
// decrease the number of mark words that need to be preserved
// during GC) during each scavenge. During scavenges in which there
// is no promotion failure, we actually don't need to call the above
// routines at all, since we don't mutate and re-initialize the
// marks of promoted objects using init_mark(). However, during
// scavenges which result in promotion failure, we do re-initialize
// the mark words of objects, meaning that we should have called
// these mark word preservation routines. Currently there's no good
// place in which to call them in any of the scavengers (although
// guarded by appropriate locks we could make one), but the
// observation is that promotion failures are quite rare and
// reducing the number of mark words preserved during them isn't a
// high priority.
inline bool must_be_preserved_for_promotion_failure(const oopDesc* obj) const;
bool must_be_preserved_for_promotion_failure(const oopDesc* obj) const {
return (!is_unlocked() || !has_no_hash());
}

// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.

@ -305,13 +210,6 @@ class markWord {
uintptr_t tmp = (uintptr_t) monitor;
return markWord(tmp | monitor_value);
}
static markWord encode(JavaThread* thread, uint age, int bias_epoch) {
uintptr_t tmp = (uintptr_t) thread;
assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
assert(age <= max_age, "age too large");
assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
return markWord(tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
}

// used to encode pointers during GC
markWord clear_lock_bits() { return markWord(value() & ~lock_mask_in_place); }

@ -341,9 +239,6 @@ class markWord {
return markWord( no_hash_in_place | no_lock_in_place );
}

// Helper function for restoration of unmarked mark oops during GC
static inline markWord prototype_for_klass(const Klass* klass);

// Debugging
void print_on(outputStream* st, bool print_monitor_info = true) const;

@ -351,7 +246,7 @@ class markWord {
inline static markWord encode_pointer_as_mark(void* p) { return from_pointer(p).set_marked(); }

// Recover address of oop from encoded form used in mark
inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
inline void* decode_pointer() { return (void*)clear_lock_bits().value(); }
};

// Support atomic operations.

@ -1,78 +0,0 @@
/*
* Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_OOPS_MARKWORD_INLINE_HPP
#define SHARE_OOPS_MARKWORD_INLINE_HPP

#include "oops/markWord.hpp"

#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"

// Should this header be preserved during GC?
inline bool markWord::must_be_preserved(const oopDesc* obj) const {
if (UseBiasedLocking) {
if (has_bias_pattern()) {
// Will reset bias at end of collection
// Mark words of biased and currently locked objects are preserved separately
return false;
}
markWord prototype_header = prototype_for_klass(obj->klass());
if (prototype_header.has_bias_pattern()) {
// Individual instance which has its bias revoked; must return
// true for correctness
return true;
}
}
return (!is_unlocked() || !has_no_hash());
}

// Should this header be preserved in the case of a promotion failure during scavenge?
inline bool markWord::must_be_preserved_for_promotion_failure(const oopDesc* obj) const {
if (UseBiasedLocking) {
// We don't explicitly save off the mark words of biased and
// currently-locked objects during scavenges, so if during a
// promotion failure we encounter either a biased mark word or a
// klass which still has a biasable prototype header, we have to
// preserve the mark word. This results in oversaving, but promotion
// failures are rare, and this avoids adding more complex logic to
// the scavengers to call new variants of
// BiasedLocking::preserve_marks() / restore_marks() in the middle
// of a scavenge when a promotion failure has first been detected.
if (has_bias_pattern() || prototype_for_klass(obj->klass()).has_bias_pattern()) {
return true;
}
}
return (!is_unlocked() || !has_no_hash());
}

inline markWord markWord::prototype_for_klass(const Klass* klass) {
markWord prototype_header = klass->prototype_header();
assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");

return prototype_header;
}

#endif // SHARE_OOPS_MARKWORD_INLINE_HPP
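
With markWord.inline.hpp deleted, the bias special cases in the two GC preservation predicates disappear and both collapse to the inline bodies now in markWord.hpp. A compact standalone restatement (not JDK code; the 64-bit hash layout is assumed):

// A mark word needs preserving only if it is not a plain unlocked header or it carries a hash.
#include <cassert>
#include <cstdint>

constexpr uintptr_t lock_mask      = 0x3;
constexpr uintptr_t unlocked_value = 0x1;
constexpr int       hash_shift     = 8;                        // 64-bit layout
constexpr uintptr_t hash_mask      = (uintptr_t(1) << 31) - 1;

bool is_unlocked(uintptr_t m) { return (m & lock_mask) == unlocked_value; }
bool has_no_hash(uintptr_t m) { return ((m >> hash_shift) & hash_mask) == 0; }

// Both must_be_preserved() and must_be_preserved_for_promotion_failure() reduce to:
bool must_be_preserved(uintptr_t m) { return !is_unlocked(m) || !has_no_hash(m); }

int main() {
  assert(!must_be_preserved(0x1));                   // neutral header: nothing to keep
  assert(must_be_preserved(0x1 | (0x1234u << 8)));   // hashed: keep it
  assert(must_be_preserved(0x0));                    // stack-locked / INFLATING: keep it
  return 0;
}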

@ -233,7 +233,6 @@ class oopDesc {
// locking operations
inline bool is_locked() const;
inline bool is_unlocked() const;
inline bool has_bias_pattern() const;

// asserts and guarantees
static bool is_oop(oop obj, bool ignore_mark_word = false);

@ -283,8 +282,6 @@ class oopDesc {
inline static bool is_instanceof_or_null(oop obj, Klass* klass);

// identity hash; returns the identity hash key (computes it if necessary)
// NOTE with the introduction of UseBiasedLocking that identity_hash() might reach a
// safepoint if called on a biased object. Calling code must be aware of that.
inline intptr_t identity_hash();
intptr_t slow_identity_hash();

@ -32,7 +32,7 @@
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/markWord.inline.hpp"
#include "oops/markWord.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

@ -75,7 +75,7 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memo
}

void oopDesc::init_mark() {
set_mark(markWord::prototype_for_klass(klass()));
set_mark(markWord::prototype());
}

Klass* oopDesc::klass() const {

@ -254,10 +254,6 @@ bool oopDesc::is_unlocked() const {
return mark().is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
return mark().has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
return mark().is_marked();
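
Since prototype_for_klass() and the per-Klass prototype header are gone, init_mark() installs the same neutral header for every new object. A small standalone check of what that value is (not JDK code; 64-bit layout assumed):

// markWord::prototype() == no_hash_in_place | no_lock_in_place == 0x1: unlocked, no hash.
#include <cstdint>

constexpr uintptr_t unlocked_value   = 0x1;
constexpr uintptr_t no_hash          = 0x0;
constexpr int       hash_shift       = 8;                 // 64-bit layout
constexpr uintptr_t no_hash_in_place = no_hash << hash_shift;
constexpr uintptr_t no_lock_in_place = unlocked_value;

constexpr uintptr_t prototype = no_hash_in_place | no_lock_in_place;
static_assert(prototype == 0x1, "a freshly allocated object's header is simply 'unlocked, no hash'");

int main() { return 0; }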

@ -452,10 +452,6 @@
notproduct(bool, PrintLockStatistics, false, \
"Print precise statistics on the dynamic lock usage") \
\
product(bool, PrintPreciseBiasedLockingStatistics, false, DIAGNOSTIC, \
"(Deprecated) Print per-lock-site statistics of biased locking " \
"in JVM") \
\
product(bool, PrintPreciseRTMLockingStatistics, false, DIAGNOSTIC, \
"Print per-lock-site statistics of rtm locking in JVM") \
\

@ -511,9 +507,6 @@
notproduct(bool, VerifyConnectionGraph , true, \
"Verify Connection Graph construction in Escape Analysis") \
\
product(bool, UseOptoBiasInlining, true, \
"(Deprecated) Generate biased locking code in C2 ideal graph") \
\
product(bool, OptimizeStringConcat, true, \
"Optimize the construction of Strings by StringBuilder") \
\

Some files were not shown because too many files have changed in this diff