6822110: Add AddressLiteral class on SPARC

The Address class on SPARC currently handles both addresses and address literals, which makes the Address class more complicated than it has to be.

Reviewed-by: never, kvn
Christian Thalinger 2009-04-21 11:16:30 -07:00
parent fd05c28901
commit 57d945fd3d
22 changed files with 1129 additions and 1237 deletions
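
Background for the diff below: after this change, Address is used only for register-relative memory operands, while the new AddressLiteral carries an absolute value together with its relocation spec and has to be materialized with sethi/add before use. The following is a minimal, illustrative sketch of that split, not the real class declarations; the simplified Register stand-in and field names are assumptions made for readability.

#include <stdint.h>

// Illustrative sketch only: simplified stand-ins, not the actual HotSpot classes.
typedef int Register;                 // stand-in for the SPARC Register type

struct Address {                      // register-relative memory operand, e.g. [G2_thread + disp]
  Register base;
  int      disp;                      // simm13 displacement
  Address(Register b, int d) : base(b), disp(d) {}
};

struct AddressLiteral {               // absolute address plus (elided here) relocation info
  intptr_t value;
  explicit AddressLiteral(intptr_t v) : value(v) {}
  int low10() const { return (int)(value & 0x3ff); }  // low 10 bits, fits an add/ld immediate
};

// Usage pattern that recurs throughout the diff:
//   sethi(addrlit, Rtmp);                      // high bits of the literal into a temp register
//   Address addr(Rtmp, addrlit.low10());       // low 10 bits become the displacement
//   ld(addr, Rdst);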


@@ -25,24 +25,36 @@
#include "incls/_precompiled.incl"
#include "incls/_assembler_sparc.cpp.incl"
// Implementation of Address
Address::Address( addr_type t, int which ) {
switch (t) {
case extra_in_argument:
case extra_out_argument:
_base = t == extra_in_argument ? FP : SP;
_hi = 0;
// Warning: In LP64 mode, _disp will occupy more than 10 bits.
// This is inconsistent with the other constructors but op
// codes such as ld or ldx, only access disp() to get their
// simm13 argument.
_disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
break;
default:
ShouldNotReachHere();
break;
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
assert(scale == 0, "not supported");
RelocationHolder rspec;
if (disp_is_oop) {
rspec = Relocation::spec_simple(relocInfo::oop_type);
}
Register rindex = as_Register(index);
if (rindex != G0) {
Address madr(as_Register(base), rindex);
madr._rspec = rspec;
return madr;
} else {
Address madr(as_Register(base), disp);
madr._rspec = rspec;
return madr;
}
}
Address Argument::address_in_frame() const {
// Warning: In LP64 mode disp will occupy more than 10 bits, but
// op codes such as ld or ldx, only access disp() to get
// their simm13 argument.
int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
if (is_in())
return Address(FP, disp); // In argument.
else
return Address(SP, disp); // Out argument.
}
static const char* argumentNames[][2] = {
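
The new Argument::address_in_frame() above folds the old extra_in_argument/extra_out_argument constructor into plain displacement arithmetic. A small sketch of that arithmetic follows; n_register_parameters (6) and STACK_BIAS (2047) match SPARC V9, but the memory_parameter_word_sp_offset value is only an assumed placeholder, standing in for frame::memory_parameter_word_sp_offset.

#include <stdio.h>

int main() {
  const int n_register_parameters           = 6;    // SPARC passes 6 args in registers
  const int memory_parameter_word_sp_offset = 23;   // assumed for illustration
  const int BytesPerWord                    = 8;    // LP64
  const int STACK_BIAS                      = 2047; // SPARC V9 stack bias

  int number = 7;                                   // the 8th argument, passed in memory
  int disp = (number - n_register_parameters + memory_parameter_word_sp_offset)
             * BytesPerWord + STACK_BIAS;
  printf("in-argument %d lives at [FP + %d]\n", number, disp);
  return 0;
}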
@@ -614,16 +626,17 @@ void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
}
// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
void MacroAssembler::jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
sethi(a, /*ForceRelocatable=*/ true);
patchable_sethi(addrlit, temp);
Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
if (TraceJumps) {
#ifndef PRODUCT
// Must do the add here so relocation can find the remainder of the
// value to be relocated.
add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
save_frame(0);
verify_thread();
ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
@@ -652,15 +665,15 @@ void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file
restore();
jmpl(a.base(), G0, d);
#else
jmpl(a, d, offset);
jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
} else {
jmpl(a, d, offset);
jmpl(a.base(), a.disp(), d);
}
}
void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
jumpl( a, G0, offset, file, line );
void MacroAssembler::jump(AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
jumpl(addrlit, temp, G0, offset, file, line);
}
@@ -678,7 +691,8 @@ void MacroAssembler::set_varargs( Argument inArg, Register d ) {
st_ptr(savePtr.as_register(), savePtr.address_in_frame());
}
// return the address of the first memory slot
add(inArg.address_in_frame(), d);
Address a = inArg.address_in_frame();
add(a.base(), a.disp(), d);
}
// Conditional breakpoint (for assertion checks in assembly code)
@@ -702,7 +716,6 @@ void MacroAssembler::flush_windows() {
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
srl(thread, os::get_serialize_page_shift_count(), tmp2);
if (Assembler::is_simm13(os::vm_page_size())) {
and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
@@ -711,7 +724,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
set((os::vm_page_size() - sizeof(int)), tmp1);
and3(tmp2, tmp1, tmp2);
}
load_address(mem_serialize_page);
set(os::get_memory_serialize_page(), tmp1);
st(G0, tmp1, tmp2);
}
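
For reference, the serialize_memory() change above only swaps the old Address/load_address pair for a direct set(); the offset computation itself is unchanged. A sketch of that computation, with an assumed shift count and page size standing in for os::get_serialize_page_shift_count() and os::vm_page_size():

#include <stdint.h>
#include <stdio.h>

int main() {
  const uint64_t thread_ptr = 0x0000fff012345600ull; // hypothetical JavaThread*
  const int      shift      = 4;                     // assumed serialize-page shift count
  const int      page_size  = 8192;                  // assumed vm page size

  // Mirrors srl(thread, shift, tmp2); and3(tmp2, page_size - sizeof(int), tmp2):
  // a word-aligned, per-thread offset inside the serialization page.
  uint64_t offset = (thread_ptr >> shift) & (uint64_t)(page_size - (int)sizeof(int));
  printf("st %%g0 -> serialize_page + %llu\n", (unsigned long long)offset);
  return 0;
}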
@@ -830,10 +843,10 @@ void MacroAssembler::get_thread() {
mov(G3, L2); // avoid clobbering G3 also
mov(G4, L5); // avoid clobbering G4
#ifdef ASSERT
Address last_get_thread_addr(L3, (address)&last_get_thread);
sethi(last_get_thread_addr);
AddressLiteral last_get_thread_addrlit(&last_get_thread);
set(last_get_thread_addrlit, L3);
inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
st_ptr(L4, last_get_thread_addr);
st_ptr(L4, L3, 0);
#endif
call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
delayed()->nop();
@@ -919,13 +932,9 @@ void MacroAssembler::restore_thread(const Register thread_cache) {
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
assert_not_delayed();
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) +
in_bytes(JavaFrameAnchor::flags_offset()));
Address pc_addr(G2_thread,
0,
in_bytes(JavaThread::last_Java_pc_offset()));
Address flags(G2_thread, JavaThread::frame_anchor_offset() +
JavaFrameAnchor::flags_offset());
Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
// Always set last_Java_pc and flags first because once last_Java_sp is visible
// has_last_Java_frame is true and users will look at the rest of the fields.
@@ -977,22 +986,18 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
#endif // ASSERT
assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
add( last_java_sp, STACK_BIAS, G4_scratch );
st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
assert_not_delayed();
Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
Address pc_addr(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
Address flags(G2_thread,
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
#ifdef ASSERT
// check that it WAS previously set
@@ -1063,7 +1068,7 @@ void MacroAssembler::check_and_forward_exception(Register scratch_reg)
check_and_handle_popframe(scratch_reg);
check_and_handle_earlyret(scratch_reg);
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
Address exception_addr(G2_thread, Thread::pending_exception_offset());
ld_ptr(exception_addr, scratch_reg);
br_null(scratch_reg,false,pt,L);
delayed()->nop();
@@ -1186,7 +1191,7 @@ void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Re
void MacroAssembler::get_vm_result(Register oop_result) {
verify_thread();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
ld_ptr( vm_result_addr, oop_result);
st_ptr(G0, vm_result_addr);
verify_oop(oop_result);
@@ -1195,7 +1200,7 @@ void MacroAssembler::get_vm_result(Register oop_result) {
void MacroAssembler::get_vm_result_2(Register oop_result) {
verify_thread();
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
ld_ptr(vm_result_addr_2, oop_result);
st_ptr(G0, vm_result_addr_2);
verify_oop(oop_result);
@@ -1206,7 +1211,7 @@ void MacroAssembler::get_vm_result_2(Register oop_result) {
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
verify_thread();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
verify_oop(oop_result);
# ifdef ASSERT
@@ -1234,81 +1239,78 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert( tmp != obj, "need separate temp reg");
Address rs(tmp, (address)byte_map_base);
load_address(rs);
stb(G0, rs.base(), obj);
}
// %%% Note: The following six instructions have been moved,
// unchanged, from assembler_sparc.inline.hpp.
// They will be refactored at a later date.
void MacroAssembler::sethi(intptr_t imm22a,
Register d,
bool ForceRelocatable,
RelocationHolder const& rspec) {
Address adr( d, (address)imm22a, rspec );
MacroAssembler::sethi( adr, ForceRelocatable );
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
}
void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
address save_pc;
int shiftcnt;
// if addr of local, do not need to load it
assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
#ifdef _LP64
# ifdef CHECK_DELAY
assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
v9_dep();
// ForceRelocatable = 1;
save_pc = pc();
if (a.hi32() == 0 && a.low32() >= 0) {
Assembler::sethi(a.low32(), a.base(), a.rspec());
int msb32 = (int) (addrlit.value() >> 32);
int lsb32 = (int) (addrlit.value());
if (msb32 == 0 && lsb32 >= 0) {
Assembler::sethi(lsb32, d, addrlit.rspec());
}
else if (a.hi32() == -1) {
Assembler::sethi(~a.low32(), a.base(), a.rspec());
xor3(a.base(), ~low10(~0), a.base());
else if (msb32 == -1) {
Assembler::sethi(~lsb32, d, addrlit.rspec());
xor3(d, ~low10(~0), d);
}
else {
Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22
if ( a.hi32() & 0x3ff ) // Any bits?
or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32
if ( a.low32() & 0xFFFFFC00 ) { // done?
if( (a.low32() >> 20) & 0xfff ) { // Any bits set?
sllx(a.base(), 12, a.base()); // Make room for next 12 bits
or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
shiftcnt = 0; // We already shifted
Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
if (msb32 & 0x3ff) // Any bits?
or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
if (lsb32 & 0xFFFFFC00) { // done?
if ((lsb32 >> 20) & 0xfff) { // Any bits set?
sllx(d, 12, d); // Make room for next 12 bits
or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
shiftcnt = 0; // We already shifted
}
else
shiftcnt = 12;
if( (a.low32() >> 10) & 0x3ff ) {
sllx(a.base(), shiftcnt+10, a.base());// Make room for last 10 bits
or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
if ((lsb32 >> 10) & 0x3ff) {
sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
shiftcnt = 0;
}
else
shiftcnt = 10;
sllx(a.base(), shiftcnt+10 , a.base()); // Shift leaving disp field 0'd
sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
}
else
sllx( a.base(), 32, a.base() );
sllx(d, 32, d);
}
// Pad out the instruction sequence so it can be
// patched later.
if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
a.rtype() != relocInfo::runtime_call_type) ) {
while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
// Pad out the instruction sequence so it can be patched later.
if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
addrlit.rtype() != relocInfo::runtime_call_type)) {
while (pc() < (save_pc + (7 * BytesPerInstWord)))
nop();
}
#else
Assembler::sethi(a.hi(), a.base(), a.rspec());
Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}
void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
internal_sethi(addrlit, d, false);
}
void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
internal_sethi(addrlit, d, true);
}
int MacroAssembler::size_of_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7;
@@ -1339,61 +1341,50 @@ int MacroAssembler::worst_case_size_of_set() {
return size_of_sethi(NULL, true) + 1;
}
void MacroAssembler::set(intptr_t value, Register d,
RelocationHolder const& rspec) {
Address val( d, (address)value, rspec);
if ( rspec.type() == relocInfo::none ) {
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
intptr_t value = addrlit.value();
if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
// can optimize
if (-4096 <= value && value <= 4095) {
if (-4096 <= value && value <= 4095) {
or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
return;
}
if (inv_hi22(hi22(value)) == value) {
sethi(val);
sethi(addrlit, d);
return;
}
}
assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
sethi( val );
if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
add( d, value & 0x3ff, d, rspec);
assert_not_delayed((char*) "cannot put two instructions in delay slot");
internal_sethi(addrlit, d, ForceRelocatable);
if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
add(d, addrlit.low10(), d, addrlit.rspec());
}
}
void MacroAssembler::setsw(int value, Register d,
RelocationHolder const& rspec) {
Address val( d, (address)value, rspec);
if ( rspec.type() == relocInfo::none ) {
// can optimize
if (-4096 <= value && value <= 4095) {
or3(G0, value, d);
return;
}
if (inv_hi22(hi22(value)) == value) {
sethi( val );
#ifndef _LP64
if ( value < 0 ) {
assert_not_delayed();
sra (d, G0, d);
}
#endif
return;
}
}
assert_not_delayed();
sethi( val );
add( d, value & 0x3ff, d, rspec);
// (A negative value could be loaded in 2 insns with sethi/xor,
// but it would take a more complex relocation.)
#ifndef _LP64
if ( value < 0)
sra(d, G0, d);
#endif
void MacroAssembler::set(const AddressLiteral& al, Register d) {
internal_set(al, d, false);
}
// %%% End of moved six set instructions.
void MacroAssembler::set(intptr_t value, Register d) {
AddressLiteral al(value);
internal_set(al, d, false);
}
void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
AddressLiteral al(addr, rspec);
internal_set(al, d, false);
}
void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
internal_set(al, d, true);
}
void MacroAssembler::patchable_set(intptr_t value, Register d) {
AddressLiteral al(value);
internal_set(al, d, true);
}
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
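
The internal_set()/internal_sethi() pair above is where an AddressLiteral actually becomes register contents: sethi supplies bits 31..10 and the trailing add supplies bits 9..0 (the LP64 branch extends this with extra sllx/or steps and pads to a fixed length when the sequence must stay patchable). A minimal sketch of the 32-bit split, reusing the hi22/low10 naming seen in this file; the helpers here are simplified stand-ins, not the assembler's own:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Illustrative only: reassembling a 32-bit constant from its sethi/add halves.
static uint32_t hi22 (uint32_t x) { return x >> 10; }     // bits 31..10
static uint32_t low10(uint32_t x) { return x & 0x3ff; }   // bits 9..0

int main() {
  uint32_t value = 0x12345678u;                       // arbitrary example constant
  uint32_t after_sethi = hi22(value) << 10;           // effect of: sethi %hi(value), %d
  uint32_t materialized = after_sethi | low10(value); // effect of: add %d, %lo(value), %d
  assert(materialized == value);
  printf("0x%08x reassembled as 0x%08x\n", value, materialized);
  return 0;
}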
@@ -1512,17 +1503,17 @@ void MacroAssembler::save_frame_and_mov(int extraWords,
}
Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->allocate_index(obj);
return Address(d, address(obj), oop_Relocation::spec(oop_index));
return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}
Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
return Address(d, address(obj), oop_Relocation::spec(oop_index));
return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}
void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
@@ -1682,7 +1673,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
// Call indirectly to solve generation ordering problem
Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
@@ -1718,7 +1709,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char
sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
// Call indirectly to solve generation ordering problem
Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
// Make some space on stack above the current register window.
// Enough to hold 8 64-bit registers.
@@ -1772,11 +1763,7 @@ void MacroAssembler::verify_oop_subroutine() {
{ // count number of verifies
Register O2_adr = O2;
Register O3_accum = O3;
Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
sethi(count_addr);
ld(count_addr, O3_accum);
inc(O3_accum);
st(O3_accum, count_addr);
inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
}
Register O2_mask = O2;
@@ -1870,8 +1857,8 @@ void MacroAssembler::verify_oop_subroutine() {
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
// call indirectly to solve generation ordering problem
Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(a, O5);
AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(al, O5);
jmpl(O5, 0, O7);
delayed()->nop();
}
@@ -1891,7 +1878,7 @@ void MacroAssembler::stop(const char* msg) {
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
// call indirectly to solve generation ordering problem
Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
load_ptr_contents(a, O5);
jmpl(O5, 0, O7);
delayed()->nop();
@@ -2003,7 +1990,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
Label no_extras;
br( negative, true, pt, no_extras ); // if neg, clear reg
delayed()->set( 0, Rresult); // annuled, so only if taken
delayed()->set(0, Rresult); // annuled, so only if taken
bind( no_extras );
}
@@ -2623,7 +2610,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
return RegisterOrConstant(value + offset);
// load indirectly to solve generation ordering problem
Address a(tmp, (address) delayed_value_addr);
AddressLiteral a(delayed_value_addr);
load_ptr_contents(a, tmp);
#ifdef ASSERT
@@ -3107,21 +3094,21 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
delayed()->nop();
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
xor3(mark_reg, temp_reg, temp_reg);
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
if (counters != NULL) {
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
// Reload mark_reg as we may need it later
ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
}
brx(Assembler::equal, true, Assembler::pt, done);
delayed()->nop();
Label try_revoke_bias;
Label try_rebias;
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
// At this point we know that the header has the bias pattern and
@@ -3185,7 +3172,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
@@ -3216,7 +3203,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// FIXME: due to a lack of registers we currently blow away the age
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
@@ -3283,7 +3270,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
BiasedLockingCounters* counters,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
verify_oop(Roop);
Label done ;
@@ -3386,7 +3373,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// If m->owner != null goto IsLocked
// Pessimistic form: Test-and-CAS vs CAS
// The optimistic form avoids RTS->RTO cache line upgrades.
ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->nop() ;
@@ -3482,7 +3469,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Test-and-CAS vs CAS
// Pessimistic form avoids futile (doomed) CAS attempts
// The optimistic form avoids RTS->RTO cache line upgrades.
ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->nop() ;
@@ -3508,7 +3495,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
Label done ;
@@ -3568,14 +3555,14 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// Note that we use 1-0 locking by default for the inflated case. We
// close the resultant (and rare) race by having contented threads in
// monitorenter periodically poll _owner.
ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ;
ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
xor3 (Rscratch, G2_thread, Rscratch) ;
orcc (Rbox, Rscratch, Rbox) ;
brx (Assembler::notZero, false, Assembler::pn, done) ;
delayed()->
ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ;
ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
orcc (Rbox, Rscratch, G0) ;
if (EmitSync & 65536) {
Label LSucc ;
@@ -3583,12 +3570,12 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->nop() ;
br (Assembler::always, false, Assembler::pt, done) ;
delayed()->
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
bind (LSucc) ;
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
if (os::is_MP()) { membar (StoreLoad) ; }
ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ;
ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
andcc (Rscratch, Rscratch, G0) ;
brx (Assembler::notZero, false, Assembler::pt, done) ;
delayed()-> andcc (G0, G0, G0) ;
@@ -3606,7 +3593,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->nop() ;
br (Assembler::always, false, Assembler::pt, done) ;
delayed()->
st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
}
bind (LStacked) ;
@@ -4005,20 +3992,26 @@ void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
bind(L);
}
void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
Address counter_addr(Rtmp1, counter_ptr);
load_contents(counter_addr, Rtmp2);
void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
AddressLiteral addrlit(counter_addr);
sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
ld(addr, Rtmp2);
inc(Rtmp2);
store_contents(Rtmp2, counter_addr);
st(Rtmp2, addr);
}
void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}
SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, Register temp, const bool* flag_addr,
Assembler::Condition condition) {
_masm = masm;
Address flag(temp, (address)flag_addr, relocInfo::none);
_masm->sethi(flag);
_masm->ldub(flag, temp);
AddressLiteral flag(flag_addr);
_masm->sethi(flag, temp);
_masm->ldub(temp, flag.low10(), temp);
_masm->tst(temp);
_masm->br(condition, false, Assembler::pt, _label);
_masm->delayed()->nop();
@@ -4333,8 +4326,8 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
#else
masm.srl(O0, CardTableModRefBS::card_shift, O0);
#endif
Address rs(O1, (address)byte_map_base);
masm.load_address(rs); // O1 := <card table base>
AddressLiteral addrlit(byte_map_base);
masm.set(addrlit, O1); // O1 := <card table base>
masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
@@ -4494,10 +4487,9 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
#else
post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr);
#endif
assert( tmp != store_addr, "need separate temp reg");
Address rs(tmp, (address)bs->byte_map_base);
load_address(rs);
stb(G0, rs.base(), store_addr);
assert(tmp != store_addr, "need separate temp reg");
set(bs->byte_map_base, tmp);
stb(G0, tmp, store_addr);
}
bind(filtered);
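
The rewritten sequence above (srl, set, stb) is plain card marking: shift the store address down to a card index and write 0 (G0) into the corresponding byte of the card table. A sketch of the address arithmetic, with an assumed card shift and made-up base addresses standing in for the real CardTableModRefBS values:

#include <stdint.h>
#include <stdio.h>

int main() {
  const int      card_shift    = 9;                     // assumed: 512-byte cards
  const uint64_t byte_map_base = 0x0000001000000000ull; // assumed card table base
  const uint64_t store_addr    = 0x0000000812345678ull; // hypothetical address just stored to

  // Mirrors: srl(store_addr, card_shift, store_addr); set(byte_map_base, tmp);
  //          stb(G0, tmp, store_addr);
  uint64_t card_byte = byte_map_base + (store_addr >> card_shift);
  printf("stb %%g0 -> card byte at 0x%016llx\n", (unsigned long long)card_byte);
  return 0;
}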
@@ -4516,24 +4508,6 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
card_table_write(bs->byte_map_base, tmp, store_addr);
}
// Loading values by size and signed-ness
void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2, Register d,
int size_in_bytes, bool is_signed) {
switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
case ~8: // fall through:
case 8: ld_long( s1, s2, d ); break;
case ~4: ldsw( s1, s2, d ); break;
case 4: lduw( s1, s2, d ); break;
case ~2: ldsh( s1, s2, d ); break;
case 2: lduh( s1, s2, d ); break;
case ~1: ldsb( s1, s2, d ); break;
case 1: ldub( s1, s2, d ); break;
default: ShouldNotReachHere();
}
}
void MacroAssembler::load_klass(Register src_oop, Register klass) {
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()
@@ -4563,12 +4537,12 @@ void MacroAssembler::store_klass_gap(Register s, Register d) {
}
}
void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
void MacroAssembler::load_heap_oop(const Address& s, Register d) {
if (UseCompressedOops) {
lduw(s, d, offset);
lduw(s, d);
decode_heap_oop(d);
} else {
ld_ptr(s, d, offset);
ld_ptr(s, d);
}
}
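
load_heap_oop() above drops the extra offset parameter but keeps the compressed-oops behaviour: a 32-bit narrow oop is loaded with lduw and then widened by decode_heap_oop(). A sketch of that decode step under assumed settings (shift 3, a made-up narrow-oop base); the real values depend on the VM's heap placement:

#include <stdint.h>
#include <stdio.h>

int main() {
  const uint64_t narrow_oop_base  = 0x0000000800000000ull; // assumed narrow-oop base
  const unsigned narrow_oop_shift = 3;                     // assumed: 8-byte object alignment

  uint32_t narrow = 0x00abcdefu;                           // value loaded with lduw
  uint64_t oop = narrow ? narrow_oop_base + ((uint64_t)narrow << narrow_oop_shift)
                        : 0;                               // a null narrow oop stays null
  printf("narrow 0x%08x -> oop 0x%016llx\n", narrow, (unsigned long long)oop);
  return 0;
}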
@@ -4714,7 +4688,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
void MacroAssembler::reinit_heapbase() {
if (UseCompressedOops) {
// call indirectly to solve generation ordering problem
Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
AddressLiteral base(Universe::narrow_oop_base_addr());
load_ptr_contents(base, G6_heapbase);
}
}