commit 46c7bd230b
Author: J. Duke
Date:   2017-07-05 16:52:01 +02:00

114 changed files with 3089 additions and 2076 deletions


@@ -30,3 +30,4 @@ aee93a8992d2389121eb610c00a86196f3e2b9b0 jdk7-b49
 c235f4a8559d196879c56af80159f67ee5d0e720 jdk7-b53
 2ef382b1bbd58a68e668391c6145a4b2066c5b96 jdk7-b54
 aea0ace7a1e43619800931d42bbf69c579361c2d jdk7-b55
+ba12117a5e6c918578d6b2a8c693232a33289024 jdk7-b56


@@ -68,6 +68,7 @@
     </li>
     <li><a href="#zip">Zip and Unzip</a> </li>
     <li><a href="#freetype">FreeType2 Fonts</a> </li>
+    <li><a href="#jibx">JIBX Libraries</a> </li>
     <li>Linux and Solaris:
         <ul>
             <li><a href="#cups">CUPS Include files</a> </li>
@@ -585,6 +586,11 @@
         Install or upgrade the <a href="#freetype">FreeType development
         package</a>.
     </li>
+    <li>
+        Install the
+        <a href="#jibx">JIBX Libraries</a>, set
+        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
+    </li>
     <li>
         Install
         <a href="#ant">Ant</a>,
@@ -650,6 +656,11 @@
         <a href="#cups">CUPS Include files</a>, set
         <tt><a href="#ALT_CUPS_HEADERS_PATH">ALT_CUPS_HEADERS_PATH</a></tt>.
     </li>
+    <li>
+        Install the
+        <a href="#jibx">JIBX Libraries</a>, set
+        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
+    </li>
    <li>
         Install
         <a href="#ant">Ant</a>,
@@ -745,6 +756,11 @@
         Install
         <a href="#dxsdk">Microsoft DirectX SDK</a>.
     </li>
+    <li>
+        Install the
+        <a href="#jibx">JIBX Libraries</a>, set
+        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
+    </li>
     <li>
         Install
         <a href="#ant">Ant</a>,
@@ -874,6 +890,27 @@
         fine for most JDK developers.
     </blockquote>
     <!-- ------------------------------------------------------ -->
+    <h4><a name="jibx">JIBX</a></h4>
+    <blockquote>
+        Version 1.1.5 of the JIBX libraries is required for building OpenJDK.
+        Namely, the following JAR files from the JIBX distribution package
+        are required:
+        <ul>
+            <li>bcel.jar
+            <li>jibx-bind.jar
+            <li>jibx-run.jar
+            <li>xpp3.jar
+        </ul>
+        <p>
+        You can download the package from the
+        <a href="http://jibx.sourceforge.net" target="_blank">JIBX site</a>.
+        <p>
+        You will need to set the
+        <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>
+        environment variable to refer to the place where the JAR files
+        listed above are located.
+    </blockquote>
+    <!-- ------------------------------------------------------ -->
     <h4><a name="compilers">Compilers</a></h4>
     <blockquote>
         <strong><a name="gcc">Linux gcc/binutils</a></strong>
@@ -1425,6 +1462,12 @@
         The default will refer to
         <tt>jdk/src/share/lib/security/cacerts</tt>.
     </dd>
+    <dt><tt><a name="ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt></dt>
+    <dd>
+        The location of the <a href="#jibx">JIBX libraries</a>.
+        The default value is
+        <tt>$(ALT_SLASH_JAVA)/devtools/share/jibx/lib</tt>.
+    </dd>
     <dt><a name="ALT_CUPS_HEADERS_PATH"><tt>ALT_CUPS_HEADERS_PATH</tt></a> </dt>
    <dd>
        The location of the CUPS header files.
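The JIBX requirement above boils down to one environment variable pointing at four JAR files. As a rough sketch (not part of this changeset; only the variable name, the default path, and the JAR names are taken from the text above, the rest is assumed, including the use of POSIX stat()), a standalone checker for that layout could look like:

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <sys/stat.h>

    int main() {
      // The build reads ALT_JIBX_LIBS_PATH; if unset it falls back to
      // $(ALT_SLASH_JAVA)/devtools/share/jibx/lib, per the table above.
      const char* path = std::getenv("ALT_JIBX_LIBS_PATH");
      if (path == NULL) {
        std::fprintf(stderr, "ALT_JIBX_LIBS_PATH is not set\n");
        return 1;
      }
      const char* jars[] = { "bcel.jar", "jibx-bind.jar", "jibx-run.jar", "xpp3.jar" };
      for (int i = 0; i < 4; i++) {
        std::string full = std::string(path) + "/" + jars[i];
        struct stat st;
        if (stat(full.c_str(), &st) != 0) {
          std::fprintf(stderr, "missing JIBX jar: %s\n", full.c_str());
          return 1;
        }
      }
      std::printf("all four JIBX jars found under %s\n", path);
      return 0;
    }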


@@ -1,7 +1,7 @@
 ^build/
 ^dist/
 ^nbproject/private/
-^src/share/tools/hsdis/bin/
+^src/share/tools/hsdis/build/
 ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
 ^src/share/tools/IdealGraphVisualizer/build/
 ^src/share/tools/IdealGraphVisualizer/dist/


@@ -30,3 +30,4 @@ dae503d9f04c1a11e182dbf7f770509c28dc0609 jdk7-b50
 032c6af894dae8d939b3dd31d82042549e7793e0 jdk7-b53
 fafab5d5349c7c066d677538db67a1ee0fb33bd2 jdk7-b54
 f8e839c086152da70d6ec5913ba6f9f509282e8d jdk7-b55
+a3fd9e40ff2e854f6169eb6d09d491a28634d04f jdk7-b56


@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009
 HS_MAJOR_VER=16
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=01
+HS_BUILD_NUMBER=02
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7


@@ -46,24 +46,28 @@ jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
+jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
+jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
 jprt.my.solaris.i586.jdk6=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
+jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
+jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 jprt.my.linux.i586=linux_i586


@@ -46,7 +46,7 @@ C_COMPILER_REV := \
 $(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
 # Pick which compiler is validated
-ifeq ($(JDK_MINOR_VERSION),6)
+ifeq ($(JRE_RELEASE_VER),1.6.0)
   # Validated compiler for JDK6 is SS11 (5.8)
   VALIDATED_COMPILER_REV   := 5.8
   VALIDATED_C_COMPILER_REV := 5.8
@@ -101,18 +101,9 @@ CFLAGS += ${SOLARIS_7_OR_LATER}
 # New architecture options started in SS12 (5.9), we need both styles to build.
 # The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
-# Note: SS12 default for 32bit sparc is now the same as v8plus, so the
-# settings below have changed all SS12 32bit sparc builds to be v8plus.
-# The older SS11 (5.8) settings have remained as they always have been.
-ifeq ($(TYPE),COMPILER2)
-  ARCHFLAG_OLD/sparc = -xarch=v8plus
-else
-  ifeq ($(TYPE),TIERED)
-    ARCHFLAG_OLD/sparc = -xarch=v8plus
-  else
-    ARCHFLAG_OLD/sparc = -xarch=v8
-  endif
-endif
+# Note: default for 32bit sparc is now the same as v8plus, so the
+# settings below have changed all 32bit sparc builds to be v8plus.
+ARCHFLAG_OLD/sparc = -xarch=v8plus
 ARCHFLAG_NEW/sparc = -m32 -xarch=sparc
 ARCHFLAG_OLD/sparcv9 = -xarch=v9
 ARCHFLAG_NEW/sparcv9 = -m64 -xarch=sparc


@@ -55,10 +55,16 @@ CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER2"
 CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" /D "COMPILER2"
 !endif
+!if "$(BUILDARCH)" == "i486"
+HOTSPOT_LIB_ARCH=i386
+!else
+HOTSPOT_LIB_ARCH=$(BUILDARCH)
+!endif
+
 # The following variables are defined in the generated local.make file.
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\""
-CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(BUILDARCH)\""
+CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""


@@ -25,24 +25,36 @@
 #include "incls/_precompiled.incl"
 #include "incls/_assembler_sparc.cpp.incl"

-// Implementation of Address
-
-Address::Address( addr_type t, int which ) {
-  switch (t) {
-   case extra_in_argument:
-   case extra_out_argument:
-     _base = t == extra_in_argument ? FP : SP;
-     _hi   = 0;
-// Warning:  In LP64 mode, _disp will occupy more than 10 bits.
-//           This is inconsistent with the other constructors but op
-//           codes such as ld or ldx, only access disp() to get their
-//           simm13 argument.
-     _disp = ((which - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
-    break;
-   default:
-    ShouldNotReachHere();
-    break;
-  }
-}
+// Convert the raw encoding form into the form expected by the
+// constructor for Address.
+Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
+  assert(scale == 0, "not supported");
+  RelocationHolder rspec;
+  if (disp_is_oop) {
+    rspec = Relocation::spec_simple(relocInfo::oop_type);
+  }
+  Register rindex = as_Register(index);
+  if (rindex != G0) {
+    Address madr(as_Register(base), rindex);
+    madr._rspec = rspec;
+    return madr;
+  } else {
+    Address madr(as_Register(base), disp);
+    madr._rspec = rspec;
+    return madr;
+  }
+}
+
+Address Argument::address_in_frame() const {
+  // Warning: In LP64 mode disp will occupy more than 10 bits, but
+  //          op codes such as ld or ldx, only access disp() to get
+  //          their simm13 argument.
+  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
+  if (is_in())
+    return Address(FP, disp); // In argument.
+  else
+    return Address(SP, disp); // Out argument.
+}

 static const char* argumentNames[][2] = {
@@ -614,16 +626,17 @@ void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
 }

 // This code sequence is relocatable to any address, even on LP64.
-void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file, int line ) {
+void MacroAssembler::jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
   assert_not_delayed();
   // Force fixed length sethi because NativeJump and NativeFarCall don't handle
   // variable length instruction streams.
-  sethi(a, /*ForceRelocatable=*/ true);
+  patchable_sethi(addrlit, temp);
+  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
   if (TraceJumps) {
 #ifndef PRODUCT
     // Must do the add here so relocation can find the remainder of the
     // value to be relocated.
-    add(a.base(), a.disp() + offset, a.base(), a.rspec(offset));
+    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
     save_frame(0);
     verify_thread();
     ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
@@ -652,15 +665,15 @@ void MacroAssembler::jumpl( Address& a, Register d, int offset, const char* file
     restore();
     jmpl(a.base(), G0, d);
 #else
-    jmpl(a, d, offset);
+    jmpl(a.base(), a.disp(), d);
 #endif /* PRODUCT */
   } else {
-    jmpl(a, d, offset);
+    jmpl(a.base(), a.disp(), d);
   }
 }

-void MacroAssembler::jump( Address& a, int offset, const char* file, int line ) {
-  jumpl( a, G0, offset, file, line );
+void MacroAssembler::jump(AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
+  jumpl(addrlit, temp, G0, offset, file, line);
 }
@@ -678,7 +691,8 @@ void MacroAssembler::set_varargs( Argument inArg, Register d ) {
     st_ptr(savePtr.as_register(), savePtr.address_in_frame());
   }
   // return the address of the first memory slot
-  add(inArg.address_in_frame(), d);
+  Address a = inArg.address_in_frame();
+  add(a.base(), a.disp(), d);
 }

 // Conditional breakpoint (for assertion checks in assembly code)
@@ -702,7 +716,6 @@ void MacroAssembler::flush_windows() {
 // offset to write to within the page. This minimizes bus traffic
 // due to cache line collision.
 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
-  Address mem_serialize_page(tmp1, os::get_memory_serialize_page());
   srl(thread, os::get_serialize_page_shift_count(), tmp2);
   if (Assembler::is_simm13(os::vm_page_size())) {
     and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
@@ -711,7 +724,7 @@ void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register t
     set((os::vm_page_size() - sizeof(int)), tmp1);
     and3(tmp2, tmp1, tmp2);
   }
-  load_address(mem_serialize_page);
+  set(os::get_memory_serialize_page(), tmp1);
   st(G0, tmp1, tmp2);
 }
@@ -830,10 +843,10 @@ void MacroAssembler::get_thread() {
   mov(G3, L2);                // avoid clobbering G3 also
   mov(G4, L5);                // avoid clobbering G4
 #ifdef ASSERT
-  Address last_get_thread_addr(L3, (address)&last_get_thread);
-  sethi(last_get_thread_addr);
+  AddressLiteral last_get_thread_addrlit(&last_get_thread);
+  set(last_get_thread_addrlit, L3);
   inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
-  st_ptr(L4, last_get_thread_addr);
+  st_ptr(L4, L3, 0);
 #endif
   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -919,13 +932,9 @@ void MacroAssembler::restore_thread(const Register thread_cache) {
 // %%% maybe get rid of [re]set_last_Java_frame
 void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
   assert_not_delayed();
-  Address flags(G2_thread,
-                0,
-                in_bytes(JavaThread::frame_anchor_offset()) +
-                in_bytes(JavaFrameAnchor::flags_offset()));
-  Address pc_addr(G2_thread,
-                  0,
-                  in_bytes(JavaThread::last_Java_pc_offset()));
+  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
+                JavaFrameAnchor::flags_offset());
+  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

   // Always set last_Java_pc and flags first because once last_Java_sp is visible
   // has_last_Java_frame is true and users will look at the rest of the fields.
@@ -977,22 +986,18 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Ja
 #endif // ASSERT
   assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
   add( last_java_sp, STACK_BIAS, G4_scratch );
-  st_ptr(G4_scratch, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
+  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
 #else
-  st_ptr(last_java_sp, Address(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset())));
+  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
 #endif // _LP64
 }

 void MacroAssembler::reset_last_Java_frame(void) {
   assert_not_delayed();
-  Address sp_addr(G2_thread, 0, in_bytes(JavaThread::last_Java_sp_offset()));
-  Address pc_addr(G2_thread,
-                  0,
-                  in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
-  Address flags(G2_thread,
-                0,
-                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
+  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
+  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
+  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

 #ifdef ASSERT
   // check that it WAS previously set
@@ -1063,7 +1068,7 @@ void MacroAssembler::check_and_forward_exception(Register scratch_reg)
   check_and_handle_popframe(scratch_reg);
   check_and_handle_earlyret(scratch_reg);

-  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
+  Address exception_addr(G2_thread, Thread::pending_exception_offset());
   ld_ptr(exception_addr, scratch_reg);
   br_null(scratch_reg,false,pt,L);
   delayed()->nop();
@@ -1186,7 +1191,7 @@ void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Re

 void MacroAssembler::get_vm_result(Register oop_result) {
   verify_thread();
-  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
+  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
   ld_ptr(    vm_result_addr, oop_result);
   st_ptr(G0, vm_result_addr);
   verify_oop(oop_result);
@@ -1195,7 +1200,7 @@ void MacroAssembler::get_vm_result(Register oop_result) {

 void MacroAssembler::get_vm_result_2(Register oop_result) {
   verify_thread();
-  Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset()));
+  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
   ld_ptr(vm_result_addr_2, oop_result);
   st_ptr(G0, vm_result_addr_2);
   verify_oop(oop_result);
@@ -1206,7 +1211,7 @@ void MacroAssembler::get_vm_result_2(Register oop_result) {
 // leave it undisturbed.
 void MacroAssembler::set_vm_result(Register oop_result) {
   verify_thread();
-  Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset()));
+  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
   verify_oop(oop_result);

 # ifdef ASSERT
@@ -1234,81 +1239,78 @@ void MacroAssembler::card_table_write(jbyte* byte_map_base,
 #else
   srl(obj, CardTableModRefBS::card_shift, obj);
 #endif
-  assert( tmp != obj, "need separate temp reg");
-  Address rs(tmp, (address)byte_map_base);
-  load_address(rs);
-  stb(G0, rs.base(), obj);
-}
-
-// %%% Note: The following six instructions have been moved,
-// unchanged, from assembler_sparc.inline.hpp.
-// They will be refactored at a later date.
-
-void MacroAssembler::sethi(intptr_t imm22a,
-                           Register d,
-                           bool ForceRelocatable,
-                           RelocationHolder const& rspec) {
-  Address adr( d, (address)imm22a, rspec );
-  MacroAssembler::sethi( adr, ForceRelocatable );
-}
-
-void MacroAssembler::sethi(Address& a, bool ForceRelocatable) {
+  assert(tmp != obj, "need separate temp reg");
+  set((address) byte_map_base, tmp);
+  stb(G0, tmp, obj);
+}
+
+
+void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
   address save_pc;
   int shiftcnt;
-  // if addr of local, do not need to load it
-  assert(a.base() != FP && a.base() != SP, "just use ld or st for locals");
 #ifdef _LP64
 # ifdef CHECK_DELAY
-  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
+  assert_not_delayed((char*) "cannot put two instructions in delay slot");
 # endif
   v9_dep();
-//  ForceRelocatable = 1;
   save_pc = pc();
-  if (a.hi32() == 0 && a.low32() >= 0) {
-    Assembler::sethi(a.low32(), a.base(), a.rspec());
+
+  int msb32 = (int) (addrlit.value() >> 32);
+  int lsb32 = (int) (addrlit.value());
+
+  if (msb32 == 0 && lsb32 >= 0) {
+    Assembler::sethi(lsb32, d, addrlit.rspec());
   }
-  else if (a.hi32() == -1) {
-    Assembler::sethi(~a.low32(), a.base(), a.rspec());
-    xor3(a.base(), ~low10(~0), a.base());
+  else if (msb32 == -1) {
+    Assembler::sethi(~lsb32, d, addrlit.rspec());
+    xor3(d, ~low10(~0), d);
   }
   else {
-    Assembler::sethi(a.hi32(), a.base(), a.rspec() ); // 22
-    if ( a.hi32() & 0x3ff )                        // Any bits?
-      or3( a.base(), a.hi32() & 0x3ff ,a.base() ); // High 32 bits are now in low 32
-    if ( a.low32() & 0xFFFFFC00 ) {                // done?
-      if( (a.low32() >> 20) & 0xfff ) {            // Any bits set?
-        sllx(a.base(), 12, a.base());              // Make room for next 12 bits
-        or3( a.base(), (a.low32() >> 20) & 0xfff,a.base() ); // Or in next 12
-        shiftcnt = 0;                              // We already shifted
+    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
+    if (msb32 & 0x3ff)                            // Any bits?
+      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
+    if (lsb32 & 0xFFFFFC00) {                     // done?
+      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
+        sllx(d, 12, d);                           // Make room for next 12 bits
+        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
+        shiftcnt = 0;                             // We already shifted
       }
       else
         shiftcnt = 12;
-      if( (a.low32() >> 10) & 0x3ff ) {
-        sllx(a.base(), shiftcnt+10, a.base());  // Make room for last 10 bits
-        or3( a.base(), (a.low32() >> 10) & 0x3ff,a.base() ); // Or in next 10
+      if ((lsb32 >> 10) & 0x3ff) {
+        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
+        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
       }
       else
        shiftcnt = 10;
-      sllx(a.base(), shiftcnt+10 , a.base());   // Shift leaving disp field 0'd
+      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
     }
     else
-      sllx( a.base(), 32, a.base() );
+      sllx(d, 32, d);
   }
-  // Pad out the instruction sequence so it can be
-  // patched later.
-  if ( ForceRelocatable || (a.rtype() != relocInfo::none &&
-                            a.rtype() != relocInfo::runtime_call_type) ) {
-    while ( pc() < (save_pc + (7 * BytesPerInstWord )) )
+  // Pad out the instruction sequence so it can be patched later.
+  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
+                           addrlit.rtype() != relocInfo::runtime_call_type)) {
+    while (pc() < (save_pc + (7 * BytesPerInstWord)))
       nop();
   }
 #else
-  Assembler::sethi(a.hi(), a.base(), a.rspec());
+  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
 #endif
 }
+
+void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
+  internal_sethi(addrlit, d, false);
+}
+
+void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
+  internal_sethi(addrlit, d, true);
+}

 int MacroAssembler::size_of_sethi(address a, bool worst_case) {
 #ifdef _LP64
   if (worst_case) return 7;
@@ -1339,61 +1341,50 @@ int MacroAssembler::worst_case_size_of_set() {
   return size_of_sethi(NULL, true) + 1;
 }

-void MacroAssembler::set(intptr_t value, Register d,
-                         RelocationHolder const& rspec) {
-  Address val( d, (address)value, rspec);
-
-  if ( rspec.type() == relocInfo::none ) {
+
+void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
+  intptr_t value = addrlit.value();
+
+  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
     // can optimize
     if (-4096 <= value && value <= 4095) {
       or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
       return;
     }
     if (inv_hi22(hi22(value)) == value) {
-      sethi(val);
+      sethi(addrlit, d);
       return;
     }
   }
-  assert_not_delayed( (char *)"cannot put two instructions in delay slot" );
-  sethi( val );
-  if (rspec.type() != relocInfo::none || (value & 0x3ff) != 0) {
-    add( d, value & 0x3ff, d, rspec);
+  assert_not_delayed((char*) "cannot put two instructions in delay slot");
+  internal_sethi(addrlit, d, ForceRelocatable);
+  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
+    add(d, addrlit.low10(), d, addrlit.rspec());
   }
 }

-void MacroAssembler::setsw(int value, Register d,
-                           RelocationHolder const& rspec) {
-  Address val( d, (address)value, rspec);
-  if ( rspec.type() == relocInfo::none ) {
-    // can optimize
-    if (-4096 <= value && value <= 4095) {
-      or3(G0, value, d);
-      return;
-    }
-    if (inv_hi22(hi22(value)) == value) {
-      sethi( val );
-#ifndef _LP64
-      if ( value < 0 ) {
-        assert_not_delayed();
-        sra (d, G0, d);
-      }
-#endif
-      return;
-    }
-  }
-  assert_not_delayed();
-  sethi( val );
-  add( d, value & 0x3ff, d, rspec);
-  // (A negative value could be loaded in 2 insns with sethi/xor,
-  // but it would take a more complex relocation.)
-#ifndef _LP64
-  if ( value < 0)
-    sra(d, G0, d);
-#endif
+void MacroAssembler::set(const AddressLiteral& al, Register d) {
+  internal_set(al, d, false);
 }

-// %%% End of moved six set instructions.
+void MacroAssembler::set(intptr_t value, Register d) {
+  AddressLiteral al(value);
+  internal_set(al, d, false);
+}
+
+void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
+  AddressLiteral al(addr, rspec);
+  internal_set(al, d, false);
+}
+
+void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
+  internal_set(al, d, true);
+}
+
+void MacroAssembler::patchable_set(intptr_t value, Register d) {
+  AddressLiteral al(value);
+  internal_set(al, d, true);
+}

 void MacroAssembler::set64(jlong value, Register d, Register tmp) {
@@ -1512,17 +1503,17 @@ void MacroAssembler::save_frame_and_mov(int extraWords,
 }

-Address MacroAssembler::allocate_oop_address(jobject obj, Register d) {
+AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int oop_index = oop_recorder()->allocate_index(obj);
-  return Address(d, address(obj), oop_Relocation::spec(oop_index));
+  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
 }

-Address MacroAssembler::constant_oop_address(jobject obj, Register d) {
+AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int oop_index = oop_recorder()->find_index(obj);
-  return Address(d, address(obj), oop_Relocation::spec(oop_index));
+  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
 }

 void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
@@ -1682,7 +1673,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
   sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);

   // Call indirectly to solve generation ordering problem
-  Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
+  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

   // Make some space on stack above the current register window.
   // Enough to hold 8 64-bit registers.
@@ -1718,7 +1709,7 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char
   sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);

   // Call indirectly to solve generation ordering problem
-  Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
+  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

   // Make some space on stack above the current register window.
   // Enough to hold 8 64-bit registers.
@@ -1772,11 +1763,7 @@ void MacroAssembler::verify_oop_subroutine() {
   { // count number of verifies
     Register O2_adr   = O2;
     Register O3_accum = O3;
-    Address count_addr( O2_adr, (address) StubRoutines::verify_oop_count_addr() );
-    sethi(count_addr);
-    ld(count_addr, O3_accum);
-    inc(O3_accum);
-    st(O3_accum, count_addr);
+    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
   }

   Register O2_mask = O2;
@@ -1870,8 +1857,8 @@ void MacroAssembler::verify_oop_subroutine() {
   assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

   // call indirectly to solve generation ordering problem
-  Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
-  load_ptr_contents(a, O5);
+  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
+  load_ptr_contents(al, O5);
   jmpl(O5, 0, O7);
   delayed()->nop();
 }
@@ -1891,7 +1878,7 @@ void MacroAssembler::stop(const char* msg) {
   assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

   // call indirectly to solve generation ordering problem
-  Address a(O5, (address)StubRoutines::Sparc::stop_subroutine_entry_address());
+  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
   load_ptr_contents(a, O5);
   jmpl(O5, 0, O7);
   delayed()->nop();
@@ -2003,7 +1990,7 @@ void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresul
   subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
   Label no_extras;
   br( negative, true, pt, no_extras ); // if neg, clear reg
-  delayed()->set( 0, Rresult);         // annuled, so only if taken
+  delayed()->set(0, Rresult);          // annuled, so only if taken
   bind( no_extras );
 }
@@ -2623,7 +2610,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
     return RegisterOrConstant(value + offset);

   // load indirectly to solve generation ordering problem
-  Address a(tmp, (address) delayed_value_addr);
+  AddressLiteral a(delayed_value_addr);
   load_ptr_contents(a, tmp);

 #ifdef ASSERT
@@ -3107,21 +3094,21 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   delayed()->nop();

   load_klass(obj_reg, temp_reg);
-  ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
+  ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
   xor3(mark_reg, temp_reg, temp_reg);
   andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
   if (counters != NULL) {
     cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
     // Reload mark_reg as we may need it later
-    ld_ptr(Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()), mark_reg);
+    ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
   }
   brx(Assembler::equal, true, Assembler::pt, done);
   delayed()->nop();

   Label try_revoke_bias;
   Label try_rebias;
-  Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
+  Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");

   // At this point we know that the header has the bias pattern and
@@ -3185,7 +3172,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
   load_klass(obj_reg, temp_reg);
-  ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
+  ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
   casn(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
@@ -3216,7 +3203,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
   load_klass(obj_reg, temp_reg);
-  ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
+  ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
   casn(mark_addr.base(), mark_reg, temp_reg);
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
@@ -3283,7 +3270,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
                                           Register Rbox, Register Rscratch,
                                           BiasedLockingCounters* counters,
                                           bool try_bias) {
-  Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
+  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

   verify_oop(Roop);
   Label done ;
@@ -3386,7 +3373,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
       // If m->owner != null goto IsLocked
       // Pessimistic form: Test-and-CAS vs CAS
       // The optimistic form avoids RTS->RTO cache line upgrades.
-      ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
+      ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
       andcc  (Rscratch, Rscratch, G0) ;
       brx    (Assembler::notZero, false, Assembler::pn, done) ;
       delayed()->nop() ;
@@ -3482,7 +3469,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
       // Test-and-CAS vs CAS
      // Pessimistic form avoids futile (doomed) CAS attempts
       // The optimistic form avoids RTS->RTO cache line upgrades.
-      ld_ptr (Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
+      ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
       andcc  (Rscratch, Rscratch, G0) ;
       brx    (Assembler::notZero, false, Assembler::pn, done) ;
       delayed()->nop() ;
@@ -3508,7 +3495,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
                                             Register Rbox, Register Rscratch,
                                             bool try_bias) {
-   Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
+   Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

    Label done ;
@@ -3568,14 +3555,14 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
       // Note that we use 1-0 locking by default for the inflated case.  We
       // close the resultant (and rare) race by having contented threads in
       // monitorenter periodically poll _owner.
-      ld_ptr (Address(Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2), Rscratch) ;
-      ld_ptr (Address(Rmark, 0, ObjectMonitor::recursions_offset_in_bytes()-2), Rbox) ;
+      ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+      ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
       xor3   (Rscratch, G2_thread, Rscratch) ;
       orcc   (Rbox, Rscratch, Rbox) ;
       brx    (Assembler::notZero, false, Assembler::pn, done) ;
       delayed()->
-      ld_ptr (Address (Rmark, 0, ObjectMonitor::EntryList_offset_in_bytes()-2), Rscratch) ;
-      ld_ptr (Address (Rmark, 0, ObjectMonitor::cxq_offset_in_bytes()-2), Rbox) ;
+      ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
+      ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
       orcc   (Rbox, Rscratch, G0) ;
       if (EmitSync & 65536) {
          Label LSucc ;
@@ -3583,12 +3570,12 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
          delayed()->nop() ;
          br (Assembler::always, false, Assembler::pt, done) ;
          delayed()->
-         st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+         st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
          bind   (LSucc) ;
-         st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+         st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
          if (os::is_MP()) { membar (StoreLoad) ; }
-         ld_ptr (Address (Rmark, 0, ObjectMonitor::succ_offset_in_bytes()-2), Rscratch) ;
+         ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
          andcc  (Rscratch, Rscratch, G0) ;
          brx    (Assembler::notZero, false, Assembler::pt, done) ;
          delayed()-> andcc (G0, G0, G0) ;
@@ -3606,7 +3593,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
          delayed()->nop() ;
          br (Assembler::always, false, Assembler::pt, done) ;
          delayed()->
-         st_ptr (G0, Address (Rmark, 0, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+         st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
       }

       bind   (LStacked) ;
@@ -4005,20 +3992,26 @@ void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
   bind(L);
 }

-void MacroAssembler::inc_counter(address counter_ptr, Register Rtmp1, Register Rtmp2) {
-  Address counter_addr(Rtmp1, counter_ptr);
-  load_contents(counter_addr, Rtmp2);
+void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
+  AddressLiteral addrlit(counter_addr);
+  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
+  Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
+  ld(addr, Rtmp2);
   inc(Rtmp2);
-  store_contents(Rtmp2, counter_addr);
+  st(Rtmp2, addr);
+}
+
+void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
+  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
 }

 SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
    _masm = masm;
-   Address flag(temp, (address)flag_addr, relocInfo::none);
-   _masm->sethi(flag);
-   _masm->ldub(flag, temp);
+   AddressLiteral flag(flag_addr);
+   _masm->sethi(flag, temp);
+   _masm->ldub(temp, flag.low10(), temp);
    _masm->tst(temp);
    _masm->br(condition, false, Assembler::pt, _label);
    _masm->delayed()->nop();
@@ -4333,8 +4326,8 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
 #else
   masm.srl(O0, CardTableModRefBS::card_shift, O0);
 #endif
-  Address rs(O1, (address)byte_map_base);
-  masm.load_address(rs); // O1 := <card table base>
+  AddressLiteral addrlit(byte_map_base);
+  masm.set(addrlit, O1); // O1 := <card table base>
   masm.ldub(O0, O1, O2); // O2 := [O0 + O1]

   masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
@@ -4494,10 +4487,9 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val
 #else
   post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr);
 #endif
-  assert( tmp != store_addr, "need separate temp reg");
-  Address rs(tmp, (address)bs->byte_map_base);
-  load_address(rs);
-  stb(G0, rs.base(), store_addr);
+  assert(tmp != store_addr, "need separate temp reg");
+  set(bs->byte_map_base, tmp);
+  stb(G0, tmp, store_addr);
 }

 bind(filtered);
@@ -4516,24 +4508,6 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
   card_table_write(bs->byte_map_base, tmp, store_addr);
 }

-// Loading values by size and signed-ness
-void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2, Register d,
-                                      int size_in_bytes, bool is_signed) {
-  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
-  case ~8:  // fall through:
-  case  8:  ld_long( s1, s2, d ); break;
-  case ~4:  ldsw(    s1, s2, d ); break;
-  case  4:  lduw(    s1, s2, d ); break;
-  case ~2:  ldsh(    s1, s2, d ); break;
-  case  2:  lduh(    s1, s2, d ); break;
-  case ~1:  ldsb(    s1, s2, d ); break;
-  case  1:  ldub(    s1, s2, d ); break;
-  default:  ShouldNotReachHere();
-  }
-}
-
 void MacroAssembler::load_klass(Register src_oop, Register klass) {
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
@@ -4563,12 +4537,12 @@ void MacroAssembler::store_klass_gap(Register s, Register d) {
   }
 }

-void MacroAssembler::load_heap_oop(const Address& s, Register d, int offset) {
+void MacroAssembler::load_heap_oop(const Address& s, Register d) {
   if (UseCompressedOops) {
-    lduw(s, d, offset);
+    lduw(s, d);
     decode_heap_oop(d);
   } else {
-    ld_ptr(s, d, offset);
+    ld_ptr(s, d);
   }
 }

@@ -4714,7 +4688,7 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops) {
     // call indirectly to solve generation ordering problem
-    Address base(G6_heapbase, (address)Universe::narrow_oop_base_addr());
+    AddressLiteral base(Universe::narrow_oop_base_addr());
     load_ptr_contents(base, G6_heapbase);
   }
 }
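One note on the internal_sethi() rewrite in this file: on LP64 a 64-bit literal is built up with sethi plus or3/sllx steps in 22/10/12/10-bit chunks, leaving the low 10 bits for a final add(d, addrlit.low10(), d). The following sketch (not from the changeset) replays that general-case splitting in plain C++ to confirm the chunks reassemble the original value; it assumes the all-bits-populated path, skipping the small-constant and sign-extension shortcuts:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t value = 0x123456789ABCDEF0ULL;
      uint32_t msb32 = (uint32_t)(value >> 32);
      uint32_t lsb32 = (uint32_t)value;

      uint64_t d = msb32;                       // sethi + or3: msb 32 bits in lsb 32
      d = (d << 12) | ((lsb32 >> 20) & 0xFFF);  // sllx 12, or3: next 12 bits
      d = (d << 10) | ((lsb32 >> 10) & 0x3FF);  // sllx 10, or3: next 10 bits
      d =  d << 10;                             // sllx 10: leave disp field zeroed
      d |= lsb32 & 0x3FF;                       // add(d, addrlit.low10(), d)

      assert(d == value);
      std::printf("reassembled 0x%016llx\n", (unsigned long long) d);
      return 0;
    }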


@@ -274,21 +274,90 @@ REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is comi

 class Address VALUE_OBJ_CLASS_SPEC {
  private:
-  Register _base;
-#ifdef _LP64
-  int      _hi32;  // bits 63::32
-  int      _low32; // bits 31::0
-#endif
-  int      _hi;
-  int      _disp;
-  RelocationHolder _rspec;
-
-  RelocationHolder rspec_from_rtype(relocInfo::relocType rt, address a = NULL) {
-    switch (rt) {
+  Register           _base;           // Base register.
+  RegisterOrConstant _index_or_disp;  // Index register or constant displacement.
+  RelocationHolder   _rspec;
+
+ public:
+  Address() : _base(noreg), _index_or_disp(noreg) {}
+
+  Address(Register base, RegisterOrConstant index_or_disp)
+    : _base(base),
+      _index_or_disp(index_or_disp) {
+  }
+
+  Address(Register base, Register index)
+    : _base(base),
+      _index_or_disp(index) {
+  }
+
+  Address(Register base, int disp)
+    : _base(base),
+      _index_or_disp(disp) {
+  }
+
+#ifdef ASSERT
+  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+  Address(Register base, ByteSize disp)
+    : _base(base),
+      _index_or_disp(in_bytes(disp)) {
+  }
+#endif
+
+  // accessors
+  Register base()      const { return _base; }
+  Register index()     const { return _index_or_disp.as_register(); }
+  int      disp()      const { return _index_or_disp.as_constant(); }
+
+  bool     has_index() const { return _index_or_disp.is_register(); }
+  bool     has_disp()  const { return _index_or_disp.is_constant(); }
+
+  const relocInfo::relocType rtype() { return _rspec.type(); }
+  const RelocationHolder&    rspec() { return _rspec; }
+
+  RelocationHolder rspec(int offset) const {
+    return offset == 0 ? _rspec : _rspec.plus(offset);
+  }
+
+  inline bool is_simm13(int offset = 0);  // check disp+offset for overflow
+
+  Address plus_disp(int plusdisp) const {  // bump disp by a small amount
+    assert(_index_or_disp.is_constant(), "must have a displacement");
+    Address a(base(), disp() + plusdisp);
+    return a;
+  }
+
+  Address after_save() const {
+    Address a = (*this);
+    a._base = a._base->after_save();
+    return a;
+  }
+
+  Address after_restore() const {
+    Address a = (*this);
+    a._base = a._base->after_restore();
+    return a;
+  }
+
+  // Convert the raw encoding form into the form expected by the
+  // constructor for Address.
+  static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
+
+  friend class Assembler;
+};
+
+
+class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+ private:
+  address          _address;
+  RelocationHolder _rspec;
+
+  RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
+    switch (rtype) {
     case relocInfo::external_word_type:
-      return external_word_Relocation::spec(a);
+      return external_word_Relocation::spec(addr);
     case relocInfo::internal_word_type:
-      return internal_word_Relocation::spec(a);
+      return internal_word_Relocation::spec(addr);
 #ifdef _LP64
     case relocInfo::opt_virtual_call_type:
       return opt_virtual_call_Relocation::spec();
@@ -305,127 +374,86 @@ class Address VALUE_OBJ_CLASS_SPEC {
     }
   }

+ protected:
+  // creation
+  AddressLiteral() : _address(NULL), _rspec(NULL) {}
+
  public:
-  Address(Register b, address a, relocInfo::relocType rt = relocInfo::none)
-    : _rspec(rspec_from_rtype(rt, a))
-  {
-    _base  = b;
-#ifdef _LP64
-    _hi32  = (intptr_t)a >> 32;    // top 32 bits in 64 bit word
-    _low32 = (intptr_t)a & ~0;     // low 32 bits in 64 bit word
-#endif
-    _hi    = (intptr_t)a & ~0x3ff; // top 22 bits in low word
-    _disp  = (intptr_t)a &  0x3ff; // bottom 10 bits
-  }
-
-  Address(Register b, address a, RelocationHolder const& rspec)
-    : _rspec(rspec)
-  {
-    _base  = b;
-#ifdef _LP64
-    _hi32  = (intptr_t)a >> 32;    // top 32 bits in 64 bit word
-    _low32 = (intptr_t)a & ~0;     // low 32 bits in 64 bit word
-#endif
-    _hi    = (intptr_t)a & ~0x3ff; // top 22 bits
-    _disp  = (intptr_t)a &  0x3ff; // bottom 10 bits
-  }
-
-  Address(Register b, intptr_t h, intptr_t d, RelocationHolder const& rspec = RelocationHolder())
-    : _rspec(rspec)
-  {
-    _base = b;
-#ifdef _LP64
-    // [RGV] Put in Assert to force me to check usage of this constructor
-    assert( h == 0, "Check usage of this constructor" );
-    _hi32 = h;
-    _low32 = d;
-    _hi = h;
-    _disp = d;
-#else
-    _hi = h;
-    _disp = d;
-#endif
-  }
-
-  Address()
-    : _rspec(RelocationHolder())
-  {
-    _base = G0;
-#ifdef _LP64
-    _hi32 = 0;
-    _low32 = 0;
-#endif
-    _hi = 0;
-    _disp = 0;
-  }
-
-  // fancier constructors
-
-  enum addr_type {
-    extra_in_argument,  // in the In registers
-    extra_out_argument  // in the Outs
-  };
-
-  Address( addr_type, int );
-
-  // accessors
-  Register base() const { return _base; }
-#ifdef _LP64
-  int  hi32()     const { return _hi32; }
-  int  low32()    const { return _low32; }
-#endif
-  int  hi()       const { return _hi; }
-  int  disp()     const { return _disp; }
-#ifdef _LP64
-  intptr_t value() const { return ((intptr_t)_hi32 << 32) |
-                                  (intptr_t)(uint32_t)_low32; }
-#else
-  int  value()    const { return _hi | _disp; }
-#endif
-  const relocInfo::relocType rtype() { return _rspec.type(); }
-  const RelocationHolder&    rspec() { return _rspec; }
-
-  RelocationHolder rspec(int offset) const {
-    return offset == 0 ? _rspec : _rspec.plus(offset);
-  }
-
-  inline bool is_simm13(int offset = 0); // check disp+offset for overflow
-
-  Address plus_disp(int disp) const {    // bump disp by a small amount
-    Address a = (*this);
-    a._disp += disp;
-    return a;
-  }
-
-  Address split_disp() const {           // deal with disp overflow
-    Address a = (*this);
-    int hi_disp = _disp & ~0x3ff;
-    if (hi_disp != 0) {
-      a._disp -= hi_disp;
-      a._hi   += hi_disp;
-    }
-    return a;
-  }
-
-  Address after_save() const {
-    Address a = (*this);
-    a._base = a._base->after_save();
-    return a;
-  }
-
-  Address after_restore() const {
-    Address a = (*this);
-    a._base = a._base->after_restore();
-    return a;
-  }
-
-  friend class Assembler;
+  AddressLiteral(address addr, RelocationHolder const& rspec)
+    : _address(addr),
+      _rspec(rspec) {}
+
+  // Some constructors to avoid casting at the call site.
+  AddressLiteral(jobject obj, RelocationHolder const& rspec)
+    : _address((address) obj),
+      _rspec(rspec) {}
+
+  AddressLiteral(intptr_t value, RelocationHolder const& rspec)
+    : _address((address) value),
+      _rspec(rspec) {}
+
+  AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  // Some constructors to avoid casting at the call site.
+  AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+#ifdef _LP64
+  // 32-bit complains about a multiple declaration for int*.
+  AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+#endif
+
+  AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  intptr_t value() const { return (intptr_t) _address; }
+
+  int low10() const;
+
+  const relocInfo::relocType rtype() const { return _rspec.type(); }
+  const RelocationHolder&    rspec() const { return _rspec; }
+
+  RelocationHolder rspec(int offset) const {
+    return offset == 0 ? _rspec : _rspec.plus(offset);
+  }
 };

 inline Address RegisterImpl::address_in_saved_window() const {
-   return (Address(SP, 0, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
+   return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
 }
@@ -495,11 +523,7 @@ class Argument VALUE_OBJ_CLASS_SPEC {
   // When applied to a register-based argument, give the corresponding address
   // into the 6-word area "into which callee may store register arguments"
   // (This is a different place than the corresponding register-save area location.)
-  Address address_in_frame() const {
-    return Address( is_in()   ? Address::extra_in_argument
-                              : Address::extra_out_argument,
-                    _number );
-  }
+  Address address_in_frame() const;

   // debugging
   const char* name() const;
@@ -521,6 +545,7 @@ class Assembler : public AbstractAssembler {
   friend class AbstractAssembler;
+  friend class AddressLiteral;

   // code patchers need various routines like inv_wdisp()
   friend class NativeInstruction;
@@ -1093,11 +1118,11 @@ public:
   // pp 135 (addc was addx in v8)

-  inline void add( Register s1, Register s2, Register d );
-  inline void add( Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
-  inline void add( Register s1, int simm13a, Register d, RelocationHolder const& rspec);
-  inline void add( Register s1, RegisterOrConstant s2, Register d, int offset = 0);
-  inline void add( const Address& a, Register d, int offset = 0);
+  inline void add(Register s1, Register s2, Register d );
+  inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
+  inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
+  inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
+  inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }

   void addcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void addcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
@@ -1252,14 +1277,12 @@ public:
   void jmpl( Register s1, Register s2, Register d );
   void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );

-  inline void jmpl( Address& a, Register d, int offset = 0);
-
   // 171

-  inline void ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d );
-  inline void ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d );
-  inline void ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
+  inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
+  inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
+  inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);

   inline void ldfsr(  Register s1, Register s2 );
@@ -1303,15 +1326,20 @@ public:
   inline void ldd(   Register s1, Register s2, Register d );
   inline void ldd(   Register s1, int simm13a, Register d);

-  inline void ldsb( const Address& a, Register d, int offset = 0 );
-  inline void ldsh( const Address& a, Register d, int offset = 0 );
-  inline void ldsw( const Address& a, Register d, int offset = 0 );
-  inline void ldub( const Address& a, Register d, int offset = 0 );
-  inline void lduh( const Address& a, Register d, int offset = 0 );
-  inline void lduw( const Address& a, Register d, int offset = 0 );
-  inline void ldx(  const Address& a, Register d, int offset = 0 );
-  inline void ld(   const Address& a, Register d, int offset = 0 );
-  inline void ldd(  const Address& a, Register d, int offset = 0 );
+#ifdef ASSERT
+  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+  inline void ld(Register s1, ByteSize simm13a, Register d);
+#endif
+
+  inline void ldsb(const Address& a, Register d, int offset = 0);
+  inline void ldsh(const Address& a, Register d, int offset = 0);
+  inline void ldsw(const Address& a, Register d, int offset = 0);
+  inline void ldub(const Address& a, Register d, int offset = 0);
+  inline void lduh(const Address& a, Register d, int offset = 0);
+  inline void lduw(const Address& a, Register d, int offset = 0);
+  inline void ldx( const Address& a, Register d, int offset = 0);
+  inline void ld(  const Address& a, Register d, int offset = 0);
+  inline void ldd( const Address& a, Register d, int offset = 0);

   inline void ldub(  Register s1, RegisterOrConstant s2, Register d );
   inline void ldsb(  Register s1, RegisterOrConstant s2, Register d );
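The comment above is worth unpacking: ByteSize exists so that byte offsets get their own type in debug builds and cannot be confused with plain ints, while costing nothing in product builds. A simplified sketch of the idiom (the real definitions, in HotSpot's utilities/sizes.hpp, carry more operators; treat this as illustrative):

// Sketch of the ByteSize idiom, simplified for illustration.
#ifdef ASSERT
class ByteSize {
  int _size;
 public:
  explicit ByteSize(int size) : _size(size) {}
  friend int in_bytes(ByteSize x);
};
inline ByteSize in_ByteSize(int size) { return ByteSize(size); }
inline int      in_bytes(ByteSize x)  { return x._size; }
#else
typedef int ByteSize;
inline ByteSize in_ByteSize(int size) { return size; }
inline int      in_bytes(ByteSize x)  { return x; }
#endif

That split is why the new overloads below appear only under #ifdef ASSERT: in product builds ByteSize is already int, and a separate overload would be a redefinition.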
@@ -1536,6 +1564,11 @@ public:
   inline void std(  Register d, Register s1, Register s2 );
   inline void std(  Register d, Register s1, int simm13a);

+#ifdef ASSERT
+  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+  inline void st(Register d, Register s1, ByteSize simm13a);
+#endif
+
   inline void stb(  Register d, const Address& a, int offset = 0 );
   inline void sth(  Register d, const Address& a, int offset = 0 );
   inline void stw(  Register d, const Address& a, int offset = 0 );
@@ -1684,8 +1717,8 @@ class RegistersForDebugging : public StackObj {
 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)

-#define JUMP(a, off)     jump(a, off, __FILE__, __LINE__)
-#define JUMPL(a, d, off) jumpl(a, d, off, __FILE__, __LINE__)
+#define JUMP(a, temp, off)     jump(a, temp, off, __FILE__, __LINE__)
+#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)


 class MacroAssembler: public Assembler {
@@ -1830,17 +1863,26 @@ class MacroAssembler: public Assembler {
 #endif

   // sethi Macro handles optimizations and relocations
-  void sethi( Address& a, bool ForceRelocatable = false );
-  void sethi( intptr_t imm22a, Register d, bool ForceRelocatable = false, RelocationHolder const& rspec = RelocationHolder());
+private:
+  void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
+public:
+  void sethi(const AddressLiteral& addrlit, Register d);
+  void patchable_sethi(const AddressLiteral& addrlit, Register d);

   // compute the size of a sethi/set
   static int size_of_sethi( address a, bool worst_case = false );
   static int worst_case_size_of_set();

   // set may be either setsw or setuw (high 32 bits may be zero or sign)
-  void set( intptr_t value, Register d, RelocationHolder const& rspec = RelocationHolder() );
-  void setsw( int value, Register d, RelocationHolder const& rspec = RelocationHolder() );
-  void set64( jlong value, Register d, Register tmp);
+private:
+  void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
+public:
+  void set(const AddressLiteral& addrlit, Register d);
+  void set(intptr_t value, Register d);
+  void set(address addr, Register d, RelocationHolder const& rspec);
+  void patchable_set(const AddressLiteral& addrlit, Register d);
+  void patchable_set(intptr_t value, Register d);
+  void set64(jlong value, Register d, Register tmp);

   // sign-extend 32 to 64
   inline void signx( Register s, Register d ) { sra( s, G0, d); }
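Background for the sethi/set reshuffle above: SPARC builds a 32-bit constant from two instructions, a sethi that installs the top 22 bits and a following or/add (or a load/store displacement) that supplies the bottom 10. The patchable_ variants force this full two-instruction form even when a shorter sequence would do, so patching code can rely on a fixed layout. A self-contained worked example of the split:

// Worked example, illustrative only (not part of the patch).
#include <cassert>
#include <cstdio>

int main() {
  unsigned value = 0x12345678;
  unsigned hi22  = value >> 10;    // what sethi encodes (top 22 bits)
  unsigned low10 = value & 0x3ff;  // what the second instruction supplies
  assert(((hi22 << 10) | low10) == value);
  printf("sethi %%hi(0x%x), reg ; or reg, 0x%x, reg\n", hi22 << 10, low10);
  return 0;
}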
@@ -1930,24 +1972,22 @@ class MacroAssembler: public Assembler {
   inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }

   // address pseudos: make these names unlike instruction names to avoid confusion
-  inline void split_disp( Address& a, Register temp );
   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
-  inline void load_address( Address& a, int offset = 0 );
-  inline void load_contents( Address& a, Register d, int offset = 0 );
-  inline void load_ptr_contents( Address& a, Register d, int offset = 0 );
-  inline void store_contents( Register s, Address& a, int offset = 0 );
-  inline void store_ptr_contents( Register s, Address& a, int offset = 0 );
-  inline void jumpl_to( Address& a, Register d, int offset = 0 );
-  inline void jump_to( Address& a, int offset = 0 );
-  inline void jump_indirect_to( Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0 );
+  inline void load_contents(AddressLiteral& addrlit, Register d, int offset = 0);
+  inline void load_ptr_contents(AddressLiteral& addrlit, Register d, int offset = 0);
+  inline void store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
+  inline void store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
+  inline void jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
+  inline void jump_to(AddressLiteral& addrlit, Register temp, int offset = 0);
+  inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);

   // ring buffer traceable jumps

   void jmp2( Register r1, Register r2, const char* file, int line );
   void jmp ( Register r1, int offset, const char* file, int line );
-  void jumpl( Address& a, Register d, int offset, const char* file, int line );
-  void jump ( Address& a, int offset, const char* file, int line );
+  void jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
+  void jump (AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);

   // argument pseudos:
@@ -1972,29 +2012,31 @@ class MacroAssembler: public Assembler {
   // Functions for isolating 64 bit loads for LP64
   // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
   // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
-  inline void ld_ptr( Register s1, Register s2, Register d );
-  inline void ld_ptr( Register s1, int simm13a, Register d);
-  inline void ld_ptr( Register s1, RegisterOrConstant s2, Register d );
-  inline void ld_ptr( const Address& a, Register d, int offset = 0 );
-  inline void st_ptr( Register d, Register s1, Register s2 );
-  inline void st_ptr( Register d, Register s1, int simm13a);
-  inline void st_ptr( Register d, Register s1, RegisterOrConstant s2 );
-  inline void st_ptr( Register d, const Address& a, int offset = 0 );
+  inline void ld_ptr(Register s1, Register s2, Register d);
+  inline void ld_ptr(Register s1, int simm13a, Register d);
+  inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
+  inline void ld_ptr(const Address& a, Register d, int offset = 0);
+  inline void st_ptr(Register d, Register s1, Register s2);
+  inline void st_ptr(Register d, Register s1, int simm13a);
+  inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
+  inline void st_ptr(Register d, const Address& a, int offset = 0);
+
+#ifdef ASSERT
+  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+  inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
+  inline void st_ptr(Register d, Register s1, ByteSize simm13a);
+#endif

   // ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
   // st_long will perform st for 32 bit VM's and stx for 64 bit VM's
-  inline void ld_long( Register s1, Register s2, Register d );
-  inline void ld_long( Register s1, int simm13a, Register d );
-  inline void ld_long( Register s1, RegisterOrConstant s2, Register d );
-  inline void ld_long( const Address& a, Register d, int offset = 0 );
-  inline void st_long( Register d, Register s1, Register s2 );
-  inline void st_long( Register d, Register s1, int simm13a );
-  inline void st_long( Register d, Register s1, RegisterOrConstant s2 );
-  inline void st_long( Register d, const Address& a, int offset = 0 );
+  inline void ld_long(Register s1, Register s2, Register d);
+  inline void ld_long(Register s1, int simm13a, Register d);
+  inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
+  inline void ld_long(const Address& a, Register d, int offset = 0);
+  inline void st_long(Register d, Register s1, Register s2);
+  inline void st_long(Register d, Register s1, int simm13a);
+  inline void st_long(Register d, Register s1, RegisterOrConstant s2);
+  inline void st_long(Register d, const Address& a, int offset = 0);
+
+  // Loading values by size and signed-ness
+  void load_sized_value(Register s1, RegisterOrConstant s2, Register d,
+                        int size_in_bytes, bool is_signed);

   // Helpers for address formation.
   // They update the dest in place, whether it is a register or constant.
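load_sized_value is new in this patch and only its declaration is visible here, but the intended dispatch follows directly from the load primitives declared above. A sketch under that assumption (the real body lives in the corresponding .cpp file and may differ in details):

// Hypothetical sketch, not the actual HotSpot implementation.
void MacroAssembler::load_sized_value(Register s1, RegisterOrConstant s2,
                                      Register d, int size_in_bytes,
                                      bool is_signed) {
  switch (size_in_bytes) {
  case 8: ld_long(s1, s2, d);                                    break;
  case 4: if (is_signed) ldsw(s1, s2, d); else lduw(s1, s2, d);  break;
  case 2: if (is_signed) ldsh(s1, s2, d); else lduh(s1, s2, d);  break;
  case 1: if (is_signed) ldsb(s1, s2, d); else ldub(s1, s2, d);  break;
  default: ShouldNotReachHere();
  }
}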
@@ -2049,8 +2091,8 @@ class MacroAssembler: public Assembler {

   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
-  inline void stbool( Register d, const Address& a, int offset = 0 ) { stb(d, a, offset); }
-  inline void ldbool( const Address& a, Register d, int offset = 0 ) { ldsb( a, d, offset ); }
+  inline void stbool(Register d, const Address& a) { stb(d, a); }
+  inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
   inline void tstbool( Register s ) { tst(s); }
   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
@@ -2060,7 +2102,7 @@ class MacroAssembler: public Assembler {
   void store_klass_gap(Register s, Register dst_oop);

   // oop manipulations
-  void load_heap_oop(const Address& s, Register d, int offset = 0);
+  void load_heap_oop(const Address& s, Register d);
   void load_heap_oop(Register s1, Register s2, Register d);
   void load_heap_oop(Register s1, int simm13a, Register d);
   void store_heap_oop(Register d, Register s1, Register s2);
@@ -2190,11 +2232,11 @@ class MacroAssembler: public Assembler {
   void print_CPU_state();

   // oops in code
-  Address allocate_oop_address( jobject obj, Register d ); // allocate_index
-  Address constant_oop_address( jobject obj, Register d ); // find_index
-  inline void set_oop         ( jobject obj, Register d ); // uses allocate_oop_address
-  inline void set_oop_constant( jobject obj, Register d ); // uses constant_oop_address
-  inline void set_oop         ( Address obj_addr );        // same as load_address
+  AddressLiteral allocate_oop_address(jobject obj);                  // allocate_index
+  AddressLiteral constant_oop_address(jobject obj);                  // find_index
+  inline void set_oop         (jobject obj, Register d);             // uses allocate_oop_address
+  inline void set_oop_constant(jobject obj, Register d);             // uses constant_oop_address
+  inline void set_oop         (AddressLiteral& obj_addr, Register d); // same as load_address

   void set_narrow_oop( jobject obj, Register d );
@@ -2410,7 +2452,8 @@ class MacroAssembler: public Assembler {
   // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
   void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
   // Unconditional increment.
-  void inc_counter(address counter_addr, Register Rtemp1, Register Rtemp2);
+  void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
+  void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);

 #undef VIRTUAL
View file
@@ -38,6 +38,11 @@ inline void MacroAssembler::pd_print_patched_instruction(address branch) {

 inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }

+inline int AddressLiteral::low10() const {
+  return Assembler::low10(value());
+}
+

 // inlines for SPARC assembler -- dmu 5/97

 inline void Assembler::check_delay() {
@@ -63,10 +68,9 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
 }

-inline void Assembler::add( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::add( Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
-inline void Assembler::add( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
-inline void Assembler::add( const Address& a, Register d, int offset) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }
+inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
+inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }

 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt);  has_delay_slot(); }
 inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
@@ -95,13 +99,10 @@ inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op
 inline void Assembler::jmpl( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
 inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);  has_delay_slot(); }

-inline void Assembler::jmpl( Address& a, Register d, int offset) { jmpl( a.base(), a.disp() + offset, d, a.rspec(offset)); }
-
-inline void Assembler::ldf( FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldf( FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldf( FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
+inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
+inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }

 inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();   emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -136,50 +137,69 @@ inline void Assembler::ldd(   Register s1, int simm13a, Register d) { v9_dep();
 #ifdef _LP64
 // Make all 32 bit loads signed so 64 bit registers maintain proper sign
 inline void Assembler::ld(  Register s1, Register s2, Register d)      { ldsw( s1, s2, d); }
 inline void Assembler::ld(  Register s1, int simm13a, Register d)      { ldsw( s1, simm13a, d); }
 #else
 inline void Assembler::ld(  Register s1, Register s2, Register d)      { lduw( s1, s2, d); }
 inline void Assembler::ld(  Register s1, int simm13a, Register d)      { lduw( s1, simm13a, d); }
 #endif

-inline void Assembler::ldub( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsb(s1, s2.as_register(), d);
-  else                   ldsb(s1, s2.as_constant(), d);
-}
-inline void Assembler::ldsb( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsb(s1, s2.as_register(), d);
-  else                   ldsb(s1, s2.as_constant(), d);
-}
-inline void Assembler::lduh( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsh(s1, s2.as_register(), d);
-  else                   ldsh(s1, s2.as_constant(), d);
-}
-inline void Assembler::ldsh( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsh(s1, s2.as_register(), d);
-  else                   ldsh(s1, s2.as_constant(), d);
-}
-inline void Assembler::lduw( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsw(s1, s2.as_register(), d);
-  else                   ldsw(s1, s2.as_constant(), d);
-}
-inline void Assembler::ldsw( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldsw(s1, s2.as_register(), d);
-  else                   ldsw(s1, s2.as_constant(), d);
-}
-inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldx(s1, s2.as_register(), d);
-  else                   ldx(s1, s2.as_constant(), d);
-}
-inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ld(s1, s2.as_register(), d);
-  else                   ld(s1, s2.as_constant(), d);
-}
-inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) {
-  if (s2.is_register())  ldd(s1, s2.as_register(), d);
-  else                   ldd(s1, s2.as_constant(), d);
-}
+#ifdef ASSERT
+  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+# ifdef _LP64
+inline void Assembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
+# else
+inline void Assembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
+# endif
+#endif
+
+inline void Assembler::ld(  const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ld(  a.base(), a.index(),         d); }
+  else               {                          ld(  a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldsb(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(),         d); }
+  else               {                          ldsb(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldsh(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(),         d); }
+  else               {                          ldsh(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldsw(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(),         d); }
+  else               {                          ldsw(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldub(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(),         d); }
+  else               {                          ldub(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::lduh(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(),         d); }
+  else               {                          lduh(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::lduw(const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(),         d); }
+  else               {                          lduw(a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldd( const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(),         d); }
+  else               {                          ldd( a.base(), a.disp() + offset, d); }
+}
+inline void Assembler::ldx( const Address& a, Register d, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(),         d); }
+  else               {                          ldx( a.base(), a.disp() + offset, d); }
+}
+
+inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
+inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
+inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
+inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
+inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
+inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
+inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
+inline void Assembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
+inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

 // form effective addresses this way:
 inline void Assembler::add(   Register s1, RegisterOrConstant s2, Register d, int offset) {
   if (s2.is_register())  add(s1, s2.as_register(), d);
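A behavioral fix rides along with this refactoring: in the deleted code above, the RegisterOrConstant forms of ldub, lduh and lduw delegated to the signed ldsb/ldsh/ldsw, so unsigned loads silently sign-extended; routing everything through the Address entry points removes that mismatch. A usage sketch with hypothetical registers (illustration only, not taken from the patch):

// Register+displacement form: a nonzero extra offset folds into the
// displacement, so this loads from [G3 + 8 + 4]:
//   __ ld(Address(G3, 8), O1, 4);
// Register+register form: there is no displacement to fold the extra
// offset into, which is why the implementations above assert offset == 0:
//   __ ld(Address(G3, G4), O1);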
@@ -187,17 +207,6 @@ inline void Assembler::add(   Register s1, RegisterOrConstant s2, Register d, in
   if (offset != 0)       add(d,  offset,               d);
 }

-inline void Assembler::ld(   const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ld(   a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldsb( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsb( a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldsh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsh( a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldsw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldsw( a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldub( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldub( a.base(), a.disp() + offset, d ); }
-inline void Assembler::lduh( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduh( a.base(), a.disp() + offset, d ); }
-inline void Assembler::lduw( const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); lduw( a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldd(  const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldd(  a.base(), a.disp() + offset, d ); }
-inline void Assembler::ldx(  const Address& a, Register d, int offset ) { relocate(a.rspec(offset)); ldx(  a.base(), a.disp() + offset, d ); }
-
 inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
@@ -240,36 +249,44 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
 inline void Assembler::std(  Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std(  Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

 inline void Assembler::st(  Register d, Register s1, Register s2) { stw(d, s1, s2); }
 inline void Assembler::st(  Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

-inline void Assembler::stb( Register d, Register s1, RegisterOrConstant s2) {
-  if (s2.is_register())  stb(d, s1, s2.as_register());
-  else                   stb(d, s1, s2.as_constant());
-}
-inline void Assembler::sth( Register d, Register s1, RegisterOrConstant s2) {
-  if (s2.is_register())  sth(d, s1, s2.as_register());
-  else                   sth(d, s1, s2.as_constant());
-}
-inline void Assembler::stx( Register d, Register s1, RegisterOrConstant s2) {
-  if (s2.is_register())  stx(d, s1, s2.as_register());
-  else                   stx(d, s1, s2.as_constant());
-}
-inline void Assembler::std( Register d, Register s1, RegisterOrConstant s2) {
-  if (s2.is_register())  std(d, s1, s2.as_register());
-  else                   std(d, s1, s2.as_constant());
-}
-inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) {
-  if (s2.is_register())  st(d, s1, s2.as_register());
-  else                   st(d, s1, s2.as_constant());
-}
-
-inline void Assembler::stb( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stb( d, a.base(), a.disp() + offset); }
-inline void Assembler::sth( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); sth( d, a.base(), a.disp() + offset); }
-inline void Assembler::stw( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stw( d, a.base(), a.disp() + offset); }
-inline void Assembler::st(  Register d, const Address& a, int offset) { relocate(a.rspec(offset)); st(  d, a.base(), a.disp() + offset); }
-inline void Assembler::std( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); std( d, a.base(), a.disp() + offset); }
-inline void Assembler::stx( Register d, const Address& a, int offset) { relocate(a.rspec(offset)); stx( d, a.base(), a.disp() + offset); }
+#ifdef ASSERT
+// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+inline void Assembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
+#endif
+
+inline void Assembler::stb(Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
+  else               {                          stb(d, a.base(), a.disp() + offset); }
+}
+inline void Assembler::sth(Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
+  else               {                          sth(d, a.base(), a.disp() + offset); }
+}
+inline void Assembler::stw(Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
+  else               {                          stw(d, a.base(), a.disp() + offset); }
+}
+inline void Assembler::st( Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
+  else               {                          st( d, a.base(), a.disp() + offset); }
+}
+inline void Assembler::std(Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
+  else               {                          std(d, a.base(), a.disp() + offset); }
+}
+inline void Assembler::stx(Register d, const Address& a, int offset) {
+  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
+  else               {                          stx(d, a.base(), a.disp() + offset); }
+}
+
+inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
+inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
+inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
+inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
+inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
// v8 p 99 // v8 p 99
@ -294,39 +311,46 @@ inline void Assembler::swap( Address& a, Register d, int offset ) { relocate(
// Use the right loads/stores for the platform // Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64 #ifdef _LP64
Assembler::ldx( s1, s2, d); Assembler::ldx(s1, s2, d);
#else #else
Assembler::ld( s1, s2, d); Assembler::ld( s1, s2, d);
#endif #endif
} }
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64 #ifdef _LP64
Assembler::ldx( s1, simm13a, d); Assembler::ldx(s1, simm13a, d);
#else #else
Assembler::ld( s1, simm13a, d); Assembler::ld( s1, simm13a, d);
#endif #endif
} }
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) { inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64 #ifdef _LP64
Assembler::ldx( s1, s2, d); Assembler::ldx(s1, s2, d);
#else #else
Assembler::ld( s1, s2, d); Assembler::ld( s1, s2, d);
#endif #endif
} }
inline void MacroAssembler::ld_ptr( const Address& a, Register d, int offset ) { inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64 #ifdef _LP64
Assembler::ldx( a, d, offset ); Assembler::ldx(a, d, offset);
#else #else
Assembler::ld( a, d, offset ); Assembler::ld( a, d, offset);
#endif #endif
} }
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) { inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64 #ifdef _LP64
Assembler::stx( d, s1, s2); Assembler::stx(d, s1, s2);
#else #else
Assembler::st( d, s1, s2); Assembler::st( d, s1, s2);
#endif #endif
@@ -334,25 +358,32 @@ inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {

 inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
 #ifdef _LP64
-  Assembler::stx( d, s1, simm13a);
+  Assembler::stx(d, s1, simm13a);
 #else
   Assembler::st( d, s1, simm13a);
 #endif
 }

+#ifdef ASSERT
+// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
+  st_ptr(d, s1, in_bytes(simm13a));
+}
+#endif
+
 inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
 #ifdef _LP64
-  Assembler::stx( d, s1, s2);
+  Assembler::stx(d, s1, s2);
 #else
   Assembler::st( d, s1, s2);
 #endif
 }

-inline void MacroAssembler::st_ptr( Register d, const Address& a, int offset) {
+inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
 #ifdef _LP64
-  Assembler::stx( d, a, offset);
+  Assembler::stx(d, a, offset);
 #else
-  Assembler::st( d, a, offset);
+  Assembler::st( d, a, offset);
 #endif
 }
@@ -381,11 +412,11 @@ inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Registe
 #endif
 }

-inline void MacroAssembler::ld_long( const Address& a, Register d, int offset ) {
+inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
 #ifdef _LP64
-  Assembler::ldx(a, d, offset );
+  Assembler::ldx(a, d, offset);
 #else
-  Assembler::ldd(a, d, offset );
+  Assembler::ldd(a, d, offset);
 #endif
 }
@@ -427,7 +458,7 @@ inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
 #ifdef _LP64
   Assembler::sllx(s1, s2, d);
 #else
-  Assembler::sll(s1, s2, d);
+  Assembler::sll( s1, s2, d);
 #endif
 }
@@ -435,7 +466,7 @@ inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
 #ifdef _LP64
   Assembler::sllx(s1, imm6a, d);
 #else
-  Assembler::sll(s1, imm6a, d);
+  Assembler::sll( s1, imm6a, d);
 #endif
 }
@@ -443,7 +474,7 @@ inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
 #ifdef _LP64
   Assembler::srlx(s1, s2, d);
 #else
-  Assembler::srl(s1, s2, d);
+  Assembler::srl( s1, s2, d);
 #endif
 }
@@ -451,7 +482,7 @@ inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
 #ifdef _LP64
   Assembler::srlx(s1, imm6a, d);
 #else
-  Assembler::srl(s1, imm6a, d);
+  Assembler::srl( s1, imm6a, d);
 #endif
 }
@@ -541,9 +572,8 @@ inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
   disp = (intptr_t)d - (intptr_t)pc();
   if ( disp != (intptr_t)(int32_t)disp ) {
     relocate(rt);
-    Address dest(O7, (address)d);
-    sethi(dest, /*ForceRelocatable=*/ true);
-    jmpl(dest, O7);
+    AddressLiteral dest(d);
+    jumpl_to(dest, O7, O7);
   }
   else {
     Assembler::call( d, rt );
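For readers outside the patch context: SPARC's call instruction encodes a 30-bit word displacement, so it reaches roughly plus or minus 2 GB from the current pc. The guard above asks whether the byte displacement survives truncation to 32 bits; when it does not, the code falls back to the fully relocatable sethi+jmpl pair via jumpl_to. A minimal, self-contained restatement of that test (int64_t standing in for intptr_t on an LP64 build):

// Illustration only, not part of the patch.
#include <cassert>
#include <cstdint>

static bool fits_near_call(int64_t disp) {
  return disp == (int64_t)(int32_t)disp;   // survives int32 round-trip?
}

int main() {
  assert( fits_near_call(0x7fffffffLL));   // just under +2GB: near call ok
  assert(!fits_near_call(0x100000000LL));  // 4GB away: needs sethi+jmpl
  return 0;
}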
@@ -603,96 +633,72 @@ inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip
   return thepc;
 }

-inline void MacroAssembler::load_address( Address& a, int offset ) {
-  assert_not_delayed();
-#ifdef _LP64
-  sethi(a);
-  add(a, a.base(), offset);
-#else
-  if (a.hi() == 0 && a.rtype() == relocInfo::none) {
-    set(a.disp() + offset, a.base());
-  }
-  else {
-    sethi(a);
-    add(a, a.base(), offset);
-  }
-#endif
-}
-
-inline void MacroAssembler::split_disp( Address& a, Register temp ) {
-  assert_not_delayed();
-  a = a.split_disp();
-  Assembler::sethi(a.hi(), temp, a.rspec());
-  add(a.base(), temp, a.base());
-}
-
-inline void MacroAssembler::load_contents( Address& a, Register d, int offset ) {
+inline void MacroAssembler::load_contents(AddressLiteral& addrlit, Register d, int offset) {
   assert_not_delayed();
-  sethi(a);
-  ld(a, d, offset);
+  sethi(addrlit, d);
+  ld(d, addrlit.low10() + offset, d);
 }

-inline void MacroAssembler::load_ptr_contents( Address& a, Register d, int offset ) {
+inline void MacroAssembler::load_ptr_contents(AddressLiteral& addrlit, Register d, int offset) {
   assert_not_delayed();
-  sethi(a);
-  ld_ptr(a, d, offset);
+  sethi(addrlit, d);
+  ld_ptr(d, addrlit.low10() + offset, d);
 }

-inline void MacroAssembler::store_contents( Register s, Address& a, int offset ) {
+inline void MacroAssembler::store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
   assert_not_delayed();
-  sethi(a);
-  st(s, a, offset);
+  sethi(addrlit, temp);
+  st(s, temp, addrlit.low10() + offset);
 }

-inline void MacroAssembler::store_ptr_contents( Register s, Address& a, int offset ) {
+inline void MacroAssembler::store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
   assert_not_delayed();
-  sethi(a);
-  st_ptr(s, a, offset);
+  sethi(addrlit, temp);
+  st_ptr(s, temp, addrlit.low10() + offset);
 }

 // This code sequence is relocatable to any address, even on LP64.
-inline void MacroAssembler::jumpl_to( Address& a, Register d, int offset ) {
+inline void MacroAssembler::jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset) {
   assert_not_delayed();
   // Force fixed length sethi because NativeJump and NativeFarCall don't handle
   // variable length instruction streams.
-  sethi(a, /*ForceRelocatable=*/ true);
-  jmpl(a, d, offset);
+  patchable_sethi(addrlit, temp);
+  jmpl(temp, addrlit.low10() + offset, d);
 }

-inline void MacroAssembler::jump_to( Address& a, int offset ) {
-  jumpl_to( a, G0, offset );
+inline void MacroAssembler::jump_to(AddressLiteral& addrlit, Register temp, int offset) {
+  jumpl_to(addrlit, temp, G0, offset);
 }

-inline void MacroAssembler::jump_indirect_to( Address& a, Register temp,
-                                              int ld_offset, int jmp_offset ) {
+inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
+                                             int ld_offset, int jmp_offset) {
   assert_not_delayed();
-  //sethi(a);                   // sethi is caller responsibility for this one
+  //sethi(al);                  // sethi is caller responsibility for this one
   ld_ptr(a, temp, ld_offset);
   jmp(temp, jmp_offset);
 }

-inline void MacroAssembler::set_oop( jobject obj, Register d ) {
-  set_oop(allocate_oop_address(obj, d));
+inline void MacroAssembler::set_oop(jobject obj, Register d) {
+  set_oop(allocate_oop_address(obj), d);
 }

-inline void MacroAssembler::set_oop_constant( jobject obj, Register d ) {
-  set_oop(constant_oop_address(obj, d));
+inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
+  set_oop(constant_oop_address(obj), d);
 }

-inline void MacroAssembler::set_oop( Address obj_addr ) {
-  assert(obj_addr.rspec().type()==relocInfo::oop_type, "must be an oop reloc");
-  load_address(obj_addr);
+inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) {
+  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
+  set(obj_addr, d);
 }
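To make the new sequences concrete, here is what load_contents reduces to for a hypothetical address (illustration only; 0x40001234 is an invented value, and the mnemonics are shown as comments rather than emitted code):

// load_contents(AddressLiteral((address)0x40001234), L0) emits, in effect:
//   sethi %hi(0x40001234), %l0   ; %l0 = 0x40001000 (top 22 bits)
//   ld    [%l0 + 0x234], %l0     ; low10() supplies the bottom 10 bits
// The store variants need the extra temp register precisely because the
// data register cannot double as the address register built by sethi.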
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -277,10 +277,11 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {

   if (_id == load_klass_id) {
     // produce a copy of the load klass instruction for use by the being initialized case
+#ifdef ASSERT
     address start = __ pc();
-    Address addr = Address(_obj, address(NULL), oop_Relocation::spec(_oop_index));
-    __ sethi(addr, true);
-    __ add(addr, _obj, 0);
+#endif
+    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
+    __ patchable_set(addrlit, _obj);
 #ifdef ASSERT
     for (int i = 0; i < _bytes_to_copy; i++) {
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -327,7 +327,7 @@ void FrameMap::init () {

 Address FrameMap::make_new_address(ByteSize sp_offset) const {
-  return Address(SP, 0, STACK_BIAS + in_bytes(sp_offset));
+  return Address(SP, STACK_BIAS + in_bytes(sp_offset));
 }
View file
@@ -196,7 +196,7 @@ void LIR_Assembler::osr_entry() {
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
+        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
         __ cmp(G0, O7);
         __ br(Assembler::notEqual, false, Assembler::pt, L);
         __ delayed()->nop();
@@ -205,9 +205,9 @@ void LIR_Assembler::osr_entry() {
       }
 #endif // ASSERT
       // Copy the lock field into the compiled activation.
-      __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::lock_offset_in_bytes()), O7);
+      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
-      __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
+      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
     }
   }
@@ -238,21 +238,21 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
     int  offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
     int   count_offset = java_lang_String:: count_offset_in_bytes();

-    __ ld_ptr(Address(str0, 0, value_offset), tmp0);
-    __ ld(Address(str0, 0, offset_offset), tmp2);
+    __ ld_ptr(str0, value_offset, tmp0);
+    __ ld(str0, offset_offset, tmp2);
     __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
-    __ ld(Address(str0, 0, count_offset), str0);
+    __ ld(str0, count_offset, str0);
     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

     // str1 may be null
     add_debug_info_for_null_check_here(info);

-    __ ld_ptr(Address(str1, 0, value_offset), tmp1);
+    __ ld_ptr(str1, value_offset, tmp1);
     __ add(tmp0, tmp2, tmp0);

-    __ ld(Address(str1, 0, offset_offset), tmp2);
+    __ ld(str1, offset_offset, tmp2);
     __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
-    __ ld(Address(str1, 0, count_offset), str1);
+    __ ld(str1, count_offset, str1);
     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
     __ subcc(str0, str1, O7);
     __ add(tmp1, tmp2, tmp1);
@@ -412,9 +412,9 @@ void LIR_Assembler::emit_deopt_handler() {
 #endif // ASSERT
   compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

-  Address deopt_blob(G3_scratch, SharedRuntime::deopt_blob()->unpack());
-  __ JUMP(deopt_blob, 0); // sethi;jmp
+  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
+  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();

   assert(code_offset() - offset <= deopt_handler_size, "overflow");
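A note on this call site: because an AddressLiteral no longer names a register, the JUMP macro (redefined earlier in this patch) now takes the scratch register explicitly. Spelled out, the expansion is:

//   __ JUMP(deopt_blob, G3_scratch, 0);
// expands, per the macro definition above, to
//   __ jump(deopt_blob, G3_scratch, 0, __FILE__, __LINE__);
// so the sethi/jmp pair is built in G3_scratch rather than in a register
// smuggled inside the address object.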
@@ -441,13 +441,12 @@ void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info)
   int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

-  Address addr = Address(reg, address(NULL), oop_Relocation::spec(oop_index));
-  assert(addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
+  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
+  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
   // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
   // NULL will be dynamically patched later and the patched value may be large.  We must
   // therefore generate the sethi/add as a placeholders
-  __ sethi(addr, true);
-  __ add(addr, reg, 0);
+  __ patchable_set(addrlit, reg);

   patching_epilog(patch, lir_patch_normal, reg, info);
 }
@@ -706,7 +705,7 @@ void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {

 void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
   add_debug_info_for_null_check_here(info);
-  __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), G3_scratch);
+  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
   if (__ is_simm13(vtable_offset) ) {
     __ ld_ptr(G3_scratch, vtable_offset, G5_method);
   } else {
@@ -715,7 +714,7 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
     // ld_ptr, set_hi, set
     __ ld_ptr(G3_scratch, G5_method, G5_method);
   }
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);
+  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
   __ callr(G3_scratch, G0);
   // the peephole pass fills the delay slot
 }
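The dropped in_bytes() wrapper above leans on the ByteSize overloads added earlier in this patch. Assuming from_compiled_offset() is declared to return a ByteSize, as HotSpot offset accessors generally are, the call now type-checks directly in both build flavors:

//   ByteSize off = methodOopDesc::from_compiled_offset();
//   __ ld_ptr(G5_method, off, G3_scratch);
// ASSERT builds: resolves to the new ld_ptr(Register, ByteSize, Register)
// product builds: ByteSize is a typedef for int, so the plain overload fires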
@@ -738,8 +737,7 @@ int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, Cod
       default      : ShouldNotReachHere();
     }
   } else {
-    __ sethi(disp & ~0x3ff, O7, true);
-    __ add(O7, disp & 0x3ff, O7);
+    __ set(disp, O7);
     if (info != NULL) add_debug_info_for_null_check_here(info);
     load_offset = code_offset();
     switch(ld_type) {
@@ -775,8 +773,7 @@ void LIR_Assembler::store(Register value, Register base, int offset, BasicType t
       default      : ShouldNotReachHere();
     }
   } else {
-    __ sethi(offset & ~0x3ff, O7, true);
-    __ add(O7, offset & 0x3ff, O7);
+    __ set(offset, O7);
     if (info != NULL) add_debug_info_for_null_check_here(info);
     switch (type) {
       case T_BOOLEAN: // fall through
@@ -813,8 +810,7 @@ void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_typ
       __ ldf(w, s, disp, d);
     }
   } else {
-    __ sethi(disp & ~0x3ff, O7, true);
-    __ add(O7, disp & 0x3ff, O7);
+    __ set(disp, O7);
     if (info != NULL) add_debug_info_for_null_check_here(info);
     __ ldf(w, s, O7, d);
   }
@@ -839,8 +835,7 @@ void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicT
       __ stf(w, value, base, offset);
     }
   } else {
-    __ sethi(offset & ~0x3ff, O7, true);
-    __ add(O7, offset & 0x3ff, O7);
+    __ set(offset, O7);
     if (info != NULL) add_debug_info_for_null_check_here(info);
     __ stf(w, value, O7, base);
   }
@@ -852,8 +847,7 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
     assert(!unaligned, "can't handle this");
     // for offsets larger than a simm13 we setup the offset in O7
-    __ sethi(offset & ~0x3ff, O7, true);
-    __ add(O7, offset & 0x3ff, O7);
+    __ set(offset, O7);
     store_offset = store(from_reg, base, O7, type);
   } else {
     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
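One hazard in the context kept by this hunk deserves a flag: in the condition `offset + (type == T_LONG) ? wordSize : 0`, the `+` binds tighter than `?:`, so the expression does not add wordSize for longs as the spacing suggests. A minimal standalone demonstration:

// Illustration of the precedence trap, not part of the patch.
#include <cassert>

int main() {
  int offset = 4, wordSize = 8;
  bool is_long = false;
  int surprising = offset + is_long ? wordSize : 0;    // (4 + 0) is truthy -> 8
  int intended   = offset + (is_long ? wordSize : 0);  // 4 + 0 -> 4
  assert(surprising == 8 && intended == 4);
  return 0;
}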
@@ -937,8 +931,7 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
     assert(base != O7, "destroying register");
     assert(!unaligned, "can't handle this");
     // for offsets larger than a simm13 we setup the offset in O7
-    __ sethi(offset & ~0x3ff, O7, true);
-    __ add(O7, offset & 0x3ff, O7);
+    __ set(offset, O7);
     load_offset = load(base, O7, to_reg, type);
   } else {
     load_offset = code_offset();
@@ -1213,7 +1206,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
         assert(to_reg->is_single_fpu(), "wrong register kind");

         __ set(con, O7);
-        Address temp_slot(SP, 0, (frame::register_save_words * wordSize) + STACK_BIAS);
+        Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
         __ st(O7, temp_slot);
         __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
       }
@ -1238,8 +1231,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
assert(to_reg->is_double_fpu(), "wrong register kind"); assert(to_reg->is_double_fpu(), "wrong register kind");
Address temp_slot_lo(SP, 0, ((frame::register_save_words ) * wordSize) + STACK_BIAS); Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
Address temp_slot_hi(SP, 0, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS); Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
__ set(low(con), O7); __ set(low(con), O7);
__ st(O7, temp_slot_lo); __ st(O7, temp_slot_lo);
__ set(high(con), O7); __ set(high(con), O7);
@ -1267,17 +1260,16 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
break; break;
} }
RelocationHolder rspec = internal_word_Relocation::spec(const_addr); RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
AddressLiteral const_addrlit(const_addr, rspec);
if (to_reg->is_single_fpu()) { if (to_reg->is_single_fpu()) {
__ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); __ patchable_sethi(const_addrlit, O7);
__ relocate(rspec); __ relocate(rspec);
__ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
int offset = (intx)const_addr & 0x3ff;
__ ldf (FloatRegisterImpl::S, O7, offset, to_reg->as_float_reg());
} else { } else {
assert(to_reg->is_single_cpu(), "Must be a cpu register."); assert(to_reg->is_single_cpu(), "Must be a cpu register.");
__ set((intx)const_addr, O7, rspec); __ set(const_addrlit, O7);
load(O7, 0, to_reg->as_register(), T_INT); load(O7, 0, to_reg->as_register(), T_INT);
} }
} }
@ -1293,10 +1285,10 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
RelocationHolder rspec = internal_word_Relocation::spec(const_addr); RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
if (to_reg->is_double_fpu()) { if (to_reg->is_double_fpu()) {
__ sethi( (intx)const_addr & ~0x3ff, O7, true, rspec); AddressLiteral const_addrlit(const_addr, rspec);
int offset = (intx)const_addr & 0x3ff; __ patchable_sethi(const_addrlit, O7);
__ relocate(rspec); __ relocate(rspec);
__ ldf (FloatRegisterImpl::D, O7, offset, to_reg->as_double_reg()); __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
} else { } else {
assert(to_reg->is_double_cpu(), "Must be a long register."); assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64 #ifdef _LP64
@ -1317,7 +1309,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
Address LIR_Assembler::as_Address(LIR_Address* addr) { Address LIR_Assembler::as_Address(LIR_Address* addr) {
Register reg = addr->base()->as_register(); Register reg = addr->base()->as_register();
return Address(reg, 0, addr->disp()); return Address(reg, addr->disp());
} }
@ -1360,13 +1352,13 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
Address base = as_Address(addr); Address base = as_Address(addr);
return Address(base.base(), 0, base.disp() + hi_word_offset_in_bytes); return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
} }
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
Address base = as_Address(addr); Address base = as_Address(addr);
return Address(base.base(), 0, base.disp() + lo_word_offset_in_bytes); return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
} }
@ -1396,8 +1388,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
if (addr->index()->is_illegal()) { if (addr->index()->is_illegal()) {
if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
if (needs_patching) { if (needs_patching) {
__ sethi(0, O7, true); __ patchable_set(0, O7);
__ add(O7, 0, O7);
} else { } else {
__ set(disp_value, O7); __ set(disp_value, O7);
} }
@ -1544,8 +1535,7 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
if (addr->index()->is_illegal()) { if (addr->index()->is_illegal()) {
if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
if (needs_patching) { if (needs_patching) {
__ sethi(0, O7, true); __ patchable_set(0, O7);
__ add(O7, 0, O7);
} else { } else {
__ set(disp_value, O7); __ set(disp_value, O7);
} }
@ -1627,8 +1617,8 @@ void LIR_Assembler::emit_static_call_stub() {
__ set_oop(NULL, G5); __ set_oop(NULL, G5);
// must be set to -1 at code generation time // must be set to -1 at code generation time
Address a(G3, (address)-1); AddressLiteral addrlit(-1);
__ jump_to(a, 0); __ jump_to(addrlit, G3);
__ delayed()->nop(); __ delayed()->nop();
assert(__ offset() - start <= call_stub_size, "stub too big"); assert(__ offset() - start <= call_stub_size, "stub too big");
@ -2063,7 +2053,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
address pc_for_athrow = __ pc(); address pc_for_athrow = __ pc();
int pc_for_athrow_offset = __ offset(); int pc_for_athrow_offset = __ offset();
RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
__ set((intptr_t)pc_for_athrow, Oissuing_pc, rspec); __ set(pc_for_athrow, Oissuing_pc, rspec);
add_call_info(pc_for_athrow_offset, info); // for exception handler add_call_info(pc_for_athrow_offset, info); // for exception handler
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
@ -2451,7 +2441,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
} }
Address flags_addr(mdo, 0, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
__ ldub(flags_addr, data_val); __ ldub(flags_addr, data_val);
__ or3(data_val, BitData::null_seen_byte_constant(), data_val); __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
__ stb(data_val, flags_addr); __ stb(data_val, flags_addr);
@ -2738,7 +2728,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ add(mdo, O7, mdo); __ add(mdo, O7, mdo);
} }
Address counter_addr(mdo, 0, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
__ lduw(counter_addr, tmp1); __ lduw(counter_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1);
__ stw(tmp1, counter_addr); __ stw(tmp1, counter_addr);
@ -2764,8 +2754,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
for (i = 0; i < VirtualCallData::row_limit(); i++) { for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i); ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) { if (known_klass->equals(receiver)) {
Address data_addr(mdo, 0, md->byte_offset_of_slot(data, Address data_addr(mdo, md->byte_offset_of_slot(data,
VirtualCallData::receiver_count_offset(i)) - VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
__ lduw(data_addr, tmp1); __ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1);
@ -2782,11 +2772,11 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
for (i = 0; i < VirtualCallData::row_limit(); i++) { for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i); ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) { if (receiver == NULL) {
Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
jobject2reg(known_klass->encoding(), tmp1); jobject2reg(known_klass->encoding(), tmp1);
__ st_ptr(tmp1, recv_addr); __ st_ptr(tmp1, recv_addr);
Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
__ lduw(data_addr, tmp1); __ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1);
@ -2795,20 +2785,20 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
} }
} }
} else { } else {
load(Address(recv, 0, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
Label update_done; Label update_done;
uint i; uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) { for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test; Label next_test;
// See if the receiver is receiver[n]. // See if the receiver is receiver[n].
Address receiver_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
__ ld_ptr(receiver_addr, tmp1); __ ld_ptr(receiver_addr, tmp1);
__ verify_oop(tmp1); __ verify_oop(tmp1);
__ cmp(recv, tmp1); __ cmp(recv, tmp1);
__ brx(Assembler::notEqual, false, Assembler::pt, next_test); __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
__ delayed()->nop(); __ delayed()->nop();
Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
__ lduw(data_addr, tmp1); __ lduw(data_addr, tmp1);
__ add(tmp1, DataLayout::counter_increment, tmp1); __ add(tmp1, DataLayout::counter_increment, tmp1);
@ -2821,7 +2811,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
// Didn't find receiver; find next empty slot and fill it in // Didn't find receiver; find next empty slot and fill it in
for (i = 0; i < VirtualCallData::row_limit(); i++) { for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test; Label next_test;
Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias); mdo_offset_bias);
load(recv_addr, tmp1, T_OBJECT); load(recv_addr, tmp1, T_OBJECT);
__ tst(tmp1); __ tst(tmp1);
@ -2829,8 +2819,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
__ delayed()->nop(); __ delayed()->nop();
__ st_ptr(recv, recv_addr); __ st_ptr(recv, recv_addr);
__ set(DataLayout::counter_increment, tmp1); __ set(DataLayout::counter_increment, tmp1);
__ st_ptr(tmp1, Address(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias)); mdo_offset_bias);
if (i < (VirtualCallData::row_limit() - 1)) { if (i < (VirtualCallData::row_limit() - 1)) {
__ br(Assembler::always, false, Assembler::pt, update_done); __ br(Assembler::always, false, Assembler::pt, update_done);
__ delayed()->nop(); __ delayed()->nop();
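The recurring change in this file replaces the open-coded sethi/add pair with the macro assembler's set(): both materialize a displacement too large for SPARC's 13-bit signed immediate into the scratch register O7 before the load or store. A minimal standalone sketch of the arithmetic involved, assuming 32-bit offsets; is_simm13 and materialize are illustrative helpers mirroring the masks in the diff, not HotSpot API.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Does the value fit in a 13-bit signed immediate (SPARC simm13)?
    static bool is_simm13(int32_t v) {
      return v >= -(1 << 12) && v < (1 << 12);
    }

    // Model of the old two-instruction sequence: sethi supplies the bits
    // above the low 10, the add supplies the low 10.
    static int32_t materialize(int32_t v) {
      int32_t hi    = v & ~0x3ff;   // what sethi(offset & ~0x3ff, O7) loads
      int32_t low10 = v &  0x3ff;   // what add(O7, offset & 0x3ff, O7) adds
      return hi + low10;            // disjoint masks, so + recombines exactly
    }

    int main() {
      int32_t offsets[] = { 0, 4095, 4096, 0x12345678 };
      for (int32_t off : offsets) {
        if (is_simm13(off)) {
          printf("%#x fits simm13: encode directly in the load/store\n", (unsigned)off);
        } else {
          // Large displacement: build it in a scratch register (O7 above).
          assert(materialize(off) == off);
          printf("%#x needs the sethi/add (now set()) sequence\n", (unsigned)off);
        }
      }
      return 0;
    }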

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,13 +29,13 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
Label L; Label L;
const Register temp_reg = G3_scratch; const Register temp_reg = G3_scratch;
// Note: needs more testing of out-of-line vs. inline slow case // Note: needs more testing of out-of-line vs. inline slow case
Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
verify_oop(receiver); verify_oop(receiver);
ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg); ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg);
cmp(temp_reg, iCache); cmp(temp_reg, iCache);
brx(Assembler::equal, true, Assembler::pt, L); brx(Assembler::equal, true, Assembler::pt, L);
delayed()->nop(); delayed()->nop();
jump_to(ic_miss, 0); AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
jump_to(ic_miss, temp_reg);
delayed()->nop(); delayed()->nop();
align(CodeEntryAlignment); align(CodeEntryAlignment);
bind(L); bind(L);
@ -84,7 +84,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
Label done; Label done;
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
// The following move must be the first instruction emitted since debug // The following move must be the first instruction emitted since debug
// information may be generated for it. // information may be generated for it.
@ -132,7 +132,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
Label done; Label done;
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes()); Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); assert(mark_addr.disp() == 0, "cas must take a zero displacement");
if (UseBiasedLocking) { if (UseBiasedLocking) {
@ -370,7 +370,7 @@ void C1_MacroAssembler::allocate_array(
void C1_MacroAssembler::verify_stack_oop(int stack_offset) { void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
if (!VerifyOops) return; if (!VerifyOops) return;
verify_oop_addr(Address(SP, 0, stack_offset + STACK_BIAS)); verify_oop_addr(Address(SP, stack_offset + STACK_BIAS));
} }
void C1_MacroAssembler::verify_not_null_oop(Register r) { void C1_MacroAssembler::verify_not_null_oop(Register r) {
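The inline_cache_check change at the top of this file only alters how the miss-stub address is materialized (an AddressLiteral jumped through an explicit temp register); the check itself still loads the receiver's klass, compares it with the klass the call site was compiled against, and falls through on a hit. A rough C++ model of that control flow, with hypothetical Object/Klass types standing in for HotSpot's:

    #include <cstdio>

    struct Klass  { const char* name; };
    struct Object { Klass* klass; };        // klass_offset_in_bytes() in HotSpot

    static void ic_miss_stub() { puts("inline cache miss: repatch call site"); }

    // Models C1_MacroAssembler::inline_cache_check(receiver, iCache).
    static void inline_cache_check(Object* receiver, Klass* expected) {
      Klass* actual = receiver->klass;      // ld_ptr(receiver, klass_offset, temp_reg)
      if (actual != expected) {             // cmp(temp_reg, iCache) + brx(equal, ..., L)
        ic_miss_stub();                     // jump_to(ic_miss, temp_reg)
        return;
      }
      puts("inline cache hit: fall through to verified entry");
    }

    int main() {
      Klass a{"A"}, b{"B"};
      Object obj{&a};
      inline_cache_check(&obj, &a);  // hit
      inline_cache_check(&obj, &b);  // miss
      return 0;
    }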

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -57,13 +57,13 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
// check for pending exceptions // check for pending exceptions
{ Label L; { Label L;
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address exception_addr(G2_thread, Thread::pending_exception_offset());
ld_ptr(exception_addr, Gtemp); ld_ptr(exception_addr, Gtemp);
br_null(Gtemp, false, pt, L); br_null(Gtemp, false, pt, L);
delayed()->nop(); delayed()->nop();
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
st_ptr(G0, vm_result_addr); st_ptr(G0, vm_result_addr);
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
st_ptr(G0, vm_result_addr_2); st_ptr(G0, vm_result_addr_2);
if (frame_size() == no_frame_size) { if (frame_size() == no_frame_size) {
@ -73,8 +73,8 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
} else if (_stub_id == Runtime1::forward_exception_id) { } else if (_stub_id == Runtime1::forward_exception_id) {
should_not_reach_here(); should_not_reach_here();
} else { } else {
Address exc(G4, Runtime1::entry_for(Runtime1::forward_exception_id)); AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));
jump_to(exc, 0); jump_to(exc, G4);
delayed()->nop(); delayed()->nop();
} }
bind(L); bind(L);
@ -85,7 +85,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
get_vm_result (oop_result1); get_vm_result (oop_result1);
} else { } else {
// be a little paranoid and clear the result // be a little paranoid and clear the result
Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
st_ptr(G0, vm_result_addr); st_ptr(G0, vm_result_addr);
} }
@ -93,7 +93,7 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
get_vm_result_2(oop_result2); get_vm_result_2(oop_result2);
} else { } else {
// be a little paranoid and clear the result // be a little paranoid and clear the result
Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
st_ptr(G0, vm_result_addr_2); st_ptr(G0, vm_result_addr_2);
} }
@ -479,8 +479,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register G4_length = G4; // Incoming Register G4_length = G4; // Incoming
Register O0_obj = O0; // Outgoing Register O0_obj = O0; // Outgoing
Address klass_lh(G5_klass, 0, ((klassOopDesc::header_size() * HeapWordSize) Address klass_lh(G5_klass, ((klassOopDesc::header_size() * HeapWordSize)
+ Klass::layout_helper_offset_in_bytes())); + Klass::layout_helper_offset_in_bytes()));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask == 0xFF, "bytewise"); assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
// Use this offset to pick out an individual byte of the layout_helper: // Use this offset to pick out an individual byte of the layout_helper:
@ -902,8 +902,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ srl(addr, CardTableModRefBS::card_shift, addr); __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif #endif
Address rs(cardtable, (address)byte_map_base); AddressLiteral rs(byte_map_base);
__ load_address(rs); // cardtable := <card table base> __ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt, __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
@ -1022,8 +1022,8 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
__ restore(); __ restore();
Address exc(G4, Runtime1::entry_for(Runtime1::unwind_exception_id)); AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, 0); __ jump_to(exc, G4);
__ delayed()->nop(); __ delayed()->nop();
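In the card-table stub above, the store address is shifted right by card_shift and the table base (byte_map_base) is materialized as an AddressLiteral so the card byte can be loaded at base + (addr >> shift). A self-contained sketch of that indexing, assuming 512-byte cards (card_shift == 9); the heap constants are invented for illustration, and the subtraction of the heap base is kept explicit where HotSpot instead biases byte_map_base.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    const int card_shift = 9;   // one card byte covers 512 heap bytes

    int main() {
      uintptr_t heap_base = 0x100000;        // pretend heap start
      size_t heap_size = 1 << 20;
      std::vector<uint8_t> cards(heap_size >> card_shift, 0xff);  // 0xff = clean

      // HotSpot biases byte_map_base so the card for an address is just
      // byte_map_base[addr >> card_shift]; here the bias stays explicit.
      uintptr_t store_addr = heap_base + 0x1234;   // oop field being written
      size_t card_index = (store_addr >> card_shift) - (heap_base >> card_shift);
      cards[card_index] = 0;                       // dirty the card
      printf("dirtied card %zu of %zu\n", card_index, cards.size());
      return 0;
    }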

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2004-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -106,8 +106,7 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
__ and3(L0, 255, L4); // Isolate L4 = method offset. __ and3(L0, 255, L4); // Isolate L4 = method offset.
__ sll(L4, LogBytesPerWord, L4); __ sll(L4, LogBytesPerWord, L4);
__ ld_ptr(L3, L4, L4); // Get address of correct virtual method __ ld_ptr(L3, L4, L4); // Get address of correct virtual method
Address method(L4, 0); __ jmpl(L4, 0, G0); // Jump to correct method.
__ jmpl(method, G0); // Jump to correct method.
__ delayed()->restore(); // Restore registers. __ delayed()->restore(); // Restore registers.
__ flush(); __ flush();
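The trampoline above isolates an 8-bit method index from L0, scales it to a word offset, loads the real method address from a table, and jumps; the edit merely drops the now-removed Address wrapper around the plain jmpl. A sketch of the same dispatch in C++, assuming a small table of function pointers:

    #include <cstdio>

    static void m0() { puts("method 0"); }
    static void m1() { puts("method 1"); }

    int main() {
      void (*vtbl[])() = { m0, m1 };   // stands in for a vtbl_list entry
      unsigned L0 = 0x2301;            // packed word, method index in the low byte
      unsigned idx = L0 & 255;         // and3(L0, 255, L4): isolate the index
      if (idx < sizeof(vtbl) / sizeof(vtbl[0])) {
        // sll + ld_ptr + jmpl collapse to one array-indexed indirect call here.
        vtbl[idx]();
      }
      return 0;
    }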

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -46,14 +46,13 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_o
// (1) the oop is old (i.e., doesn't matter for scavenges) // (1) the oop is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
assert(cached_oop == NULL || cached_oop->is_perm(), "must be old oop"); assert(cached_oop == NULL || cached_oop->is_perm(), "must be old oop");
Address cached_oop_addr(G5_inline_cache_reg, address(cached_oop)); AddressLiteral cached_oop_addrlit(cached_oop, relocInfo::none);
// Force the sethi to generate the fixed sequence so next_instruction_address works // Force the set to generate the fixed sequence so next_instruction_address works
masm->sethi(cached_oop_addr, true /* ForceRelocatable */ ); masm->patchable_set(cached_oop_addrlit, G5_inline_cache_reg);
masm->add(cached_oop_addr, G5_inline_cache_reg);
assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub"); assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub"); assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
Address entry(G3_scratch, entry_point); AddressLiteral entry(entry_point);
masm->JUMP(entry, 0); masm->JUMP(entry, G3_scratch, 0);
masm->delayed()->nop(); masm->delayed()->nop();
masm->flush(); masm->flush();
} }
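The ICStub change is driven by patchability: patchable_set, like the old sethi with ForceRelocatable, must always emit the same number of instructions even when the constant would fit a short form, so next_instruction_address and later in-place patching can rely on a fixed layout. A toy encoder making that property concrete; the Insn struct and mnemonics are invented for illustration, not a real SPARC encoding.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Insn { const char* op; int32_t imm; };

    // Short form: a single instruction when the constant fits simm13.
    static std::vector<Insn> set_(int32_t v) {
      if (v >= -4096 && v < 4096) return { {"or  %g0, simm13", v} };
      return { {"sethi hi bits", v & ~0x3ff}, {"or  low10", v & 0x3ff} };
    }

    // Patchable form: always the full sequence, regardless of the value.
    static std::vector<Insn> patchable_set_(int32_t v) {
      return { {"sethi hi bits", v & ~0x3ff}, {"or  low10", v & 0x3ff} };
    }

    int main() {
      assert(set_(42).size() == 1 && set_(1 << 20).size() == 2);
      // Fixed length no matter the constant: safe to patch in place later.
      assert(patchable_set_(42).size() == patchable_set_(1 << 20).size());
      return 0;
    }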

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,8 @@
// This file specializes the assembler with interpreter-specific macros // This file specializes the assembler with interpreter-specific macros
const Address InterpreterMacroAssembler::l_tmp( FP, 0, (frame::interpreter_frame_l_scratch_fp_offset * wordSize ) + STACK_BIAS); const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp( FP, 0, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
#else // CC_INTERP #else // CC_INTERP
#ifndef STATE #ifndef STATE
@ -78,14 +78,12 @@ void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else #else
ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
// dispatch table to use // dispatch table to use
Address tbl(G3_scratch, (address)Interpreter::dispatch_table(state)); AddressLiteral tbl(Interpreter::dispatch_table(state));
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
sethi(tbl); set(tbl, G3_scratch); // compute addr of table
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
add(tbl, tbl.base(), 0);
ld_ptr( G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
#endif #endif
} }
@ -165,8 +163,7 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
Label L; Label L;
// Check the "pending popframe condition" flag in the current thread // Check the "pending popframe condition" flag in the current thread
Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);
ld(popframe_condition_addr, scratch_reg);
// Initiate popframe handling only if it is not already being processed. If the flag // Initiate popframe handling only if it is not already being processed. If the flag
// has the popframe_processing bit set, it means that this code is called *during* popframe // has the popframe_processing bit set, it means that this code is called *during* popframe
@ -192,11 +189,10 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
void InterpreterMacroAssembler::load_earlyret_value(TosState state) { void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
Register thr_state = G4_scratch; Register thr_state = G4_scratch;
ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
thr_state); const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
const Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset())); const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
const Address oop_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_oop_offset())); const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
const Address val_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_value_offset()));
switch (state) { switch (state) {
case ltos: ld_long(val_addr, Otos_l); break; case ltos: ld_long(val_addr, Otos_l); break;
case atos: ld_ptr(oop_addr, Otos_l); case atos: ld_ptr(oop_addr, Otos_l);
@ -222,8 +218,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
if (JvmtiExport::can_force_early_return()) { if (JvmtiExport::can_force_early_return()) {
Label L; Label L;
Register thr_state = G3_scratch; Register thr_state = G3_scratch;
ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
thr_state);
tst(thr_state); tst(thr_state);
br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit; br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
delayed()->nop(); delayed()->nop();
@ -231,16 +226,14 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
// Initiate earlyret handling only if it is not already being processed. // Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code // If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter. // is called *during* earlyret handling - we don't want to reenter.
ld(Address(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())), ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
G4_scratch);
cmp(G4_scratch, JvmtiThreadState::earlyret_pending); cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
br(Assembler::notEqual, false, pt, L); br(Assembler::notEqual, false, pt, L);
delayed()->nop(); delayed()->nop();
// Call Interpreter::remove_activation_early_entry() to get the address of the // Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code // same-named entrypoint in the generated interpreter code
Address tos_addr(thr_state, 0, in_bytes(JvmtiThreadState::earlyret_tos_offset())); ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
ld(tos_addr, Otos_l1);
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1); call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
// Jump to Interpreter::_remove_activation_early_entry // Jump to Interpreter::_remove_activation_early_entry
@ -294,10 +287,9 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* tab
} else { } else {
#endif #endif
// dispatch table to use // dispatch table to use
Address tbl(G3_scratch, (address)table); AddressLiteral tbl(table);
sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
load_address(tbl); // compute addr of table set(tbl, G3_scratch); // compute addr of table
ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
#ifdef FAST_DISPATCH #ifdef FAST_DISPATCH
} }
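dispatch_Lbyte_code above scales the next bytecode by the word size, materializes the dispatch table address with set(tbl, G3_scratch), and loads the handler entry. A minimal table-dispatch analogue using function pointers; the opcode handlers are made up:

    #include <cstdio>

    static void do_iconst() { puts("push constant"); }
    static void do_iadd()   { puts("add two stack slots"); }

    int main() {
      // Stands in for Interpreter::dispatch_table(state).
      void (*dispatch_table[])() = { do_iconst, do_iadd };
      unsigned char bytecodes[] = { 0, 1, 0 };
      for (unsigned char bc : bytecodes) {
        // sll(Lbyte_code, LogBytesPerWord, ...) + ld_ptr(table, index, addr):
        // array indexing performs the scale-and-load in one step here.
        dispatch_table[bc]();
      }
      return 0;
    }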
@ -601,26 +593,17 @@ void InterpreterMacroAssembler::empty_expression_stack() {
// Reset SP by subtracting more space from Lesp. // Reset SP by subtracting more space from Lesp.
Label done; Label done;
const Address max_stack (Lmethod, 0, in_bytes(methodOopDesc::max_stack_offset()));
const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset()));
verify_oop(Lmethod); verify_oop(Lmethod);
assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
assert( G4_scratch != Gframe_size,
"Only you can prevent register aliasing!");
// A native does not need to do this, since its callee does not change SP. // A native does not need to do this, since its callee does not change SP.
ld(access_flags, Gframe_size); ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags.
btst(JVM_ACC_NATIVE, Gframe_size); btst(JVM_ACC_NATIVE, Gframe_size);
br(Assembler::notZero, false, Assembler::pt, done); br(Assembler::notZero, false, Assembler::pt, done);
delayed()->nop(); delayed()->nop();
//
// Compute max expression stack+register save area // Compute max expression stack+register save area
// lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
lduh( max_stack, Gframe_size );
if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size); // max_stack * 2 for TAGS
add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
@ -721,8 +704,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register target, Register
verify_thread(); verify_thread();
Label skip_compiled_code; Label skip_compiled_code;
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset())); const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
ld(interp_only, scratch); ld(interp_only, scratch);
tst(scratch); tst(scratch);
br(Assembler::notZero, true, Assembler::pn, skip_compiled_code); br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
@ -916,8 +898,8 @@ void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point,
Register Rscratch, Register Rscratch,
Label& ok ) { Label& ok ) {
assert(throw_entry_point != NULL, "entry point must be generated by now"); assert(throw_entry_point != NULL, "entry point must be generated by now");
Address dest(Rscratch, throw_entry_point); AddressLiteral dest(throw_entry_point);
jump_to(dest); jump_to(dest, Rscratch);
delayed()->nop(); delayed()->nop();
bind(ok); bind(ok);
} }
@ -1035,18 +1017,18 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
Label unlocked, unlock, no_unlock; Label unlocked, unlock, no_unlock;
// get the value of _do_not_unlock_if_synchronized into G1_scratch // get the value of _do_not_unlock_if_synchronized into G1_scratch
const Address do_not_unlock_if_synchronized(G2_thread, 0, const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); JavaThread::do_not_unlock_if_synchronized_offset());
ldbool(do_not_unlock_if_synchronized, G1_scratch); ldbool(do_not_unlock_if_synchronized, G1_scratch);
stbool(G0, do_not_unlock_if_synchronized); // reset the flag stbool(G0, do_not_unlock_if_synchronized); // reset the flag
// check if synchronized method // check if synchronized method
const Address access_flags(Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
interp_verify_oop(Otos_i, state, __FILE__, __LINE__); interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
push(state); // save tos push(state); // save tos
ld(access_flags, G3_scratch); ld(access_flags, G3_scratch); // Load access flags.
btst(JVM_ACC_SYNCHRONIZED, G3_scratch); btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
br( zero, false, pt, unlocked); br(zero, false, pt, unlocked);
delayed()->nop(); delayed()->nop();
// Don't unlock anything if the _do_not_unlock_if_synchronized flag // Don't unlock anything if the _do_not_unlock_if_synchronized flag
@ -1236,8 +1218,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
Register obj_reg = Object; Register obj_reg = Object;
Register mark_reg = G4_scratch; Register mark_reg = G4_scratch;
Register temp_reg = G1_scratch; Register temp_reg = G1_scratch;
Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes()); Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes()); Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
Label done; Label done;
Label slow_case; Label slow_case;
@ -1315,9 +1297,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
Register obj_reg = G3_scratch; Register obj_reg = G3_scratch;
Register mark_reg = G4_scratch; Register mark_reg = G4_scratch;
Register displaced_header_reg = G1_scratch; Register displaced_header_reg = G1_scratch;
Address lock_addr = Address(lock_reg, 0, BasicObjectLock::lock_offset_in_bytes()); Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
Address lockobj_addr = Address(lock_reg, 0, BasicObjectLock::obj_offset_in_bytes()); Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
Address mark_addr = Address(obj_reg, 0, oopDesc::mark_offset_in_bytes());
Label done; Label done;
if (UseBiasedLocking) { if (UseBiasedLocking) {
@ -1328,7 +1309,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
} }
// Test first if we are in the fast recursive case // Test first if we are in the fast recursive case
ld_ptr(lock_addr, displaced_header_reg, BasicLock::displaced_header_offset_in_bytes()); Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
ld_ptr(lock_addr, displaced_header_reg);
br_null(displaced_header_reg, true, Assembler::pn, done); br_null(displaced_header_reg, true, Assembler::pn, done);
delayed()->st_ptr(G0, lockobj_addr); // free entry delayed()->st_ptr(G0, lockobj_addr); // free entry
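The unlock fast path above reads the displaced header saved in the BasicObjectLock; a NULL displaced header marks a recursive enter, so the entry can simply be freed. A condensed model of that test, with the monitor layout reduced to a two-field struct:

    #include <cstdio>

    struct BasicObjectLock {
      void* displaced_header;   // lock_offset + displaced_header_offset above
      void* obj;                // obj_offset_in_bytes()
    };

    static void unlock_fast_path(BasicObjectLock* lock) {
      if (lock->displaced_header == nullptr) {  // br_null(displaced_header_reg, ...)
        lock->obj = nullptr;                    // delayed()->st_ptr(G0, lockobj_addr)
        puts("recursive exit: entry freed");
        return;
      }
      puts("real unlock: CAS the displaced header back into the mark word");
    }

    int main() {
      BasicObjectLock recursive{nullptr, (void*)0x1};
      unlock_fast_path(&recursive);
      return 0;
    }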
@ -1384,7 +1366,7 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
Label zero_continue; Label zero_continue;
// Test MDO to avoid the call if it is NULL. // Test MDO to avoid the call if it is NULL.
ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr); ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
test_method_data_pointer(zero_continue); test_method_data_pointer(zero_continue);
call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
set_method_data_pointer_offset(O0); set_method_data_pointer_offset(O0);
@ -1413,7 +1395,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// If the mdp is valid, it will point to a DataLayout header which is // If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also. // consistent with the bcp. The converse is highly probable also.
lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), O5); ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch); add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
add(G3_scratch, O5, G3_scratch); add(G3_scratch, O5, G3_scratch);
cmp(Lbcp, G3_scratch); cmp(Lbcp, G3_scratch);
@ -1424,7 +1406,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// %%% should use call_VM_leaf here? // %%% should use call_VM_leaf here?
//call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
Address d_save(FP, 0, -sizeof(jdouble) + STACK_BIAS); Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
stf(FloatRegisterImpl::D, Ftos_d, d_save); stf(FloatRegisterImpl::D, Ftos_d, d_save);
mov(temp_reg->after_save(), O2); mov(temp_reg->after_save(), O2);
save_thread(L7_thread_cache); save_thread(L7_thread_cache);
@ -1456,14 +1438,14 @@ void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocat
#endif #endif
// Test to see if we should create a method data oop // Test to see if we should create a method data oop
Address profile_limit(Rtmp, (address)&InvocationCounter::InterpreterProfileLimit); AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
#ifdef _LP64 #ifdef _LP64
delayed()->nop(); delayed()->nop();
sethi(profile_limit); sethi(profile_limit, Rtmp);
#else #else
delayed()->sethi(profile_limit); delayed()->sethi(profile_limit, Rtmp);
#endif #endif
ld(profile_limit, Rtmp); ld(Rtmp, profile_limit.low10(), Rtmp);
cmp(invocation_count, Rtmp); cmp(invocation_count, Rtmp);
br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
delayed()->nop(); delayed()->nop();
@ -1521,7 +1503,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
Register bumped_count, Register bumped_count,
bool decrement) { bool decrement) {
// Locate the counter at a fixed offset from the mdp: // Locate the counter at a fixed offset from the mdp:
Address counter(ImethodDataPtr, 0, constant); Address counter(ImethodDataPtr, constant);
increment_mdp_data_at(counter, bumped_count, decrement); increment_mdp_data_at(counter, bumped_count, decrement);
} }
@ -1535,7 +1517,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
bool decrement) { bool decrement) {
// Add the constant to reg to get the offset. // Add the constant to reg to get the offset.
add(ImethodDataPtr, reg, scratch2); add(ImethodDataPtr, reg, scratch2);
Address counter(scratch2, 0, constant); Address counter(scratch2, constant);
increment_mdp_data_at(counter, bumped_count, decrement); increment_mdp_data_at(counter, bumped_count, decrement);
} }
@ -2201,7 +2183,7 @@ int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
Address InterpreterMacroAssembler::top_most_monitor() { Address InterpreterMacroAssembler::top_most_monitor() {
return Address(FP, 0, top_most_monitor_byte_offset()); return Address(FP, top_most_monitor_byte_offset());
} }
@ -2214,15 +2196,15 @@ void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
assert(UseCompiler, "incrementing must be useful"); assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP #ifdef CC_INTERP
Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset() Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
Address be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset() Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
#else #else
Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset() Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset() Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
#endif /* CC_INTERP */ #endif /* CC_INTERP */
int delta = InvocationCounter::count_increment; int delta = InvocationCounter::count_increment;
@ -2250,15 +2232,15 @@ void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Reg
void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
assert(UseCompiler, "incrementing must be useful"); assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP #ifdef CC_INTERP
Address be_counter(G5_method, 0, in_bytes(methodOopDesc::backedge_counter_offset() Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
Address inv_counter(G5_method, 0, in_bytes(methodOopDesc::invocation_counter_offset() Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
#else #else
Address be_counter(Lmethod, 0, in_bytes(methodOopDesc::backedge_counter_offset() Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
Address inv_counter(Lmethod, 0, in_bytes(methodOopDesc::invocation_counter_offset() Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
+ InvocationCounter::counter_offset())); InvocationCounter::counter_offset());
#endif /* CC_INTERP */ #endif /* CC_INTERP */
int delta = InvocationCounter::count_increment; int delta = InvocationCounter::count_increment;
// Load each counter in a register // Load each counter in a register
@ -2289,7 +2271,7 @@ void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_c
assert_different_registers(backedge_count, Rtmp, branch_bcp); assert_different_registers(backedge_count, Rtmp, branch_bcp);
assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
Address limit(Rtmp, address(&InvocationCounter::InterpreterBackwardBranchLimit)); AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
load_contents(limit, Rtmp); load_contents(limit, Rtmp);
cmp(backedge_count, Rtmp); cmp(backedge_count, Rtmp);
br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow); br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
@ -2435,9 +2417,7 @@ void InterpreterMacroAssembler::notify_method_entry() {
if (JvmtiExport::can_post_interpreter_events()) { if (JvmtiExport::can_post_interpreter_events()) {
Label L; Label L;
Register temp_reg = O5; Register temp_reg = O5;
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
ld(interp_only, temp_reg); ld(interp_only, temp_reg);
tst(temp_reg); tst(temp_reg);
br(zero, false, pt, L); br(zero, false, pt, L);
@ -2489,9 +2469,7 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
Label L; Label L;
Register temp_reg = O5; Register temp_reg = O5;
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
const Address interp_only (G2_thread, 0, in_bytes(JavaThread::interp_only_mode_offset()));
ld(interp_only, temp_reg); ld(interp_only, temp_reg);
tst(temp_reg); tst(temp_reg);
br(zero, false, pt, L); br(zero, false, pt, L);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -105,7 +105,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
// the handle for a receiver will never be null // the handle for a receiver will never be null
bool do_NULL_check = offset() != 0 || is_static(); bool do_NULL_check = offset() != 0 || is_static();
Address h_arg = Address(Llocals, 0, Interpreter::local_offset_in_bytes(offset())); Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
__ ld_ptr(h_arg, Rtmp1); __ ld_ptr(h_arg, Rtmp1);
#ifdef ASSERT #ifdef ASSERT
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
@ -120,14 +120,14 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
} }
#endif // ASSERT #endif // ASSERT
if (!do_NULL_check) { if (!do_NULL_check) {
__ add(h_arg, Rtmp2); __ add(h_arg.base(), h_arg.disp(), Rtmp2);
} else { } else {
if (Rtmp1 == Rtmp2) if (Rtmp1 == Rtmp2)
__ tst(Rtmp1); __ tst(Rtmp1);
else __ addcc(G0, Rtmp1, Rtmp2); // optimize mov/test pair else __ addcc(G0, Rtmp1, Rtmp2); // optimize mov/test pair
Label L; Label L;
__ brx(Assembler::notZero, true, Assembler::pt, L); __ brx(Assembler::notZero, true, Assembler::pt, L);
__ delayed()->add(h_arg, Rtmp2); __ delayed()->add(h_arg.base(), h_arg.disp(), Rtmp2);
__ bind(L); __ bind(L);
} }
__ store_ptr_argument(Rtmp2, jni_arg); // this is often a no-op __ store_ptr_argument(Rtmp2, jni_arg); // this is often a no-op
@ -140,10 +140,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprin
iterate(fingerprint); iterate(fingerprint);
// return result handler // return result handler
Address result_handler(Lscratch, Interpreter::result_handler(method()->result_type())); AddressLiteral result_handler(Interpreter::result_handler(method()->result_type()));
__ sethi(result_handler); __ sethi(result_handler, Lscratch);
__ retl(); __ retl();
__ delayed()->add(result_handler, result_handler.base()); __ delayed()->add(Lscratch, result_handler.low10(), Lscratch);
__ flush(); __ flush();
} }
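pass_object() implements the JNI rule that an object argument is passed as the address of the local slot holding it, or as NULL when the slot itself holds NULL (the receiver is exempt from the check). The branch with add(h_arg.base(), h_arg.disp(), Rtmp2) in the delay slot computes this without a separate move; in plain C++ the logic is simply the following sketch, with hypothetical oop and slot types:

    #include <cstdio>

    typedef void* oop;   // stands in for the interpreter's oop type

    static void* jni_object_arg(oop* local_slot, bool needs_null_check) {
      if (!needs_null_check) return local_slot;  // receiver: handle always valid
      // brx(notZero, ...) with the add in the delay slot: take the slot
      // address only when the slot is non-NULL, else pass NULL.
      return (*local_slot != nullptr) ? (void*)local_slot : nullptr;
    }

    int main() {
      oop obj = (oop)0x1234, null_obj = nullptr;
      printf("non-null arg -> %p\n", jni_object_arg(&obj, true));
      printf("null arg     -> %p\n", jni_object_arg(&null_obj, true));
      return 0;
    }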

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2004-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -57,10 +57,10 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Label label1, label2; Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
Address ca(O3, cnt_addr); __ sethi (cnt_addrlit, O3);
__ sethi (ca); Address cnt_addr(O3, cnt_addrlit.low10());
__ ld (ca, G4); __ ld (cnt_addr, G4);
__ andcc (G4, 1, G0); __ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1); __ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4); __ delayed()->srl (O2, 2, O4);
@ -77,7 +77,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
__ ld (ca, O5); __ ld (cnt_addr, O5);
__ cmp (O5, G4); __ cmp (O5, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2); __ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1); __ delayed()->mov (O7, G1);
@ -136,10 +136,10 @@ address JNI_FastGetField::generate_fast_get_long_field() {
Label label1, label2; Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
Address ca(G3, cnt_addr); __ sethi (cnt_addrlit, G3);
__ sethi (ca); Address cnt_addr(G3, cnt_addrlit.low10());
__ ld (ca, G4); __ ld (cnt_addr, G4);
__ andcc (G4, 1, G0); __ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1); __ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4); __ delayed()->srl (O2, 2, O4);
@ -159,7 +159,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
__ ldx (O5, 0, O3); __ ldx (O5, 0, O3);
#endif #endif
__ ld (ca, G1); __ ld (cnt_addr, G1);
__ cmp (G1, G4); __ cmp (G1, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2); __ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1); __ delayed()->mov (O7, G1);
@ -208,10 +208,10 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
Label label1, label2; Label label1, label2;
address cnt_addr = SafepointSynchronize::safepoint_counter_addr(); AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
Address ca(O3, cnt_addr); __ sethi (cnt_addrlit, O3);
__ sethi (ca); Address cnt_addr(O3, cnt_addrlit.low10());
__ ld (ca, G4); __ ld (cnt_addr, G4);
__ andcc (G4, 1, G0); __ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1); __ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4); __ delayed()->srl (O2, 2, O4);
@ -225,7 +225,7 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
__ ld (ca, O5); __ ld (cnt_addr, O5);
__ cmp (O5, G4); __ cmp (O5, G4);
__ br (Assembler::notEqual, false, Assembler::pn, label2); __ br (Assembler::notEqual, false, Assembler::pn, label2);
__ delayed()->mov (O7, G1); __ delayed()->mov (O7, G1);
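Each fast accessor above brackets its unguarded field load between two reads of the safepoint counter: if the counter is odd (a safepoint in progress) or changes across the load, the result is discarded and the slow path is taken. That is a seqlock reader; a sketch with std::atomic, where a writer (not shown) would increment the counter to odd before moving objects and back to even afterwards:

    #include <atomic>
    #include <cstdio>

    std::atomic<unsigned> safepoint_counter{0};  // even = safe, odd = safepoint running
    int the_field = 42;                          // field a GC might be moving

    static bool fast_get(int* out) {
      unsigned before = safepoint_counter.load(std::memory_order_acquire);
      if (before & 1) return false;              // andcc(G4, 1, G0): safepoint active
      int v = the_field;                         // the speculative, unlocked load
      unsigned after = safepoint_counter.load(std::memory_order_acquire);
      if (after != before) return false;         // world moved under us: go slow
      *out = v;
      return true;
    }

    int main() {
      int v = 0;
      printf(fast_get(&v) ? "fast path: %d\n" : "slow path\n", v);
      return 0;
    }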

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -38,8 +38,7 @@ void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
destreg = inv_rd(*(unsigned int *)instaddr); destreg = inv_rd(*(unsigned int *)instaddr);
// Generate the new sequence
Address dest( destreg, (address)x ); _masm->patchable_sethi(x, destreg);
_masm->sethi( dest, true );
ICache::invalidate_range(instaddr, 7 * BytesPerInstWord); ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
} }
@ -227,8 +226,8 @@ void NativeFarCall::set_destination(address dest) {
CodeBuffer buf(addr_at(0), instruction_size + 1); CodeBuffer buf(addr_at(0), instruction_size + 1);
MacroAssembler* _masm = new MacroAssembler(&buf); MacroAssembler* _masm = new MacroAssembler(&buf);
// Generate the new sequence // Generate the new sequence
Address(O7, dest); AddressLiteral(dest);
_masm->jumpl_to(dest, O7); _masm->jumpl_to(dest, O7, O7);
ICache::invalidate_range(addr_at(0), instruction_size ); ICache::invalidate_range(addr_at(0), instruction_size );
#endif #endif
} }
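NativeFarCall::set_destination regenerates the branch sequence in place and then calls ICache::invalidate_range, because the stores go through the data cache and the CPU could otherwise keep executing stale instructions. On GCC/Clang the portable equivalent is the __builtin___clear_cache intrinsic; a hedged sketch of patching a code buffer, where the 4-byte encoding is a placeholder and not real SPARC:

    #include <cstdint>
    #include <cstring>

    // Patch a 4-byte pseudo-instruction at 'site' and flush the icache range.
    static void patch_call(uint8_t* site, uint32_t new_insn) {
      std::memcpy(site, &new_insn, sizeof(new_insn));   // rewrite the instruction
      // Without this, a core may fetch the stale bytes it already cached.
      __builtin___clear_cache(reinterpret_cast<char*>(site),
                              reinterpret_cast<char*>(site + sizeof(new_insn)));
    }

    int main() {
      static uint8_t code[4];
      patch_call(code, 0x01000000u);  // placeholder encoding
      return 0;
    }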
@ -361,10 +360,12 @@ void NativeMovConstReg::test() {
VM_Version::allow_all(); VM_Version::allow_all();
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
a->add(I3, low10(0xaaaabbbb), I3); a->sethi(al1, I3);
a->sethi(0xccccdddd, O2, true, RelocationHolder::none); a->add(I3, al1.low10(), I3);
a->add(O2, low10(0xccccdddd), O2); AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
a->sethi(al2, O2);
a->add(O2, al2.low10(), O2);
nm = nativeMovConstReg_at( cb.code_begin() ); nm = nativeMovConstReg_at( cb.code_begin() );
nm->print(); nm->print();
@ -468,12 +469,14 @@ void NativeMovConstRegPatching::test() {
VM_Version::allow_all(); VM_Version::allow_all();
a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
a->sethi(al1, I3);
a->nop(); a->nop();
a->add(I3, low10(0xaaaabbbb), I3); a->add(I3, al1.low10(), I3);
a->sethi(0xccccdddd, O2, true, RelocationHolder::none); AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
a->sethi(al2, O2);
a->nop(); a->nop();
a->add(O2, low10(0xccccdddd), O2); a->add(O2, al2.low10(), O2);
nm = nativeMovConstRegPatching_at( cb.code_begin() ); nm = nativeMovConstRegPatching_at( cb.code_begin() );
nm->print(); nm->print();
@@ -562,51 +565,53 @@ void NativeMovRegMem::test() {
   VM_Version::allow_all();
-  a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
+  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
+  a->ldsw( G5, al1.low10(), G4 ); idx++;
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldsw( G5, I3, G4 ); idx++;
-  a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldsb( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldsb( G5, I3, G4 ); idx++;
-  a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldsh( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldsh( G5, I3, G4 ); idx++;
-  a->lduw( G5, low10(0xffffffff), G4 ); idx++;
+  a->lduw( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->lduw( G5, I3, G4 ); idx++;
-  a->ldub( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldub( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldub( G5, I3, G4 ); idx++;
-  a->lduh( G5, low10(0xffffffff), G4 ); idx++;
+  a->lduh( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->lduh( G5, I3, G4 ); idx++;
-  a->ldx( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldx( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldx( G5, I3, G4 ); idx++;
-  a->ldd( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldd( G5, al1.low10(), G4 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldd( G5, I3, G4 ); idx++;
   a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
-  a->stw( G5, G4, low10(0xffffffff) ); idx++;
+  a->stw( G5, G4, al1.low10() ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->stw( G5, G4, I3 ); idx++;
-  a->stb( G5, G4, low10(0xffffffff) ); idx++;
+  a->stb( G5, G4, al1.low10() ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->stb( G5, G4, I3 ); idx++;
-  a->sth( G5, G4, low10(0xffffffff) ); idx++;
+  a->sth( G5, G4, al1.low10() ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->sth( G5, G4, I3 ); idx++;
-  a->stx( G5, G4, low10(0xffffffff) ); idx++;
+  a->stx( G5, G4, al1.low10() ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->stx( G5, G4, I3 ); idx++;
-  a->std( G5, G4, low10(0xffffffff) ); idx++;
+  a->std( G5, G4, al1.low10() ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->std( G5, G4, I3 ); idx++;
   a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
   a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
   nm = nativeMovRegMem_at( cb.code_begin() );
@@ -705,51 +710,52 @@ void NativeMovRegMemPatching::test() {
   VM_Version::allow_all();
-  a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
+  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
+  a->ldsw( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldsw( G5, I3, G4 ); idx++;
-  a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldsb( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldsb( G5, I3, G4 ); idx++;
-  a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldsh( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldsh( G5, I3, G4 ); idx++;
-  a->lduw( G5, low10(0xffffffff), G4 ); idx++;
+  a->lduw( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->lduw( G5, I3, G4 ); idx++;
-  a->ldub( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldub( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldub( G5, I3, G4 ); idx++;
-  a->lduh( G5, low10(0xffffffff), G4 ); idx++;
+  a->lduh( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->lduh( G5, I3, G4 ); idx++;
-  a->ldx( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldx( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldx( G5, I3, G4 ); idx++;
-  a->ldd( G5, low10(0xffffffff), G4 ); idx++;
+  a->ldd( G5, al.low10(), G4); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldd( G5, I3, G4 ); idx++;
   a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
-  a->stw( G5, G4, low10(0xffffffff) ); idx++;
+  a->stw( G5, G4, al.low10()); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->stw( G5, G4, I3 ); idx++;
-  a->stb( G5, G4, low10(0xffffffff) ); idx++;
+  a->stb( G5, G4, al.low10()); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->stb( G5, G4, I3 ); idx++;
-  a->sth( G5, G4, low10(0xffffffff) ); idx++;
+  a->sth( G5, G4, al.low10()); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->sth( G5, G4, I3 ); idx++;
-  a->stx( G5, G4, low10(0xffffffff) ); idx++;
+  a->stx( G5, G4, al.low10()); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->stx( G5, G4, I3 ); idx++;
-  a->std( G5, G4, low10(0xffffffff) ); idx++;
+  a->std( G5, G4, al.low10()); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->std( G5, G4, I3 ); idx++;
   a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
-  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
+  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
   a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
   nm = nativeMovRegMemPatching_at( cb.code_begin() );
@@ -833,11 +839,12 @@ void NativeJump::test() {
   VM_Version::allow_all();
-  a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
-  a->jmpl(I3, low10(0x7fffbbbb), G0, RelocationHolder::none);
+  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
+  a->sethi(al, I3);
+  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
   a->delayed()->nop();
-  a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
-  a->jmpl(I3, low10(0x7fffbbbb), L3, RelocationHolder::none);
+  a->sethi(al, I3);
+  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
   a->delayed()->nop();
   nj = nativeJump_at( cb.code_begin() );
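
(Aside on the pattern above, which repeats throughout this changeset: the old code passed a raw constant plus relocation flags to every sethi/add/jmpl, while the new code bundles the literal and its relocation type into one AddressLiteral object. Below is a minimal, self-contained sketch of that idea; the class name matches the diff, but the fields and helpers are simplified stand-ins, not the real HotSpot declaration.)

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for HotSpot's AddressLiteral: it pairs a literal
    // address with its relocation kind, so call sites stop repeating raw
    // constants and reloc flags at every sethi/add site.
    struct AddressLiteral {
      uintptr_t value;
      int       rtype;  // stands in for relocInfo::relocType
      AddressLiteral(uintptr_t v, int t) : value(v), rtype(t) {}
      uint32_t hi22()  const { return (uint32_t)value >> 10; }  // bits emitted by sethi
      int      low10() const { return (int)(value & 0x3ff); }   // immediate for add/ld/st
    };

    int main() {
      AddressLiteral al(0xccccdddd, /*external_word_type=*/ 1);
      uint32_t reg = al.hi22() << 10;  // sethi(al, O2): upper 22 bits
      reg += al.low10();               // add(O2, al.low10(), O2): low 10 bits
      printf("0x%x\n", reg);           // prints 0xccccdddd
      return 0;
    }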
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,13 +99,6 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
     break;
   }
   ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
-#ifdef COMPILER2
-  // [RGV] Someone must have missed putting in a reloc entry for the
-  // add in compiler2.
-  inst2 = ip->long_at( NativeMovConstReg::add_offset );
-  guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
-  ip->set_long_at(NativeMovConstReg::add_offset,ip->set_data32_simm13( inst2, (intptr_t)x+o));
-#endif
 #else
   guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
   inst &= ~Assembler::hi22( -1);
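
(The block removed above re-patched the add of a sethi/add pair; the arithmetic being patched is the hi22/low10 split. A rough, runnable illustration of that split, using placeholder instruction words rather than real SPARC encodings:)

    #include <cstdint>
    #include <cstdio>

    // Rough model of pd_set_data_value rewriting a 32-bit sethi/add pair.
    // Field layouts are simplified; the real code goes through the
    // Assembler helpers named in the diff (hi22, set_data32_simm13, ...).
    static uint32_t patch_hi22(uint32_t inst, uint32_t x)   { return (inst & ~0x3fffffu) | (x >> 10); }
    static uint32_t patch_simm13(uint32_t inst, uint32_t x) { return (inst & ~0x1fffu) | (x & 0x3ff); }

    int main() {
      uint32_t sethi_inst = 0x05000000;  // placeholder sethi, empty imm22 field
      uint32_t add_inst   = 0x8600a000;  // placeholder add, empty simm13 field
      uint32_t x = 0xdeadbeef;           // new data value to patch in
      sethi_inst = patch_hi22(sethi_inst, x);
      add_inst   = patch_simm13(add_inst, x);
      uint32_t rebuilt = ((sethi_inst & 0x3fffff) << 10) | (add_inst & 0x3ff);
      printf("0x%x\n", rebuilt);         // prints 0xdeadbeef
      return 0;
    }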
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,8 @@ void OptoRuntime::generate_exception_blob() {
   int start = __ offset();
   __ verify_thread();
-  __ st_ptr(Oexception, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())));
+  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
-  __ st_ptr(Oissuing_pc, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())));
+  __ st_ptr(Oissuing_pc, G2_thread, JavaThread::exception_pc_offset());
   // This call does all the hard work. It checks if an exception catch
   // exists in the method.
@@ -120,19 +120,19 @@ void OptoRuntime::generate_exception_blob() {
   // Since this may be the deopt blob we must set O7 to look like we returned
   // from the original pc that threw the exception
-  __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())), O7);
+  __ ld_ptr(G2_thread, JavaThread::exception_pc_offset(), O7);
   __ sub(O7, frame::pc_return_offset, O7);
   assert(Assembler::is_simm13(in_bytes(JavaThread::exception_oop_offset())), "exception offset overflows simm13, following ld instruction cannot be in delay slot");
-  __ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())), Oexception); // O0
+  __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception); // O0
 #ifdef ASSERT
-  __ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_handler_pc_offset())));
+  __ st_ptr(G0, G2_thread, JavaThread::exception_handler_pc_offset());
-  __ st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_pc_offset())));
+  __ st_ptr(G0, G2_thread, JavaThread::exception_pc_offset());
 #endif
   __ JMP(G3_scratch, 0);
   // Clear the exception oop so GC no longer processes it as a root.
-  __ delayed()->st_ptr(G0, Address(G2_thread, 0, in_bytes(JavaThread::exception_oop_offset())));
+  __ delayed()->st_ptr(G0, G2_thread, JavaThread::exception_oop_offset());
   // -------------
   // make sure all code is generated
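
(The st_ptr/ld_ptr rewrites in this hunk lean on overloads that take a base register and a ByteSize offset directly, instead of an explicit Address(..., 0, in_bytes(...)) temporary. A toy model of that overload layering follows; the types and printed syntax are illustrative, not the HotSpot declarations.)

    #include <cstdio>

    struct ByteSize { int v; };
    inline int in_bytes(ByteSize b) { return b.v; }

    struct Address {
      int base_reg, disp;
      Address(int base, int d) : base_reg(base), disp(d) {}
    };

    struct MacroAssembler {
      // fundamental form: explicit Address
      void st_ptr(int src_reg, const Address& a) {
        printf("st %%r%d, [%%r%d + %d]\n", src_reg, a.base_reg, a.disp);
      }
      // convenience form: builds the Address itself, so call sites shrink
      void st_ptr(int src_reg, int base, ByteSize off) {
        st_ptr(src_reg, Address(base, in_bytes(off)));
      }
    };

    int main() {
      MacroAssembler masm;
      ByteSize exception_oop_offset = {712};  // made-up offset value
      masm.st_ptr(/*Oexception=*/ 8, /*G2_thread=*/ 2, exception_oop_offset);
      return 0;
    }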
View file
@@ -625,9 +625,9 @@ void AdapterGenerator::patch_callers_callsite() {
   __ mov(I7, O1); // VM needs caller's callsite
   // Must be a leaf call...
   // can be very far once the blob has been relocated
-  Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
+  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
   __ relocate(relocInfo::runtime_call_type);
-  __ jumpl_to(dest, O7);
+  __ jumpl_to(dest, O7, O7);
   __ delayed()->mov(G2_thread, L7_thread_cache);
   __ mov(L7_thread_cache, G2_thread);
   __ mov(L1, G1);
@@ -1152,7 +1152,7 @@ void AdapterGenerator::gen_i2c_adapter(
 #ifndef _LP64
   if (g3_crushed) {
     // Rats load was wasted, at least it is in cache...
-    __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
+    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
   }
 #endif /* _LP64 */
@@ -1165,7 +1165,7 @@ void AdapterGenerator::gen_i2c_adapter(
   // we try and find the callee by normal means a safepoint
   // is possible. So we stash the desired callee in the thread
   // and the vm will find there should this case occur.
-  Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset()));
+  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
   __ st_ptr(G5_method, callee_target_addr);
   if (StressNonEntrant) {
@@ -1218,7 +1218,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   Register R_temp = G1; // another scratch register
 #endif
-  Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub());
+  AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
   __ verify_oop(O0);
   __ verify_oop(G5_method);
@@ -1240,7 +1240,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   Label ok, ok2;
   __ brx(Assembler::equal, false, Assembler::pt, ok);
   __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
-  __ jump_to(ic_miss);
+  __ jump_to(ic_miss, G3_scratch);
   __ delayed()->nop();
   __ bind(ok);
@@ -1251,7 +1251,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   __ bind(ok2);
   __ br_null(G3_scratch, false, __ pt, skip_fixup);
   __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
-  __ jump_to(ic_miss);
+  __ jump_to(ic_miss, G3_scratch);
   __ delayed()->nop();
 }
@@ -1444,8 +1444,8 @@ static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_o
   // without calling into the VM: it's the empty function. Just pop this
   // frame and then jump to forward_exception_entry; O7 will contain the
   // native caller's return PC.
-  Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry());
+  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
-  __ jump_to(exception_entry);
+  __ jump_to(exception_entry, G3_scratch);
   __ delayed()->restore(); // Pop this frame off.
   __ bind(L);
 }
@@ -1822,14 +1822,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 {
   Label L;
   const Register temp_reg = G3_scratch;
-  Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
+  AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
   __ verify_oop(O0);
   __ load_klass(O0, temp_reg);
   __ cmp(temp_reg, G5_inline_cache_reg);
   __ brx(Assembler::equal, true, Assembler::pt, L);
   __ delayed()->nop();
-  __ jump_to(ic_miss, 0);
+  __ jump_to(ic_miss, temp_reg);
   __ delayed()->nop();
   __ align(CodeEntryAlignment);
   __ bind(L);
@@ -2261,21 +2261,19 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Transition from _thread_in_Java to _thread_in_native.
   __ set(_thread_in_native, G3_scratch);
-  __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
+  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
   // We flushed the windows ages ago now mark them as flushed
   // mark windows as flushed
   __ set(JavaFrameAnchor::flushed, G3_scratch);
-  Address flags(G2_thread,
-                0,
-                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
+  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
 #ifdef _LP64
-  Address dest(O7, method->native_function());
+  AddressLiteral dest(method->native_function());
   __ relocate(relocInfo::runtime_call_type);
-  __ jumpl_to(dest, O7);
+  __ jumpl_to(dest, O7, O7);
 #else
   __ call(method->native_function(), relocInfo::runtime_call_type);
 #endif
@@ -2316,7 +2314,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // Block, if necessary, before resuming in _thread_in_Java state.
   // In order for GC to work, don't clear the last_Java_sp until after blocking.
   { Label no_block;
-    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
+    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
     // Switch thread to "native transition" state before reading the synchronization state.
     // This additional state is necessary because reading and testing the synchronization
@@ -2326,7 +2324,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Thread A is resumed to finish this native method, but doesn't block here since it
     // didn't see any synchronization is progress, and escapes.
     __ set(_thread_in_native_trans, G3_scratch);
-    __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
+    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
     if(os::is_MP()) {
       if (UseMembar) {
         // Force this write out before the read below
@@ -2343,10 +2341,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
     Label L;
-    Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
+    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
     __ br(Assembler::notEqual, false, Assembler::pn, L);
-    __ delayed()->
-      ld(suspend_state, G3_scratch);
+    __ delayed()->ld(suspend_state, G3_scratch);
     __ cmp(G3_scratch, 0);
     __ br(Assembler::equal, false, Assembler::pt, no_block);
     __ delayed()->nop();
@@ -2372,11 +2369,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ set(_thread_in_Java, G3_scratch);
-  __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset()));
+  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
   Label no_reguard;
-  __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch);
+  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
   __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
   __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
   __ delayed()->nop();
@@ -2684,14 +2681,14 @@ nmethod *SharedRuntime::generate_dtrace_nmethod(
 {
   Label L;
   const Register temp_reg = G3_scratch;
-  Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
+  AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
   __ verify_oop(O0);
   __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
   __ cmp(temp_reg, G5_inline_cache_reg);
   __ brx(Assembler::equal, true, Assembler::pt, L);
   __ delayed()->nop();
-  __ jump_to(ic_miss, 0);
+  __ jump_to(ic_miss, temp_reg);
   __ delayed()->nop();
   __ align(CodeEntryAlignment);
   __ bind(L);
@@ -3155,15 +3152,13 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
   // Do this after the caller's return address is on top of stack
   if (UseStackBanging) {
     // Get total frame size for interpreted frames
-    __ ld(Address(O2UnrollBlock, 0,
-                  Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
+    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
     __ bang_stack_size(O4, O3, G3_scratch);
   }
-  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
+  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
-  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);
+  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
-  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);
+  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
   // Adjust old interpreter frame to make space for new frame's extra java locals
   //
@@ -3176,7 +3171,7 @@ static void make_new_frames(MacroAssembler* masm, bool deopt) {
   // for each frame we create and keep up the illusion every where.
   //
-  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
+  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
   __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
   __ sub(SP, O7, SP);
@@ -3225,9 +3220,9 @@ void SharedRuntime::generate_deopt_blob() {
   Register I5exception_tmp = I5;
   Register G4exception_tmp = G4_scratch;
   int frame_size_words;
-  Address saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS);
+  Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
 #if !defined(_LP64) && defined(COMPILER2)
-  Address saved_Greturn1_addr(FP, 0, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
+  Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
 #endif
   Label cont;
@@ -3289,7 +3284,7 @@ void SharedRuntime::generate_deopt_blob() {
   // save exception oop in JavaThread and fall through into the
   // exception_in_tls case since they are handled in same way except
   // for where the pending exception is kept.
-  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
   //
   // Vanilla deoptimization with an exception pending in exception_oop
@@ -3306,7 +3301,7 @@ void SharedRuntime::generate_deopt_blob() {
   {
     // verify that there is really an exception oop in exception_oop
     Label has_exception;
-    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
+    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
     __ br_notnull(Oexception, false, Assembler::pt, has_exception);
     __ delayed()-> nop();
     __ stop("no exception in thread");
@@ -3314,7 +3309,7 @@ void SharedRuntime::generate_deopt_blob() {
   // verify that there is no pending exception
   Label no_pending_exception;
-  Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
+  Address exception_addr(G2_thread, Thread::pending_exception_offset());
   __ ld_ptr(exception_addr, Oexception);
   __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
   __ delayed()->nop();
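
(A recurring signature change in this file: jump_to and jumpl_to now take an explicit scratch register, because an AddressLiteral, unlike the old Address, carries no register for the sethi half of the jump sequence to clobber. A sketch of that control flow with simplified types; only the shape is taken from the diff.)

    #include <cstdio>

    struct AddressLiteral { unsigned long target; };

    struct MacroAssembler {
      void sethi(const AddressLiteral& al, int temp_reg) {
        printf("sethi %%hi(0x%lx), %%r%d\n", al.target, temp_reg);
      }
      void jmpl(int base_reg, int lo, int link_reg) {
        printf("jmpl %%r%d + %d, %%r%d\n", base_reg, lo, link_reg);
      }
      // The caller names the temp that receives the materialized address.
      void jump_to(const AddressLiteral& al, int temp_reg) {
        sethi(al, temp_reg);
        jmpl(temp_reg, (int)(al.target & 0x3ff), /*G0=*/ 0);  // jump, no link
      }
    };

    int main() {
      MacroAssembler masm;
      AddressLiteral ic_miss = {0x7fffbbbbUL};
      masm.jump_to(ic_miss, /*G3_scratch=*/ 3);
      return 0;
    }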
View file
@@ -980,8 +980,8 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocTyp
   // This code sequence is relocatable to any address, even on LP64.
   if ( force_far_call ) {
     __ relocate(rtype);
-    Address dest(O7, (address)entry_point);
-    __ jumpl_to(dest, O7);
+    AddressLiteral dest(entry_point);
+    __ jumpl_to(dest, O7, O7);
   }
   else
 #endif
@@ -1031,17 +1031,6 @@ void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocTyp
 void emit_lo(CodeBuffer &cbuf, int val) { }
 void emit_hi(CodeBuffer &cbuf, int val) { }
-void emit_ptr(CodeBuffer &cbuf, intptr_t val, Register reg, bool ForceRelocatable) {
-  MacroAssembler _masm(&cbuf);
-  if (ForceRelocatable) {
-    Address addr(reg, (address)val);
-    __ sethi(addr, ForceRelocatable);
-    __ add(addr, reg);
-  } else {
-    __ set(val, reg);
-  }
-}
 //=============================================================================
@@ -1149,8 +1138,8 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   // If this does safepoint polling, then do it here
   if( do_polling() && ra_->C->is_method_compilation() ) {
-    Address polling_page(L0, (address)os::get_polling_page());
-    __ sethi(polling_page, false);
+    AddressLiteral polling_page(os::get_polling_page());
+    __ sethi(polling_page, L0);
     __ relocate(relocInfo::poll_return_type);
     __ ld_ptr( L0, 0, G0 );
   }
@@ -1576,8 +1565,8 @@ void emit_java_to_interp(CodeBuffer &cbuf ) {
   __ set_oop(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
   __ set_inst_mark();
-  Address a(G3, (address)-1);
-  __ JUMP(a, 0);
+  AddressLiteral addrlit(-1);
+  __ JUMP(addrlit, G3, 0);
   __ delayed()->nop();
@@ -1662,7 +1651,7 @@ uint size_deopt_handler() {
 // Emit exception handler code.
 int emit_exception_handler(CodeBuffer& cbuf) {
   Register temp_reg = G3;
-  Address exception_blob(temp_reg, OptoRuntime::exception_blob()->instructions_begin());
+  AddressLiteral exception_blob(OptoRuntime::exception_blob()->instructions_begin());
   MacroAssembler _masm(&cbuf);
   address base =
@@ -1671,7 +1660,7 @@ int emit_exception_handler(CodeBuffer& cbuf) {
   int offset = __ offset();
-  __ JUMP(exception_blob, 0); // sethi;jmp
+  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
   __ delayed()->nop();
   assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
@@ -1685,7 +1674,7 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
   // Can't use any of the current frame's registers as we may have deopted
   // at a poll and everything (including G3) can be live.
   Register temp_reg = L0;
-  Address deopt_blob(temp_reg, SharedRuntime::deopt_blob()->unpack());
+  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
   MacroAssembler _masm(&cbuf);
   address base =
@@ -1694,7 +1683,7 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
   int offset = __ offset();
   __ save_frame(0);
-  __ JUMP(deopt_blob, 0); // sethi;jmp
+  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
   __ delayed()->restore();
   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
@@ -2261,9 +2250,8 @@ encode %{
   address table_base = __ address_table_constant(_index2label);
   RelocationHolder rspec = internal_word_Relocation::spec(table_base);
-  // Load table address
-  Address the_pc(table_reg, table_base, rspec);
-  __ load_address(the_pc);
+  // Move table address into a register.
+  __ set(table_base, table_reg, rspec);
   // Jump to base address + switch value
   __ ld_ptr(table_reg, switch_reg, table_reg);
@@ -2402,13 +2390,13 @@ encode %{
   // The 64 bit pointer is stored in the generated code stream
   enc_class SetPtr( immP src, iRegP rd ) %{
     Register dest = reg_to_register_object($rd$$reg);
+    MacroAssembler _masm(&cbuf);
     // [RGV] This next line should be generated from ADLC
     if ( _opnds[1]->constant_is_oop() ) {
       intptr_t val = $src$$constant;
-      MacroAssembler _masm(&cbuf);
       __ set_oop_constant((jobject)val, dest);
     } else { // non-oop pointers, e.g. card mark base, heap top
-      emit_ptr(cbuf, $src$$constant, dest, /*ForceRelocatable=*/ false);
+      __ set($src$$constant, dest);
     }
   %}
@@ -2789,46 +2777,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
     __ set64( $src$$constant, dest, temp );
   %}
-  enc_class LdImmF(immF src, regF dst, o7RegP tmp) %{ // Load Immediate
-    address float_address = MacroAssembler(&cbuf).float_constant($src$$constant);
-    RelocationHolder rspec = internal_word_Relocation::spec(float_address);
-#ifdef _LP64
-    Register tmp_reg = reg_to_register_object($tmp$$reg);
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit_ptr(cbuf, (intptr_t)float_address, tmp_reg, /*ForceRelocatable=*/ true);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, $tmp$$reg, 0 );
-#else // _LP64
-    uint *code;
-    int tmp_reg = $tmp$$reg;
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) float_address );
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::ldf_op3, tmp_reg, (intptr_t) float_address );
-#endif // _LP64
-  %}
-  enc_class LdImmD(immD src, regD dst, o7RegP tmp) %{ // Load Immediate
-    address double_address = MacroAssembler(&cbuf).double_constant($src$$constant);
-    RelocationHolder rspec = internal_word_Relocation::spec(double_address);
-#ifdef _LP64
-    Register tmp_reg = reg_to_register_object($tmp$$reg);
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 );
-#else // _LP64
-    uint *code;
-    int tmp_reg = $tmp$$reg;
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address );
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address );
-#endif // _LP64
-  %}
   enc_class LdReplImmI(immI src, regD dst, o7RegP tmp, int count, int width) %{
     // Load a constant replicated "count" times with width "width"
     int bit_width = $width$$constant * 8;
@@ -2840,28 +2788,15 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
       val |= elt_val;
     }
     jdouble dval = *(jdouble*)&val; // coerce to double type
-    address double_address = MacroAssembler(&cbuf).double_constant(dval);
+    MacroAssembler _masm(&cbuf);
+    address double_address = __ double_constant(dval);
     RelocationHolder rspec = internal_word_Relocation::spec(double_address);
-#ifdef _LP64
-    Register tmp_reg = reg_to_register_object($tmp$$reg);
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit_ptr(cbuf, (intptr_t)double_address, tmp_reg, /*ForceRelocatable=*/ true);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, $tmp$$reg, 0 );
-#else // _LP64
-    uint *code;
-    int tmp_reg = $tmp$$reg;
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit2_22( cbuf, Assembler::branch_op, tmp_reg, Assembler::sethi_op2, (intptr_t) double_address );
-    cbuf.relocate(cbuf.code_end(), rspec, 0);
-    emit3_simm10( cbuf, Assembler::ldst_op, $dst$$reg, Assembler::lddf_op3, tmp_reg, (intptr_t) double_address );
-#endif // _LP64
-  %}
-  enc_class ShouldNotEncodeThis ( ) %{
-    ShouldNotCallThis();
-  %}
+    AddressLiteral addrlit(double_address, rspec);
+    __ sethi(addrlit, $tmp$$Register);
+    // XXX This is a quick fix for 6833573.
+    //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
+    __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec);
+  %}
   // Compiler ensures base is doubleword aligned and cnt is count of doublewords
@@ -2901,19 +2836,19 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
   int count_offset = java_lang_String:: count_offset_in_bytes();
   // load str1 (jchar*) base address into tmp1_reg
-  __ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
+  __ load_heap_oop(str1_reg, value_offset, tmp1_reg);
-  __ ld(Address(str1_reg, 0, offset_offset), result_reg);
+  __ ld(str1_reg, offset_offset, result_reg);
   __ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
-  __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
+  __ ld(str1_reg, count_offset, str1_reg); // hoisted
   __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
-  __ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
+  __ load_heap_oop(str2_reg, value_offset, tmp2_reg); // hoisted
   __ add(result_reg, tmp1_reg, tmp1_reg);
   // load str2 (jchar*) base address into tmp2_reg
-  // __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
+  // __ ld_ptr(str2_reg, value_offset, tmp2_reg); // hoisted
-  __ ld(Address(str2_reg, 0, offset_offset), result_reg);
+  __ ld(str2_reg, offset_offset, result_reg);
   __ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg);
-  __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
+  __ ld(str2_reg, count_offset, str2_reg); // hoisted
   __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
   __ subcc(str1_reg, str2_reg, O7); // hoisted
   __ add(result_reg, tmp2_reg, tmp2_reg);
@@ -2922,8 +2857,8 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
   // difference of the string lengths (stack)
   // discard string base pointers, after loading up the lengths
-  // __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
+  // __ ld(str1_reg, count_offset, str1_reg); // hoisted
-  // __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
+  // __ ld(str2_reg, count_offset, str2_reg); // hoisted
   // See if the lengths are different, and calculate min in str1_reg.
   // Stash diff in O7 in case we need it for a tie-breaker.
@@ -3020,19 +2955,19 @@ enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegP tmp1, g4RegP tmp2,
   int count_offset = java_lang_String:: count_offset_in_bytes();
   // load str1 (jchar*) base address into tmp1_reg
-  __ load_heap_oop(Address(str1_reg, 0, value_offset), tmp1_reg);
+  __ load_heap_oop(Address(str1_reg, value_offset), tmp1_reg);
-  __ ld(Address(str1_reg, 0, offset_offset), result_reg);
+  __ ld(Address(str1_reg, offset_offset), result_reg);
   __ add(tmp1_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1_reg);
-  __ ld(Address(str1_reg, 0, count_offset), str1_reg); // hoisted
+  __ ld(Address(str1_reg, count_offset), str1_reg); // hoisted
   __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
-  __ load_heap_oop(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
+  __ load_heap_oop(Address(str2_reg, value_offset), tmp2_reg); // hoisted
   __ add(result_reg, tmp1_reg, tmp1_reg);
   // load str2 (jchar*) base address into tmp2_reg
-  // __ ld_ptr(Address(str2_reg, 0, value_offset), tmp2_reg); // hoisted
+  // __ ld_ptr(Address(str2_reg, value_offset), tmp2_reg); // hoisted
-  __ ld(Address(str2_reg, 0, offset_offset), result_reg);
+  __ ld(Address(str2_reg, offset_offset), result_reg);
   __ add(tmp2_reg, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp2_reg);
-  __ ld(Address(str2_reg, 0, count_offset), str2_reg); // hoisted
+  __ ld(Address(str2_reg, count_offset), str2_reg); // hoisted
   __ sll(result_reg, exact_log2(sizeof(jchar)), result_reg);
   __ cmp(str1_reg, str2_reg); // hoisted
   __ add(result_reg, tmp2_reg, tmp2_reg);
@@ -3139,8 +3074,8 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, g4RegP tmp2, n
   __ delayed()->mov(G0, result_reg); // not equal
   //load the lengths of arrays
-  __ ld(Address(ary1_reg, 0, length_offset), tmp1_reg);
+  __ ld(Address(ary1_reg, length_offset), tmp1_reg);
-  __ ld(Address(ary2_reg, 0, length_offset), tmp2_reg);
+  __ ld(Address(ary2_reg, length_offset), tmp2_reg);
   // return false if the two arrays are not equal length
   __ cmp(tmp1_reg, tmp2_reg);
@@ -3202,19 +3137,20 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, g4RegP tmp2, n
   enc_class enc_rethrow() %{
     cbuf.set_inst_mark();
     Register temp_reg = G3;
-    Address rethrow_stub(temp_reg, OptoRuntime::rethrow_stub());
+    AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
     assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
     MacroAssembler _masm(&cbuf);
 #ifdef ASSERT
     __ save_frame(0);
-    Address last_rethrow_addr(L1, (address)&last_rethrow);
-    __ sethi(last_rethrow_addr);
+    AddressLiteral last_rethrow_addrlit(&last_rethrow);
+    __ sethi(last_rethrow_addrlit, L1);
+    Address addr(L1, last_rethrow_addrlit.low10());
     __ get_pc(L2);
     __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
-    __ st_ptr(L2, last_rethrow_addr);
+    __ st_ptr(L2, addr);
     __ restore();
 #endif
-    __ JUMP(rethrow_stub, 0); // sethi;jmp
+    __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
     __ delayed()->nop();
   %}
@@ -5493,8 +5429,9 @@ instruct loadB(iRegI dst, memory mem) %{
   size(4);
   format %{ "LDSB $mem,$dst\t! byte" %}
-  opcode(Assembler::ldsb_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldsb($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5505,8 +5442,9 @@ instruct loadB2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDSB $mem,$dst\t! byte -> long" %}
-  opcode(Assembler::ldsb_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldsb($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5517,8 +5455,9 @@ instruct loadUB(iRegI dst, memory mem) %{
   size(4);
   format %{ "LDUB $mem,$dst\t! ubyte" %}
-  opcode(Assembler::ldub_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldub($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5529,8 +5468,9 @@ instruct loadUB2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
-  opcode(Assembler::ldub_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldub($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5541,8 +5481,9 @@ instruct loadS(iRegI dst, memory mem) %{
   size(4);
   format %{ "LDSH $mem,$dst\t! short" %}
-  opcode(Assembler::ldsh_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldsh($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5553,8 +5494,9 @@ instruct loadS2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDSH $mem,$dst\t! short -> long" %}
-  opcode(Assembler::ldsh_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldsh($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5565,8 +5507,9 @@ instruct loadUS(iRegI dst, memory mem) %{
   size(4);
   format %{ "LDUH $mem,$dst\t! ushort/char" %}
-  opcode(Assembler::lduh_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ lduh($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5577,8 +5520,9 @@ instruct loadUS2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
-  opcode(Assembler::lduh_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ lduh($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mask_mem);
 %}
@@ -5589,8 +5533,9 @@ instruct loadI(iRegI dst, memory mem) %{
   size(4);
   format %{ "LDUW $mem,$dst\t! int" %}
-  opcode(Assembler::lduw_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ lduw($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
@@ -5601,8 +5546,9 @@ instruct loadI2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDSW $mem,$dst\t! int -> long" %}
-  opcode(Assembler::ldsw_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldsw($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
@@ -5613,8 +5559,9 @@ instruct loadUI2L(iRegL dst, memory mem) %{
   size(4);
   format %{ "LDUW $mem,$dst\t! uint -> long" %}
-  opcode(Assembler::lduw_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ lduw($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
@@ -5625,8 +5572,9 @@ instruct loadL(iRegL dst, memory mem ) %{
   size(4);
   format %{ "LDX $mem,$dst\t! long" %}
-  opcode(Assembler::ldx_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
+  ins_encode %{
+    __ ldx($mem$$Address, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
@@ -5721,31 +5669,29 @@ instruct loadP(iRegP dst, memory mem) %{
 #ifndef _LP64
   format %{ "LDUW $mem,$dst\t! ptr" %}
-  opcode(Assembler::lduw_op3, 0, REGP_OP);
+  ins_encode %{
+    __ lduw($mem$$Address, $dst$$Register);
+  %}
 #else
   format %{ "LDX $mem,$dst\t! ptr" %}
-  opcode(Assembler::ldx_op3, 0, REGP_OP);
+  ins_encode %{
+    __ ldx($mem$$Address, $dst$$Register);
+  %}
 #endif
-  ins_encode( form3_mem_reg( mem, dst ) );
   ins_pipe(iload_mem);
 %}
 // Load Compressed Pointer
 instruct loadN(iRegN dst, memory mem) %{
   match(Set dst (LoadN mem));
   ins_cost(MEMORY_REF_COST);
   size(4);
   format %{ "LDUW $mem,$dst\t! compressed ptr" %}
   ins_encode %{
-    Register index = $mem$$index$$Register;
-    if (index != G0) {
-      __ lduw($mem$$base$$Register, index, $dst$$Register);
-    } else {
-      __ lduw($mem$$base$$Register, $mem$$disp, $dst$$Register);
-    }
+    __ lduw($mem$$Address, $dst$$Register);
   %}
   ins_pipe(iload_mem);
 %}
 // Load Klass Pointer
@@ -5756,12 +5702,15 @@ instruct loadKlass(iRegP dst, memory mem) %{
 #ifndef _LP64
   format %{ "LDUW $mem,$dst\t! klass ptr" %}
-  opcode(Assembler::lduw_op3, 0, REGP_OP);
+  ins_encode %{
+    __ lduw($mem$$Address, $dst$$Register);
+  %}
 #else
   format %{ "LDX $mem,$dst\t! klass ptr" %}
-  opcode(Assembler::ldx_op3, 0, REGP_OP);
+  ins_encode %{
+    __ ldx($mem$$Address, $dst$$Register);
+  %}
 #endif
-  ins_encode( form3_mem_reg( mem, dst ) );
   ins_pipe(iload_mem);
 %}
@@ -5772,16 +5721,8 @@ instruct loadNKlass(iRegN dst, memory mem) %{
   size(4);
   format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
   ins_encode %{
-    Register base = as_Register($mem$$base);
-    Register index = as_Register($mem$$index);
-    Register dst = $dst$$Register;
-    if (index != G0) {
-      __ lduw(base, index, dst);
-    } else {
-      __ lduw(base, $mem$$disp, dst);
-    }
+    __ lduw($mem$$Address, $dst$$Register);
   %}
   ins_pipe(iload_mem);
 %}
@@ -5867,8 +5808,8 @@ instruct loadConP_poll(iRegP dst, immP_poll src) %{
   ins_cost(DEFAULT_COST);
   format %{ "SET $src,$dst\t!ptr" %}
   ins_encode %{
-    Address polling_page(reg_to_register_object($dst$$reg), (address)os::get_polling_page());
-    __ sethi(polling_page, false );
+    AddressLiteral polling_page(os::get_polling_page());
+    __ sethi(polling_page, reg_to_register_object($dst$$reg));
   %}
   ins_pipe(loadConP_poll);
 %}
@@ -5927,14 +5868,21 @@ instruct loadConF(regF dst, immF src, o7RegP tmp) %{
   effect(KILL tmp);
 #ifdef _LP64
-  size(36);
+  size(8*4);
 #else
-  size(8);
+  size(2*4);
 #endif
   format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t"
             "LDF [$tmp+lo(&$src)],$dst" %}
-  ins_encode( LdImmF(src, dst, tmp) );
+  ins_encode %{
+    address float_address = __ float_constant($src$$constant);
+    RelocationHolder rspec = internal_word_Relocation::spec(float_address);
+    AddressLiteral addrlit(float_address, rspec);
+    __ sethi(addrlit, $tmp$$Register);
+    __ ldf(FloatRegisterImpl::S, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
+  %}
   ins_pipe(loadConFD);
 %}
@@ -5943,14 +5891,23 @@ instruct loadConD(regD dst, immD src, o7RegP tmp) %{
   effect(KILL tmp);
 #ifdef _LP64
-  size(36);
+  size(8*4);
 #else
-  size(8);
+  size(2*4);
 #endif
   format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t"
             "LDDF [$tmp+lo(&$src)],$dst" %}
-  ins_encode( LdImmD(src, dst, tmp) );
+  ins_encode %{
+    address double_address = __ double_constant($src$$constant);
+    RelocationHolder rspec = internal_word_Relocation::spec(double_address);
+    AddressLiteral addrlit(double_address, rspec);
+    __ sethi(addrlit, $tmp$$Register);
+    // XXX This is a quick fix for 6833573.
+    //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
+    __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec);
+  %}
   ins_pipe(loadConFD);
 %}
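
(The load-instruct rewrites above replace table-driven opcode()/ins_encode(simple_form3_mem_reg(...)) pairs with inline ins_encode %{ ... %} blocks that call the macro assembler with $mem$$Address. Roughly, ADLC expands such a block into a per-instruct emit function; the sketch below mimics that shape with stand-in names only, it is not real ADLC output.)

    #include <cstdio>

    struct Address { int base, index, disp; };

    struct MacroAssembler {
      void ldsb(const Address& a, int dst_reg) {
        printf("ldsb [%%r%d + %d], %%r%d\n", a.base, a.disp, dst_reg);
      }
    };

    // Roughly what ins_encode %{ __ ldsb($mem$$Address, $dst$$Register); %}
    // turns into: the $-substitutions become ordinary arguments.
    void loadB_emit(MacroAssembler& masm, const Address& mem_Address, int dst_Register) {
      masm.ldsb(mem_Address, dst_Register);
    }

    int main() {
      MacroAssembler masm;
      Address mem = { /*base=*/ 5, /*index=*/ 0, /*disp=*/ 16 };
      loadB_emit(masm, mem, /*dst=*/ 4);
      return 0;
    }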
View file
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,16 +68,9 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef PRODUCT
 #define inc_counter_np(a,b,c) (0)
 #else
-  void inc_counter_np_(int& counter, Register t1, Register t2) {
-    Address counter_addr(t2, (address) &counter);
-    __ sethi(counter_addr);
-    __ ld(counter_addr, t1);
-    __ inc(t1);
-    __ st(t1, counter_addr);
-  }
 #define inc_counter_np(counter, t1, t2) \
   BLOCK_COMMENT("inc_counter " #counter); \
-  inc_counter_np_(counter, t1, t2);
+  __ inc_counter(&counter, t1, t2);
 #endif
 //----------------------------------------------------------------------------------------------------
@ -325,9 +318,9 @@ class StubGenerator: public StubCodeGenerator {
__ verify_thread(); __ verify_thread();
const Register& temp_reg = Gtemp; const Register& temp_reg = Gtemp;
Address pending_exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address pending_exception_addr (G2_thread, Thread::pending_exception_offset());
Address exception_file_offset_addr(G2_thread, 0, in_bytes(Thread::exception_file_offset ())); Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset ());
Address exception_line_offset_addr(G2_thread, 0, in_bytes(Thread::exception_line_offset ())); Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset ());
// set pending exception // set pending exception
__ verify_oop(Oexception); __ verify_oop(Oexception);
@ -340,8 +333,8 @@ class StubGenerator: public StubCodeGenerator {
// complete return to VM // complete return to VM
assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before"); assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
Address stub_ret(temp_reg, StubRoutines::_call_stub_return_address); AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
__ jump_to(stub_ret); __ jump_to(stub_ret, temp_reg);
__ delayed()->nop(); __ delayed()->nop();
return start; return start;
@ -366,7 +359,7 @@ class StubGenerator: public StubCodeGenerator {
const Register& handler_reg = Gtemp; const Register& handler_reg = Gtemp;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address exception_addr(G2_thread, Thread::pending_exception_offset());
#ifdef ASSERT #ifdef ASSERT
// make sure that this code is only executed if there is a pending exception // make sure that this code is only executed if there is a pending exception
@ -456,8 +449,7 @@ class StubGenerator: public StubCodeGenerator {
int frame_complete = __ offset(); int frame_complete = __ offset();
if (restore_saved_exception_pc) { if (restore_saved_exception_pc) {
Address saved_exception_pc(G2_thread, 0, in_bytes(JavaThread::saved_exception_pc_offset())); __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
__ ld_ptr(saved_exception_pc, I7);
__ sub(I7, frame::pc_return_offset, I7); __ sub(I7, frame::pc_return_offset, I7);
} }
@ -481,7 +473,7 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT #ifdef ASSERT
Label L; Label L;
Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address exception_addr(G2_thread, Thread::pending_exception_offset());
Register scratch_reg = Gtemp; Register scratch_reg = Gtemp;
__ ld_ptr(exception_addr, scratch_reg); __ ld_ptr(exception_addr, scratch_reg);
__ br_notnull(scratch_reg, false, Assembler::pt, L); __ br_notnull(scratch_reg, false, Assembler::pt, L);
@ -835,7 +827,7 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc(); address start = __ pc();
const int preserve_register_words = (64 * 2); const int preserve_register_words = (64 * 2);
Address preserve_addr(FP, 0, (-preserve_register_words * wordSize) + STACK_BIAS); Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
Register Lthread = L7_thread_cache; Register Lthread = L7_thread_cache;
int i; int i;
@ -1106,21 +1098,19 @@ class StubGenerator: public StubCodeGenerator {
__ srl_ptr(addr, CardTableModRefBS::card_shift, addr); __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
__ srl_ptr(count, CardTableModRefBS::card_shift, count); __ srl_ptr(count, CardTableModRefBS::card_shift, count);
__ sub(count, addr, count); __ sub(count, addr, count);
Address rs(tmp, (address)ct->byte_map_base); AddressLiteral rs(ct->byte_map_base);
__ load_address(rs); __ set(rs, tmp);
__ BIND(L_loop); __ BIND(L_loop);
__ stb(G0, rs.base(), addr); __ stb(G0, tmp, addr);
__ subcc(count, 1, count); __ subcc(count, 1, count);
__ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
__ delayed()->add(addr, 1, addr); __ delayed()->add(addr, 1, addr);
}
}
break; break;
case BarrierSet::ModRef: case BarrierSet::ModRef:
break; break;
default : default:
ShouldNotReachHere(); ShouldNotReachHere();
} }
} }
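The loop generated above is the card-table post-barrier: every card-table byte spanned by the updated range is marked dirty by storing zero (the stb of G0). A hedged scalar restatement, assuming byte_map_base is pre-biased so that indexing by (address >> card_shift) lands on the right byte, as in the real code:

    #include <cstdint>

    void dirty_cards(volatile uint8_t* byte_map_base, int card_shift,
                     uintptr_t start, uintptr_t end) {
      uintptr_t first = start >> card_shift;
      uintptr_t count = (end >> card_shift) - first;  // the 'sub' above
      for (uintptr_t i = 0; i <= count; i++)          // brx greaterEqual loop
        byte_map_base[first + i] = 0;                 // stb G0 == dirty
    }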
View file
@ -87,8 +87,8 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
} }
// throw exception // throw exception
assert(Interpreter::throw_exception_entry() != NULL, "generate it first"); assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
Address thrower(G3_scratch, Interpreter::throw_exception_entry()); AddressLiteral thrower(Interpreter::throw_exception_entry());
__ jump_to (thrower); __ jump_to(thrower, G3_scratch);
__ delayed()->nop(); __ delayed()->nop();
return entry; return entry;
} }
@ -150,7 +150,8 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
} }
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
address compiled_entry = __ pc(); address compiled_entry = __ pc();
Label cont; Label cont;
@ -186,8 +187,8 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
const Register cache = G3_scratch; const Register cache = G3_scratch;
const Register size = G1_scratch; const Register size = G1_scratch;
__ get_cache_and_index_at_bcp(cache, G1_scratch, 1); __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
__ ld_ptr(Address(cache, 0, in_bytes(constantPoolCacheOopDesc::base_offset()) + __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
in_bytes(ConstantPoolCacheEntry::flags_offset())), size); ConstantPoolCacheEntry::flags_offset(), size);
__ and3(size, 0xFF, size); // argument size in words __ and3(size, 0xFF, size); // argument size in words
__ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
__ add(Lesp, size, Lesp); // pop arguments __ add(Lesp, size, Lesp); // pop arguments
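The three instructions above pop the caller's arguments by raising the expression stack pointer. As plain C++ (names illustrative):

    #include <cstdint>

    intptr_t* pop_arguments(intptr_t* lesp, uintptr_t flags,
                            int log_stack_element_size) {
      uintptr_t words = flags & 0xFF;                     // and3(size, 0xFF, size)
      uintptr_t bytes = words << log_stack_element_size;  // sll(...)
      return (intptr_t*)((char*)lesp + bytes);            // add(Lesp, size, Lesp)
    }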
@ -201,9 +202,8 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
address entry = __ pc(); address entry = __ pc();
__ get_constant_pool_cache(LcpoolCache); // load LcpoolCache __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
{ Label L; { Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address exception_addr(G2_thread, Thread::pending_exception_offset());
__ ld_ptr(exception_addr, Gtemp); // Load pending exception.
__ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp); __ tst(Gtemp);
__ brx(Assembler::equal, false, Assembler::pt, L); __ brx(Assembler::equal, false, Assembler::pt, L);
__ delayed()->nop(); __ delayed()->nop();
@ -282,7 +282,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// Update standard invocation counters // Update standard invocation counters
__ increment_invocation_counter(O0, G3_scratch); __ increment_invocation_counter(O0, G3_scratch);
if (ProfileInterpreter) { // %%% Merge this into methodDataOop if (ProfileInterpreter) { // %%% Merge this into methodDataOop
Address interpreter_invocation_counter(Lmethod, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset())); Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
__ ld(interpreter_invocation_counter, G3_scratch); __ ld(interpreter_invocation_counter, G3_scratch);
__ inc(G3_scratch); __ inc(G3_scratch);
__ st(G3_scratch, interpreter_invocation_counter); __ st(G3_scratch, interpreter_invocation_counter);
@ -290,9 +290,9 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
if (ProfileInterpreter && profile_method != NULL) { if (ProfileInterpreter && profile_method != NULL) {
// Test to see if we should create a method data oop // Test to see if we should create a method data oop
Address profile_limit(G3_scratch, (address)&InvocationCounter::InterpreterProfileLimit); AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
__ sethi(profile_limit); __ sethi(profile_limit, G3_scratch);
__ ld(profile_limit, G3_scratch); __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
__ cmp(O0, G3_scratch); __ cmp(O0, G3_scratch);
__ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue); __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
__ delayed()->nop(); __ delayed()->nop();
@ -301,9 +301,9 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
__ test_method_data_pointer(*profile_method); __ test_method_data_pointer(*profile_method);
} }
Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
__ sethi(invocation_limit); __ sethi(invocation_limit, G3_scratch);
__ ld(invocation_limit, G3_scratch); __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
__ cmp(O0, G3_scratch); __ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
__ delayed()->nop(); __ delayed()->nop();
@ -314,8 +314,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
// ebx - methodOop // ebx - methodOop
// //
void InterpreterGenerator::lock_method(void) { void InterpreterGenerator::lock_method(void) {
const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0); // Load access flags.
__ ld(access_flags, O0);
#ifdef ASSERT #ifdef ASSERT
{ Label ok; { Label ok;
@ -359,8 +358,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
Register Rscratch, Register Rscratch,
Register Rscratch2) { Register Rscratch2) {
const int page_size = os::vm_page_size(); const int page_size = os::vm_page_size();
Address saved_exception_pc(G2_thread, 0, Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
in_bytes(JavaThread::saved_exception_pc_offset()));
Label after_frame_check; Label after_frame_check;
assert_different_registers(Rframe_size, Rscratch, Rscratch2); assert_different_registers(Rframe_size, Rscratch, Rscratch2);
@ -372,7 +370,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
__ delayed()->nop(); __ delayed()->nop();
// get the stack base, and in debug, verify it is non-zero // get the stack base, and in debug, verify it is non-zero
__ ld_ptr( G2_thread, in_bytes(Thread::stack_base_offset()), Rscratch ); __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT #ifdef ASSERT
Label base_not_zero; Label base_not_zero;
__ cmp( Rscratch, G0 ); __ cmp( Rscratch, G0 );
@ -384,7 +382,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// get the stack size, and in debug, verify it is non-zero // get the stack size, and in debug, verify it is non-zero
assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" ); assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
__ ld_ptr( G2_thread, in_bytes(Thread::stack_size_offset()), Rscratch2 ); __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT #ifdef ASSERT
Label size_not_zero; Label size_not_zero;
__ cmp( Rscratch2, G0 ); __ cmp( Rscratch2, G0 );
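The stack base and size loaded above feed the frame-fit test that follows (only partly visible in this hunk). A hedged reconstruction, with the guard-zone term left symbolic since HotSpot's actual guard math has more components than shown here:

    #include <cstddef>
    #include <cstdint>

    bool frame_fits(uintptr_t sp, size_t frame_size,
                    uintptr_t stack_base, size_t stack_size, size_t guard) {
      uintptr_t lowest_usable = stack_base - stack_size + guard;
      return sp - frame_size > lowest_usable;   // stack grows downward
    }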
@ -459,9 +457,9 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// (gri - 2/25/2000) // (gri - 2/25/2000)
const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
const Address max_stack (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset())); const Address max_stack (G5_method, methodOopDesc::max_stack_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong ); int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space = const int extra_space =
@ -538,8 +536,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
if (native_call) { if (native_call) {
__ mov(G0, Lbcp); __ mov(G0, Lbcp);
} else { } else {
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), Lbcp ); __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
__ add(Address(Lbcp, 0, in_bytes(constMethodOopDesc::codes_offset())), Lbcp ); __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
} }
__ mov( G5_method, Lmethod); // set Lmethod __ mov( G5_method, Lmethod); // set Lmethod
__ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache __ get_constant_pool_cache( LcpoolCache ); // set LcpoolCache
@ -577,8 +575,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// do nothing for empty methods (do not even increment invocation counter) // do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) { if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry. // If we need a safepoint check, generate full interpreter entry.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch); __ set(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path); __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop(); __ delayed()->nop();
@ -616,7 +614,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
if ( UseFastAccessorMethods && !UseCompressedOops ) { if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter // Check if we need to reach a safepoint and generate full interpreter
// frame if so. // frame if so.
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch); __ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path); __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
@ -631,8 +629,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// read first instruction word and extract bytecode @ 1 and index @ 2 // read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!) // get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch); __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
__ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch); __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);
// move index @ 2 far left then to the right most two bytes. // move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch); __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
@ -640,7 +638,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch); ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache // get constant pool cache
__ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch); __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
__ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch); __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry // get specific constant pool cache entry
@ -649,7 +647,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// Check the constant Pool cache entry to see if it has been resolved. // Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path. // If not, need the slow path.
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch); __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch); __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch); __ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp(G1_scratch, Bytecodes::_getfield); __ cmp(G1_scratch, Bytecodes::_getfield);
@ -657,8 +655,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ delayed()->nop(); __ delayed()->nop();
// Get the type and return field offset from the constant pool cache // Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch); __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch); __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
Label xreturn_path; Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc. // Need to differentiate between igetfield, agetfield, bgetfield etc.
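A fast accessor method's body is aload_0; getfield #index; <x>return, and the hunk above loads its first four bytecode bytes as one big-endian word and isolates the pieces with the sll/srl pair. Equivalently, as a sketch:

    #include <cstdint>

    void decode_accessor_head(uint32_t first_word,
                              uint8_t&  bytecode_at_1,
                              uint16_t& cp_index_at_2) {
      bytecode_at_1 = (first_word >> 16) & 0xFF;   // byte @ 1 (the getfield)
      cp_index_at_2 = first_word & 0xFFFF;         // bytes @ 2..3, via sll/srl
    }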
@ -717,7 +715,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// make sure registers are different! // make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address Laccess_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());
__ verify_oop(G5_method); __ verify_oop(G5_method);
@ -727,7 +725,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// make sure method is native & not abstract // make sure method is native & not abstract
// rethink these assertions - they can be simplified and shared (gri 2/25/2000) // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT #ifdef ASSERT
__ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
{ {
Label L; Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1); __ btst(JVM_ACC_NATIVE, Gtmp1);
@ -754,10 +752,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// this slot will be set later, we initialize it to null here just in // this slot will be set later, we initialize it to null here just in
// case we get a GC before the actual value is stored later // case we get a GC before the actual value is stored later
__ st_ptr(G0, Address(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS)); __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
const Address do_not_unlock_if_synchronized(G2_thread, 0, const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); JavaThread::do_not_unlock_if_synchronized_offset());
// Since at this point in the method invocation the exception handler // Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't // would try to exit the monitor of synchronized methods which hasn't
// been entered yet, we set the thread local variable // been entered yet, we set the thread local variable
@ -824,12 +822,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// get signature handler // get signature handler
{ Label L; { Label L;
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
__ ld_ptr(signature_handler, G3_scratch);
__ tst(G3_scratch); __ tst(G3_scratch);
__ brx(Assembler::notZero, false, Assembler::pt, L); __ brx(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop(); __ delayed()->nop();
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch); __ ld_ptr(signature_handler, G3_scratch);
__ bind(L); __ bind(L);
} }
@ -842,10 +841,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Flush the method pointer to the register save area // Flush the method pointer to the register save area
__ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS); __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
__ mov(Llocals, O1); __ mov(Llocals, O1);
// calculate where the mirror handle body is allocated in the interpreter frame:
Address mirror(FP, 0, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS); // calculate where the mirror handle body is allocated in the interpreter frame:
__ add(mirror, O2); __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
// Calculate current frame size // Calculate current frame size
__ sub(SP, FP, O3); // Calculate negative of current frame size __ sub(SP, FP, O3); // Calculate negative of current frame size
@ -882,14 +880,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ ld(Laccess_flags, O0); __ ld(Laccess_flags, O0);
__ btst(JVM_ACC_STATIC, O0); __ btst(JVM_ACC_STATIC, O0);
__ br( Assembler::zero, false, Assembler::pt, not_static); __ br( Assembler::zero, false, Assembler::pt, not_static);
__ delayed()-> // get native function entry point(O0 is a good temp until the very end)
// get native function entry point(O0 is a good temp until the very end) __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
// for static methods insert the mirror argument // for static methods insert the mirror argument
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc:: constants_offset())), O1); __ ld_ptr(Lmethod, methodOopDesc:: constants_offset(), O1);
__ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1); __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
__ ld_ptr(O1, mirror_offset, O1); __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT #ifdef ASSERT
if (!PrintSignatureHandlers) // do not dirty the output with this if (!PrintSignatureHandlers) // do not dirty the output with this
@ -944,15 +941,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ flush_windows(); __ flush_windows();
// mark windows as flushed // mark windows as flushed
Address flags(G2_thread, Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
__ set(JavaFrameAnchor::flushed, G3_scratch); __ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, flags); __ st(G3_scratch, flags);
// Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready. // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset())); Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT #ifdef ASSERT
{ Label L; { Label L;
__ ld(thread_state, G3_scratch); __ ld(thread_state, G3_scratch);
@ -982,7 +977,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Block, if necessary, before resuming in _thread_in_Java state. // Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking. // In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block; { Label no_block;
Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state. // Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization // This additional state is necessary because reading and testing the synchronization
@ -1009,10 +1004,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
Label L; Label L;
Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
__ br(Assembler::notEqual, false, Assembler::pn, L); __ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()-> __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
ld(suspend_state, G3_scratch);
__ cmp(G3_scratch, 0); __ cmp(G3_scratch, 0);
__ br(Assembler::equal, false, Assembler::pt, no_block); __ br(Assembler::equal, false, Assembler::pt, no_block);
__ delayed()->nop(); __ delayed()->nop();
@ -1054,7 +1047,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ st(G3_scratch, thread_state); __ st(G3_scratch, thread_state);
// reset handle block // reset handle block
__ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch); __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
__ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes()); __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
// If we have an oop result store it where it will be safe for any further gc // If we have an oop result store it where it will be safe for any further gc
@ -1083,8 +1076,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// handle exceptions (exception handling will handle unlocking!) // handle exceptions (exception handling will handle unlocking!)
{ Label L; { Label L;
Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset())); Address exception_addr(G2_thread, Thread::pending_exception_offset());
__ ld_ptr(exception_addr, Gtemp); __ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp); __ tst(Gtemp);
__ brx(Assembler::equal, false, Assembler::pt, L); __ brx(Assembler::equal, false, Assembler::pt, L);
@ -1170,11 +1162,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure registers are different! // make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2); assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())); const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
const Address size_of_locals (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset())); const Address size_of_locals (G5_method, methodOopDesc::size_of_locals_offset());
// Seems like G5_method is live at the point this is used. So we could make this look consistent // Seems like G5_method is live at the point this is used. So we could make this look consistent
// and use in the asserts. // and use in the asserts.
const Address access_flags (Lmethod, 0, in_bytes(methodOopDesc::access_flags_offset())); const Address access_flags (Lmethod, methodOopDesc::access_flags_offset());
__ verify_oop(G5_method); __ verify_oop(G5_method);
@ -1184,7 +1176,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// make sure method is not native & not abstract // make sure method is not native & not abstract
// rethink these assertions - they can be simplified and shared (gri 2/25/2000) // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT #ifdef ASSERT
__ ld(G5_method, in_bytes(methodOopDesc::access_flags_offset()), Gtmp1); __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
{ {
Label L; Label L;
__ btst(JVM_ACC_NATIVE, Gtmp1); __ btst(JVM_ACC_NATIVE, Gtmp1);
@ -1239,8 +1231,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop ); __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
__ delayed()->st_ptr( init_value, O2, 0 ); __ delayed()->st_ptr( init_value, O2, 0 );
const Address do_not_unlock_if_synchronized(G2_thread, 0, const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); JavaThread::do_not_unlock_if_synchronized_offset());
// Since at this point in the method invocation the exception handler // Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't // would try to exit the monitor of synchronized methods which hasn't
// been entered yet, we set the thread local variable // been entered yet, we set the thread local variable
@ -1716,7 +1708,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// //
Interpreter::_remove_activation_preserving_args_entry = __ pc(); Interpreter::_remove_activation_preserving_args_entry = __ pc();
Address popframe_condition_addr (G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
// Set the popframe_processing bit in popframe_condition indicating that we are // Set the popframe_processing bit in popframe_condition indicating that we are
// currently handling popframe, so that call_VMs that may happen later do not trigger new // currently handling popframe, so that call_VMs that may happen later do not trigger new
// popframe handling cycles. // popframe handling cycles.
@ -1758,7 +1750,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2); __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
// Inform deoptimization that it is responsible for restoring these arguments // Inform deoptimization that it is responsible for restoring these arguments
__ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1); __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
Address popframe_condition_addr(G2_thread, 0, in_bytes(JavaThread::popframe_condition_offset())); Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
__ st(Gtmp1, popframe_condition_addr); __ st(Gtmp1, popframe_condition_addr);
// Return from the current method // Return from the current method
@ -1807,7 +1799,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ verify_oop(Oexception); __ verify_oop(Oexception);
const int return_reg_adjustment = frame::pc_return_offset; const int return_reg_adjustment = frame::pc_return_offset;
Address issuing_pc_addr(I7, 0, return_reg_adjustment); Address issuing_pc_addr(I7, return_reg_adjustment);
// We are done with this activation frame; find out where to go next. // We are done with this activation frame; find out where to go next.
// The continuation point will be an exception handler, which expects // The continuation point will be an exception handler, which expects
@ -1853,8 +1845,8 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_expression_stack(); __ empty_expression_stack();
__ load_earlyret_value(state); __ load_earlyret_value(state);
__ ld_ptr(Address(G2_thread, 0, in_bytes(JavaThread::jvmti_thread_state_offset())), G3_scratch); __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
Address cond_addr(G3_scratch, 0, in_bytes(JvmtiThreadState::earlyret_state_offset())); Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state // Clear the earlyret state
__ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr); __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
@ -1921,43 +1913,33 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
// helpers for generate_and_dispatch // helpers for generate_and_dispatch
void TemplateInterpreterGenerator::count_bytecode() { void TemplateInterpreterGenerator::count_bytecode() {
Address c(G3_scratch, (address)&BytecodeCounter::_counter_value); __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
__ load_contents(c, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, c);
} }
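Presumed expansion of the new inc_counter(counter_addr, Rtmp1, Rtmp2) helper, inferred from the four-instruction sequence it replaces: materialize the counter's address, load, increment, store back. This is a guess at the helper's semantics, not its actual body:

    void inc_counter_sketch(volatile int* counter_addr) {
      // set(AddressLiteral(counter_addr), Rtmp1);   // address into Rtmp1
      // ld [Rtmp1], Rtmp2;  inc Rtmp2;  st Rtmp2, [Rtmp1];
      *counter_addr = *counter_addr + 1;             // non-atomic, as before
    }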
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
Address bucket( G3_scratch, (address) &BytecodeHistogram::_counters[t->bytecode()] ); __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
__ load_contents(bucket, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, bucket);
} }
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
address index_addr = (address)&BytecodePairHistogram::_index; AddressLiteral index (&BytecodePairHistogram::_index);
Address index(G3_scratch, index_addr); AddressLiteral counters((address) &BytecodePairHistogram::_counters);
address counters_addr = (address)&BytecodePairHistogram::_counters;
Address counters(G3_scratch, counters_addr);
// get index, shift out old bytecode, bring in new bytecode, and store it // get index, shift out old bytecode, bring in new bytecode, and store it
// _index = (_index >> log2_number_of_codes) | // _index = (_index >> log2_number_of_codes) |
// (bytecode << log2_number_of_codes); // (bytecode << log2_number_of_codes);
__ load_contents(index, G4_scratch);
__ load_contents( index, G4_scratch );
__ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch ); __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
__ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch ); __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
__ or3( G3_scratch, G4_scratch, G4_scratch ); __ or3( G3_scratch, G4_scratch, G4_scratch );
__ store_contents( G4_scratch, index ); __ store_contents(G4_scratch, index, G3_scratch);
// bump bucket contents // bump bucket contents
// _counters[_index] ++; // _counters[_index] ++;
__ load_address( counters ); // loads into G3_scratch __ set(counters, G3_scratch); // loads into G3_scratch
__ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address __ sll( G4_scratch, LogBytesPerWord, G4_scratch ); // Index is word address
__ add (G3_scratch, G4_scratch, G3_scratch); // Add in index __ add (G3_scratch, G4_scratch, G3_scratch); // Add in index
__ ld (G3_scratch, 0, G4_scratch); __ ld (G3_scratch, 0, G4_scratch);
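The index update spelled out by the comment in this hunk, as plain C++; the value of log2_number_of_codes and the array types are assumptions for illustration:

    #include <cstdint>

    enum { log2_number_of_codes = 8,
           number_of_codes = 1 << log2_number_of_codes };
    static uint32_t pair_index;
    static uint32_t pair_counters[number_of_codes * number_of_codes];

    void record_pair(uint8_t bytecode) {
      // _index = (_index >> log2_number_of_codes) |
      //          (bytecode << log2_number_of_codes);
      pair_index = (pair_index >> log2_number_of_codes) |
                   ((uint32_t)bytecode << log2_number_of_codes);
      pair_counters[pair_index]++;          // bump bucket contents
    }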
@ -1978,9 +1960,9 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
void TemplateInterpreterGenerator::stop_interpreter_at() { void TemplateInterpreterGenerator::stop_interpreter_at() {
Address counter(G3_scratch , (address)&BytecodeCounter::_counter_value); AddressLiteral counter(&BytecodeCounter::_counter_value);
__ load_contents (counter, G3_scratch ); __ load_contents(counter, G3_scratch);
Address stop_at(G4_scratch, (address)&StopInterpreterAt); AddressLiteral stop_at(&StopInterpreterAt);
__ load_ptr_contents(stop_at, G4_scratch); __ load_ptr_contents(stop_at, G4_scratch);
__ cmp(G3_scratch, G4_scratch); __ cmp(G3_scratch, G4_scratch);
__ breakpoint_trap(Assembler::equal); __ breakpoint_trap(Assembler::equal);
View file
@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -131,7 +131,7 @@ Assembler::Condition ccNot(TemplateTable::Condition cc) {
Address TemplateTable::at_bcp(int offset) { Address TemplateTable::at_bcp(int offset) {
assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
return Address( Lbcp, 0, offset); return Address(Lbcp, offset);
} }
@ -217,9 +217,9 @@ void TemplateTable::fconst(int value) {
case 1: p = &one; break; case 1: p = &one; break;
case 2: p = &two; break; case 2: p = &two; break;
} }
Address a(G3_scratch, (address)p); AddressLiteral a(p);
__ sethi(a); __ sethi(a, G3_scratch);
__ ldf(FloatRegisterImpl::S, a, Ftos_f); __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
} }
@ -232,9 +232,9 @@ void TemplateTable::dconst(int value) {
case 0: p = &zero; break; case 0: p = &zero; break;
case 1: p = &one; break; case 1: p = &one; break;
} }
Address a(G3_scratch, (address)p); AddressLiteral a(p);
__ sethi(a); __ sethi(a, G3_scratch);
__ ldf(FloatRegisterImpl::D, a, Ftos_d); __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
} }
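The sethi/low10 pair used in fconst and dconst above splits a 32-bit address the way SPARC requires: sethi materializes the high 22 bits, and the low 10 bits ride along as the immediate displacement of the following load. In scalar form (sketch, not real codegen):

    #include <cstdint>

    uint32_t sethi_part(uint32_t addr) { return addr & ~0x3ffu; }  // high 22 bits
    uint32_t low10_part(uint32_t addr) { return addr &  0x3ffu; }  // %lo(addr)
    // sethi_part(a) | low10_part(a) == a, since the two masks are disjoint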
@ -1548,7 +1548,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// non-JSR normal-branch stuff occurring below. // non-JSR normal-branch stuff occurring below.
if( is_jsr ) { if( is_jsr ) {
// compute return address as bci in Otos_i // compute return address as bci in Otos_i
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ sub(Lbcp, G3_scratch, G3_scratch); __ sub(Lbcp, G3_scratch, G3_scratch);
__ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i); __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
@ -1665,7 +1665,7 @@ void TemplateTable::ret() {
__ profile_ret(vtos, Otos_i, G4_scratch); __ profile_ret(vtos, Otos_i, G4_scratch);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp); __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos);
@ -1680,7 +1680,7 @@ void TemplateTable::wide_ret() {
__ profile_ret(vtos, Otos_i, G4_scratch); __ profile_ret(vtos, Otos_i, G4_scratch);
__ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
__ add(G3_scratch, Otos_i, G3_scratch); __ add(G3_scratch, Otos_i, G3_scratch);
__ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp); __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
__ dispatch_next(vtos); __ dispatch_next(vtos);
@ -1968,8 +1968,8 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
Label resolved; Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1); __ get_cache_and_index_at_bcp(Rcache, index, 1);
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset())), Lbyte_code); ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
__ srl( Lbyte_code, shift_count, Lbyte_code ); __ srl( Lbyte_code, shift_count, Lbyte_code );
__ and3( Lbyte_code, 0xFF, Lbyte_code ); __ and3( Lbyte_code, 0xFF, Lbyte_code );
@ -2029,11 +2029,11 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
resolve_cache_and_index(byte_no, Rcache, Rscratch); resolve_cache_and_index(byte_no, Rcache, Rscratch);
} }
__ ld_ptr(Address(Rcache, 0, method_offset), Rmethod); __ ld_ptr(Rcache, method_offset, Rmethod);
if (Ritable_index != noreg) { if (Ritable_index != noreg) {
__ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index); __ ld_ptr(Rcache, index_offset, Ritable_index);
} }
__ ld_ptr(Address(Rcache, 0, flags_offset), Rflags); __ ld_ptr(Rcache, flags_offset, Rflags);
} }
// The Rcache register must be set before call // The Rcache register must be set before call
@ -2047,13 +2047,10 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
ConstantPoolCacheEntry::flags_offset())), Rflags); __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::f2_offset())), Roffset);
if (is_static) { if (is_static) {
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
ConstantPoolCacheEntry::f1_offset())), Robj);
} }
} }
@ -2070,9 +2067,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
// the time to call into the VM. // the time to call into the VM.
Label Label1; Label Label1;
assert_different_registers(Rcache, index, G1_scratch); assert_different_registers(Rcache, index, G1_scratch);
Address get_field_access_count_addr(G1_scratch, AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
(address)JvmtiExport::get_field_access_count_addr(),
relocInfo::none);
__ load_contents(get_field_access_count_addr, G1_scratch); __ load_contents(get_field_access_count_addr, G1_scratch);
__ tst(G1_scratch); __ tst(G1_scratch);
__ br(Assembler::zero, false, Assembler::pt, Label1); __ br(Assembler::zero, false, Assembler::pt, Label1);
@ -2293,7 +2288,7 @@ void TemplateTable::fast_accessfield(TosState state) {
__ get_cache_and_index_at_bcp(Rcache, index, 1); __ get_cache_and_index_at_bcp(Rcache, index, 1);
jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true); jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset); __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
__ null_check(Otos_i); __ null_check(Otos_i);
__ verify_oop(Otos_i); __ verify_oop(Otos_i);
@ -2304,7 +2299,7 @@ void TemplateTable::fast_accessfield(TosState state) {
Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
if (__ membar_has_effect(membar_bits)) { if (__ membar_has_effect(membar_bits)) {
// Get volatile flag // Get volatile flag
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Rflags); __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
__ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
} }
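The set/and pair above reduces to a single-bit test: build the mask for ConstantPoolCacheEntry::volatileField and AND it against the cached flags; a nonzero result takes the membar path. The bit position is left symbolic here on purpose:

    #include <cstdint>

    bool is_volatile_field(uintptr_t flags, int volatile_field_bit) {
      return (flags & (uintptr_t(1) << volatile_field_bit)) != 0;
    }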
@ -2355,7 +2350,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// Check to see if a field modification watch has been set before we take // Check to see if a field modification watch has been set before we take
// the time to call into the VM. // the time to call into the VM.
Label done; Label done;
Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none); AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
__ load_contents(get_field_modification_count_addr, G4_scratch); __ load_contents(get_field_modification_count_addr, G4_scratch);
__ tst(G4_scratch); __ tst(G4_scratch);
__ br(Assembler::zero, false, Assembler::pt, done); __ br(Assembler::zero, false, Assembler::pt, done);
@ -2408,9 +2403,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool i
// the time to call into the VM. // the time to call into the VM.
Label Label1; Label Label1;
assert_different_registers(Rcache, index, G1_scratch); assert_different_registers(Rcache, index, G1_scratch);
Address get_field_modification_count_addr(G1_scratch, AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
(address)JvmtiExport::get_field_modification_count_addr(),
relocInfo::none);
__ load_contents(get_field_modification_count_addr, G1_scratch); __ load_contents(get_field_modification_count_addr, G1_scratch);
__ tst(G1_scratch); __ tst(G1_scratch);
__ br(Assembler::zero, false, Assembler::pt, Label1); __ br(Assembler::zero, false, Assembler::pt, Label1);
@ -2433,7 +2426,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool i
// the type to determine where the object is. // the type to determine where the object is.
Label two_word, valsizeknown; Label two_word, valsizeknown;
__ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags); __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
__ mov(Lesp, G4_scratch); __ mov(Lesp, G4_scratch);
__ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
// Make sure we don't need to mask Rflags for tosBits after the above shift // Make sure we don't need to mask Rflags for tosBits after the above shift
@ -2689,8 +2682,7 @@ void TemplateTable::fast_storefield(TosState state) {
Label notVolatile, checkVolatile, exit; Label notVolatile, checkVolatile, exit;
if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
ConstantPoolCacheEntry::flags_offset())), Rflags);
__ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
__ and3(Rflags, Lscratch, Lscratch); __ and3(Rflags, Lscratch, Lscratch);
if (__ membar_has_effect(read_bits)) { if (__ membar_has_effect(read_bits)) {
@ -2702,8 +2694,7 @@ void TemplateTable::fast_storefield(TosState state) {
} }
} }
__ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
ConstantPoolCacheEntry::f2_offset())), Roffset);
pop_and_check_object(Rclass); pop_and_check_object(Rclass);
switch (bytecode()) { switch (bytecode()) {
@ -2755,7 +2746,7 @@ void TemplateTable::fast_xaccess(TosState state) {
// access constant pool cache (is resolved) // access constant pool cache (is resolved)
__ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset); __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
__ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
__ verify_oop(Rreceiver); __ verify_oop(Rreceiver);
@ -2775,7 +2766,7 @@ void TemplateTable::fast_xaccess(TosState state) {
if (__ membar_has_effect(membar_bits)) { if (__ membar_has_effect(membar_bits)) {
// Get is_volatile value in Rflags and check if membar is needed // Get is_volatile value in Rflags and check if membar is needed
__ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags); __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
// Test volatile // Test volatile
Label notVolatile; Label notVolatile;
@ -2853,8 +2844,8 @@ void TemplateTable::invokevirtual(int byte_no) {
__ verify_oop(O0); __ verify_oop(O0);
// get return address // get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ load_address(table); __ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift // Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
@ -2886,7 +2877,7 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
__ verify_oop(G5_method); __ verify_oop(G5_method);
// Load receiver from stack slot // Load receiver from stack slot
__ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch); __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0); __ load_receiver(G4_scratch, O0);
// receiver NULL check // receiver NULL check
@ -2895,8 +2886,8 @@ void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
__ profile_final_call(O4); __ profile_final_call(O4);
// get return address // get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ load_address(table); __ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift // Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
@ -2920,7 +2911,7 @@ void TemplateTable::invokespecial(int byte_no) {
__ verify_oop(G5_method); __ verify_oop(G5_method);
__ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch); __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
__ load_receiver(G4_scratch, O0); __ load_receiver(G4_scratch, O0);
// receiver NULL check // receiver NULL check
@ -2929,8 +2920,8 @@ void TemplateTable::invokespecial(int byte_no) {
__ profile_call(O4); __ profile_call(O4);
// get return address // get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ load_address(table); __ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift // Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
@ -2956,8 +2947,8 @@ void TemplateTable::invokestatic(int byte_no) {
__ profile_call(O4); __ profile_call(O4);
// get return address // get return address
Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table()); AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
__ load_address(table); __ set(table, Rtemp);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift // Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
@ -3021,8 +3012,8 @@ void TemplateTable::invokeinterface(int byte_no) {
__ mov(Rflags, Rret); __ mov(Rflags, Rret);
// get return address // get return address
Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table()); AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
__ load_address(table); __ set(table, Rscratch);
__ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
// Make sure we don't need to mask Rret for tosBits after the above shift // Make sure we don't need to mask Rret for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
@ -3059,7 +3050,7 @@ void TemplateTable::invokeinterface(int byte_no) {
Label search; Label search;
Register Rtemp = Rflags; Register Rtemp = Rflags;
__ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * wordSize), Rtemp); __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
if (align_object_offset(1) > 1) { if (align_object_offset(1) > 1) {
__ round_to(Rtemp, align_object_offset(1)); __ round_to(Rtemp, align_object_offset(1));
} }
@ -3125,6 +3116,24 @@ void TemplateTable::invokeinterface(int byte_no) {
} }
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
__ stop("invokedynamic NYI");//6815692//
}
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Allocation // Allocation
@ -3624,9 +3633,9 @@ void TemplateTable::wide() {
transition(vtos, vtos); transition(vtos, vtos);
__ ldub(Lbcp, 1, G3_scratch);// get next bc __ ldub(Lbcp, 1, G3_scratch);// get next bc
__ sll(G3_scratch, LogBytesPerWord, G3_scratch); __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
Address ep(G4_scratch, (address)Interpreter::_wentry_point); AddressLiteral ep(Interpreter::_wentry_point);
__ load_address(ep); __ set(ep, G4_scratch);
__ ld_ptr(ep.base(), G3_scratch, G3_scratch); __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
__ jmp(G3_scratch, G0); __ jmp(G3_scratch, G0);
__ delayed()->nop(); __ delayed()->nop();
// Note: the Lbcp increment step is part of the individual wide bytecode implementations // Note: the Lbcp increment step is part of the individual wide bytecode implementations
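The wide-bytecode dispatch above scales the next bytecode by the word size and jumps through the _wentry_point table. As C++ (illustrative typedef; the real table holds interpreter entry addresses):

    typedef void (*wide_entry_t)();

    void dispatch_wide(wide_entry_t* wentry_point, unsigned char next_bc) {
      wentry_point[next_bc]();  // ld_ptr [table + (bc << LogBytesPerWord)]; jmp
    }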
View file
@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -48,11 +48,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#ifndef PRODUCT #ifndef PRODUCT
if (CountCompiledCalls) { if (CountCompiledCalls) {
Address ctr(G5, SharedRuntime::nof_megamorphic_calls_addr()); __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
__ sethi(ctr);
__ ld(ctr, G3_scratch);
__ inc(G3_scratch);
__ st(G3_scratch, ctr);
} }
#endif /* PRODUCT */ #endif /* PRODUCT */
@ -154,11 +150,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#ifndef PRODUCT #ifndef PRODUCT
if (CountCompiledCalls) { if (CountCompiledCalls) {
Address ctr(L0, SharedRuntime::nof_megamorphic_calls_addr()); __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
__ sethi(ctr);
__ ld(ctr, L1);
__ inc(L1);
__ st(L1, ctr);
} }
#endif /* PRODUCT */ #endif /* PRODUCT */
@ -198,8 +190,8 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ delayed()->nop(); __ delayed()->nop();
__ bind(throw_icce); __ bind(throw_icce);
Address icce(G3_scratch, StubRoutines::throw_IncompatibleClassChangeError_entry()); AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
__ jump_to(icce, 0); __ jump_to(icce, G3_scratch);
__ delayed()->restore(); __ delayed()->restore();
masm->flush(); masm->flush();
View file
@ -189,20 +189,33 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
} }
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset) { void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_offset, bool giant_index) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (!giant_index) {
load_unsigned_short(reg, Address(rsi, bcp_offset));
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset));
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
}
}
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index,
int bcp_offset, bool giant_index) {
assert(cache != index, "must use different registers"); assert(cache != index, "must use different registers");
load_unsigned_short(index, Address(rsi, bcp_offset)); get_cache_index_at_bcp(index, bcp_offset, giant_index);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
} }
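The assert in this hunk, decode_secondary_index(~123) == 123, implies that secondary (giant) cache indices are stored as the bitwise complement of the plain index, which is why a single notl(reg) recovers it. A runnable sketch:

    #include <cassert>

    inline int decode_secondary_index(int stored) { return ~stored; }

    int main() {
      assert(decode_secondary_index(~123) == 123);  // matches the assert above
      return 0;
    }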
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) { void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); int bcp_offset, bool giant_index) {
assert(cache != tmp, "must use different register"); assert(cache != tmp, "must use different register");
load_unsigned_short(tmp, Address(rsi, bcp_offset)); get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset // and from word offset to byte offset
@ -1214,7 +1227,9 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
} }
void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp, Register reg2) { void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register mdp,
Register reg2,
bool receiver_can_be_null) {
if (ProfileInterpreter) { if (ProfileInterpreter) {
Label profile_continue; Label profile_continue;
@ -1224,8 +1239,15 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver, Register
// We are making a call. Increment the count. // We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
Label skip_receiver_profile;
if (receiver_can_be_null) {
testptr(receiver, receiver);
jcc(Assembler::zero, skip_receiver_profile);
}
// Record the receiver type. // Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2); record_klass_in_profile(receiver, mdp, reg2);
bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target. // The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp, update_mdp_by_constant(mdp,
View file
@ -76,8 +76,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
} }
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset); void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
// Expression stack // Expression stack
void f2ieee(); // truncate ftos to 32bits void f2ieee(); // truncate ftos to 32bits
@ -226,7 +227,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_not_taken_branch(Register mdp); void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp); void profile_call(Register mdp);
void profile_final_call(Register mdp); void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp, Register scratch2); void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp); void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp); void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch); void profile_typecheck(Register mdp, Register klass, Register scratch);
View file
@ -156,13 +156,22 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
} }
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
TosState incoming_state = state;
if (EnableInvokeDynamic) {
if (unbox) {
incoming_state = atos;
}
} else {
assert(!unbox, "old behavior");
}
Label interpreter_entry; Label interpreter_entry;
address compiled_entry = __ pc(); address compiled_entry = __ pc();
#ifdef COMPILER2 #ifdef COMPILER2
// The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
for (int i = 1; i < 8; i++) { for (int i = 1; i < 8; i++) {
__ ffree(i); __ ffree(i);
} }
@ -170,7 +179,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ empty_FPU_stack(); __ empty_FPU_stack();
} }
#endif #endif
if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) {
__ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
} else { } else {
__ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
@ -186,12 +195,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// In SSE mode, interpreter returns FP results in xmm0 but they need // In SSE mode, interpreter returns FP results in xmm0 but they need
// to end up back on the FPU so it can operate on them. // to end up back on the FPU so it can operate on them.
if (state == ftos && UseSSE >= 1) { if (incoming_state == ftos && UseSSE >= 1) {
__ subptr(rsp, wordSize); __ subptr(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0); __ movflt(Address(rsp, 0), xmm0);
__ fld_s(Address(rsp, 0)); __ fld_s(Address(rsp, 0));
__ addptr(rsp, wordSize); __ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) { } else if (incoming_state == dtos && UseSSE >= 2) {
__ subptr(rsp, 2*wordSize); __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0); __ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0)); __ fld_d(Address(rsp, 0));
@ -207,13 +216,102 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
__ get_cache_and_index_at_bcp(rbx, rcx, 1);
Label L_fail;
if (unbox && state != atos) {
// cast and unbox
BasicType type = as_BasicType(state);
if (type == T_BYTE) type = T_BOOLEAN; // FIXME
KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
__ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
__ testl(rax, rax);
Label L_got_value, L_get_value;
// convert nulls to zeroes (avoid NPEs here)
if (!(type == T_FLOAT || type == T_DOUBLE)) {
// if rax already contains zero bits, forge ahead
__ jcc(Assembler::zero, L_got_value);
} else {
__ jcc(Assembler::notZero, L_get_value);
__ fldz();
__ jmp(L_got_value);
}
__ bind(L_get_value);
__ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::notEqual, L_fail);
int offset = java_lang_boxing_object::value_offset_in_bytes(type);
// Cf. TemplateTable::getfield_or_static
switch (type) {
case T_BYTE: // fall through:
case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break;
case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break;
case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break;
case T_INT: __ movl(rax, Address(rax, offset)); break;
case T_FLOAT: __ fld_s(Address(rax, offset)); break;
case T_DOUBLE: __ fld_d(Address(rax, offset)); break;
// Access to java.lang.Double.value does not need to be atomic:
case T_LONG: { __ movl(rdx, Address(rax, offset + 4));
__ movl(rax, Address(rax, offset + 0)); } break;
default: ShouldNotReachHere();
}
__ bind(L_got_value);
}
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
__ jcc(Assembler::equal, L_giant_index);
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ bind(L_got_cache);
if (unbox && state == atos) {
// insert a casting conversion, to keep verifier sane
Label L_ok, L_ok_pops;
__ testl(rax, rax);
__ jcc(Assembler::zero, L_ok);
__ push(rax); // save the object to check
__ push(rbx); // save CP cache reference
__ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ movl(rbx, Address(rbx, rcx,
Address::times_4, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset()));
__ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
__ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
__ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
__ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
__ pop(rcx); // pop and discard CP cache
__ mov(rbx, rax); // target supertype into rbx for L_fail
__ pop(rax); // failed object into rax for L_fail
__ jmp(L_fail);
__ bind(L_ok_pops);
// restore pushed temp regs:
__ pop(rbx);
__ pop(rax);
__ bind(L_ok);
}
__ movl(rbx, Address(rbx, rcx, __ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() + Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset())); ConstantPoolCacheEntry::flags_offset()));
__ andptr(rbx, 0xFF); __ andptr(rbx, 0xFF);
__ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
__ dispatch_next(state, step); __ dispatch_next(state, step);
// out of the main line of code...
if (EnableInvokeDynamic) {
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ jmp(L_got_cache);
if (unbox) {
__ bind(L_fail);
__ push(rbx); // missed klass (required)
__ push(rax); // bad object (actual)
__ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
__ call(rdx);
}
}
return entry; return entry;
} }
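Stripped of register detail, the unbox path emitted above behaves as follows for a primitive expected type: a null result is converted to zero (fldz for float/double, the zero already in rax otherwise), a result whose klass is not the matching box class branches to the WrongMethodType failure path, and anything else loads the box's value field (longs read as two 32-bit halves). A rough illustrative model, not HotSpot source; the struct and names are stand-ins:

  #include <cstdint>
  #include <stdexcept>

  struct BoxSketch { const void* klass; int64_t value; };

  static int64_t unbox_result(const BoxSketch* result, const void* expected_box_klass) {
    if (result == nullptr) return 0;             // convert nulls to zeroes (no NPE)
    if (result->klass != expected_box_klass)     // wrong box class
      throw std::runtime_error("WrongMethodTypeException");
    return result->value;                        // load the box's value field
  }

  int main() {
    static const int klass_tag = 0;              // stand-in for the box klass
    BoxSketch b = { &klass_tag, 42 };
    return unbox_result(&b, &klass_tag) == 42 ? 0 : 1;
  }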
View file
@ -166,7 +166,8 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
int step) { int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
// amd64 doesn't need to do anything special about compiled returns // amd64 doesn't need to do anything special about compiled returns
// to the interpreter so the code that exists on x86 to place a sentinel // to the interpreter so the code that exists on x86 to place a sentinel
View file
@ -206,12 +206,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
#ifndef ASSERT #ifndef ASSERT
__ jmpb(patch_done); __ jmpb(patch_done);
__ bind(fast_patch);
}
#else #else
__ jmp(patch_done); __ jmp(patch_done);
#endif
__ bind(fast_patch); __ bind(fast_patch);
} }
#ifdef ASSERT
Label okay; Label okay;
__ load_unsigned_byte(scratch, at_bcp(0)); __ load_unsigned_byte(scratch, at_bcp(0));
__ cmpl(scratch, (int)Bytecodes::java_code(bytecode)); __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
@ -2105,6 +2105,7 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constrain
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) { void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
Register temp = rbx; Register temp = rbx;
@ -2112,16 +2113,19 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
const int shift_count = (1 + byte_no)*BitsPerByte; const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved; Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1); __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ movl(temp, Address(Rcache, if (is_invokedynamic) {
index, // we are resolved if the f1 field contains a non-null CallSite object
Address::times_ptr, __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); __ jcc(Assembler::notEqual, resolved);
__ shrl(temp, shift_count); } else {
// have we resolved this bytecode? __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ andptr(temp, 0xFF); __ shrl(temp, shift_count);
__ cmpl(temp, (int)bytecode()); // have we resolved this bytecode?
__ jcc(Assembler::equal, resolved); __ andl(temp, 0xFF);
__ cmpl(temp, (int)bytecode());
__ jcc(Assembler::equal, resolved);
}
// resolve first time through // resolve first time through
address entry; address entry;
@ -2134,12 +2138,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
default : ShouldNotReachHere(); break; default : ShouldNotReachHere(); break;
} }
__ movl(temp, (int)bytecode()); __ movl(temp, (int)bytecode());
__ call_VM(noreg, entry, temp); __ call_VM(noreg, entry, temp);
// Update registers with resolved info // Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1); __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ bind(resolved); __ bind(resolved);
} }
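The two "have we resolved this bytecode?" tests above differ by call kind: ordinary invokes compare the bytecode number cached in the indices word, while invokedynamic treats a non-null CallSite object in f1 as the resolved marker. A compact model of both tests (illustrative struct, not the real ConstantPoolCacheEntry layout):

  #include <cstdint>

  struct CacheEntrySketch { void* f1; uint32_t indices; };

  // byte_no is 1 or 2, as asserted in resolve_cache_and_index above.
  static bool is_resolved(const CacheEntrySketch& e, int bytecode,
                          int byte_no, bool is_invokedynamic) {
    if (is_invokedynamic)
      return e.f1 != nullptr;                    // non-null CallSite object
    int shift = (1 + byte_no) * 8;               // (1 + byte_no) * BitsPerByte
    return ((e.indices >> shift) & 0xFF) == (uint32_t) bytecode;
  }

  int main() {
    CacheEntrySketch e = { nullptr, 0xB6u << 16 };   // 0xB6 = invokevirtual
    return is_resolved(e, 0xB6, 1, false) ? 0 : 1;
  }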
@ -2884,12 +2889,17 @@ void TemplateTable::count_calls(Register method, Register temp) {
} }
void TemplateTable::prepare_invoke(Register method, Register index, int byte_no, Bytecodes::Code code) { void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
bool is_invdyn_bootstrap = (byte_no < 0);
if (is_invdyn_bootstrap) byte_no = -byte_no;
// determine flags // determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface; const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual; const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial; const bool is_invokespecial = code == Bytecodes::_invokespecial;
const bool load_receiver = code != Bytecodes::_invokestatic; const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
const bool receiver_null_check = is_invokespecial; const bool receiver_null_check = is_invokespecial;
const bool save_flags = is_invokeinterface || is_invokevirtual; const bool save_flags = is_invokeinterface || is_invokevirtual;
// setup registers & access constant pool cache // setup registers & access constant pool cache
@ -2897,6 +2907,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
const Register flags = rdx; const Register flags = rdx;
assert_different_registers(method, index, recv, flags); assert_different_registers(method, index, recv, flags);
assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
// save 'interpreter return address' // save 'interpreter return address'
__ save_bcp(); __ save_bcp();
@ -2907,8 +2919,13 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
__ movl(recv, flags); __ movl(recv, flags);
__ andl(recv, 0xFF); __ andl(recv, 0xFF);
// recv count is 0 based? // recv count is 0 based?
__ movptr(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1))); Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
__ verify_oop(recv); if (is_invokedynamic) {
__ lea(recv, recv_addr);
} else {
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
} }
// do null check if needed // do null check if needed
@ -2926,8 +2943,14 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
ConstantPoolCacheEntry::verify_tosBits(); ConstantPoolCacheEntry::verify_tosBits();
// load return address // load return address
{ {
ExternalAddress table(is_invokeinterface ? (address)Interpreter::return_5_addrs_by_index_table() : address table_addr;
(address)Interpreter::return_3_addrs_by_index_table()); if (is_invdyn_bootstrap)
table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
else if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
ExternalAddress table(table_addr);
__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
} }
@ -2990,7 +3013,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
void TemplateTable::invokevirtual(int byte_no) { void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode()); prepare_invoke(rbx, noreg, byte_no);
// rbx,: index // rbx,: index
// rcx: receiver // rcx: receiver
@ -3002,7 +3025,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) { void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode()); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
__ profile_call(rax); __ profile_call(rax);
@ -3012,7 +3035,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) { void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
prepare_invoke(rbx, noreg, byte_no, bytecode()); prepare_invoke(rbx, noreg, byte_no);
// do the call // do the call
__ verify_oop(rbx); __ verify_oop(rbx);
__ profile_call(rax); __ profile_call(rax);
@ -3028,7 +3051,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) { void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos); transition(vtos, vtos);
prepare_invoke(rax, rbx, byte_no, bytecode()); prepare_invoke(rax, rbx, byte_no);
// rax,: Interface // rax,: Interface
// rbx,: index // rbx,: index
@ -3102,6 +3125,84 @@ void TemplateTable::invokeinterface(int byte_no) {
__ should_not_reach_here(); __ should_not_reach_here();
} }
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
prepare_invoke(rax, rbx, byte_no);
// rax: CallSite object (f1)
// rbx: unused (f2)
// rcx: receiver address
// rdx: flags (unused)
if (ProfileInterpreter) {
Label L;
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
__ profile_call(rsi);
}
Label handle_unlinked_site;
__ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
__ testptr(rcx, rcx);
__ jcc(Assembler::zero, handle_unlinked_site);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx, rdx);
// Initial calls come here...
__ bind(handle_unlinked_site);
__ pop(rcx); // remove return address pushed by prepare_invoke
// box stacked arguments into an array for the bootstrap method
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
__ restore_bcp(); // rsi must be correct for call_VM
__ call_VM(rax, entry, rax);
__ movl(rdi, rax); // protect bootstrap MH from prepare_invoke
// recompute return address
__ restore_bcp(); // rsi must be correct for prepare_invoke
prepare_invoke(rax, rbx, -byte_no); // smashes rcx, rdx
// rax: CallSite object (f1)
// rbx: unused (f2)
// rdi: bootstrap MH
// rdx: flags
// now load up the arglist, which has been neatly boxed
__ get_thread(rcx);
__ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
__ verify_oop(rdx);
// rdx = arglist
// save SP now, before we add the bootstrap call to the stack
// We must preserve a fiction that the original arguments are outgoing,
// because the return sequence will reset the stack to this point
// and then pop all those arguments. It seems error-prone to use
// a different argument list size just for bootstrapping.
__ prepare_to_jump_from_interpreted();
// Now let's play adapter, pushing the real arguments on the stack.
__ pop(rbx); // return PC
__ push(rdi); // boot MH
__ push(rax); // call site
__ push(rdx); // arglist
__ push(rbx); // return PC, again
__ mov(rcx, rdi);
__ jump_to_method_handle_entry(rcx, rdx);
}
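Stripped of the register shuffling, the invokedynamic template above is a two-path dispatch: if the CallSite already holds a target method handle, jump straight to it; otherwise up-call into the VM (bootstrap_invokedynamic) to obtain the bootstrap method handle, then invoke that handle with the call site and the boxed argument list. A hypothetical model of the control flow only, with function pointers standing in for method handles and generated code:

  #include <cstdio>

  struct MethodHandleSketch { void (*entry)(void*); };
  struct CallSiteSketch     { MethodHandleSketch* target; };

  static MethodHandleSketch* vm_bootstrap(CallSiteSketch* site) {
    // Models the VM up-call that links the site via the bootstrap method.
    static MethodHandleSketch linked = { [](void*) { std::puts("linked target"); } };
    site->target = &linked;
    static MethodHandleSketch boot = { [](void* s) {
      static_cast<CallSiteSketch*>(s)->target->entry(nullptr);
    } };
    return &boot;
  }

  static void invokedynamic_sketch(CallSiteSketch* site) {
    if (site->target != nullptr) {                   // fast path: already linked
      site->target->entry(nullptr);
      return;
    }
    MethodHandleSketch* boot = vm_bootstrap(site);   // initial call: link first
    boot->entry(site);                               // bootstrap MH consumes the site
  }

  int main() {
    CallSiteSketch site = { nullptr };
    invokedynamic_sketch(&site);                     // slow path, links the site
    invokedynamic_sketch(&site);                     // fast path thereafter
    return 0;
  }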
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Allocation // Allocation
View file
@ -22,8 +22,7 @@
* *
*/ */
static void prepare_invoke(Register method, Register index, int byte_no, static void prepare_invoke(Register method, Register index, int byte_no);
Bytecodes::Code code);
static void invokevirtual_helper(Register index, Register recv, static void invokevirtual_helper(Register index, Register recv,
Register flags); Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint ); static void volatile_barrier(Assembler::Membar_mask_bits order_constraint );
View file
@ -3058,6 +3058,23 @@ void TemplateTable::invokeinterface(int byte_no) {
return; return;
} }
void TemplateTable::invokedynamic(int byte_no) {
transition(vtos, vtos);
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
__ stop("invokedynamic NYI");//6815692//
}
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Allocation // Allocation
View file
@ -2632,6 +2632,8 @@ bool os::can_execute_large_page_memory() {
char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) { char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
if (UseLargePagesIndividualAllocation) { if (UseLargePagesIndividualAllocation) {
if (TracePageSizes && Verbose) { if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually."); tty->print_cr("Reserving large pages individually.");
@ -2694,13 +2696,7 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
p_new = (char *) VirtualAlloc(next_alloc_addr, p_new = (char *) VirtualAlloc(next_alloc_addr,
bytes_to_rq, bytes_to_rq,
MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
PAGE_READWRITE); prot);
if (p_new != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(next_alloc_addr, bytes_to_rq,
PAGE_EXECUTE_READWRITE, &oldprot);
}
} }
if (p_new == NULL) { if (p_new == NULL) {
@ -2729,12 +2725,7 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
} else { } else {
// normal policy just allocate it all at once // normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE); char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
if (res != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(res, bytes, PAGE_EXECUTE_READWRITE, &oldprot);
}
return res; return res;
} }
} }
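The simplification above replaces allocate-then-reprotect with choosing the final protection once: the old code allocated PAGE_READWRITE and then called VirtualProtect to add execute permission, while the new code hands the computed protection straight to VirtualAlloc. A minimal sketch of the resulting shape (Win32 API; the wrapper name is illustrative):

  #include <windows.h>

  static char* reserve_special_sketch(SIZE_T bytes, bool exec) {
    const DWORD prot  = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
    const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    // One call with the right protection from the start; no VirtualProtect fixup.
    return (char*) VirtualAlloc(NULL, bytes, flags, prot);
  }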
View file
@ -39,7 +39,7 @@ define_pd_global(uintx, JVMInvokeMethodSlack, 8*K);
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases // ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high. // to run while keeping the number of threads that can be created high.
define_pd_global(intx, ThreadStackSize, 320); define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 256); define_pd_global(intx, VMThreadStackSize, 512);
define_pd_global(intx, SurvivorRatio, 8); define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(uintx, JVMInvokeMethodSlack, 10*K); define_pd_global(uintx, JVMInvokeMethodSlack, 10*K);
#endif // AMD64 #endif // AMD64
View file
@ -247,7 +247,7 @@ class BuildConfig {
sysDefines.add("HOTSPOT_BUILD_USER="+System.getProperty("user.name")); sysDefines.add("HOTSPOT_BUILD_USER="+System.getProperty("user.name"));
sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\""); sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
sysDefines.add("_JNI_IMPLEMENTATION_"); sysDefines.add("_JNI_IMPLEMENTATION_");
sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i486\\\""); sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
sysDefines.addAll(defines); sysDefines.addAll(defines);
View file
@ -22,61 +22,75 @@
# #
# #
# Single gnu makefile for solaris, linux and windows (windows requires mks or # Single gnu makefile for solaris, linux and windows (windows requires cygwin and mingw)
# cygwin).
ifeq ($(BINUTILS),)
# Pop all the way out of the workspace to look for binutils.
# ...You probably want to override this setting.
BINUTILS = $(shell cd ../../../../..;pwd)/binutils-2.17-$(LIBARCH)
endif
# Default arch; it is changed below as needed. # Default arch; it is changed below as needed.
ARCH = i386 ARCH = i386
OS = $(shell uname) OS = $(shell uname)
CPPFLAGS += -I$(BINUTILS)/include -I$(BINUTILS)/bfd
CPPFLAGS += -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\" -DLIBARCH_$(LIBARCH)
CPPFLAGS += -DHOTSPOT_OS=\"$(OS)\" -DOS_$(OS)
## OS = SunOS ## ## OS = SunOS ##
ifeq ($(OS),SunOS) ifeq ($(OS),SunOS)
ARCH = $(shell uname -p) CPU = $(shell uname -p)
ARCH1=$(CPU:i586=i386)
ARCH=$(ARCH1:i686=i386)
OS = solaris OS = solaris
CC = cc CC = cc
CCFLAGS += -Kpic -g CFLAGS += -KPIC
CCFLAGS/amd64 += -xarch=amd64 ifdef LP64
CCFLAGS/sparcv9 += -xarch=v9 ifeq ($(ARCH),sparc)
CCFLAGS += $(CCFLAGS/$(LIBARCH)) ARCH = sparcv9
endif
ifeq ($(ARCH),i386)
ARCH = amd64
endif
endif
CFLAGS/sparcv9 += -xarch=v9
CFLAGS/amd64 += -m64
CFLAGS += $(CFLAGS/$(ARCH))
DLDFLAGS += -G DLDFLAGS += -G
LDFLAGS += -ldl
OUTFLAGS += -o $@ OUTFLAGS += -o $@
LIB_EXT = .so LIB_EXT = .so
else else
## OS = Linux ## ## OS = Linux ##
ifeq ($(OS),Linux) ifeq ($(OS),Linux)
CPU = $(shell uname -m) ifneq ($(MINGW),)
ifeq ($(CPU),ia64) LIB_EXT = .dll
ARCH = ia64 CPPFLAGS += -I$(TARGET_DIR)/include
LDFLAGS += -L$(TARGET_DIR)/lib
OS=windows
ifneq ($(findstring x86_64-,$(MINGW)),)
ARCH=amd64
else else
ifeq ($(CPU),x86_64) ARCH=i386
CCFLAGS += -fPIC endif
endif # x86_64 CC = $(MINGW)-gcc
endif # ia64 CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW)
else
CPU = $(shell uname -m)
ARCH1=$(CPU:x86_64=amd64)
ARCH=$(ARCH1:i686=i386)
CFLAGS/i386 += -m32
CFLAGS/sparc += -m32
CFLAGS/sparcv9 += -m64
CFLAGS/amd64 += -m64
CFLAGS += $(CFLAGS/$(ARCH))
CFLAGS += -fPIC
OS = linux OS = linux
CC = gcc
CCFLAGS += -O
DLDFLAGS += -shared
OUTFLAGS += -o $@
LIB_EXT = .so LIB_EXT = .so
CPPFLAGS += -Iinclude -Iinclude/$(OS)_$(ARCH)/ CC = gcc
endif
CFLAGS += -O
DLDFLAGS += -shared
LDFLAGS += -ldl
OUTFLAGS += -o $@
## OS = Windows ## ## OS = Windows ##
else # !SunOS, !Linux => Windows else # !SunOS, !Linux => Windows
OS = win OS = windows
CC = cl CC = gcc
#CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG" #CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG"
CCFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- CFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi-
CCFLAGS += -Iinclude -Iinclude/gnu -Iinclude/$(OS)_$(ARCH) CFLAGS += LIBARCH=\"$(LIBARCH)\"
CCFLAGS += /D"HOTSPOT_LIB_ARCH=\"$(LIBARCH)\""
DLDFLAGS += /dll /subsystem:windows /incremental:no \ DLDFLAGS += /dll /subsystem:windows /incremental:no \
/export:decode_instruction /export:decode_instruction
OUTFLAGS += /link /out:$@ OUTFLAGS += /link /out:$@
@ -94,21 +108,34 @@ LIBARCH = $(LIBARCH64)
endif # LIBARCH64/$(ARCH) endif # LIBARCH64/$(ARCH)
endif # LP64 endif # LP64
TARGET_DIR = bin/$(OS) JDKARCH=$(LIBARCH:i386=i586)
ifeq ($(BINUTILS),)
# Pop all the way out of the workspace to look for binutils.
# ...You probably want to override this setting.
BINUTILSDIR = $(shell cd build/binutils;pwd)
else
BINUTILSDIR = $(shell cd $(BINUTILS);pwd)
endif
CPPFLAGS += -I$(BINUTILSDIR)/include -I$(BINUTILS)/bfd -I$(TARGET_DIR)/bfd
CPPFLAGS += -DLIBARCH_$(LIBARCH) -DLIBARCH=\"$(LIBARCH)\" -DLIB_EXT=\"$(LIB_EXT)\"
TARGET_DIR = build/$(OS)-$(JDKARCH)
TARGET = $(TARGET_DIR)/hsdis-$(LIBARCH)$(LIB_EXT) TARGET = $(TARGET_DIR)/hsdis-$(LIBARCH)$(LIB_EXT)
SOURCE = hsdis.c SOURCE = hsdis.c
LIBRARIES = $(BINUTILS)/bfd/libbfd.a \ LIBRARIES = $(TARGET_DIR)/bfd/libbfd.a \
$(BINUTILS)/opcodes/libopcodes.a \ $(TARGET_DIR)/opcodes/libopcodes.a \
$(BINUTILS)/libiberty/libiberty.a $(TARGET_DIR)/libiberty/libiberty.a
DEMO_TARGET = $(TARGET_DIR)/hsdis-demo-$(LIBARCH) DEMO_TARGET = $(TARGET_DIR)/hsdis-demo
DEMO_SOURCE = hsdis-demo.c DEMO_SOURCE = hsdis-demo.c
.PHONY: all clean demo both .PHONY: all clean demo both
all: $(TARGET) demo all: $(TARGET)
both: all all64 both: all all64
@ -117,16 +144,17 @@ both: all all64
demo: $(TARGET) $(DEMO_TARGET) demo: $(TARGET) $(DEMO_TARGET)
$(LIBRARIES): $(LIBRARIES): $(TARGET_DIR) $(TARGET_DIR)/Makefile
@echo "*** Please build binutils first; see ./README: ***" if [ ! -f $@ ]; then cd $(TARGET_DIR); make all-opcodes; fi
@sed < ./README '1,/__________/d' | head -20
@echo "..."; exit 1 $(TARGET_DIR)/Makefile:
(cd $(TARGET_DIR); CC=$(CC) CFLAGS="$(CFLAGS)" $(BINUTILSDIR)/configure --disable-nls $(CONFIGURE_ARGS))
$(TARGET): $(SOURCE) $(LIBS) $(LIBRARIES) $(TARGET_DIR) $(TARGET): $(SOURCE) $(LIBS) $(LIBRARIES) $(TARGET_DIR)
$(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES) $(CC) $(OUTFLAGS) $(CPPFLAGS) $(CFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES)
$(DEMO_TARGET): $(DEMO_SOURCE) $(TARGET) $(TARGET_DIR) $(DEMO_TARGET): $(DEMO_SOURCE) $(TARGET) $(TARGET_DIR)
$(CC) $(OUTFLAGS) $(CPPFLAGS) $(CCFLAGS) $(DEMO_SOURCE) $(LDFLAGS) $(CC) $(OUTFLAGS) -DTARGET_DIR=\"$(TARGET_DIR)\" $(CPPFLAGS) -g $(CFLAGS/$(ARCH)) $(DEMO_SOURCE) $(LDFLAGS)
$(TARGET_DIR): $(TARGET_DIR):
[ -d $@ ] || mkdir -p $@ [ -d $@ ] || mkdir -p $@
View file
@ -32,61 +32,55 @@ you do not have a version that is new enough.
* Building * Building
To build this project you need a build of Gnu binutils to link against. To build this project you need a copy of GNU binutils to build against. It
It is known to work with binutils 2.17. is known to work with binutils 2.17 and binutils 2.19.1. Download a
copy of the software from http://directory.fsf.org/project/binutils or
one of its mirrors. Builds targeting Windows should use at least
2.19 and currently require the use of a cross compiler.
The makefile looks for this build in $BINUTILS, or (if that is not set), The makefile looks for the sources in build/binutils or you can
in .../binutils-2.17-$LIBARCH, where LIBARCH (as in HotSpot) is one of specify its location to the makefile using BINUTILS=path. It will
the jre subdirectory keywords i386, amd64, sparc, sparcv9, etc. configure binutils and build it first and then build and link the
disassembly adapter. "make all" will build the default target for your
platform. If your platform supports both 32 and 64 bits simultaneously then
"make both" will build them both at once. "make all64" will
explicitly build the 64-bit version. By default this will build the
disassembler library only. If you build the demo target it will build a demo
program that attempts to exercise the library.
To build Gnu binutils, first download a copy of the software: Windows
http://directory.fsf.org/project/binutils/
Unpack the binutils tarball into an empty directory: In theory this should be buildable on Windows but getting a working
chdir ../../../../.. GNU build environment on Windows has proven difficult. MINGW should
tar -xzf - < ../binutils-2.17.tar.gz be able to do it but at the time of this writing I was unable to get
mv binutils-2.17 binutils-2.17-i386 #or binutils-2.17-sparc this working. Instead you can use the mingw cross compiler on linux
cd binutils-2.17-i386 to produce the windows binaries. For 32-bit windows you can install
mingw32 using your package manager and it will be added to your path
automatically. For 64-bit you need to download the 64-bit mingw from
http://sourceforge.net/projects/mingw-w64. Grab a copy of the
complete toolchain and unpack it somewhere. Put the bin directory of
the toolchain in your path. The mingw installs contain cross compile
versions of gcc that are named with a prefix to indicate what they are
targeting and you must tell the Makefile which one to use. This
should either be i586-mingw32msvc or x86_64-pc-mingw32 depending on
which one you are targeting and there should be a version of gcc in
your path named i586-mingw32msvc-gcc or x86_64-pc-mingw32-gcc. Tell
the makefile what prefix to use to find the mingw tools by using
MINGW=. For example:
From inside that directory, run configure and make: make MINGW=i586-mingw32msvc BINUTILS=build/binutils-2.19.1
( export CFLAGS='-fPIC'
./configure i386-pc-elf )
gnumake
(Leave out or change the argument to configure if not on an i386 system.) will build the Win32 cross compiled version of hsdis based on 2.19.1.
Next, untar again into another empty directory for the LP64 version:
chdir ..
tar -xzf - < ../binutils-2.17.tar.gz
mv binutils-2.17 binutils-2.17-amd64 #or binutils-2.17-sparcv9
cd binutils-2.17-amd64
From inside that directory, run configure for LP64 and make:
( export ac_cv_c_bigendian=no CFLAGS='-m64 -fPIC' LDFLAGS=-m64
./configure amd64-pc-elf )
gnumake
The -fPIC option is needed because the generated code will be
linked into the hsdis-$LIBARCH.so binary. If you miss the
option, the JVM will fail to load the disassembler.
You probably want two builds, one for 32 and one for 64 bits.
To build the 64-bit variation of a platform, add LP64=1 to
the make command line for hsdis.
So, go back to the hsdis project and build:
chdir .../hsdis
gnumake
gnumake LP64=1
* Installing * Installing
Products are named like bin/$OS/hsdis-$LIBARCH.so. Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so. You can
You can install them on your LD_LIBRARY_PATH, install them on your LD_LIBRARY_PATH, or inside of your JRE next to
or inside of your JRE next to $LIBARCH/libjvm.so. $LIBARCH/libjvm.so.
Now test: Now test:
export LD_LIBRARY_PATH .../hsdis/bin/solaris:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH .../hsdis/build/$OS-$LIBARCH:$LD_LIBRARY_PATH
dargs='-XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly' dargs='-XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly'
dargs=$dargs' -XX:PrintAssemblyOptions=hsdis-print-bytes' dargs=$dargs' -XX:PrintAssemblyOptions=hsdis-print-bytes'
java $dargs -Xbatch CompileCommand=print,*String.hashCode HelloWorld java $dargs -Xbatch CompileCommand=print,*String.hashCode HelloWorld
View file
@ -53,7 +53,7 @@ int main(int ac, char** av) {
else if (!strncmp(arg, "-options=", 9)) else if (!strncmp(arg, "-options=", 9))
options = arg+9; options = arg+9;
else else
{ printf("Usage: %s [-xml] [name...]\n"); exit(2); } { printf("Usage: %s [-xml] [name...]\n", av[0]); exit(2); }
continue; continue;
} }
greet(arg); greet(arg);
@ -76,26 +76,14 @@ void end_of_file() { }
#include "dlfcn.h" #include "dlfcn.h"
#ifdef HOTSPOT_LIB_ARCH
#define LIBARCH HOTSPOT_LIB_ARCH
#endif
#ifdef HOTSPOT_OS
#define OS HOTSPOT_OS
#endif
#define DECODE_INSTRUCTIONS_NAME "decode_instructions" #define DECODE_INSTRUCTIONS_NAME "decode_instructions"
#define HSDIS_NAME "hsdis" #define HSDIS_NAME "hsdis"
static void* decode_instructions_pv = 0; static void* decode_instructions_pv = 0;
static const char* hsdis_path[] = { static const char* hsdis_path[] = {
HSDIS_NAME".so", HSDIS_NAME"-"LIBARCH LIB_EXT,
#ifdef OS "./" HSDIS_NAME"-"LIBARCH LIB_EXT,
"bin/"OS"/"HSDIS_NAME".so", #ifdef TARGET_DIR
#endif TARGET_DIR"/"HSDIS_NAME"-"LIBARCH LIB_EXT,
#ifdef LIBARCH
HSDIS_NAME"-"LIBARCH".so",
#ifdef OS
"bin/"OS"/"HSDIS_NAME"-"LIBARCH".so",
#endif
#endif #endif
NULL NULL
}; };
@ -112,7 +100,7 @@ static const char* load_decode_instructions() {
for (dllib = NULL; dllib == NULL; ) { for (dllib = NULL; dllib == NULL; ) {
const char* next_lib = (*next_in_path++); const char* next_lib = (*next_in_path++);
if (next_lib == NULL) if (next_lib == NULL)
return "cannot find plugin "HSDIS_NAME".so"; return "cannot find plugin "HSDIS_NAME LIB_EXT;
dllib = dlopen(next_lib, RTLD_LAZY); dllib = dlopen(next_lib, RTLD_LAZY);
} }
} }
View file
@ -33,6 +33,7 @@
#include <libiberty.h> #include <libiberty.h>
#include <bfd.h> #include <bfd.h>
#include <dis-asm.h> #include <dis-asm.h>
#include <inttypes.h>
#ifndef bool #ifndef bool
#define bool int #define bool int
@ -404,21 +405,21 @@ static const bfd_arch_info_type* find_arch_info(const char* arch_name) {
} }
static const char* native_arch_name() { static const char* native_arch_name() {
const char* res = HOTSPOT_LIB_ARCH; const char* res = NULL;
#ifdef LIBARCH_i386
res = "i386";
#endif
#ifdef LIBARCH_amd64 #ifdef LIBARCH_amd64
res = "i386:x86-64"; res = "i386:x86-64";
#endif #endif
#ifdef LIBARCH_sparc #ifdef LIBARCH_sparc
res = "sparc:v8plusb"; res = "sparc:v8plusb";
#endif #endif
#ifdef LIBARCH_sparc
res = "sparc:v8plusb";
#endif
#ifdef LIBARCH_sparcv9 #ifdef LIBARCH_sparcv9
res = "sparc:v9b"; res = "sparc:v9b";
#endif #endif
if (res == NULL) if (res == NULL)
res = "HOTSPOT_LIB_ARCH is not set in Makefile!"; res = "architecture not set in Makefile!";
return res; return res;
} }
View file
@ -1524,6 +1524,11 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = Bytecodes::_invokespecial; code = Bytecodes::_invokespecial;
} }
if (code == Bytecodes::_invokedynamic) {
BAILOUT("invokedynamic NYI"); // FIXME
return;
}
// NEEDS_CLEANUP // NEEDS_CLEANUP
// I've added the target-is_loaded() test below but I don't really understand // I've added the target-is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false. // how klass->is_loaded() can be true and yet target->is_loaded() is false.
@ -2431,8 +2436,8 @@ BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) {
case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface: invoke(code); break; case Bytecodes::_invokeinterface: invoke(code); break;
case Bytecodes::_xxxunusedxxx : ShouldNotReachHere(); break;
case Bytecodes::_new : new_instance(s.get_index_big()); break; case Bytecodes::_new : new_instance(s.get_index_big()); break;
case Bytecodes::_newarray : new_type_array(); break; case Bytecodes::_newarray : new_type_array(); break;
case Bytecodes::_anewarray : new_object_array(); break; case Bytecodes::_anewarray : new_object_array(); break;
@ -2571,6 +2576,7 @@ void GraphBuilder::initialize() {
, Bytecodes::_invokevirtual , Bytecodes::_invokevirtual
, Bytecodes::_invokespecial , Bytecodes::_invokespecial
, Bytecodes::_invokestatic , Bytecodes::_invokestatic
, Bytecodes::_invokedynamic
, Bytecodes::_invokeinterface , Bytecodes::_invokeinterface
, Bytecodes::_new , Bytecodes::_new
, Bytecodes::_newarray , Bytecodes::_newarray
View file
@ -2956,9 +2956,11 @@ void LinearScan::do_linear_scan() {
NOT_PRODUCT(print_intervals("After Register Allocation")); NOT_PRODUCT(print_intervals("After Register Allocation"));
NOT_PRODUCT(print_lir(2, "LIR after register allocation:")); NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));
DEBUG_ONLY(verify());
sort_intervals_after_allocation(); sort_intervals_after_allocation();
DEBUG_ONLY(verify());
eliminate_spill_moves(); eliminate_spill_moves();
assign_reg_num(); assign_reg_num();
CHECK_BAILOUT(); CHECK_BAILOUT();
@ -3147,6 +3149,16 @@ void LinearScan::verify_intervals() {
void LinearScan::verify_no_oops_in_fixed_intervals() { void LinearScan::verify_no_oops_in_fixed_intervals() {
Interval* fixed_intervals;
Interval* other_intervals;
create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);
// to ensure a walking until the last instruction id, add a dummy interval
// with a high operation id
other_intervals = new Interval(any_reg);
other_intervals->add_range(max_jint - 2, max_jint - 1);
IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);
LIR_OpVisitState visitor; LIR_OpVisitState visitor;
for (int i = 0; i < block_count(); i++) { for (int i = 0; i < block_count(); i++) {
BlockBegin* block = block_at(i); BlockBegin* block = block_at(i);
@ -3159,6 +3171,54 @@ void LinearScan::verify_no_oops_in_fixed_intervals() {
visitor.visit(op); visitor.visit(op);
if (visitor.info_count() > 0) {
iw->walk_before(op->id());
bool check_live = true;
if (op->code() == lir_move) {
LIR_Op1* move = (LIR_Op1*)op;
check_live = (move->patch_code() == lir_patch_none);
}
LIR_OpBranch* branch = op->as_OpBranch();
if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
// Don't bother checking the stub in this case since the
// exception stub will never return to normal control flow.
check_live = false;
}
// Make sure none of the fixed registers is live across an
// oopmap since we can't handle that correctly.
if (check_live) {
for (Interval* interval = iw->active_first(fixedKind);
interval != Interval::end();
interval = interval->next()) {
if (interval->current_to() > op->id() + 1) {
// This interval is live out of this op so make sure
// that this interval represents some value that's
// referenced by this op either as an input or output.
bool ok = false;
for_each_visitor_mode(mode) {
int n = visitor.opr_count(mode);
for (int k = 0; k < n; k++) {
LIR_Opr opr = visitor.opr_at(mode, k);
if (opr->is_fixed_cpu()) {
if (interval_at(reg_num(opr)) == interval) {
ok = true;
break;
}
int hi = reg_numHi(opr);
if (hi != -1 && interval_at(hi) == interval) {
ok = true;
break;
}
}
}
}
assert(ok, "fixed intervals should never be live across an oopmap point");
}
}
}
}
// oop-maps at calls do not contain registers, so check is not needed // oop-maps at calls do not contain registers, so check is not needed
if (!visitor.has_call()) { if (!visitor.has_call()) {
View file
@ -833,6 +833,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_invokevirtual: case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface: case Bytecodes::_invokeinterface:
{ bool will_link; { bool will_link;
ciMethod* target = s.get_method(will_link); ciMethod* target = s.get_method(will_link);
@ -848,9 +849,6 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
} }
} }
break; break;
case Bytecodes::_xxxunusedxxx:
ShouldNotReachHere();
break;
case Bytecodes::_new: case Bytecodes::_new:
state.apush(allocated_obj); state.apush(allocated_obj);
break; break;
View file
@ -301,17 +301,19 @@ int ciBytecodeStream::get_field_signature_index() {
// If this is a method invocation bytecode, get the constant pool // If this is a method invocation bytecode, get the constant pool
// index of the invoked method. // index of the invoked method.
int ciBytecodeStream::get_method_index() { int ciBytecodeStream::get_method_index() {
#ifdef ASSERT
switch (cur_bc()) { switch (cur_bc()) {
case Bytecodes::_invokeinterface: case Bytecodes::_invokeinterface:
return Bytes::get_Java_u2(_pc-4);
case Bytecodes::_invokevirtual: case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
return get_index_big(); case Bytecodes::_invokedynamic:
break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
return 0;
} }
#endif
return get_index_int();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -337,6 +339,9 @@ ciMethod* ciBytecodeStream::get_method(bool& will_link) {
// for checking linkability when retrieving the associated method. // for checking linkability when retrieving the associated method.
ciKlass* ciBytecodeStream::get_declared_method_holder() { ciKlass* ciBytecodeStream::get_declared_method_holder() {
bool ignore; bool ignore;
// report as Dynamic for invokedynamic, which is syntactically classless
if (cur_bc() == Bytecodes::_invokedynamic)
return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_Dynamic(), false);
return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore); return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore);
} }
View file
@ -91,9 +91,10 @@ public:
_end = _start + max; _end = _start + max;
} }
address cur_bcp() { return _bc_start; } // Returns bcp to current instruction address cur_bcp() const { return _bc_start; } // Returns bcp to current instruction
int next_bci() const { return _pc -_start; } int next_bci() const { return _pc -_start; }
int cur_bci() const { return _bc_start - _start; } int cur_bci() const { return _bc_start - _start; }
int instruction_size() const { return _pc - _bc_start; }
Bytecodes::Code cur_bc() const{ return check_java(_bc); } Bytecodes::Code cur_bc() const{ return check_java(_bc); }
Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); } Bytecodes::Code next_bc() { return Bytecodes::java_code((Bytecodes::Code)* _pc); }
@ -121,34 +122,39 @@ public:
return check_java(_bc); return check_java(_bc);
} }
bool is_wide() { return ( _pc == _was_wide ); } bool is_wide() const { return ( _pc == _was_wide ); }
// Get a byte index following this bytecode. // Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index. // If prefixed with a wide bytecode, get a wide index.
int get_index() const { int get_index() const {
assert_index_size(is_wide() ? 2 : 1);
return (_pc == _was_wide) // was widened? return (_pc == _was_wide) // was widened?
? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index ? Bytes::get_Java_u2(_bc_start+2) // yes, return wide index
: _bc_start[1]; // no, return narrow index : _bc_start[1]; // no, return narrow index
} }
// Set a byte index following this bytecode. // Get 2-byte index (getfield/putstatic/etc)
// If prefixed with a wide bytecode, get a wide index. int get_index_big() const {
void put_index(int idx) { assert_index_size(2);
if (_pc == _was_wide) // was widened? return Bytes::get_Java_u2(_bc_start+1);
Bytes::put_Java_u2(_bc_start+2,idx); // yes, set wide index
else
_bc_start[1]=idx; // no, set narrow index
} }
// Get 2-byte index (getfield/putstatic/etc) // Get 2-byte index (or 4-byte, for invokedynamic)
int get_index_big() const { return Bytes::get_Java_u2(_bc_start+1); } int get_index_int() const {
return has_giant_index() ? get_index_giant() : get_index_big();
}
// Get 4-byte index, for invokedynamic.
int get_index_giant() const {
assert_index_size(4);
return Bytes::get_native_u4(_bc_start+1);
}
bool has_giant_index() const { return (cur_bc() == Bytecodes::_invokedynamic); }
// Get dimensions byte (multinewarray) // Get dimensions byte (multinewarray)
int get_dimensions() const { return *(unsigned char*)(_pc-1); } int get_dimensions() const { return *(unsigned char*)(_pc-1); }
// Get unsigned index fast
int get_index_fast() const { return Bytes::get_native_u2(_pc-2); }
// Sign-extended index byte/short, no widening // Sign-extended index byte/short, no widening
int get_byte() const { return (int8_t)(_pc[-1]); } int get_byte() const { return (int8_t)(_pc[-1]); }
int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); } int get_short() const { return (int16_t)Bytes::get_Java_u2(_pc-2); }
@ -225,6 +231,22 @@ public:
ciKlass* get_declared_method_holder(); ciKlass* get_declared_method_holder();
int get_method_holder_index(); int get_method_holder_index();
int get_method_signature_index(); int get_method_signature_index();
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
int isize = instruction_size() - (is_wide() ? 1 : 0) - 1;
if (isize == 2 && cur_bc() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (has_giant_index())
isize = 4;
else
isize = 2;
assert(isize == required_size, "wrong index size");
#endif
}
}; };
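assert_index_size above back-derives the operand width from the instruction length: total bytes, minus the wide prefix if present, minus one opcode byte, with iinc narrowed to a 1-byte index and invokedynamic as the lone 4-byte (giant) case. The core arithmetic as a standalone check (hypothetical helper, not ciBytecodeStream):

  #include <cassert>

  // Operand bytes = instruction length - wide prefix (if any) - 1 opcode byte.
  static int operand_bytes(int instruction_size, bool wide) {
    return instruction_size - (wide ? 1 : 0) - 1;
  }

  int main() {
    assert(operand_bytes(3, false) == 2);  // getfield: opcode + u2 index
    assert(operand_bytes(5, false) == 4);  // invokedynamic: opcode + 4 index bytes
    return 0;
  }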
View file
@ -1217,31 +1217,34 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
// valid class file. The class loader will check everything else. // valid class file. The class loader will check everything else.
if (strchr(buffer, '.') == NULL) { if (strchr(buffer, '.') == NULL) {
_compile_the_world_counter++; _compile_the_world_counter++;
if (_compile_the_world_counter >= CompileTheWorldStartAt && _compile_the_world_counter <= CompileTheWorldStopAt) { if (_compile_the_world_counter > CompileTheWorldStopAt) return;
// Construct name without extension
symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK); // Construct name without extension
// Use loader to load and initialize class symbolHandle sym = oopFactory::new_symbol_handle(buffer, CHECK);
klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD); // Use loader to load and initialize class
instanceKlassHandle k (THREAD, ik); klassOop ik = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD);
if (k.not_null() && !HAS_PENDING_EXCEPTION) { instanceKlassHandle k (THREAD, ik);
k->initialize(THREAD); if (k.not_null() && !HAS_PENDING_EXCEPTION) {
k->initialize(THREAD);
}
bool exception_occurred = HAS_PENDING_EXCEPTION;
CLEAR_PENDING_EXCEPTION;
if (CompileTheWorldPreloadClasses && k.not_null()) {
constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD);
if (HAS_PENDING_EXCEPTION) {
// If something went wrong in preloading we just ignore it
CLEAR_PENDING_EXCEPTION;
tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
} }
bool exception_occurred = HAS_PENDING_EXCEPTION; }
CLEAR_PENDING_EXCEPTION;
if (_compile_the_world_counter >= CompileTheWorldStartAt) {
if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) { if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) {
// If something went wrong (e.g. ExceptionInInitializerError) we skip this class // If something went wrong (e.g. ExceptionInInitializerError) we skip this class
tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer);
} else { } else {
tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer); tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer);
// Preload all classes to get around uncommon traps // Preload all classes to get around uncommon traps
if (CompileTheWorldPreloadClasses) {
constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD);
if (HAS_PENDING_EXCEPTION) {
// If something went wrong in preloading we just ignore it
CLEAR_PENDING_EXCEPTION;
tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
}
}
// Iterate over all methods in class // Iterate over all methods in class
for (int n = 0; n < k->methods()->length(); n++) { for (int n = 0; n < k->methods()->length(); n++) {
methodHandle m (THREAD, methodOop(k->methods()->obj_at(n))); methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
@ -1253,16 +1256,28 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
} }
if (TieredCompilation) { if (TieredCompilation) {
// Clobber the first compile and force second tier compilation // Clobber the first compile and force second tier compilation
m->clear_code(); nmethod* nm = m->code();
CompileBroker::compile_method(m, InvocationEntryBci, if (nm != NULL) {
methodHandle(), 0, "CTW", THREAD); // Throw out the code so that the code cache doesn't fill up
if (HAS_PENDING_EXCEPTION) { nm->make_not_entrant();
CLEAR_PENDING_EXCEPTION; m->clear_code();
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); }
CompileBroker::compile_method(m, InvocationEntryBci,
methodHandle(), 0, "CTW", THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
}
} }
} }
nmethod* nm = m->code();
if (nm != NULL) {
// Throw out the code so that the code cache doesn't fill up
nm->make_not_entrant();
m->clear_code();
} }
} }
} }
View file
@ -2430,6 +2430,41 @@ oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
} }
// Support for sun_dyn_CallSiteImpl
int sun_dyn_CallSiteImpl::_type_offset;
int sun_dyn_CallSiteImpl::_target_offset;
int sun_dyn_CallSiteImpl::_vmmethod_offset;
void sun_dyn_CallSiteImpl::compute_offsets() {
if (!EnableInvokeDynamic) return;
klassOop k = SystemDictionary::CallSiteImpl_klass();
if (k != NULL) {
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
compute_offset(_vmmethod_offset, k, vmSymbols::vmmethod_name(), vmSymbols::object_signature(), true);
}
}
oop sun_dyn_CallSiteImpl::type(oop site) {
return site->obj_field(_type_offset);
}
oop sun_dyn_CallSiteImpl::target(oop site) {
return site->obj_field(_target_offset);
}
void sun_dyn_CallSiteImpl::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}
oop sun_dyn_CallSiteImpl::vmmethod(oop site) {
return site->obj_field(_vmmethod_offset);
}
void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) {
site->obj_field_put(_vmmethod_offset, ref);
}
// Support for java_security_AccessControlContext // Support for java_security_AccessControlContext
@ -2775,6 +2810,9 @@ void JavaClasses::compute_offsets() {
java_dyn_MethodType::compute_offsets(); java_dyn_MethodType::compute_offsets();
java_dyn_MethodTypeForm::compute_offsets(); java_dyn_MethodTypeForm::compute_offsets();
} }
if (EnableInvokeDynamic) {
sun_dyn_CallSiteImpl::compute_offsets();
}
java_security_AccessControlContext::compute_offsets(); java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes // Initialize reflection classes. The layouts of these classes
// changed with the new reflection implementation in JDK 1.4, and // changed with the new reflection implementation in JDK 1.4, and

View file

@@ -1060,6 +1060,33 @@ class java_dyn_MethodTypeForm: AllStatic {
}; };
// Interface to sun.dyn.CallSiteImpl objects
class sun_dyn_CallSiteImpl: AllStatic {
friend class JavaClasses;
private:
static int _type_offset;
static int _target_offset;
static int _vmmethod_offset;
static void compute_offsets();
public:
// Accessors
static oop type(oop site);
static oop target(oop site);
static void set_target(oop site, oop target);
static oop vmmethod(oop site);
static void set_vmmethod(oop site, oop ref);
// Accessors for code generation:
static int target_offset_in_bytes() { return _target_offset; }
static int type_offset_in_bytes() { return _type_offset; }
static int vmmethod_offset_in_bytes() { return _vmmethod_offset; }
};
// Interface to java.security.AccessControlContext objects // Interface to java.security.AccessControlContext objects

View file

@@ -1951,6 +1951,16 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
// Skip the rest of the method handle classes, if MethodHandle is not loaded. // Skip the rest of the method handle classes, if MethodHandle is not loaded.
scan = WKID(meth_group_end+1); scan = WKID(meth_group_end+1);
} }
WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass);
WKID indy_group_end = WK_KLASS_ENUM_NAME(Dynamic_klass);
initialize_wk_klasses_until(indy_group_start, scan, CHECK);
if (EnableInvokeDynamic) {
initialize_wk_klasses_through(indy_group_start, scan, CHECK);
}
if (_well_known_klasses[indy_group_start] == NULL) {
// Skip the rest of the dynamic typing classes, if Linkage is not loaded.
scan = WKID(indy_group_end+1);
}
initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK); initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK);
@@ -2367,6 +2377,76 @@ Handle SystemDictionary::compute_method_handle_type(symbolHandle signature,
} }
// Ask Java code to find or construct a java.dyn.CallSite for the given
// name and signature, as interpreted relative to the given class loader.
Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
int caller_method_idnum,
int caller_bci,
symbolHandle name,
methodHandle mh_invdyn,
TRAPS) {
Handle empty;
// call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci)
oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
args.push_oop(name_str_oop);
args.push_oop(mh_invdyn->method_handle_type());
args.push_int(caller_method_idnum);
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::CallSiteImpl_klass(),
vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
if (TraceMethodHandles) {
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
call_site_oop->print();
tty->cr();
}
return call_site_oop;
}
Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
KlassHandle search_bootstrap_klass,
TRAPS) {
Handle empty;
if (!caller->oop_is_instance()) return empty;
instanceKlassHandle ik(THREAD, caller());
if (ik->bootstrap_method() != NULL) {
return Handle(THREAD, ik->bootstrap_method());
}
// call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
if (search_bootstrap_klass.is_null())
args.push_oop(Handle());
else
args.push_oop(search_bootstrap_klass->java_mirror());
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::Linkage_klass(),
vmSymbols::findBootstrapMethod_name(),
vmSymbols::findBootstrapMethod_signature(),
&args, CHECK_(empty));
oop boot_method_oop = (oop) result.get_jobject();
if (boot_method_oop != NULL) {
// probably no race conditions, but let's be careful:
if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
ik->set_bootstrap_method(boot_method_oop);
else
boot_method_oop = ik->bootstrap_method();
} else {
boot_method_oop = ik->bootstrap_method();
}
return Handle(THREAD, boot_method_oop);
}
// Since the identity hash code for symbols changes when the symbols are // Since the identity hash code for symbols changes when the symbols are
// moved from the regular perm gen (hash in the mark word) to the shared // moved from the regular perm gen (hash in the mark word) to the shared
// spaces (hash is the address), the classes loaded into the dictionary // spaces (hash is the address), the classes loaded into the dictionary
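The tail of find_bootstrap_method() above races threads to cache the resolved bootstrap method on the instanceKlass. A minimal sketch of that publish-once idiom, using std::atomic in place of Atomic::cmpxchg_ptr (names are illustrative):

#include <atomic>

using Oop = void*;  // stand-in for an oop

// First thread to resolve installs its result; losers adopt the winner's,
// mirroring the cmpxchg_ptr dance in find_bootstrap_method().
Oop install_bootstrap_method(std::atomic<Oop>& slot, Oop resolved) {
    Oop expected = nullptr;
    if (slot.compare_exchange_strong(expected, resolved))
        return resolved;   // we won the race; our oop is now canonical
    return expected;       // someone else won; use their oop
}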

View file

@@ -142,6 +142,12 @@ class SymbolPropertyTable;
template(MethodType_klass, java_dyn_MethodType, Opt) \ template(MethodType_klass, java_dyn_MethodType, Opt) \
template(MethodTypeForm_klass, java_dyn_MethodTypeForm, Opt) \ template(MethodTypeForm_klass, java_dyn_MethodTypeForm, Opt) \
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \ template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
template(Linkage_klass, java_dyn_Linkage, Opt) \
template(CallSite_klass, java_dyn_CallSite, Opt) \
template(CallSiteImpl_klass, sun_dyn_CallSiteImpl, Opt) \
template(Dynamic_klass, java_dyn_Dynamic, Opt) \
/* Note: MethodHandle must be first, and Dynamic last in group */ \
\
template(vector_klass, java_util_Vector, Pre) \ template(vector_klass, java_util_Vector, Pre) \
template(hashtable_klass, java_util_Hashtable, Pre) \ template(hashtable_klass, java_util_Hashtable, Pre) \
template(stringBuffer_klass, java_lang_StringBuffer, Pre) \ template(stringBuffer_klass, java_lang_StringBuffer, Pre) \
@@ -466,6 +472,21 @@ public:
Handle class_loader, Handle class_loader,
Handle protection_domain, Handle protection_domain,
TRAPS); TRAPS);
// ask Java to create a dynamic call site, while linking an invokedynamic op
static Handle make_dynamic_call_site(KlassHandle caller,
int caller_method_idnum,
int caller_bci,
symbolHandle name,
methodHandle mh_invoke,
TRAPS);
// coordinate with Java about bootstrap methods
static Handle find_bootstrap_method(KlassHandle caller,
// This argument is non-null only when a
// classfile attribute has been found:
KlassHandle search_bootstrap_klass,
TRAPS);
// Utility for printing loader "name" as part of tracing constraints // Utility for printing loader "name" as part of tracing constraints
static const char* loader_name(oop loader) { static const char* loader_name(oop loader) {
return ((loader) == NULL ? "<bootloader>" : return ((loader) == NULL ? "<bootloader>" :

View file

@@ -1174,6 +1174,7 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
&this_uninit, return_type, cp, CHECK_VERIFY(this)); &this_uninit, return_type, cp, CHECK_VERIFY(this));
no_control_flow = false; break; no_control_flow = false; break;
case Bytecodes::_invokeinterface : case Bytecodes::_invokeinterface :
case Bytecodes::_invokedynamic :
verify_invoke_instructions( verify_invoke_instructions(
&bcs, code_length, &current_frame, &bcs, code_length, &current_frame,
&this_uninit, return_type, cp, CHECK_VERIFY(this)); &this_uninit, return_type, cp, CHECK_VERIFY(this));
@@ -1895,12 +1896,23 @@ void ClassVerifier::verify_invoke_instructions(
Bytecodes::Code opcode = bcs->code(); Bytecodes::Code opcode = bcs->code();
unsigned int types = (opcode == Bytecodes::_invokeinterface unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref ? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
? 1 << JVM_CONSTANT_NameAndType
: 1 << JVM_CONSTANT_Methodref); : 1 << JVM_CONSTANT_Methodref);
verify_cp_type(index, cp, types, CHECK_VERIFY(this)); verify_cp_type(index, cp, types, CHECK_VERIFY(this));
// Get method name and signature // Get method name and signature
symbolHandle method_name(THREAD, cp->name_ref_at(index)); symbolHandle method_name;
symbolHandle method_sig(THREAD, cp->signature_ref_at(index)); symbolHandle method_sig;
if (opcode == Bytecodes::_invokedynamic) {
int name_index = cp->name_ref_index_at(index);
int sig_index = cp->signature_ref_index_at(index);
method_name = symbolHandle(THREAD, cp->symbol_at(name_index));
method_sig = symbolHandle(THREAD, cp->symbol_at(sig_index));
} else {
method_name = symbolHandle(THREAD, cp->name_ref_at(index));
method_sig = symbolHandle(THREAD, cp->signature_ref_at(index));
}
if (!SignatureVerifier::is_valid_method_signature(method_sig)) { if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
class_format_error( class_format_error(
@@ -1910,8 +1922,17 @@ void ClassVerifier::verify_invoke_instructions(
} }
// Get referenced class type // Get referenced class type
VerificationType ref_class_type = cp_ref_index_to_type( VerificationType ref_class_type;
index, cp, CHECK_VERIFY(this)); if (opcode == Bytecodes::_invokedynamic) {
if (!EnableInvokeDynamic) {
class_format_error(
"invokedynamic instructions not enabled on this JVM",
_klass->external_name());
return;
}
} else {
ref_class_type = cp_ref_index_to_type(index, cp, CHECK_VERIFY(this));
}
// For a small signature length, we just allocate 128 bytes instead // For a small signature length, we just allocate 128 bytes instead
// of parsing the signature once to find its size. // of parsing the signature once to find its size.
@@ -1970,6 +1991,14 @@ void ClassVerifier::verify_invoke_instructions(
} }
} }
if (opcode == Bytecodes::_invokedynamic) {
address bcp = bcs->bcp();
if (*(bcp+3) != 0 || *(bcp+4) != 0) {
verify_error(bci, "Third and fourth operand bytes of invokedynamic must be zero");
return;
}
}
if (method_name->byte_at(0) == '<') { if (method_name->byte_at(0) == '<') {
// Make sure <init> can only be invoked by invokespecial // Make sure <init> can only be invoked by invokespecial
if (opcode != Bytecodes::_invokespecial || if (opcode != Bytecodes::_invokespecial ||
@@ -1994,7 +2023,8 @@ void ClassVerifier::verify_invoke_instructions(
current_frame->pop_stack(sig_types[i], CHECK_VERIFY(this)); current_frame->pop_stack(sig_types[i], CHECK_VERIFY(this));
} }
// Check objectref on operand stack // Check objectref on operand stack
if (opcode != Bytecodes::_invokestatic) { if (opcode != Bytecodes::_invokestatic &&
opcode != Bytecodes::_invokedynamic) {
if (method_name() == vmSymbols::object_initializer_name()) { // <init> method if (method_name() == vmSymbols::object_initializer_name()) { // <init> method
verify_invoke_init(bcs, ref_class_type, current_frame, verify_invoke_init(bcs, ref_class_type, current_frame,
code_length, this_uninit, cp, CHECK_VERIFY(this)); code_length, this_uninit, cp, CHECK_VERIFY(this));
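The verifier changes above treat invokedynamic as a five-byte instruction whose third and fourth operand bytes must be zero, and which pops no objectref. A minimal sketch of the operand check (the byte layout follows the hunk; the function name is illustrative):

#include <cstdint>

// bcp[0] = 0xba (invokedynamic), bcp[1..2] = constant pool index,
// bcp[3..4] = reserved padding that this JVM requires to be zero.
bool invokedynamic_operands_ok(const uint8_t* bcp) {
    return bcp[3] == 0 && bcp[4] == 0;
}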

View file

@@ -217,6 +217,9 @@
template(base_name, "base") \ template(base_name, "base") \
\ \
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
template(java_dyn_Dynamic, "java/dyn/Dynamic") \
template(java_dyn_Linkage, "java/dyn/Linkage") \
template(java_dyn_CallSite, "java/dyn/CallSite") \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \ template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \
template(java_dyn_MethodType, "java/dyn/MethodType") \ template(java_dyn_MethodType, "java/dyn/MethodType") \
template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") \ template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") \
@@ -230,8 +233,13 @@
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \ template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \ template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \ template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \
template(sun_dyn_CallSiteImpl, "sun/dyn/CallSiteImpl") \
template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \ template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \
template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \ template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
template(makeSite_name, "makeSite") /*CallSiteImpl::makeSite*/ \
template(makeSite_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
template(findBootstrapMethod_name, "findBootstrapMethod") \
template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \ NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \ LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
\ \
@@ -308,9 +316,11 @@
template(bitCount_name, "bitCount") \ template(bitCount_name, "bitCount") \
template(profile_name, "profile") \ template(profile_name, "profile") \
template(equals_name, "equals") \ template(equals_name, "equals") \
template(target_name, "target") \
template(toString_name, "toString") \ template(toString_name, "toString") \
template(values_name, "values") \ template(values_name, "values") \
template(receiver_name, "receiver") \ template(receiver_name, "receiver") \
template(vmmethod_name, "vmmethod") \
template(vmtarget_name, "vmtarget") \ template(vmtarget_name, "vmtarget") \
template(vmentry_name, "vmentry") \ template(vmentry_name, "vmentry") \
template(vmslots_name, "vmslots") \ template(vmslots_name, "vmslots") \

View file

@@ -270,6 +270,7 @@ c1_LIRGenerator_<arch>.cpp vmreg_<arch>.inline.hpp
c1_LinearScan.cpp bitMap.inline.hpp c1_LinearScan.cpp bitMap.inline.hpp
c1_LinearScan.cpp c1_CFGPrinter.hpp c1_LinearScan.cpp c1_CFGPrinter.hpp
c1_LinearScan.cpp c1_CodeStubs.hpp
c1_LinearScan.cpp c1_Compilation.hpp c1_LinearScan.cpp c1_Compilation.hpp
c1_LinearScan.cpp c1_FrameMap.hpp c1_LinearScan.cpp c1_FrameMap.hpp
c1_LinearScan.cpp c1_IR.hpp c1_LinearScan.cpp c1_IR.hpp

View file

@@ -4102,6 +4102,7 @@ templateTable.hpp interp_masm_<arch_model>.hpp
templateTable_<arch_model>.cpp interpreterRuntime.hpp templateTable_<arch_model>.cpp interpreterRuntime.hpp
templateTable_<arch_model>.cpp interpreter.hpp templateTable_<arch_model>.cpp interpreter.hpp
templateTable_<arch_model>.cpp methodDataOop.hpp templateTable_<arch_model>.cpp methodDataOop.hpp
templateTable_<arch_model>.cpp methodHandles.hpp
templateTable_<arch_model>.cpp objArrayKlass.hpp templateTable_<arch_model>.cpp objArrayKlass.hpp
templateTable_<arch_model>.cpp oop.inline.hpp templateTable_<arch_model>.cpp oop.inline.hpp
templateTable_<arch_model>.cpp sharedRuntime.hpp templateTable_<arch_model>.cpp sharedRuntime.hpp

View file

@@ -42,6 +42,12 @@ constantPoolKlass.cpp psPromotionManager.inline.hpp
constantPoolKlass.cpp psScavenge.inline.hpp constantPoolKlass.cpp psScavenge.inline.hpp
constantPoolKlass.cpp parOopClosures.inline.hpp constantPoolKlass.cpp parOopClosures.inline.hpp
cpCacheKlass.cpp cardTableRS.hpp
cpCacheKlass.cpp oop.pcgc.inline.hpp
cpCacheKlass.cpp psPromotionManager.inline.hpp
cpCacheKlass.cpp psScavenge.inline.hpp
cpCacheKlass.cpp parOopClosures.inline.hpp
genCollectedHeap.cpp concurrentMarkSweepThread.hpp genCollectedHeap.cpp concurrentMarkSweepThread.hpp
genCollectedHeap.cpp vmCMSOperations.hpp genCollectedHeap.cpp vmCMSOperations.hpp

View file

@@ -28,6 +28,7 @@ jvmtiClassFileReconstituter.cpp bytecodeStream.hpp
jvmtiClassFileReconstituter.cpp bytes_<arch>.hpp jvmtiClassFileReconstituter.cpp bytes_<arch>.hpp
jvmtiClassFileReconstituter.cpp jvmtiClassFileReconstituter.hpp jvmtiClassFileReconstituter.cpp jvmtiClassFileReconstituter.hpp
jvmtiClassFileReconstituter.cpp symbolTable.hpp jvmtiClassFileReconstituter.cpp symbolTable.hpp
jvmtiClassFileReconstituter.cpp signature.hpp
jvmtiClassFileReconstituter.hpp jvmtiEnv.hpp jvmtiClassFileReconstituter.hpp jvmtiEnv.hpp

View file

@@ -217,6 +217,73 @@ class AbstractInterpreter: AllStatic {
stackElementSize()) + tag_offset_in_bytes(); stackElementSize()) + tag_offset_in_bytes();
} }
// access to stacked values according to type:
static oop* oop_addr_in_slot(intptr_t* slot_addr) {
return (oop*) slot_addr;
}
static jint* int_addr_in_slot(intptr_t* slot_addr) {
if ((int) sizeof(jint) < wordSize && !Bytes::is_Java_byte_ordering_different())
// big-endian LP64
return (jint*)(slot_addr + 1) - 1;
else
return (jint*) slot_addr;
}
static jlong long_in_slot(intptr_t* slot_addr) {
if (sizeof(intptr_t) >= sizeof(jlong)) {
return *(jlong*) slot_addr;
} else if (!TaggedStackInterpreter) {
return Bytes::get_native_u8((address)slot_addr);
} else {
assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
// assemble the long in memory order (not arithmetic order)
union { jlong j; jint i[2]; } u;
u.i[0] = (jint) slot_addr[0*stackElementSize()];
u.i[1] = (jint) slot_addr[1*stackElementSize()];
return u.j;
}
}
static void set_long_in_slot(intptr_t* slot_addr, jlong value) {
if (sizeof(intptr_t) >= sizeof(jlong)) {
*(jlong*) slot_addr = value;
} else if (!TaggedStackInterpreter) {
Bytes::put_native_u8((address)slot_addr, value);
} else {
assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
// assemble the long in memory order (not arithmetic order)
union { jlong j; jint i[2]; } u;
u.j = value;
slot_addr[0*stackElementSize()] = (intptr_t) u.i[0];
slot_addr[1*stackElementSize()] = (intptr_t) u.i[1];
}
}
static void get_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
switch (type) {
case T_BOOLEAN: value->z = *int_addr_in_slot(slot_addr); break;
case T_CHAR: value->c = *int_addr_in_slot(slot_addr); break;
case T_BYTE: value->b = *int_addr_in_slot(slot_addr); break;
case T_SHORT: value->s = *int_addr_in_slot(slot_addr); break;
case T_INT: value->i = *int_addr_in_slot(slot_addr); break;
case T_LONG: value->j = long_in_slot(slot_addr); break;
case T_FLOAT: value->f = *(jfloat*)int_addr_in_slot(slot_addr); break;
case T_DOUBLE: value->d = jdouble_cast(long_in_slot(slot_addr)); break;
case T_OBJECT: value->l = (jobject)*oop_addr_in_slot(slot_addr); break;
default: ShouldNotReachHere();
}
}
static void set_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
switch (type) {
case T_BOOLEAN: *int_addr_in_slot(slot_addr) = (value->z != 0); break;
case T_CHAR: *int_addr_in_slot(slot_addr) = value->c; break;
case T_BYTE: *int_addr_in_slot(slot_addr) = value->b; break;
case T_SHORT: *int_addr_in_slot(slot_addr) = value->s; break;
case T_INT: *int_addr_in_slot(slot_addr) = value->i; break;
case T_LONG: set_long_in_slot(slot_addr, value->j); break;
case T_FLOAT: *(jfloat*)int_addr_in_slot(slot_addr) = value->f; break;
case T_DOUBLE: set_long_in_slot(slot_addr, jlong_cast(value->d)); break;
case T_OBJECT: *oop_addr_in_slot(slot_addr) = (oop) value->l; break;
default: ShouldNotReachHere();
}
}
}; };
//------------------------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------------------------
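long_in_slot()/set_long_in_slot() above split a jlong across two word-sized interpreter slots in memory order on ILP32 tagged stacks. A standalone sketch of that round trip (the union punning mirrors the hunk; it is well-defined in C and tolerated by the compilers HotSpot targets):

#include <cassert>
#include <cstdint>

int main() {
    int32_t slots[2];                             // two 32-bit stack slots
    union Pun { int64_t j; int32_t i[2]; };

    Pun u; u.j = 0x1122334455667788LL;
    slots[0] = u.i[0];                            // memory order, not arithmetic order
    slots[1] = u.i[1];

    Pun v; v.i[0] = slots[0]; v.i[1] = slots[1];  // reassemble the same way
    assert(v.j == 0x1122334455667788LL);
    return 0;
}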

View file

@@ -34,12 +34,6 @@ void Bytecode::set_code(Bytecodes::Code code) {
} }
void Bytecode::set_fast_index(int i) {
assert(0 <= i && i < 0x10000, "illegal index value");
Bytes::put_native_u2(addr_at(1), (jushort)i);
}
bool Bytecode::check_must_rewrite() const { bool Bytecode::check_must_rewrite() const {
assert(Bytecodes::can_rewrite(code()), "post-check only"); assert(Bytecodes::can_rewrite(code()), "post-check only");
@@ -118,7 +112,12 @@ methodHandle Bytecode_invoke::static_target(TRAPS) {
int Bytecode_invoke::index() const { int Bytecode_invoke::index() const {
return Bytes::get_Java_u2(bcp() + 1); // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4,
// at the same time it allocates per-call-site CP cache entries.
if (has_giant_index())
return Bytes::get_native_u4(bcp() + 1);
else
return Bytes::get_Java_u2(bcp() + 1);
} }
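Bytecode_invoke::index() above now branches on the opcode: ordinary invokes keep a big-endian ("Java order") u2 operand, while a rewritten invokedynamic carries a native-order u4. A hedged sketch of the two reads (the function name is illustrative):

#include <cstdint>
#include <cstring>

uint32_t invoke_index(const uint8_t* bcp, bool is_invokedynamic) {
    if (is_invokedynamic) {
        uint32_t idx;                             // get_native_u4: host byte order
        std::memcpy(&idx, bcp + 1, sizeof idx);
        return idx;
    }
    return (uint32_t(bcp[1]) << 8) | bcp[2];      // get_Java_u2: big-endian
}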

View file

@@ -65,14 +65,6 @@ class ThisRelativeObj VALUE_OBJ_CLASS_SPEC {
// The base class for different kinds of bytecode abstractions. // The base class for different kinds of bytecode abstractions.
// Provides the primitive operations to manipulate code relative // Provides the primitive operations to manipulate code relative
// to an objects 'this' pointer. // to an objects 'this' pointer.
//
// Note: Even though it seems that the fast_index & set_fast_index
// functions are machine specific, they're not. They only use
// the natural way to store a 16bit index on a given machine,
// independent of the particular byte ordering. Since all other
// places in the system that refer to these indices use the
// same method (the natural byte ordering on the platform)
// this will always work and be machine-independent).
class Bytecode: public ThisRelativeObj { class Bytecode: public ThisRelativeObj {
protected: protected:
@@ -83,24 +75,40 @@ class Bytecode: public ThisRelativeObj {
// Attributes // Attributes
address bcp() const { return addr_at(0); } address bcp() const { return addr_at(0); }
address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); } address next_bcp() const { return addr_at(0) + Bytecodes::length_at(bcp()); }
int instruction_size() const { return Bytecodes::length_at(bcp()); }
Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); } Bytecodes::Code code() const { return Bytecodes::code_at(addr_at(0)); }
Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); }
bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); } bool must_rewrite() const { return Bytecodes::can_rewrite(code()) && check_must_rewrite(); }
bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); } bool is_active_breakpoint() const { return Bytecodes::is_active_breakpoint_at(bcp()); }
int one_byte_index() const { return byte_at(1); } int one_byte_index() const { assert_index_size(1); return byte_at(1); }
int two_byte_index() const { return (byte_at(1) << 8) + byte_at(2); } int two_byte_index() const { assert_index_size(2); return (byte_at(1) << 8) + byte_at(2); }
int offset() const { return (two_byte_index() << 16) >> 16; } int offset() const { return (two_byte_index() << 16) >> 16; }
address destination() const { return bcp() + offset(); } address destination() const { return bcp() + offset(); }
int fast_index() const { return Bytes::get_native_u2(addr_at(1)); }
// Attribute modification // Attribute modification
void set_code(Bytecodes::Code code); void set_code(Bytecodes::Code code);
void set_fast_index(int i);
// Creation // Creation
inline friend Bytecode* Bytecode_at(address bcp); inline friend Bytecode* Bytecode_at(address bcp);
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
int isize = instruction_size() - 1;
if (isize == 2 && code() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (code() == Bytecodes::_invokedynamic)
isize = 4;
else
isize = 2;
assert(isize == required_size, "wrong index size");
#endif
}
}; };
inline Bytecode* Bytecode_at(address bcp) { inline Bytecode* Bytecode_at(address bcp) {
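Both assert_index_size() helpers deduce the expected operand width from the instruction length, special-casing iinc (one index byte plus one constant byte) and invokedynamic (a four-byte index). A compact standalone sketch of that deduction, with flags standing in for Bytecodes::Code checks:

// Returns the index width implied by an instruction's operand bytes.
int deduced_index_size(int instruction_size, bool is_iinc, bool is_invokedynamic) {
    int isize = instruction_size - 1;     // operand bytes after the opcode
    if (isize == 2 && is_iinc) return 1;  // iinc: index byte + constant byte
    if (isize <= 2) return isize;         // ordinary 1- or 2-byte index
    if (is_invokedynamic) return 4;       // native u4 cache index
    return 2;                             // other long forms still use a u2 index
}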
@@ -195,6 +203,9 @@ class Bytecode_invoke: public ResourceObj {
bool is_invokevirtual() const { return adjusted_invoke_code() == Bytecodes::_invokevirtual; } bool is_invokevirtual() const { return adjusted_invoke_code() == Bytecodes::_invokevirtual; }
bool is_invokestatic() const { return adjusted_invoke_code() == Bytecodes::_invokestatic; } bool is_invokestatic() const { return adjusted_invoke_code() == Bytecodes::_invokestatic; }
bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; } bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() || bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() || is_invokevirtual() ||

View file

@@ -109,6 +109,7 @@ class RawBytecodeStream: StackObj {
Bytecodes::Code code() const { return _code; } Bytecodes::Code code() const { return _code; }
bool is_wide() const { return _is_wide; } bool is_wide() const { return _is_wide; }
int instruction_size() const { return (_next_bci - _bci); }
bool is_last_bytecode() const { return _next_bci >= _end_bci; } bool is_last_bytecode() const { return _next_bci >= _end_bci; }
address bcp() const { return method()->code_base() + _bci; } address bcp() const { return method()->code_base() + _bci; }
@@ -122,8 +123,29 @@ class RawBytecodeStream: StackObj {
int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); } int dest_w() const { return bci() + (int )Bytes::get_Java_u4(bcp() + 1); }
// Unsigned indices, widening // Unsigned indices, widening
int get_index() const { return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; } int get_index() const { assert_index_size(is_wide() ? 2 : 1);
int get_index_big() const { return (int)Bytes::get_Java_u2(bcp() + 1); } return (is_wide()) ? Bytes::get_Java_u2(bcp() + 2) : bcp()[1]; }
int get_index_big() const { assert_index_size(2);
return (int)Bytes::get_Java_u2(bcp() + 1); }
int get_index_int() const { return has_giant_index() ? get_index_giant() : get_index_big(); }
int get_index_giant() const { assert_index_size(4); return Bytes::get_native_u4(bcp() + 1); }
bool has_giant_index() const { return (code() == Bytecodes::_invokedynamic); }
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
int isize = instruction_size() - (int)_is_wide - 1;
if (isize == 2 && code() == Bytecodes::_iinc)
isize = 1;
else if (isize <= 2)
; // no change
else if (has_giant_index())
isize = 4;
else
isize = 2;
assert(isize == required_size, "wrong index size");
#endif
}
}; };
// In BytecodeStream, non-java bytecodes will be translated into the // In BytecodeStream, non-java bytecodes will be translated into the

View file

@@ -48,12 +48,15 @@ class BytecodePrinter: public BytecodeClosure {
int get_index() { return *(address)_next_pc++; } int get_index() { return *(address)_next_pc++; }
int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } int get_big_index() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; }
int get_giant_index() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; }
int get_index_special() { return (is_wide()) ? get_big_index() : get_index(); } int get_index_special() { return (is_wide()) ? get_big_index() : get_index(); }
methodOop method() { return _current_method; } methodOop method() { return _current_method; }
bool is_wide() { return _is_wide; } bool is_wide() { return _is_wide; }
bool check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st = tty);
void print_constant(int i, outputStream* st = tty); void print_constant(int i, outputStream* st = tty);
void print_field_or_method(int i, outputStream* st = tty);
void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty); void print_attributes(Bytecodes::Code code, int bci, outputStream* st = tty);
void bytecode_epilog(int bci, outputStream* st = tty); void bytecode_epilog(int bci, outputStream* st = tty);
@@ -182,7 +185,71 @@ void print_oop(oop value, outputStream* st) {
} }
} }
bool BytecodePrinter::check_index(int i, bool in_cp_cache, int& cp_index, outputStream* st) {
constantPoolOop constants = method()->constants();
int ilimit = constants->length(), climit = 0;
constantPoolCacheOop cache = NULL;
if (in_cp_cache) {
cache = constants->cache();
if (cache != NULL) {
//climit = cache->length(); // %%% private!
size_t size = cache->size() * HeapWordSize;
size -= sizeof(constantPoolCacheOopDesc);
size /= sizeof(ConstantPoolCacheEntry);
climit = (int) size;
}
}
if (in_cp_cache && constantPoolCacheOopDesc::is_secondary_index(i)) {
i = constantPoolCacheOopDesc::decode_secondary_index(i);
st->print(" secondary cache[%d] of", i);
if (i >= 0 && i < climit) {
if (!cache->entry_at(i)->is_secondary_entry()) {
st->print_cr(" not secondary entry?", i);
return false;
}
i = cache->entry_at(i)->main_entry_index();
goto check_cache_index;
} else {
st->print_cr(" not in cache[*]?", i);
return false;
}
}
if (cache != NULL) {
i = Bytes::swap_u2(i);
if (WizardMode) st->print(" (swap=%d)", i);
goto check_cache_index;
}
check_cp_index:
if (i >= 0 && i < ilimit) {
if (WizardMode) st->print(" cp[%d]", i);
cp_index = i;
return true;
}
st->print_cr(" CP[%d] not in CP", i);
return false;
check_cache_index:
if (i >= 0 && i < climit) {
if (cache->entry_at(i)->is_secondary_entry()) {
st->print_cr(" secondary entry?");
return false;
}
i = cache->entry_at(i)->constant_pool_index();
goto check_cp_index;
}
st->print_cr(" not in CP[*]?", i);
return false;
}
void BytecodePrinter::print_constant(int i, outputStream* st) { void BytecodePrinter::print_constant(int i, outputStream* st) {
int orig_i = i;
if (!check_index(orig_i, false, i, st)) return;
constantPoolOop constants = method()->constants(); constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i); constantTag tag = constants->tag_at(i);
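check_index() above validates a rewritten operand in two hops: first against the constant pool cache, then against the original constant pool via the cache entry's back-pointer. A minimal sketch of the same two-level bounds check (plain arrays stand in for the oops; secondary-entry handling omitted):

// cache_to_cp[i] plays the role of ConstantPoolCacheEntry::constant_pool_index().
bool cache_to_cp_index(int i, const int* cache_to_cp, int climit, int ilimit,
                       int& cp_index) {
    if (i < 0 || i >= climit) return false;    // "not in cache[*]?"
    int cp = cache_to_cp[i];
    if (cp < 0 || cp >= ilimit) return false;  // "not in CP"
    cp_index = cp;
    return true;
}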
@@ -203,7 +270,31 @@ void BytecodePrinter::print_constant(int i, outputStream* st) {
st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name()); st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
} else if (tag.is_unresolved_klass()) { } else if (tag.is_unresolved_klass()) {
st->print_cr(" <unresolved klass at %d>", i); st->print_cr(" <unresolved klass at %d>", i);
} else ShouldNotReachHere(); } else {
st->print_cr(" bad tag=%d at %d", tag.value(), i);
}
}
void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
int orig_i = i;
if (!check_index(orig_i, true, i, st)) return;
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
switch (tag.value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_Fieldref:
break;
default:
st->print_cr(" bad tag=%d at %d", tag.value(), i);
return;
}
symbolOop name = constants->name_ref_at(orig_i);
symbolOop signature = constants->signature_ref_at(orig_i);
st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
} }
@@ -354,36 +445,28 @@ void BytecodePrinter::print_attributes(Bytecodes::Code code, int bci, outputStre
case Bytecodes::_putstatic: case Bytecodes::_putstatic:
case Bytecodes::_getstatic: case Bytecodes::_getstatic:
case Bytecodes::_putfield: case Bytecodes::_putfield:
case Bytecodes::_getfield: { case Bytecodes::_getfield:
int i = get_big_index(); print_field_or_method(get_big_index(), st);
constantPoolOop constants = method()->constants();
symbolOop field = constants->name_ref_at(i);
st->print_cr(" %d <%s>", i, field->as_C_string());
}
break; break;
case Bytecodes::_invokevirtual: case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic: case Bytecodes::_invokestatic:
{ int i = get_big_index(); print_field_or_method(get_big_index(), st);
constantPoolOop constants = method()->constants();
symbolOop name = constants->name_ref_at(i);
symbolOop signature = constants->signature_ref_at(i);
st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
}
break; break;
case Bytecodes::_invokeinterface: case Bytecodes::_invokeinterface:
{ int i = get_big_index(); { int i = get_big_index();
int n = get_index(); int n = get_index();
get_index(); get_index(); // ignore zero byte
constantPoolOop constants = method()->constants(); print_field_or_method(i, st);
symbolOop name = constants->name_ref_at(i);
symbolOop signature = constants->signature_ref_at(i);
st->print_cr(" %d <%s> <%s> %d", i, name->as_C_string(), signature->as_C_string(), n);
} }
break; break;
case Bytecodes::_invokedynamic:
print_field_or_method(get_giant_index(), st);
break;
case Bytecodes::_new: case Bytecodes::_new:
case Bytecodes::_checkcast: case Bytecodes::_checkcast:
case Bytecodes::_instanceof: case Bytecodes::_instanceof:

View file

@@ -357,7 +357,7 @@ void Bytecodes::initialize() {
def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true); def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true);
def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true); def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true);
def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true); def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true);
def(_xxxunusedxxx , "xxxunusedxxx" , NULL , NULL , T_VOID , 0, false); def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, -1, true );
def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true ); def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true );
def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true ); def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true );
def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true ); def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true );

View file

@@ -218,7 +218,7 @@ class Bytecodes: AllStatic {
_invokespecial = 183, // 0xb7 _invokespecial = 183, // 0xb7
_invokestatic = 184, // 0xb8 _invokestatic = 184, // 0xb8
_invokeinterface = 185, // 0xb9 _invokeinterface = 185, // 0xb9
_xxxunusedxxx = 186, // 0xba _invokedynamic = 186, // 0xba // if EnableInvokeDynamic
_new = 187, // 0xbb _new = 187, // 0xbb
_newarray = 188, // 0xbc _newarray = 188, // 0xbc
_anewarray = 189, // 0xbd _anewarray = 189, // 0xbd

View file

@@ -681,6 +681,133 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
IRT_END IRT_END
// First-time execution: resolve symbols and create a permanent CallSiteImpl object.
IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
ResourceMark rm(thread);
assert(EnableInvokeDynamic, "");
const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
methodHandle caller_method(thread, method(thread));
// first determine if there is a bootstrap method
{
KlassHandle caller_klass(thread, caller_method->method_holder());
Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, KlassHandle(), CHECK);
if (bootm.is_null()) {
// If there is no bootstrap method, throw IncompatibleClassChangeError.
// This is a valid generic error type for resolution (JLS 12.3.3).
char buf[200];
jio_snprintf(buf, sizeof(buf), "Class %s has not declared a bootstrap method for invokedynamic",
(Klass::cast(caller_klass()))->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
}
constantPoolHandle pool(thread, caller_method->constants());
pool->set_invokedynamic(); // mark header to flag active call sites
int raw_index = four_byte_index(thread);
assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");
// there are two CPC entries that are of interest:
int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
// and there is one CP entry, a NameAndType:
int nt_index = pool->map_instruction_operand_to_index(raw_index);
// first resolve the signature to a MH.invoke methodOop
if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
JvmtiHideSingleStepping jhss(thread);
CallInfo info;
LinkResolver::resolve_invoke(info, Handle(), pool,
raw_index, bytecode, CHECK);
// The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
// as a common reference point for all invokedynamic call sites with
// that exact call descriptor. We will link it in the CP cache exactly
// as if it were an invokevirtual of MethodHandle.invoke.
pool->cache()->entry_at(main_index)->set_method(
bytecode,
info.resolved_method(),
info.vtable_index());
assert(pool->cache()->entry_at(main_index)->is_vfinal(), "f2 must be a methodOop");
}
// The method (f2 entry) of the main entry is the MH.invoke for the
// invokedynamic target call signature.
intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
methodHandle mh_invdyn(THREAD, (methodOop) f2_value);
assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");
symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
Handle call_site
= SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
caller_method->method_idnum(),
caller_method->bci_from(bcp(thread)),
call_site_name,
mh_invdyn,
CHECK);
// In the secondary entry, the f1 field is the call site, and the f2 (index)
// field is some data about the invoke site.
int extra_data = 0;
pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
}
IRT_END
// Called on first execution, and also whenever CallSite.target is null.
// FIXME: Do more of this in Java code.
IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
methodHandle mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
Handle mh_type(thread, mh_invdyn->method_handle_type());
objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));
// squish the arguments down to a single array
int nargs = mh_ptypes->length();
objArrayHandle arg_array;
{
objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
arg_array = objArrayHandle(thread, aaoop);
}
frame fr = thread->last_frame();
assert(fr.interpreter_frame_bcp() != NULL, "sanity");
int tos_offset = 0;
for (int i = nargs; --i >= 0; ) {
intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
oop ptype = mh_ptypes->obj_at(i);
oop arg = NULL;
if (!java_lang_Class::is_primitive(ptype)) {
arg = *(oop*) slot_addr;
} else {
BasicType bt = java_lang_Class::primitive_type(ptype);
assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
jvalue value;
Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
tos_offset += type2size[bt]-1;
arg = java_lang_boxing_object::create(bt, &value, CHECK);
// FIXME: These boxing objects are not canonicalized under
// the Java autoboxing rules. They should be...
// The best approach would be to push the arglist creation into Java.
// The JVM should use a lower-level interface to communicate argument lists.
}
arg_array->obj_at_put(i, arg);
}
// now find the bootstrap method
oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");
// return the bootstrap method and argument array via vm_result/_2
thread->set_vm_result(bootstrap_mh_oop);
thread->set_vm_result_2(arg_array());
}
IRT_END
//------------------------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------------------------
// Miscellaneous // Miscellaneous

View file

@@ -42,8 +42,11 @@ class InterpreterRuntime: AllStatic {
static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); } static bool already_resolved(JavaThread *thread) { return cache_entry(thread)->is_resolved(code(thread)); }
static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; } static int one_byte_index(JavaThread *thread) { return bcp(thread)[1]; }
static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); } static int two_byte_index(JavaThread *thread) { return Bytes::get_Java_u2(bcp(thread) + 1); }
static int four_byte_index(JavaThread *thread) { return Bytes::get_native_u4(bcp(thread) + 1); }
static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; } static int number_of_dimensions(JavaThread *thread) { return bcp(thread)[3]; }
static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return method(thread)->constants()->cache()->entry_at(Bytes::get_native_u2(bcp(thread) + 1)); }
static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); }
static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
static void note_trap(JavaThread *thread, int reason, TRAPS); static void note_trap(JavaThread *thread, int reason, TRAPS);
public: public:
@@ -83,7 +86,9 @@ class InterpreterRuntime: AllStatic {
static void new_illegal_monitor_state_exception(JavaThread* thread); static void new_illegal_monitor_state_exception(JavaThread* thread);
// Calls // Calls
static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode); static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode);
static void resolve_invokedynamic(JavaThread* thread);
static void bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site);
// Breakpoints // Breakpoints
static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp); static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp);

View file

@@ -947,6 +947,7 @@ void LinkResolver::resolve_invoke(CallInfo& result, Handle recv, constantPoolHan
case Bytecodes::_invokestatic : resolve_invokestatic (result, pool, index, CHECK); break; case Bytecodes::_invokestatic : resolve_invokestatic (result, pool, index, CHECK); break;
case Bytecodes::_invokespecial : resolve_invokespecial (result, pool, index, CHECK); break; case Bytecodes::_invokespecial : resolve_invokespecial (result, pool, index, CHECK); break;
case Bytecodes::_invokevirtual : resolve_invokevirtual (result, recv, pool, index, CHECK); break; case Bytecodes::_invokevirtual : resolve_invokevirtual (result, recv, pool, index, CHECK); break;
case Bytecodes::_invokedynamic : resolve_invokedynamic (result, pool, index, CHECK); break;
case Bytecodes::_invokeinterface: resolve_invokeinterface(result, recv, pool, index, CHECK); break; case Bytecodes::_invokeinterface: resolve_invokeinterface(result, recv, pool, index, CHECK); break;
} }
return; return;
@@ -1008,6 +1009,30 @@ void LinkResolver::resolve_invokeinterface(CallInfo& result, Handle recv, consta
resolve_interface_call(result, recv, recvrKlass, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK); resolve_interface_call(result, recv, recvrKlass, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
} }
void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle pool, int raw_index, TRAPS) {
assert(EnableInvokeDynamic, "");
// This guy is reached from InterpreterRuntime::resolve_invokedynamic.
assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index");
int nt_index = pool->map_instruction_operand_to_index(raw_index);
// At this point, we only need the signature, and can ignore the name.
symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index));
symbolHandle method_name = vmSymbolHandles::invoke_name();
KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
// JSR 292: this must be an implicitly generated method MethodHandle.invoke(*...)
// The extra MH receiver will be inserted into the stack on every call.
methodHandle resolved_method;
lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK);
if (resolved_method.is_null()) {
THROW(vmSymbols::java_lang_InternalError());
}
result.set_virtual(resolved_klass, KlassHandle(), resolved_method, resolved_method, resolved_method->vtable_index(), CHECK);
}
//------------------------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT #ifndef PRODUCT

View file

@@ -167,6 +167,7 @@ class LinkResolver: AllStatic {
static void resolve_invokespecial (CallInfo& result, constantPoolHandle pool, int index, TRAPS); static void resolve_invokespecial (CallInfo& result, constantPoolHandle pool, int index, TRAPS);
static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); static void resolve_invokevirtual (CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS);
static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS); static void resolve_invokeinterface(CallInfo& result, Handle recv, constantPoolHandle pool, int index, TRAPS);
static void resolve_invokedynamic (CallInfo& result, constantPoolHandle pool, int index, TRAPS);
static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS); static void resolve_invoke (CallInfo& result, Handle recv, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
}; };

View file

@@ -25,39 +25,50 @@
# include "incls/_precompiled.incl" # include "incls/_precompiled.incl"
# include "incls/_rewriter.cpp.incl" # include "incls/_rewriter.cpp.incl"
// Computes a CPC map (new_index -> original_index) for constant pool entries
// Computes an index_map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache. // that are referred to by the interpreter at runtime via the constant pool cache.
void Rewriter::compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map) { // Also computes a CP map (original_index -> new_index).
const int length = pool->length(); // Marks entries in CP which require additional processing.
index_map = new intArray(length, -1); void Rewriter::compute_index_maps() {
// Choose an initial value large enough that we don't get frequent const int length = _pool->length();
// calls to grow(). init_cp_map(length);
inverse_index_map = new intStack(length / 2);
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
switch (pool->tag_at(i).value()) { int tag = _pool->tag_at(i).value();
switch (tag) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Fieldref : // fall through case JVM_CONSTANT_Fieldref : // fall through
case JVM_CONSTANT_Methodref : // fall through case JVM_CONSTANT_Methodref : // fall through
case JVM_CONSTANT_InterfaceMethodref: { add_cp_cache_entry(i);
index_map->at_put(i, inverse_index_map->length()); break;
inverse_index_map->append(i);
}
} }
} }
guarantee((int)_cp_cache_map.length()-1 <= (int)((u2)-1),
"all cp cache indexes fit in a u2");
} }
// Creates a constant pool cache given an inverse_index_map int Rewriter::add_extra_cp_cache_entry(int main_entry) {
// Hack: We put it on the map as an encoded value.
// The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry);
int plain_secondary_index = _cp_cache_map.append(encoded);
return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index);
}
// Creates a constant pool cache given a CPC map
// This creates the constant pool cache initially in a state // This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to // that is unsafe for concurrent GC processing but sets it to
// a safe mode before the constant pool cache is returned. // a safe mode before the constant pool cache is returned.
constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) { void Rewriter::make_constant_pool_cache(TRAPS) {
const int length = inverse_index_map.length(); const int length = _cp_cache_map.length();
constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, constantPoolCacheOop cache =
methodOopDesc::IsUnsafeConc, oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK);
CHECK_(constantPoolCacheHandle())); cache->initialize(_cp_cache_map);
cache->initialize(inverse_index_map); _pool->set_cache(cache);
return constantPoolCacheHandle(THREAD, cache); cache->set_constant_pool(_pool());
} }
@@ -101,8 +112,38 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
} }
// Rewrite a classfile-order CP index into a native-order CPC index.
int Rewriter::rewrite_member_reference(address bcp, int offset) {
address p = bcp + offset;
int cp_index = Bytes::get_Java_u2(p);
int cache_index = cp_entry_to_cp_cache(cp_index);
Bytes::put_native_u2(p, cache_index);
return cp_index;
}
void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
address p = bcp + offset;
assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p);
int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily
int cpc2 = add_extra_cp_cache_entry(cpc);
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
// not just one per distinct CP entry. In other words, the
// CPC-to-CP relation is many-to-one for invokedynamic entries.
// This means we must use a larger index size than u2 to address
// all these entries. That is the main reason invokedynamic
// must have a five-byte instruction format. (Of course, other JVM
// implementations can use the bytes for other purposes.)
Bytes::put_native_u4(p, cpc2);
// Note: We use native_u4 format exclusively for 4-byte indexes.
}
// Rewrites a method given the index_map information // Rewrites a method given the index_map information
methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map, TRAPS) { void Rewriter::scan_method(methodOop method) {
int nof_jsrs = 0; int nof_jsrs = 0;
bool has_monitor_bytecodes = false; bool has_monitor_bytecodes = false;
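rewrite_invokedynamic() above overwrites the two-byte classfile index with a four-byte, native-order cache index, consuming the instruction's two zero padding bytes. A minimal sketch of the patch itself (the function name is illustrative):

#include <cstdint>
#include <cstring>

// p points just past the invokedynamic opcode (bcp + 1); the four operand
// bytes are replaced with the per-call-site cache index in host byte order,
// matching Bytes::put_native_u4 in the hunk.
void patch_invokedynamic_operand(uint8_t* p, uint32_t cpc2) {
    std::memcpy(p, &cpc2, sizeof cpc2);
}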
@@ -121,6 +162,7 @@ methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map,
int bc_length; int bc_length;
for (int bci = 0; bci < code_length; bci += bc_length) { for (int bci = 0; bci < code_length; bci += bc_length) {
address bcp = code_base + bci; address bcp = code_base + bci;
int prefix_length = 0;
c = (Bytecodes::Code)(*bcp); c = (Bytecodes::Code)(*bcp);
// Since we have the code, see if we can get the length // Since we have the code, see if we can get the length
@@ -135,6 +177,7 @@ methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map,
// by 'wide'. We don't currently examine any of the bytecodes // by 'wide'. We don't currently examine any of the bytecodes
// modified by wide, but in case we do in the future... // modified by wide, but in case we do in the future...
if (c == Bytecodes::_wide) { if (c == Bytecodes::_wide) {
prefix_length = 1;
c = (Bytecodes::Code)bcp[1]; c = (Bytecodes::Code)bcp[1];
} }
} }
@@ -159,12 +202,13 @@ methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map,
case Bytecodes::_putfield : // fall through case Bytecodes::_putfield : // fall through
case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokevirtual : // fall through
case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic :
case Bytecodes::_invokeinterface: { case Bytecodes::_invokeinterface:
address p = bcp + 1; rewrite_member_reference(bcp, prefix_length+1);
Bytes::put_native_u2(p, index_map[Bytes::get_Java_u2(p)]); break;
case Bytecodes::_invokedynamic:
rewrite_invokedynamic(bcp, prefix_length+1, int(sizeof"@@@@DELETE ME"));
break; break;
}
case Bytecodes::_jsr : // fall through case Bytecodes::_jsr : // fall through
case Bytecodes::_jsr_w : nof_jsrs++; break; case Bytecodes::_jsr_w : nof_jsrs++; break;
case Bytecodes::_monitorenter : // fall through case Bytecodes::_monitorenter : // fall through
@@ -182,53 +226,56 @@ methodHandle Rewriter::rewrite_method(methodHandle method, intArray& index_map,
// have to be rewritten, so we run the oopMapGenerator on the method // have to be rewritten, so we run the oopMapGenerator on the method
if (nof_jsrs > 0) { if (nof_jsrs > 0) {
method->set_has_jsrs(); method->set_has_jsrs();
ResolveOopMapConflicts romc(method); // Second pass will revisit this method.
methodHandle original_method = method; assert(method->has_jsrs(), "");
method = romc.do_potential_rewrite(CHECK_(methodHandle())); }
if (method() != original_method()) { }
// Insert invalid bytecode into original methodOop and set
// interpreter entrypoint, so that executing this method
// will manifest itself in an easily recognizable form.
address bcp = original_method->bcp_from(0);
*bcp = (u1)Bytecodes::_shouldnotreachhere;
int kind = Interpreter::method_kind(original_method);
original_method->set_interpreter_kind(kind);
}
// Update monitor matching info. // After constant pool is created, revisit methods containing jsrs.
if (romc.monitor_safe()) { methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
method->set_guaranteed_monitor_matching(); ResolveOopMapConflicts romc(method);
} methodHandle original_method = method;
method = romc.do_potential_rewrite(CHECK_(methodHandle()));
if (method() != original_method()) {
// Insert invalid bytecode into original methodOop and set
// interpreter entrypoint, so that a executing this method
// will manifest itself in an easy recognizable form.
address bcp = original_method->bcp_from(0);
*bcp = (u1)Bytecodes::_shouldnotreachhere;
int kind = Interpreter::method_kind(original_method);
original_method->set_interpreter_kind(kind);
} }
// Setup method entrypoints for compiler and interpreter // Update monitor matching info.
method->link_method(method, CHECK_(methodHandle())); if (romc.monitor_safe()) {
method->set_guaranteed_monitor_matching();
}
return method; return method;
} }
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) { void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
// gather starting points
ResourceMark rm(THREAD); ResourceMark rm(THREAD);
constantPoolHandle pool (THREAD, klass->constants()); Rewriter rw(klass, CHECK);
objArrayHandle methods (THREAD, klass->methods()); // (That's all, folks.)
assert(pool->cache() == NULL, "constant pool cache must not be set yet"); }
Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
: _klass(klass),
// gather starting points
_pool( THREAD, klass->constants()),
_methods(THREAD, klass->methods())
{
assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
// determine index maps for methodOop rewriting // determine index maps for methodOop rewriting
intArray* index_map = NULL; compute_index_maps();
intStack* inverse_index_map = NULL;
compute_index_maps(pool, index_map, inverse_index_map);
// allocate constant pool cache if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
constantPoolCacheHandle cache = new_constant_pool_cache(*inverse_index_map, CHECK); int i = _methods->length();
pool->set_cache(cache());
cache->set_constant_pool(pool());
if (RegisterFinalizersAtInit && klass->name() == vmSymbols::java_lang_Object()) {
int i = methods->length();
while (i-- > 0) { while (i-- > 0) {
methodOop method = (methodOop)methods->obj_at(i); methodOop method = (methodOop)_methods->obj_at(i);
if (method->intrinsic_id() == vmIntrinsics::_Object_init) { if (method->intrinsic_id() == vmIntrinsics::_Object_init) {
// rewrite the return bytecodes of Object.<init> to register the // rewrite the return bytecodes of Object.<init> to register the
// object for finalization if needed. // object for finalization if needed.
@@ -239,13 +286,27 @@ void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
} }
} }
// rewrite methods // rewrite methods, in two passes
{ int i = methods->length(); int i, len = _methods->length();
while (i-- > 0) {
methodHandle m(THREAD, (methodOop)methods->obj_at(i)); for (i = len; --i >= 0; ) {
m = rewrite_method(m, *index_map, CHECK); methodOop method = (methodOop)_methods->obj_at(i);
scan_method(method);
}
// allocate constant pool cache, now that we've seen all the bytecodes
make_constant_pool_cache(CHECK);
for (i = len; --i >= 0; ) {
methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
if (m->has_jsrs()) {
m = rewrite_jsrs(m, CHECK);
// Method might have gotten rewritten. // Method might have gotten rewritten.
methods->obj_at_put(i, m()); _methods->obj_at_put(i, m());
} }
// Set up method entry points for compiler and interpreter.
m->link_method(m, CHECK);
} }
} }
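The restructured Rewriter above runs in two passes: a scan pass sizes the constant pool cache, the cache is allocated once, and a second pass revisits only jsr-bearing methods before linking. A hedged sketch of that driver shape (types are simplified stand-ins for the HotSpot handles):

#include <vector>

struct Method { bool has_jsrs = false; };

void rewrite_class(std::vector<Method>& methods) {
    for (Method& m : methods) {
        (void)m;  // scan_method(m): rewrite operands, record needed cache entries
    }
    // make_constant_pool_cache(): one allocation, sized by the scan above
    for (Method& m : methods) {
        if (m.has_jsrs) { /* rewrite_jsrs(m) may replace the method */ }
        // link_method(m): install interpreter/compiler entry points
    }
}

Deferring the cache allocation until after the scan is what lets invokedynamic add per-call-site entries lazily without reallocating the cache.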

View file

@@ -25,13 +25,44 @@
// The Rewriter adds caches to the constant pool and rewrites bytecode indices // The Rewriter adds caches to the constant pool and rewrites bytecode indices
// pointing into the constant pool for better interpreter performance. // pointing into the constant pool for better interpreter performance.
class Rewriter: public AllStatic { class Rewriter: public StackObj {
private: private:
static void compute_index_maps(constantPoolHandle pool, intArray*& index_map, intStack*& inverse_index_map); instanceKlassHandle _klass;
static constantPoolCacheHandle new_constant_pool_cache(intArray& inverse_index_map, TRAPS); constantPoolHandle _pool;
static methodHandle rewrite_method(methodHandle method, intArray& index_map, TRAPS); objArrayHandle _methods;
static void rewrite_Object_init(methodHandle method, TRAPS); intArray _cp_map;
intStack _cp_cache_map;
void init_cp_map(int length) {
_cp_map.initialize(length, -1);
// Choose an initial value large enough that we don't get frequent
// calls to grow().
_cp_cache_map.initialize(length / 2);
}
int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); }
int add_cp_cache_entry(int cp_index) {
assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
int cache_index = _cp_cache_map.append(cp_index);
_cp_map.at_put(cp_index, cache_index);
assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
return cache_index;
}
int add_extra_cp_cache_entry(int main_entry);
// All the work goes in here:
Rewriter(instanceKlassHandle klass, TRAPS);
void compute_index_maps();
void make_constant_pool_cache(TRAPS);
void scan_method(methodOop m);
methodHandle rewrite_jsrs(methodHandle m, TRAPS);
void rewrite_Object_init(methodHandle m, TRAPS);
int rewrite_member_reference(address bcp, int offset);
void rewrite_invokedynamic(address bcp, int offset, int cp_index);
public: public:
// Driver routine:
static void rewrite(instanceKlassHandle klass, TRAPS); static void rewrite(instanceKlassHandle klass, TRAPS);
}; };


@@ -178,12 +178,14 @@ EntryPoint TemplateInterpreter::_trace_code;
 #endif // !PRODUCT

 EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
 EntryPoint TemplateInterpreter::_earlyret_entry;
+EntryPoint TemplateInterpreter::_return_unbox_entry;
 EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;

 address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
 address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
+address TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs];

 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;

@@ -251,6 +253,22 @@ void TemplateInterpreterGenerator::generate_all() {
     }
   }

+  if (EnableInvokeDynamic) {
+    CodeletMark cm(_masm, "unboxing return entry points");
+    Interpreter::_return_unbox_entry =
+      EntryPoint(
+        generate_return_unbox_entry_for(btos, 5),
+        generate_return_unbox_entry_for(ctos, 5),
+        generate_return_unbox_entry_for(stos, 5),
+        generate_return_unbox_entry_for(atos, 5), // cast conversion
+        generate_return_unbox_entry_for(itos, 5),
+        generate_return_unbox_entry_for(ltos, 5),
+        generate_return_unbox_entry_for(ftos, 5),
+        generate_return_unbox_entry_for(dtos, 5),
+        Interpreter::_return_entry[5].entry(vtos) // no unboxing for void
+      );
+  }
+
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(

@@ -298,8 +316,11 @@ void TemplateInterpreterGenerator::generate_all() {
   for (int j = 0; j < number_of_states; j++) {
     const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos};
-    Interpreter::_return_3_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 3);
-    Interpreter::_return_5_addrs_by_index[Interpreter::TosState_as_index(states[j])] = Interpreter::return_entry(states[j], 5);
+    int index = Interpreter::TosState_as_index(states[j]);
+    Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
+    Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
+    if (EnableInvokeDynamic)
+      Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5);
   }

   { CodeletMark cm(_masm, "continuation entry points");

@@ -526,6 +547,18 @@ address TemplateInterpreter::return_entry(TosState state, int length) {
 }

+address TemplateInterpreter::return_unbox_entry(TosState state, int length) {
+  assert(EnableInvokeDynamic, "");
+  if (state == vtos) {
+    // no unboxing to do, actually
+    return return_entry(state, length);
+  } else {
+    assert(length == 5, "unboxing entries generated for invokedynamic only");
+    return _return_unbox_entry.entry(state);
+  }
+}
+
 address TemplateInterpreter::deopt_entry(TosState state, int length) {
   guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
   return _deopt_entry[length].entry(state);
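// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the dispatch rule implemented by
// return_unbox_entry above -- void returns need no unboxing and reuse the
// plain return entry; everything else uses the dedicated unbox entry, which
// exists only for the 5-byte invokedynamic form. Toy tables, not HotSpot's.
#include <cassert>
#include <cstdio>

enum ToyTosState { itos, atos, vtos, toy_number_of_states };

static const char* normal_entry[toy_number_of_states] = { "ret_i", "ret_a", "ret_v" };
static const char* unbox_entry[toy_number_of_states]  = { "unbox_i", "unbox_a", "ret_v" };

const char* toy_return_unbox_entry(ToyTosState state, int length) {
  if (state == vtos) return normal_entry[state];  // nothing to unbox
  assert(length == 5 && "unbox entries generated for invokedynamic only");
  return unbox_entry[state];
}

int main() {
  std::printf("%s\n", toy_return_unbox_entry(itos, 5));  // unbox_i
  std::printf("%s\n", toy_return_unbox_entry(vtos, 3));  // ret_v
  return 0;
}
// ---------------------------------------------------------------------------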


@@ -83,9 +83,9 @@ class TemplateInterpreter: public AbstractInterpreter {
  public:
   enum MoreConstants {
-    number_of_return_entries = 9,                 // number of return entry points
-    number_of_deopt_entries  = 9,                 // number of deoptimization entry points
-    number_of_return_addrs   = 9                  // number of return addresses
+    number_of_return_entries = number_of_states,  // number of return entry points
+    number_of_deopt_entries  = number_of_states,  // number of deoptimization entry points
+    number_of_return_addrs   = number_of_states   // number of return addresses
   };

  protected:

@@ -110,12 +110,14 @@ class TemplateInterpreter: public AbstractInterpreter {
 #endif // !PRODUCT

   static EntryPoint _return_entry[number_of_return_entries]; // entry points to return to from a call
   static EntryPoint _earlyret_entry;                         // entry point to return early from a call
+  static EntryPoint _return_unbox_entry;                     // entry point to unbox a return value from a call
   static EntryPoint _deopt_entry[number_of_deopt_entries];   // entry points to return to from a deoptimization
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;

   static address _return_3_addrs_by_index[number_of_return_addrs];       // for invokevirtual return entries
   static address _return_5_addrs_by_index[number_of_return_addrs];       // for invokeinterface return entries
+  static address _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods

   static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode)

@@ -157,10 +159,12 @@ class TemplateInterpreter: public AbstractInterpreter {
   // Support for invokes
   static address* return_3_addrs_by_index_table()       { return _return_3_addrs_by_index; }
   static address* return_5_addrs_by_index_table()       { return _return_5_addrs_by_index; }
+  static address* return_5_unbox_addrs_by_index_table() { return _return_5_unbox_addrs_by_index; }
   static int TosState_as_index(TosState state);         // computes index into return_3_entry_by_index table

   static address return_entry (TosState state, int length);
   static address deopt_entry  (TosState state, int length);
+  static address return_unbox_entry(TosState state, int length);

   // Safepoint support
   static void notice_safepoints();                      // stops the thread when reaching a safepoint


@@ -51,7 +51,10 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
   address generate_WrongMethodType_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step);
+  address generate_return_entry_for(TosState state, int step, bool unbox = false);
+  address generate_return_unbox_entry_for(TosState state, int step) {
+    return generate_return_entry_for(state, step, true);
+  }
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);


@@ -442,6 +442,7 @@ void TemplateTable::initialize() {
   def(Bytecodes::_invokespecial       , ubcp|disp|clvm|____, vtos, vtos, invokespecial       ,  1           );
   def(Bytecodes::_invokestatic        , ubcp|disp|clvm|____, vtos, vtos, invokestatic        ,  1           );
   def(Bytecodes::_invokeinterface     , ubcp|disp|clvm|____, vtos, vtos, invokeinterface     ,  1           );
+  def(Bytecodes::_invokedynamic       , ubcp|disp|clvm|____, vtos, vtos, invokedynamic       ,  1           );
   def(Bytecodes::_new                 , ubcp|____|clvm|____, vtos, atos, _new                ,  _           );
   def(Bytecodes::_newarray            , ubcp|____|clvm|____, itos, atos, newarray            ,  _           );
   def(Bytecodes::_anewarray           , ubcp|____|clvm|____, itos, atos, anewarray           ,  _           );

@@ -503,7 +504,6 @@ void TemplateTable::initialize() {
   def(Bytecodes::_fast_invokevfinal   , ubcp|disp|clvm|____, vtos, vtos, fast_invokevfinal   ,  2           );
   def(Bytecodes::_fast_linearswitch   , ubcp|disp|____|____, itos, vtos, fast_linearswitch   ,  _           );
   def(Bytecodes::_fast_binaryswitch   , ubcp|disp|____|____, itos, vtos, fast_binaryswitch   ,  _           );


@@ -261,6 +261,7 @@ class TemplateTable: AllStatic {
   static void invokespecial(int byte_no);
   static void invokestatic(int byte_no);
   static void invokeinterface(int byte_no);
+  static void invokedynamic(int byte_no);
   static void fast_invokevfinal(int byte_no);
   static void getfield_or_static(int byte_no, bool is_static);


@@ -312,6 +312,7 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
   if (cp->flags() != 0) {
     st->print(" - flags : 0x%x", cp->flags());
     if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
+    if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
     st->cr();
   }
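// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the flag word being printed above.
// Each feature is a single bit in cp->flags(), and the printer appends one
// tag per set bit; FB_has_invokedynamic is the newly added bit.
#include <cstdio>

enum FlagBit { FB_has_invokedynamic = 1, FB_has_pseudo_string = 2 };

void print_flags(int flags) {
  if (flags == 0) return;
  std::printf(" - flags : 0x%x", flags);
  if (flags & FB_has_pseudo_string) std::printf(" has_pseudo_string");
  if (flags & FB_has_invokedynamic) std::printf(" has_invokedynamic");
  std::printf("\n");
}

int main() {
  print_flags(FB_has_invokedynamic | FB_has_pseudo_string);  // prints both tags
  return 0;
}
// ---------------------------------------------------------------------------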


@@ -249,32 +249,41 @@ klassOop constantPoolOopDesc::klass_ref_at_if_loaded_check(constantPoolHandle th
 }

-symbolOop constantPoolOopDesc::uncached_name_ref_at(int which) {
-  jint ref_index = name_and_type_at(uncached_name_and_type_ref_index_at(which));
-  int name_index = extract_low_short_from_int(ref_index);
+symbolOop constantPoolOopDesc::impl_name_ref_at(int which, bool uncached) {
+  int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
   return symbol_at(name_index);
 }

-symbolOop constantPoolOopDesc::uncached_signature_ref_at(int which) {
-  jint ref_index = name_and_type_at(uncached_name_and_type_ref_index_at(which));
-  int signature_index = extract_high_short_from_int(ref_index);
+symbolOop constantPoolOopDesc::impl_signature_ref_at(int which, bool uncached) {
+  int signature_index = signature_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
   return symbol_at(signature_index);
 }

-int constantPoolOopDesc::uncached_name_and_type_ref_index_at(int which) {
-  jint ref_index = field_or_method_at(which, true);
+int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
+  jint ref_index = field_or_method_at(which, uncached);
   return extract_high_short_from_int(ref_index);
 }

-int constantPoolOopDesc::uncached_klass_ref_index_at(int which) {
-  jint ref_index = field_or_method_at(which, true);
+int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
+  jint ref_index = field_or_method_at(which, uncached);
   return extract_low_short_from_int(ref_index);
 }

+int constantPoolOopDesc::map_instruction_operand_to_index(int operand) {
+  if (constantPoolCacheOopDesc::is_secondary_index(operand)) {
+    return cache()->main_entry_at(operand)->constant_pool_index();
+  }
+  assert((int)(u2)operand == operand, "clean u2");
+  int index = Bytes::swap_u2(operand);
+  return cache()->entry_at(index)->constant_pool_index();
+}
+
 void constantPoolOopDesc::verify_constant_pool_resolve(constantPoolHandle this_oop, KlassHandle k, TRAPS) {
   if (k->oop_is_instance() || k->oop_is_objArray()) {
     instanceKlassHandle holder (THREAD, this_oop->pool_holder());

@@ -290,26 +299,14 @@ void constantPoolOopDesc::verify_constant_pool_resolve(constantPoolHandle this_o
 }

-int constantPoolOopDesc::klass_ref_index_at(int which) {
-  jint ref_index = field_or_method_at(which, false);
+int constantPoolOopDesc::name_ref_index_at(int which_nt) {
+  jint ref_index = name_and_type_at(which_nt);
   return extract_low_short_from_int(ref_index);
 }

-int constantPoolOopDesc::name_and_type_ref_index_at(int which) {
-  jint ref_index = field_or_method_at(which, false);
-  return extract_high_short_from_int(ref_index);
-}
-
-int constantPoolOopDesc::name_ref_index_at(int which) {
-  jint ref_index = name_and_type_at(which);
-  return extract_low_short_from_int(ref_index);
-}
-
-int constantPoolOopDesc::signature_ref_index_at(int which) {
-  jint ref_index = name_and_type_at(which);
+int constantPoolOopDesc::signature_ref_index_at(int which_nt) {
+  jint ref_index = name_and_type_at(which_nt);
   return extract_high_short_from_int(ref_index);
 }

@@ -353,20 +350,6 @@ char* constantPoolOopDesc::string_at_noresolve(int which) {
 }

-symbolOop constantPoolOopDesc::name_ref_at(int which) {
-  jint ref_index = name_and_type_at(name_and_type_ref_index_at(which));
-  int name_index = extract_low_short_from_int(ref_index);
-  return symbol_at(name_index);
-}
-
-symbolOop constantPoolOopDesc::signature_ref_at(int which) {
-  jint ref_index = name_and_type_at(name_and_type_ref_index_at(which));
-  int signature_index = extract_high_short_from_int(ref_index);
-  return symbol_at(signature_index);
-}
-
 BasicType constantPoolOopDesc::basic_type_for_signature_at(int which) {
   return FieldType::basic_type(symbol_at(which));
 }
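// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the index packing the accessors
// above decode. A NameAndType constant keeps two 16-bit indices in one jint,
// name in the low half and signature in the high half; instruction operands,
// by contrast, arrive byte-swapped and must be swapped back before use.
#include <cstdint>
#include <cstdio>

static int extract_low_short_from_int(int32_t x)  { return x & 0xFFFF; }
static int extract_high_short_from_int(int32_t x) { return (x >> 16) & 0xFFFF; }
static int swap_u2(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

int main() {
  int32_t nt = (42 << 16) | 7;  // signature index 42, name index 7
  std::printf("name=%d signature=%d\n",
              extract_low_short_from_int(nt), extract_high_short_from_int(nt));
  std::printf("operand 0x0100 -> index %d\n", swap_u2(0x0100));  // 1
  return 0;
}
// ---------------------------------------------------------------------------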


@@ -53,6 +53,7 @@ class constantPoolOopDesc : public oopDesc {
   void release_tag_at_put(int which, jbyte t) { tags()->release_byte_at_put(which, t); }

   enum FlagBit {
+    FB_has_invokedynamic = 1,
     FB_has_pseudo_string = 2
   };

@@ -96,7 +97,9 @@ class constantPoolOopDesc : public oopDesc {
   typeArrayOop tags() const           { return _tags; }

   bool has_pseudo_string() const      { return flag_at(FB_has_pseudo_string); }
+  bool has_invokedynamic() const      { return flag_at(FB_has_invokedynamic); }
   void set_pseudo_string()            { set_flag_at(FB_has_pseudo_string); }
+  void set_invokedynamic()            { set_flag_at(FB_has_invokedynamic); }

   // Klass holding pool
   klassOop pool_holder() const        { return _pool_holder; }

@@ -338,24 +341,28 @@ class constantPoolOopDesc : public oopDesc {
     return *int_at_addr(which);
   }

-  // The following methods (klass_ref_at, klass_ref_at_noresolve, name_ref_at,
-  // signature_ref_at, klass_ref_index_at, name_and_type_ref_index_at,
-  // name_ref_index_at, signature_ref_index_at) all expect constant pool indices
+  // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
+  // name_and_type_ref_index_at) all expect constant pool indices
   // from the bytecodes to be passed in, which are actually potentially byte-swapped
-  // contstant pool cache indices. See field_or_method_at.
+  // or rewritten constant pool cache indices. They all call map_instruction_operand_to_index.
+  int map_instruction_operand_to_index(int operand);
+
+  // There are also "uncached" versions which do not map the operand index; see below.

   // Lookup for entries consisting of (klass_index, name_and_type index)
   klassOop klass_ref_at(int which, TRAPS);
   symbolOop klass_ref_at_noresolve(int which);
-  symbolOop name_ref_at(int which);
-  symbolOop signature_ref_at(int which);    // the type descriptor
-  int       klass_ref_index_at(int which);
-  int       name_and_type_ref_index_at(int which);
+  symbolOop name_ref_at(int which)                { return impl_name_ref_at(which, false); }
+  symbolOop signature_ref_at(int which)           { return impl_signature_ref_at(which, false); }
+  int       klass_ref_index_at(int which)         { return impl_klass_ref_index_at(which, false); }
+  int       name_and_type_ref_index_at(int which) { return impl_name_and_type_ref_index_at(which, false); }

   // Lookup for entries consisting of (name_index, signature_index)
-  int       name_ref_index_at(int which);
-  int       signature_ref_index_at(int which);
+  int       name_ref_index_at(int which_nt);      // == low-order jshort of name_and_type_at(which_nt)
+  int       signature_ref_index_at(int which_nt); // == high-order jshort of name_and_type_at(which_nt)
+  symbolOop nt_name_ref_at(int which_nt)          { return symbol_at(name_ref_index_at(which_nt)); }
+  symbolOop nt_signature_ref_at(int which_nt)     { return symbol_at(signature_ref_index_at(which_nt)); }

   BasicType basic_type_for_signature_at(int which);

@@ -397,10 +404,10 @@ class constantPoolOopDesc : public oopDesc {
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than possibly-byte-swapped
   // constant pool cache indices as do the peer methods above.
-  symbolOop uncached_name_ref_at(int which);
-  symbolOop uncached_signature_ref_at(int which);
-  int       uncached_klass_ref_index_at(int which);
-  int       uncached_name_and_type_ref_index_at(int which);
+  symbolOop uncached_name_ref_at(int which)                { return impl_name_ref_at(which, true); }
+  symbolOop uncached_signature_ref_at(int which)           { return impl_signature_ref_at(which, true); }
+  int       uncached_klass_ref_index_at(int which)         { return impl_klass_ref_index_at(which, true); }
+  int       uncached_name_and_type_ref_index_at(int which) { return impl_name_and_type_ref_index_at(which, true); }

   // Sharing
   int pre_resolve_shared_klasses(TRAPS);

@@ -413,16 +420,19 @@ class constantPoolOopDesc : public oopDesc {
  private:

+  symbolOop impl_name_ref_at(int which, bool uncached);
+  symbolOop impl_signature_ref_at(int which, bool uncached);
+  int       impl_klass_ref_index_at(int which, bool uncached);
+  int       impl_name_and_type_ref_index_at(int which, bool uncached);
+
   // Takes either a constant pool cache index in possibly byte-swapped
   // byte order (which comes from the bytecodes after rewriting) or,
   // if "uncached" is true, a vanilla constant pool index
   jint field_or_method_at(int which, bool uncached) {
-    int i = -1;
-    if (uncached || cache() == NULL) {
-      i = which;
-    } else {
+    int i = which;
+    if (!uncached && cache() != NULL) {
       // change byte-ordering and go via cache
-      i = cache()->entry_at(Bytes::swap_u2(which))->constant_pool_index();
+      i = map_instruction_operand_to_index(which);
     }
     assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
     return *int_at_addr(i);


@@ -169,11 +169,47 @@ bool constantPoolCacheKlass::oop_is_conc_safe(oop obj) const {
 void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm,
                                                oop obj) {
   assert(obj->is_constantPoolCache(), "should be constant pool");
+  if (EnableInvokeDynamic) {
+    constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+    // during a scavenge, it is safe to inspect my pool, since it is perm
+    constantPoolOop pool = cache->constant_pool();
+    assert(pool->is_constantPool(), "should be constant pool");
+    if (pool->has_invokedynamic()) {
+      for (int i = 0; i < cache->length(); i++) {
+        ConstantPoolCacheEntry* e = cache->entry_at(i);
+        oop* p = (oop*)&e->_f1;
+        if (e->is_secondary_entry()) {
+          if (PSScavenge::should_scavenge(p))
+            pm->claim_or_forward_breadth(p);
+          assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)),
+                 "no live oops here");
+        }
+      }
+    }
+  }
 }

 void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm,
                                                oop obj) {
   assert(obj->is_constantPoolCache(), "should be constant pool");
+  if (EnableInvokeDynamic) {
+    constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+    // during a scavenge, it is safe to inspect my pool, since it is perm
+    constantPoolOop pool = cache->constant_pool();
+    assert(pool->is_constantPool(), "should be constant pool");
+    if (pool->has_invokedynamic()) {
+      for (int i = 0; i < cache->length(); i++) {
+        ConstantPoolCacheEntry* e = cache->entry_at(i);
+        oop* p = (oop*)&e->_f1;
+        if (e->is_secondary_entry()) {
+          if (PSScavenge::should_scavenge(p))
+            pm->claim_or_forward_depth(p);
+          assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)),
+                 "no live oops here");
+        }
+      }
+    }
+  }
 }

 int


@@ -29,8 +29,18 @@
 // Implementation of ConstantPoolCacheEntry

 void ConstantPoolCacheEntry::set_initial_state(int index) {
-  assert(0 <= index && index < 0x10000, "sanity check");
+  if (constantPoolCacheOopDesc::is_secondary_index(index)) {
+    // Hack: The rewriter is trying to say that this entry itself
+    // will be a secondary entry.
+    int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
+    assert(0 <= main_index && main_index < 0x10000, "sanity check");
+    _indices = (main_index << 16);
+    assert(main_entry_index() == main_index, "");
+    return;
+  }
+  assert(0 < index && index < 0x10000, "sanity check");
   _indices = index;
+  assert(constant_pool_index() == index, "");
 }

@@ -136,6 +146,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
   int byte_no = -1;
   bool needs_vfinal_flag = false;
   switch (invoke_code) {
+    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokeinterface: {
       if (method->can_be_statically_bound()) {

@@ -211,6 +222,23 @@ void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index)
 }

+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
+  methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
+  assert(method->is_method(), "must be initialized properly");
+  int param_size = method->size_of_parameters();
+  assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
+  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
+  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
+    // racing threads might be trying to install their own favorites
+    set_f1(call_site());
+  }
+  set_f2(extra_data);
+  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size);
+  // do not do set_bytecode on a secondary CP cache entry
+  //set_bytecode_1(Bytecodes::_invokedynamic);
+}
+
 class LocalOopClosure: public OopClosure {
  private:
   void (*_f)(oop*);

@@ -392,7 +420,11 @@ void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
   // print separator
   if (index == 0) tty->print_cr(" -------------");
   // print entry
-  tty->print_cr("%3d (%08x) [%02x|%02x|%5d]", index, this, bytecode_2(), bytecode_1(), constant_pool_index());
+  tty->print_cr("%3d (%08x) ", index, this);
+  if (is_secondary_entry())
+    tty->print_cr("[%5d|secondary]", main_entry_index());
+  else
+    tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
   tty->print_cr(" [ %08x]", (address)(oop)_f1);
   tty->print_cr(" [ %08x]", _f2);
   tty->print_cr(" [ %08x]", _flags);
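// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the _indices layout set up by
// set_initial_state above. A main entry keeps its constant pool index in the
// low 16 bits (never zero); a secondary entry keeps the low half zero and
// stores the index of its main entry in the high half.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct ToyEntry {
  uint32_t indices;
  bool is_secondary() const        { return (indices & 0xFFFF) == 0; }
  int  constant_pool_index() const { assert(!is_secondary()); return indices & 0xFFFF; }
  int  main_entry_index() const    { assert(is_secondary());  return indices >> 16; }
};

int main() {
  ToyEntry main_e = { 17 };        // main entry for constant pool index 17
  ToyEntry sec_e  = { 5u << 16 };  // secondary entry pointing at cache entry 5
  std::printf("%d %d\n", main_e.constant_pool_index(), sec_e.main_entry_index());  // 17 5
  return 0;
}
// ---------------------------------------------------------------------------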


@@ -89,6 +89,7 @@
 // _f1 = method for all but virtual calls, unused by virtual calls
 //       (note: for interface calls, which are essentially virtual,
 //        contains klassOop for the corresponding interface.
+//       for invokedynamic, f1 contains the CallSite object for the invocation
 // _f2 = method/vtable index for virtual calls only, unused by all other
 //       calls. The vf flag indicates this is a method pointer not an
 //       index.

@@ -108,6 +109,8 @@
 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
+  friend class constantPoolCacheKlass;
+
  private:
   volatile intx _indices;  // constant pool index & rewrite bytecodes
   volatile oop  _f1;       // entry specific oop field

@@ -175,6 +178,11 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
     int index                   // Method index into interface
   );

+  void set_dynamic_call(
+    Handle call_site,           // Resolved java.dyn.CallSite (f1)
+    int extra_data              // (f2)
+  );
+
   void set_parameter_size(int value) {
     assert(parameter_size() == 0 || parameter_size() == value,
            "size must not change");

@@ -216,7 +224,11 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
   }

   // Accessors
-  int constant_pool_index() const  { return _indices & 0xFFFF; }
+  bool is_secondary_entry() const  { return (_indices & 0xFFFF) == 0; }
+  int constant_pool_index() const  { assert((_indices & 0xFFFF) != 0, "must be main entry");
+                                     return (_indices & 0xFFFF); }
+  int main_entry_index() const     { assert((_indices & 0xFFFF) == 0, "must be secondary entry");
+                                     return ((uintx)_indices >> 16); }
   Bytecodes::Code bytecode_1() const { return Bytecodes::cast((_indices >> 16) & 0xFF); }
   Bytecodes::Code bytecode_2() const { return Bytecodes::cast((_indices >> 24) & 0xFF); }
   volatile oop f1() const          { return _f1; }

@@ -314,10 +326,30 @@ class constantPoolCacheOopDesc: public oopDesc {
   // Initialization
   void initialize(intArray& inverse_index_map);

+  // Secondary indexes.
+  // They must look completely different from normal indexes.
+  // The main reason is that byte swapping is sometimes done on normal indexes.
+  // Also, it is helpful for debugging to tell the two apart.
+  static bool is_secondary_index(int i) { return (i < 0); }
+  static int  decode_secondary_index(int i) { assert(is_secondary_index(i), "");  return ~i; }
+  static int  encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; }
+
   // Accessors
   void set_constant_pool(constantPoolOop pool)  { oop_store_without_check((oop*)&_constant_pool, (oop)pool); }
   constantPoolOop constant_pool() const         { return _constant_pool; }
   ConstantPoolCacheEntry* entry_at(int i) const { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
+  ConstantPoolCacheEntry* main_entry_at(int i) const {
+    ConstantPoolCacheEntry* e;
+    if (is_secondary_index(i)) {
+      // run through an extra level of indirection:
+      i = decode_secondary_index(i);
+      e = entry_at(i);
+      i = e->main_entry_index();
+    }
+    e = entry_at(i);
+    assert(!e->is_secondary_entry(), "only one level of indirection");
+    return e;
+  }

   // GC support
   // If the _length field has not been set, the size of the
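// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the secondary-index encoding used
// above. Encoding an index as its bitwise complement ~i makes it negative, so
// it can never be confused with a normal (byte-swappable) u2 operand, and
// main_entry_at can run the extra level of indirection only when it sees one.
#include <cassert>
#include <cstdio>

bool is_secondary_index(int i)     { return i < 0; }
int  decode_secondary_index(int i) { assert(is_secondary_index(i));  return ~i; }
int  encode_secondary_index(int i) { assert(!is_secondary_index(i)); return ~i; }

int main() {
  int enc = encode_secondary_index(5);
  std::printf("%d -> %d -> %d\n", 5, enc, decode_secondary_index(enc));  // 5 -> -6 -> 5
  return 0;
}
// ---------------------------------------------------------------------------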


@@ -1252,8 +1252,9 @@ void GenerateOopMap::print_current_state(outputStream *os,
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokeinterface:
-      int idx = currentBC->get_index_big();
+      int idx = currentBC->get_index_int();
       constantPoolOop cp  = method()->constants();
       int nameAndTypeIdx  = cp->name_and_type_ref_index_at(idx);
       int signatureIdx    = cp->signature_ref_index_at(nameAndTypeIdx);

@@ -1283,8 +1284,9 @@ void GenerateOopMap::print_current_state(outputStream *os,
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokeinterface:
-      int idx = currentBC->get_index_big();
+      int idx = currentBC->get_index_int();
       constantPoolOop cp  = method()->constants();
       int nameAndTypeIdx  = cp->name_and_type_ref_index_at(idx);
       int signatureIdx    = cp->signature_ref_index_at(nameAndTypeIdx);

@@ -1310,6 +1312,7 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+    case Bytecodes::_invokedynamic:
     case Bytecodes::_invokeinterface:
       _itr_send = itr;
       _report_result_for_send = true;

@@ -1556,6 +1559,7 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:   do_method(false, false, itr->get_index_big(), itr->bci()); break;
     case Bytecodes::_invokestatic:    do_method(true,  false, itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_invokedynamic:   do_method(false, true,  itr->get_index_int(), itr->bci()); break;
     case Bytecodes::_invokeinterface: do_method(false, true,  itr->get_index_big(), itr->bci()); break;
     case Bytecodes::_newarray:
     case Bytecodes::_anewarray:       pp_new_ref(vCTS, itr->bci()); break;

@@ -1899,7 +1903,7 @@ void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci
   // Dig up signature for field in constant pool
   constantPoolOop cp  = _method->constants();
   int nameAndTypeIdx  = cp->name_and_type_ref_index_at(idx);
-  int signatureIdx    = cp->signature_ref_index_at(nameAndTypeIdx);
+  int signatureIdx    = cp->signature_ref_index_at(nameAndTypeIdx); // @@@@@
   symbolOop signature = cp->symbol_at(signatureIdx);

   // Parse method signature
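// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): why the code above switches from
// get_index_big to get_index_int for invokedynamic. Most invokes carry a
// two-byte operand; invokedynamic reserves four operand bytes, so a reader
// that only pulls a u2 would see part of the index. Toy big-endian readers,
// not HotSpot's actual operand layout.
#include <cstdint>
#include <cstdio>

int toy_get_index_big(const uint8_t* bcp) {  // u2 operand, big-endian
  return (bcp[0] << 8) | bcp[1];
}
int toy_get_index_int(const uint8_t* bcp) {  // u4 operand, big-endian
  return (toy_get_index_big(bcp) << 16) | toy_get_index_big(bcp + 2);
}

int main() {
  const uint8_t operand[4] = { 0x00, 0x01, 0x00, 0x2A };
  std::printf("u2=%d u4=%d\n",
              toy_get_index_big(operand),   // 1 (truncated view)
              toy_get_index_int(operand));  // 65578 (full 4-byte index)
  return 0;
}
// ---------------------------------------------------------------------------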


@@ -163,6 +163,8 @@ class instanceKlass: public Klass {
   klassOop     _implementors[implementors_limit];
   // Generic signature, or null if none.
   symbolOop    _generic_signature;
+  // invokedynamic bootstrap method (a java.dyn.MethodHandle)
+  oop          _bootstrap_method;
   // Annotations for this class, or null if none.
   typeArrayOop _class_annotations;
   // Annotation objects (byte arrays) for fields, or null if no annotations.

@@ -464,6 +466,10 @@ class instanceKlass: public Klass {
                                    u2 method_index) { _enclosing_method_class_index  = class_index;
                                                       _enclosing_method_method_index = method_index; }

+  // JSR 292 support
+  oop bootstrap_method() const                       { return _bootstrap_method; }
+  void set_bootstrap_method(oop mh)                  { oop_store(&_bootstrap_method, mh); }
+
   // jmethodID support
   static jmethodID get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
                                   jmethodID new_id, jmethodID* new_jmeths);

@@ -744,6 +750,7 @@ private:
   oop* adr_inner_classes() const     { return (oop*)&this->_inner_classes;}
   oop* adr_implementors() const      { return (oop*)&this->_implementors[0];}
   oop* adr_generic_signature() const { return (oop*)&this->_generic_signature;}
+  oop* adr_bootstrap_method() const  { return (oop*)&this->_bootstrap_method;}
   oop* adr_methods_jmethod_ids() const           { return (oop*)&this->_methods_jmethod_ids;}
   oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;}
   oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}


@@ -84,6 +84,7 @@ void instanceKlassKlass::oop_follow_contents(oop obj) {
   MarkSweep::mark_and_push(ik->adr_host_klass());
   MarkSweep::mark_and_push(ik->adr_signers());
   MarkSweep::mark_and_push(ik->adr_generic_signature());
+  MarkSweep::mark_and_push(ik->adr_bootstrap_method());
   MarkSweep::mark_and_push(ik->adr_class_annotations());
   MarkSweep::mark_and_push(ik->adr_fields_annotations());
   MarkSweep::mark_and_push(ik->adr_methods_annotations());

@@ -124,6 +125,7 @@ void instanceKlassKlass::oop_follow_contents(ParCompactionManager* cm,
   PSParallelCompact::mark_and_push(cm, ik->adr_host_klass());
   PSParallelCompact::mark_and_push(cm, ik->adr_signers());
   PSParallelCompact::mark_and_push(cm, ik->adr_generic_signature());
+  PSParallelCompact::mark_and_push(cm, ik->adr_bootstrap_method());
   PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations());
   PSParallelCompact::mark_and_push(cm, ik->adr_fields_annotations());
   PSParallelCompact::mark_and_push(cm, ik->adr_methods_annotations());

@@ -170,6 +172,7 @@ int instanceKlassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
     blk->do_oop(&ik->adr_implementors()[i]);
   }
   blk->do_oop(ik->adr_generic_signature());
+  blk->do_oop(ik->adr_bootstrap_method());
   blk->do_oop(ik->adr_class_annotations());
   blk->do_oop(ik->adr_fields_annotations());
   blk->do_oop(ik->adr_methods_annotations());

@@ -230,6 +233,8 @@ int instanceKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk,
   }
   adr = ik->adr_generic_signature();
   if (mr.contains(adr)) blk->do_oop(adr);
+  adr = ik->adr_bootstrap_method();
+  if (mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_class_annotations();
   if (mr.contains(adr)) blk->do_oop(adr);
   adr = ik->adr_fields_annotations();

@@ -274,6 +279,7 @@ int instanceKlassKlass::oop_adjust_pointers(oop obj) {
     MarkSweep::adjust_pointer(&ik->adr_implementors()[i]);
   }
   MarkSweep::adjust_pointer(ik->adr_generic_signature());
+  MarkSweep::adjust_pointer(ik->adr_bootstrap_method());
   MarkSweep::adjust_pointer(ik->adr_class_annotations());
   MarkSweep::adjust_pointer(ik->adr_fields_annotations());
   MarkSweep::adjust_pointer(ik->adr_methods_annotations());

@@ -454,6 +460,7 @@ klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_
     ik->set_breakpoints(NULL);
     ik->init_previous_versions();
     ik->set_generic_signature(NULL);
+    ik->set_bootstrap_method(NULL);
     ik->release_set_methods_jmethod_ids(NULL);
     ik->release_set_methods_cached_itable_indices(NULL);
     ik->set_class_annotations(NULL);

@@ -578,6 +585,11 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {
     } // pvw is cleaned up
   } // rm is cleaned up

+  if (ik->bootstrap_method() != NULL) {
+    st->print(BULLET"bootstrap method: ");
+    ik->bootstrap_method()->print_value_on(st);
+    st->cr();
+  }
   if (ik->generic_signature() != NULL) {
     st->print(BULLET"generic signature: ");
     ik->generic_signature()->print_value_on(st);


@@ -442,6 +442,8 @@ int methodDataOopDesc::bytecode_cell_count(Bytecodes::Code code) {
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokeinterface:
     return VirtualCallData::static_cell_count();
+  case Bytecodes::_invokedynamic:
+    return CounterData::static_cell_count();
   case Bytecodes::_ret:
     return RetData::static_cell_count();
   case Bytecodes::_ifeq:

@@ -570,6 +572,11 @@ int methodDataOopDesc::initialize_data(BytecodeStream* stream,
     cell_count = VirtualCallData::static_cell_count();
     tag = DataLayout::virtual_call_data_tag;
     break;
+  case Bytecodes::_invokedynamic:
+    // %%% should make a type profile for any invokedynamic that takes a ref argument
+    cell_count = CounterData::static_cell_count();
+    tag = DataLayout::counter_data_tag;
+    break;
   case Bytecodes::_ret:
     cell_count = RetData::static_cell_count();
     tag = DataLayout::ret_data_tag;


@@ -161,7 +161,7 @@ void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {

 int methodOopDesc::bci_from(address bcp) const {
-  assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
+  assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(), "bcp doesn't belong to this method");
   return bcp - code_base();
 }


@@ -534,7 +534,10 @@ class methodOopDesc : public oopDesc {
   oop method_handle_type() const;
   static jint* method_type_offsets_chain();  // series of pointer-offsets, terminated by -1
   // presize interpreter frames for extra interpreter stack entries, if needed
-  static int extra_stack_entries() { return EnableMethodHandles ? (int)MethodHandlePushLimit : 0; }
+  // method handles want to be able to push a few extra values (e.g., a bound receiver), and
+  // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist,
+  // all without checking for a stack overflow
+  static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); }
   static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize()
   // RedefineClasses() support:
   bool is_old() const { return access_flags().is_old(); }
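// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the frame-presizing arithmetic in
// extra_stack_entries above. Method handles may push up to
// MethodHandlePushLimit extra values and invokedynamic may push three more
// (bootstrap method, call site, argument list). Illustrative numbers only.
#include <cstdio>

int toy_extra_stack_entries(bool enable_mh, bool enable_indy, int mh_push_limit) {
  return (enable_mh ? mh_push_limit : 0) + (enable_indy ? 3 : 0);
}

int main() {
  const int stack_element_size = 8;  // assumed bytes per interpreter stack slot
  int entries = toy_extra_stack_entries(true, true, 3);
  std::printf("entries=%d bytes=%d\n", entries, entries * stack_element_size);  // entries=6 bytes=48
  return 0;
}
// ---------------------------------------------------------------------------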


@@ -363,6 +363,20 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
   */
 #endif

+#ifdef ASSERT
+  for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) {
+    OopMapValue omv1 = oms1.current();
+    bool found = false;
+    for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) {
+      if( omv1.content_reg() == oms2.current().reg() ) {
+        found = true;
+        break;
+      }
+    }
+    assert( found, "derived with no base in oopmap" );
+  }
+#endif
+
   return omap;
 }
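// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): what the ASSERT block above
// verifies. Every derived pointer recorded in an oopmap must have its base
// recorded as an oop in the same map, or the GC could not rebase the derived
// value after moving the base object. Toy register sets, hypothetical names.
#include <cstdio>
#include <vector>

struct DerivedEntry { int derived_reg; int base_reg; };

bool oopmap_is_consistent(const std::vector<int>& oop_regs,
                          const std::vector<DerivedEntry>& derived) {
  for (const DerivedEntry& d : derived) {
    bool found = false;
    for (int r : oop_regs) {
      if (r == d.base_reg) { found = true; break; }
    }
    if (!found) return false;  // "derived with no base in oopmap"
  }
  return true;
}

int main() {
  std::vector<int> oops = { 3, 7 };
  std::vector<DerivedEntry> derived = { { 9, 7 } };
  std::printf("%s\n", oopmap_is_consistent(oops, derived) ? "ok" : "broken");  // ok
  return 0;
}
// ---------------------------------------------------------------------------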


@@ -321,7 +321,7 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
     // stricter than callee_holder->is_initialized()
     ciBytecodeStream iter(caller_method);
     iter.force_bci(caller_bci);
-    int index = iter.get_index_big();
+    int index = iter.get_index_int();
     if( !caller_method->is_klass_loaded(index, true) ) {
       return false;
     }


@@ -1423,17 +1423,33 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
   // pointers derived from NULL!  These are always along paths that
   // can't happen at run-time but the optimizer cannot deduce it so
   // we have to handle it gracefully.
+  assert(!derived->bottom_type()->isa_narrowoop() ||
+          derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
   const TypePtr *tj = derived->bottom_type()->isa_ptr();
   // If its an OOP with a non-zero offset, then it is derived.
-  if( tj->_offset == 0 ) {
+  if( tj == NULL || tj->_offset == 0 ) {
     derived_base_map[derived->_idx] = derived;
     return derived;
   }
   // Derived is NULL+offset?  Base is NULL!
   if( derived->is_Con() ) {
-    Node *base = new (C, 1) ConPNode( TypePtr::NULL_PTR );
-    uint no_lidx = 0;  // an unmatched constant in debug info has no LRG
-    _names.extend(base->_idx, no_lidx);
+    Node *base = _matcher.mach_null();
+    assert(base != NULL, "sanity");
+    if (base->in(0) == NULL) {
+      // Initialize it once and make it shared:
+      // set control to _root and place it into Start block
+      // (where top() node is placed).
+      base->init_req(0, _cfg._root);
+      Block *startb = _cfg._bbs[C->top()->_idx];
+      startb->_nodes.insert(startb->find_node(C->top()), base );
+      _cfg._bbs.map( base->_idx, startb );
+      assert (n2lidx(base) == 0, "should not have LRG yet");
+    }
+    if (n2lidx(base) == 0) {
+      new_lrg(base, maxlrg++);
+    }
+    assert(base->in(0) == _cfg._root &&
+           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
     derived_base_map[derived->_idx] = base;
     return base;
   }

@@ -1460,9 +1476,13 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
   }

   // Now we see we need a base-Phi here to merge the bases
-  base = new (C, derived->req()) PhiNode( derived->in(0), base->bottom_type() );
-  for( i = 1; i < derived->req(); i++ )
+  const Type *t = base->bottom_type();
+  base = new (C, derived->req()) PhiNode( derived->in(0), t );
+  for( i = 1; i < derived->req(); i++ ) {
     base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
+    t = t->meet(base->in(i)->bottom_type());
+  }
+  base->as_Phi()->set_type(t);

   // Search the current block for an existing base-Phi
   Block *b = _cfg._bbs[derived->_idx];

@@ -1560,6 +1580,8 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) {
       // This works because we are still in SSA during this call.
       Node *derived = lrgs(neighbor)._def;
       const TypePtr *tj = derived->bottom_type()->isa_ptr();
+      assert(!derived->bottom_type()->isa_narrowoop() ||
+              derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
       // If its an OOP with a non-zero offset, then it is derived.
       if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
         Node *base = find_base_for_derived( derived_base_map, derived, maxlrg );


@@ -248,6 +248,14 @@ bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* kl
                   holder_klass);
     return true;
   }
+  if (dest_method->is_method_handle_invoke()
+      && holder_klass->name() == ciSymbol::java_dyn_Dynamic()) {
+    // FIXME: NYI
+    uncommon_trap(Deoptimization::Reason_unhandled,
+                  Deoptimization::Action_none,
+                  holder_klass);
+    return true;
+  }

   assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
   return false;

@@ -748,6 +756,7 @@ void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
       case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
       case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
       case Bytecodes::_invokestatic:
+      case Bytecodes::_invokedynamic:
       case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
       default: fatal("unexpected call bytecode");
       }

@@ -756,6 +765,7 @@ void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
       case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
       case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
       case Bytecodes::_invokestatic:
+      case Bytecodes::_invokedynamic:
       case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
       default: fatal("unexpected call bytecode");
       }


@@ -947,6 +947,7 @@ bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokestatic:
+  case Bytecodes::_invokedynamic:
   case Bytecodes::_invokeinterface:
     {
       bool is_static = (depth == 0);

@@ -2979,6 +2980,7 @@ Node* GraphKit::new_instance(Node* klass_node,
 // See comments on new_instance for the meaning of the other arguments.
 Node* GraphKit::new_array(Node* klass_node,   // array klass (maybe variable)
                           Node* length,       // number of array elements
+                          int   nargs,        // number of arguments to push back for uncommon trap
                           bool raw_mem_only,  // affect only raw memory
                           Node* *return_size_val) {
   jint layout_con = Klass::_lh_neutral_value;

@@ -2994,6 +2996,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
     Node* cmp_lh = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(layout_con)) );
     Node* bol_lh = _gvn.transform( new(C, 2) BoolNode(cmp_lh, BoolTest::eq) );
     { BuildCutout unless(this, bol_lh, PROB_MAX);
+      _sp += nargs;
       uncommon_trap(Deoptimization::Reason_class_check,
                     Deoptimization::Action_maybe_recompile);
     }
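// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the refactor behind the new nargs
// parameter. Callers used to bump the stack-depth cursor around every
// new_array call so a potential trap would see the pre-call stack; passing
// nargs down lets the callee adjust it only on the path that actually traps.
// Toy state machine with hypothetical names.
#include <cstdio>

struct ToyKit {
  int sp = 0;
  void uncommon_trap() { std::printf("trap sees sp=%d\n", sp); }
  void new_array(int nargs, bool hit_slow_path) {
    if (hit_slow_path) {
      sp += nargs;   // re-push the arguments for the trap's debug info
      uncommon_trap();
      sp -= nargs;
    }
    // ... the normal allocation path never touches sp ...
  }
};

int main() {
  ToyKit kit;
  kit.sp = 2;              // two argument slots already popped by the caller
  kit.new_array(2, true);  // prints: trap sees sp=4
  return 0;
}
// ---------------------------------------------------------------------------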


@@ -699,7 +699,7 @@ class GraphKit : public Phase {
                      Node* slow_test = NULL,
                      bool raw_mem_only = false,
                      Node* *return_size_val = NULL);
-  Node* new_array(Node* klass_node, Node* count_val,
+  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                   bool raw_mem_only = false, Node* *return_size_val = NULL);

   // Handy for making control flow

View file

@@ -3055,9 +3055,7 @@ bool LibraryCallKit::inline_native_newArray() {
     // Normal case:  The array type has been cached in the java.lang.Class.
     // The following call works fine even if the array type is polymorphic.
     // It could be a dynamic mix of int[], boolean[], Object[], etc.
-    _sp += nargs;  // set original stack for use by uncommon_trap
-    Node* obj = new_array(klass_node, count_val);
-    _sp -= nargs;
+    Node* obj = new_array(klass_node, count_val, nargs);
     result_reg->init_req(_normal_path, control());
     result_val->init_req(_normal_path, obj);
     result_io ->init_req(_normal_path, i_o());
@@ -3179,9 +3177,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
     Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
     Node* moved    = generate_min_max(vmIntrinsics::_min, orig_tail, length);
-    _sp += nargs;  // set original stack for use by uncommon_trap
-    Node* newcopy = new_array(klass_node, length);
-    _sp -= nargs;
+    Node* newcopy = new_array(klass_node, length, nargs);

     // Generate a direct call to the right arraycopy function(s).
     // We know the copy is disjoint but we might not know if the
@@ -3903,10 +3899,8 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
     set_control(array_ctl);
     Node* obj_length = load_array_length(obj);
     Node* obj_size = NULL;
-    _sp += nargs;  // set original stack for use by uncommon_trap
-    Node* alloc_obj = new_array(obj_klass, obj_length,
-                                raw_mem_only, &obj_size);
-    _sp -= nargs;
+    Node* alloc_obj = new_array(obj_klass, obj_length, nargs,
+                                raw_mem_only, &obj_size);
     assert(obj_size != NULL, "");
     Node* raw_obj = alloc_obj->in(1);
     assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
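All three intrinsics above shrink by the same two lines, since the _sp bracketing now lives inside new_array. Purely as a design sketch (a hypothetical guard class, not part of HotSpot), the same "expose the interpreter's stack depth, then restore it" pattern could also be expressed as an RAII scope guard:

    // Hypothetical RAII guard; nothing like this exists in the patch,
    // it only illustrates the pairing the refactoring centralizes.
    class StackAdjust {
      int& sp_;
      int  nargs_;
    public:
      StackAdjust(int& sp, int nargs) : sp_(sp), nargs_(nargs) { sp_ += nargs_; }
      ~StackAdjust() { sp_ -= nargs_; }
    };

    void slow_path(int& sp, int nargs) {
      StackAdjust guard(sp, nargs);  // sp grows here, shrinks at scope exit
      // ... record deopt state / take the uncommon trap here ...
    }

Centralizing the adjustment, as the patch does, has the same effect with less machinery and keeps the fix-up adjacent to the single trap site that needs it.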

View file

@@ -275,6 +275,12 @@ void Matcher::match( ) {
   C->print_method("Before Matching");

+  // Create new ideal node ConP #NULL even if it does exist in old space
+  // to avoid false sharing if the corresponding mach node is not used.
+  // The corresponding mach node is only used in rare cases for derived
+  // pointers.
+  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);
+
   // Swap out to old-space; emptying new-space
   Arena *old = C->node_arena()->move_contents(C->old_arena());
@@ -316,7 +322,16 @@ void Matcher::match( ) {
     }
   }

+  // Generate new mach node for ConP #NULL
+  assert(new_ideal_null != NULL, "sanity");
+  _mach_null = match_tree(new_ideal_null);
+  // Don't set control, it will confuse GCM since there are no uses.
+  // The control will be set when this node is used first time
+  // in find_base_for_derived().
+  assert(_mach_null != NULL, "");
+
   C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);
+
 #ifdef ASSERT
   verify_new_nodes_only(xroot);
 #endif

View file

@@ -109,6 +109,9 @@ class Matcher : public PhaseTransform {
   Node* _mem_node;   // Ideal memory node consumed by mach node
 #endif

+  // Mach node for ConP #NULL
+  MachNode* _mach_null;
+
 public:
   int LabelRootDepth;
   static const int base2reg[];      // Map Types to machine register types
@@ -122,6 +125,8 @@
   static RegMask mreg2regmask[];
   static RegMask STACK_ONLY_mask;

+  MachNode* mach_null() const { return _mach_null; }
+
   bool is_shared( Node *n ) { return _shared.test(n->_idx) != 0; }
   void set_shared( Node *n ) { _shared.set(n->_idx); }
   bool is_visited( Node *n ) { return _visited.test(n->_idx) != 0; }

View file

@@ -476,7 +476,7 @@ class Parse : public GraphKit {
   void do_newarray(BasicType elemtype);
   void do_anewarray();
   void do_multianewarray();
-  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions);
+  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

   // implementation of jsr/ret
   void do_jsr();

Some files were not shown because too many files have changed in this diff.